code
stringlengths
101
5.91M
def dynamic_load(pkg_name, pkg_dict, model_name):
    """Dynamically import and return the object registered under *model_name*.

    Parameters
    ----------
    pkg_name : str
        Root package name, e.g. ``'models'``.
    pkg_dict : dict
        Registry mapping a model name to a ``(submodule, attribute)`` pair.
    model_name : str
        Key to look up in *pkg_dict*.

    Returns
    -------
    The attribute ``pkg_dict[model_name][1]`` loaded from the module
    ``'{pkg_name}.{pkg_dict[model_name][0]}'``.

    Raises
    ------
    KeyError
        If *model_name* is not registered. The original code used a bare
        ``assert``, which disappears under ``python -O``; an explicit check
        keeps the validation in optimized runs.
    """
    if model_name not in pkg_dict:
        raise KeyError(f'unknown model name: {model_name!r}')
    submodule, attribute = pkg_dict[model_name]
    module = import_module(f'{pkg_name}.{submodule}')
    return getattr(module, attribute)
def get_downstream_svhn(args):
    """Build the SVHN downstream datasets for a BadEncoder evaluation run.

    Selects the eval transform (and, for CLIP/imagenet encoders, the
    224px variants of the npz archives) from ``args.encoder_usage_info``,
    then constructs the reference-image dataset, the clean memory/test
    datasets, and the backdoored test dataset.

    Returns ``(target_dataset, memory_data, test_data_clean,
    test_data_backdoor)``.
    """
    train_file = 'train.npz'
    test_file = 'test.npz'
    usage = args.encoder_usage_info
    if usage == 'cifar10':
        print('test_transform_cifar10')
        transform = test_transform_cifar10
    elif usage == 'stl10':
        print('test_transform_stl10')
        transform = test_transform_stl10
    elif usage == 'CLIP':
        print('test_transform_CLIP')
        transform = test_transform_CLIP
        # CLIP encoders were trained on 224x224 inputs.
        train_file, test_file = 'train_224.npz', 'test_224.npz'
    elif usage == 'imagenet':
        print('test_transform_imagenet')
        transform = test_transform_imagenet
        train_file, test_file = 'train_224.npz', 'test_224.npz'
    else:
        raise NotImplementedError

    target_dataset = ReferenceImg(
        reference_file=args.reference_file, transform=transform)
    memory_data = CIFAR10Mem(
        numpy_file=args.data_dir + train_file,
        class_type=classes, transform=transform)
    test_data_backdoor = BadEncoderTestBackdoor(
        numpy_file=args.data_dir + test_file,
        trigger_file=args.trigger_file,
        reference_label=args.reference_label, transform=transform)
    test_data_clean = CIFAR10Mem(
        numpy_file=args.data_dir + test_file,
        class_type=classes, transform=transform)
    return target_dataset, memory_data, test_data_clean, test_data_backdoor
class Conv2d(nn.Conv2d):
    """Conv2d with a fixed random feedback path for Direct Feedback
    Alignment (DFA) style training.

    ``weight_backward`` / ``bias_backward`` are non-trainable parameters
    used by the registered backward hook to project the stored
    ``loss_gradient`` in place of the true backpropagated input gradient.
    """

    def __init__(self, in_channels: int, out_channels: int, output_dim: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        self.layer_config = layer_config
        # NOTE(review): a None layer_config raises TypeError on the membership
        # test below — callers presumably always pass a dict; confirm.
        if ('options' not in self.layer_config):
            self.layer_config['options'] = {'constrain_weights': False, 'init': 'xavier', 'gradient_clip': False}
        self.options = self.layer_config['options']
        self.init = self.options['init']
        # Set externally (by the training loop) before backward; consumed by
        # dfa_backward_hook.
        self.loss_gradient = None
        # Fixed (requires_grad=False) feedback weights; output_dim is the
        # network's final output dimensionality.
        self.weight_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_channels, self.kernel_size[0], self.kernel_size[1])), requires_grad=False)
        self.bias_backward = None
        if (self.bias is not None):
            self.bias_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_channels)), requires_grad=False)
        self.init_parameters()
        if (('constrain_weights' in self.options) and self.options['constrain_weights']):
            # Remember the initial weight norm so forward() can rescale to it.
            self.norm_initial_weights = torch.linalg.norm(self.weight)
        # NOTE(review): dfa_backward_hook is declared without self/@staticmethod
        # but registered as a bound method — verify the hook call signature
        # against the PyTorch version in use (register_backward_hook is
        # deprecated in favor of register_full_backward_hook).
        self.register_backward_hook(self.dfa_backward_hook)
        self.weight_ratio = 0

    def init_parameters(self) -> None:
        """Initialize forward and feedback weights with the configured scheme
        ('xavier' or Kaiming fallback); biases are zeroed / uniform-bounded
        to match."""
        (fan_in, fan_out) = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if (self.init == 'xavier'):
            nn.init.xavier_uniform_(self.weight)
            nn.init.xavier_uniform_(self.weight_backward)
            self.scaling_factor = math.sqrt((2.0 / float((fan_in + fan_out))))
            if (self.bias is not None):
                nn.init.constant_(self.bias, 0)
                nn.init.constant_(self.bias_backward, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.weight_backward, a=math.sqrt(5))
            self.scaling_factor = (1 / math.sqrt((3 * fan_in)))
            if (self.bias is not None):
                bound = ((1 / math.sqrt(fan_in)) if (fan_in > 0) else 0)
                nn.init.uniform_(self.bias, (- bound), bound)
                nn.init.uniform_(self.bias_backward, (- bound), bound)

    def compute_weight_ratio(self):
        """Return ||weight_backward|| / ||weight|| (also cached on
        ``self.weight_diff``)."""
        with torch.no_grad():
            self.weight_diff = (torch.linalg.norm(self.weight_backward) / torch.linalg.norm(self.weight))
        return self.weight_diff

    def forward(self, x):
        # Optionally rescale the weights back to their initial norm before the
        # convolution; done under no_grad since the Parameter is re-wrapped.
        with torch.no_grad():
            if (('constrain_weights' in self.options) and self.options['constrain_weights']):
                self.weight = torch.nn.Parameter(((self.weight * self.norm_initial_weights) / torch.linalg.norm(self.weight)))
        # Conv2dGrad is a custom autograd Function defined elsewhere.
        return Conv2dGrad.apply(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)

    def dfa_backward_hook(module, grad_input, grad_output):
        """Backward hook: replace the input gradient with the DFA projection
        of ``module.loss_gradient`` through the fixed feedback weights.

        The remaining entries of ``grad_input`` (bias / weight gradients,
        depending on hook layout) are passed through unchanged.
        """
        if (grad_input[0] is None):
            # First layer: nothing upstream needs the replaced gradient.
            return grad_input
        else:
            # Broadcast the (batch, output_dim) loss gradient over the
            # spatial dimensions of this layer's output.
            out_grad = module.loss_gradient.unsqueeze(2).repeat(1, 1, grad_output[0].size()[2])
            out_grad = out_grad.unsqueeze(3).repeat(1, 1, 1, grad_output[0].size()[3])
            grad_dfa = torch.nn.grad.conv2d_input(input_size=grad_input[0].shape, weight=module.weight_backward, grad_output=out_grad, stride=module.stride, padding=module.padding, dilation=module.dilation, groups=module.groups)
            if (len(grad_input) == 2):
                return (grad_dfa, grad_input[1])
            else:
                return (grad_dfa, grad_input[1], grad_input[2])
def report_func(epoch, batch, num_batches, progress_step, start_time, lr, report_stats):
    """Emit periodic training statistics.

    Every ``opt.report_every`` batches this prints/logs the accumulated
    statistics (console, optional experiment host, optional tensorboard)
    and returns a fresh ``onmt.Statistics`` accumulator; otherwise the
    incoming ``report_stats`` is returned unchanged.
    """
    # Equivalent to `batch % opt.report_every == (-1) % opt.report_every`:
    # fires on the last batch of each report window.
    if (batch + 1) % opt.report_every == 0:
        report_stats.output(epoch, batch + 1, num_batches, start_time)
        if opt.exp_host:
            report_stats.log('progress', experiment, lr)
        if opt.tensorboard:
            report_stats.log_tensorboard('progress', writer, lr, progress_step)
        # Reset the accumulator for the next window.
        report_stats = onmt.Statistics()
    return report_stats
class ResNeXt101_32x4d(nn.Module):
    """ResNeXt-101 (32x4d): shared feature extractor followed by a 7x7
    average pool and a linear classification head."""

    def __init__(self, nb_classes=1000):
        super(ResNeXt101_32x4d, self).__init__()
        # Backbone defined at module level elsewhere in the file.
        self.features = resnext101_32x4d_features
        self.avgpool = nn.AvgPool2d((7, 7), (1, 1))
        self.fc = nn.Linear(2048, nb_classes)

    def forward(self, input):
        feats = self.features(input)
        pooled = self.avgpool(feats)
        flat = pooled.view(pooled.size(0), -1)
        return self.fc(flat)
# NOTE(review): the source line began with the bare fragment
# ".parametrize('spec', spec_list)" — almost certainly a truncated
# "@pytest.mark.parametrize" decorator lost during formatting; restored here,
# confirm against the original repository.
@pytest.mark.parametrize('spec', spec_list)
def test_env_semantics(spec):
    """Compare a fresh rollout hash of each env spec against recorded hashes.

    Currently disabled: the function returns immediately after the warning
    below, so everything past the `return` is intentionally dead code kept
    for when the stored hashes are regenerated.
    """
    logger.warn('Skipping this test. Existing hashes were generated in a bad way')
    return
    with open(ROLLOUT_FILE) as data_file:
        rollout_dict = json.load(data_file)
    if (spec.id not in rollout_dict):
        # Deterministic envs are expected to have a recorded rollout.
        if (not spec.nondeterministic):
            logger.warn('Rollout does not exist for {}, run generate_json.py to generate rollouts for new envs'.format(spec.id))
        return
    logger.info('Testing rollout for {} environment...'.format(spec.id))
    (observations_now, actions_now, rewards_now, dones_now) = generate_rollout_hash(spec)
    errors = []
    if (rollout_dict[spec.id]['observations'] != observations_now):
        errors.append('Observations not equal for {} -- expected {} but got {}'.format(spec.id, rollout_dict[spec.id]['observations'], observations_now))
    if (rollout_dict[spec.id]['actions'] != actions_now):
        errors.append('Actions not equal for {} -- expected {} but got {}'.format(spec.id, rollout_dict[spec.id]['actions'], actions_now))
    if (rollout_dict[spec.id]['rewards'] != rewards_now):
        errors.append('Rewards not equal for {} -- expected {} but got {}'.format(spec.id, rollout_dict[spec.id]['rewards'], rewards_now))
    if (rollout_dict[spec.id]['dones'] != dones_now):
        errors.append('Dones not equal for {} -- expected {} but got {}'.format(spec.id, rollout_dict[spec.id]['dones'], dones_now))
    # Log every mismatch before failing so a single run reports all of them.
    if len(errors):
        for error in errors:
            logger.warn(error)
        raise ValueError(errors)
class LayoutLMTokenizerFast(BertTokenizerFast):
    """Fast LayoutLM tokenizer.

    Behaviorally identical to ``BertTokenizerFast``; only the pretrained
    resource tables below are overridden.
    """

    # Names of the vocabulary files this tokenizer loads.
    vocab_files_names = VOCAB_FILES_NAMES
    # URL map for pretrained vocabularies.
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    # Maximum input length per pretrained checkpoint.
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Per-checkpoint init kwargs (e.g. do_lower_case).
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    # NOTE(review): only 'attention_mask' is listed — 'input_ids' is usually
    # included as well; confirm against the upstream transformers definition.
    model_input_names = ['attention_mask']
class GroupRandomCrop(object):
    """Apply one identical random crop to every image in a group.

    The crop offset is drawn once per call, so all frames of a clip are
    cropped at the same location. Every image must share the size of the
    first one (enforced by assertion).
    """

    def __init__(self, size):
        # Accept either a single number (square crop) or an explicit
        # (height, width) pair.
        if isinstance(size, numbers.Number):
            side = int(size)
            self.size = (side, side)
        else:
            self.size = size

    def __call__(self, img_group):
        w, h = img_group[0].size
        th, tw = self.size
        # One offset shared by the whole group.
        left = random.randint(0, w - tw)
        top = random.randint(0, h - th)
        cropped = []
        for img in img_group:
            assert img.size[0] == w and img.size[1] == h
            if w == tw and h == th:
                # Crop covers the whole image — return it untouched.
                cropped.append(img)
            else:
                cropped.append(img.crop((left, top, left + tw, top + th)))
        return cropped
class FuseMatMulRequantizeTransformer(GraphRewriterBase):
    """Fuse a QuantizedMatMulWithBiasAndRelu node with its trailing
    Requantize node into a single ...AndRequantize op in a TensorFlow
    GraphDef, converting the bias to int32 where possible."""

    def __init__(self, model, device='cpu'):
        super().__init__(model)
        self.device = device
        self.graph_analyzer = GraphAnalyzer()
        self.graph_analyzer.graph = self.model
        # Tolerance used to avoid zero-width quantization ranges below.
        self.eps = 1e-05
        self.graph_info = self.graph_analyzer.parse_graph()

    def do_transformation(self):
        """Repeatedly fuse the first remaining MatMul+Requantize pair until
        none are left, then return the rewritten GraphDef."""
        uint8_type = dtypes.quint8.as_datatype_enum
        float32_type = dtypes.float32.as_datatype_enum
        qint32_type = dtypes.qint32.as_datatype_enum
        while True:
            target_nodes = self.graph_analyzer.query_fusion_pattern_nodes([['QuantizedMatMulWithBiasAndRelu'], ['Requantize']])
            if (len(target_nodes) == 0):
                break
            # Always take the first match; fusing it removes it from the
            # graph so the next query returns the next pair.
            i = target_nodes[0]
            quantized_node_name = i[0]
            quantized_node = self.graph_info[quantized_node_name].node
            requantize_node_name = i[1]
            requantize_node = self.graph_info[requantize_node_name].node
            requested_output_min_name = requantize_node.input[3]
            requested_output_max_name = requantize_node.input[4]
            quantized_node_op = i[(- 1)][0]
            # Build the fused replacement node, inheriting the matmul's
            # inputs plus the requantize range inputs.
            new_node = node_def_pb2.NodeDef()
            new_node.op = (quantized_node_op + 'AndRequantize')
            new_node.name = requantize_node_name
            for (_, value) in enumerate(quantized_node.input):
                new_node.input.append(value)
            new_node.input.append(requested_output_min_name)
            new_node.input.append(requested_output_max_name)
            if ('T1' in quantized_node.attr):
                new_node.attr['T1'].CopyFrom(quantized_node.attr['T1'])
            if ('T2' in quantized_node.attr):
                new_node.attr['T2'].CopyFrom(quantized_node.attr['T2'])
            parent_node_name = Helper.node_name_from_input(quantized_node.input[0])
            # Input layout of QuantizedMatMulWithBias*: 0=input, 1=weight,
            # 2=bias, ..., 5=min_filter, 6=max_filter.
            max_filter_node = self.graph_info[new_node.input[6]].node
            min_filter_node = self.graph_info[new_node.input[5]].node
            last_node = self.graph_info[new_node.input[0]].node
            is_min_first = bool((quantized_node.attr['input_quant_mode'].s == b'MIN_FIRST'))
            weight_node = self.graph_info[new_node.input[1]].node
            bias_node = self.graph_info[new_node.input[2]].node
            max_input_node = self.graph_info[last_node.input[(- 1)]].node
            min_input_node = self.graph_info[last_node.input[(- 2)]].node
            # Enter nodes (while-loop frames) wrap the real Const; unwrap
            # them. NOTE(review): the `continue` branches re-enter the
            # `while True` with an unchanged graph, which looks like a
            # potential infinite loop — confirm against upstream.
            if (max_input_node.op == 'Enter'):
                min_input_parent_name = Helper.node_name_from_input(min_input_node.input[0])
                max_input_parent_name = Helper.node_name_from_input(max_input_node.input[0])
                min_input_parent_node = self.graph_info[min_input_parent_name].node
                max_input_parent_node = self.graph_info[max_input_parent_name].node
                if ((min_input_parent_node.op != 'Const') or (max_input_parent_node.op != 'Const')):
                    continue
                min_input_node = min_input_parent_node
                max_input_node = max_input_parent_node
            if (max_filter_node.op == 'Enter'):
                min_filter_parent_name = Helper.node_name_from_input(min_filter_node.input[0])
                max_filter_parent_name = Helper.node_name_from_input(max_filter_node.input[0])
                min_filter_parent_node = self.graph_info[min_filter_parent_name].node
                max_filter_parent_node = self.graph_info[max_filter_parent_name].node
                if ((min_filter_parent_node.op != 'Const') or (max_filter_parent_node.op != 'Const')):
                    continue
                min_filter_node = min_filter_parent_node
                max_filter_node = max_filter_parent_node
            if (weight_node.op == 'Enter'):
                weight_parent_name = Helper.node_name_from_input(weight_node.input[0])
                weight_parent_node = self.graph_info[weight_parent_name].node
                if (weight_parent_node.op != 'Const'):
                    continue
                weight_node = weight_parent_node
            bias_enter_node = None
            if (bias_node.op == 'Enter'):
                bias_enter_node = bias_node
                bias_parent_name = Helper.node_name_from_input(bias_node.input[0])
                bias_parent_node = self.graph_info[bias_parent_name].node
                if (bias_parent_node.op != 'Const'):
                    continue
                bias_node = bias_parent_node
            if ((last_node.op.find('Requantize') != (- 1)) or (last_node.op.find('QuantizeV2') != (- 1))):
                # Upstream node is already quantized: we can fold the bias
                # into int32 using the known input/filter ranges.
                min_input_value = min_input_node.attr['value'].tensor.float_val[0]
                max_input_value = max_input_node.attr['value'].tensor.float_val[0]
                max_filter_value = max_filter_node.attr['value'].tensor.float_val[0]
                min_filter_value = min_filter_node.attr['value'].tensor.float_val[0]
                weights_tensor = tensor_util.MakeNdarray(weight_node.attr['value'].tensor)
                bias_tensor = tensor_util.MakeNdarray(bias_node.attr['value'].tensor)
                input_range = ((max_input_value - min_input_value) if is_min_first else max(abs(max_input_value), abs(min_input_value)))
                # Nudge degenerate (near-zero) ranges to avoid divide-by-zero
                # in the int32 bias computation.
                if ((- self.eps) <= input_range <= self.eps):
                    input_range += self.eps
                if ((- self.eps) <= (max_input_value - min_input_value) <= self.eps):
                    max_input_value += self.eps
                int32_bias = Helper.generate_int32_bias_for_matmul(bias_tensor, weights_tensor, input_range, max_input_value, min_input_value, max_filter_value, min_filter_value)
                # On GPU the bias stays float32; on CPU it becomes qint32.
                bias_node.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=(float32_type if (self.device == 'gpu') else qint32_type)))
                bias_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto((bias_tensor if (self.device == 'gpu') else int32_bias), (dtypes.float32 if (self.device == 'gpu') else dtypes.int32), bias_tensor.shape)))
                bias_node.attr['value'].tensor.dtype = (float32_type if (self.device == 'gpu') else qint32_type)
                new_node.attr['Tbias'].CopyFrom(attr_value_pb2.AttrValue(type=(float32_type if (self.device == 'gpu') else qint32_type)))
                new_node.attr['Toutput'].CopyFrom(attr_value_pb2.AttrValue(type=uint8_type))
                if (quantized_node_op.find('Relu') == (- 1)):
                    # Without a fused Relu the consumer Dequantize must be
                    # retyped to match the quint8 output.
                    deq_node_name = self.graph_info[requantize_node_name].outputs[0]
                    deq_node = self.graph_info[deq_node_name].node
                    deq_node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=uint8_type))
                if bias_enter_node:
                    bias_enter_node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=(float32_type if (self.device == 'gpu') else qint32_type)))
            else:
                # Bias stays float when the producer is not quantized.
                new_node.attr['Tbias'].CopyFrom(attr_value_pb2.AttrValue(type=float32_type))
            self.graph_analyzer.replace_single_node(new_node, [parent_node_name], quantized_node_name, [self.graph_info[requantize_node_name].outputs[0]], requantize_node_name)
            self.graph_analyzer.remove_node(quantized_node_name)
        return self.graph_analyzer.dump_graph()
def get_cpu_qusub_script(name, hostname, queue, out_dir, script_to_execute, memory=20):
    """Render a Sun Grid Engine (qsub) batch script for a CPU job.

    Parameters
    ----------
    name : str
        Job name (``-N``).
    hostname : str
        Host constraint (``-l hostname=``).
    queue : str
        Queue prefix; the script requests ``{queue}.*``.
    out_dir : str
        Directory for the merged stdout/stderr log (``-o``).
    script_to_execute : str
        Command line placed at the end of the script.
    memory : int, optional
        Virtual-memory limit in GB (``-l h_vmem``), default 20.

    Returns
    -------
    str
        The complete batch-script text.
    """
    template = '#!/bin/bash\n#\n#$ -N {}\n#\n## otherwise the default shell would be used\n#$ -S /bin/bash\n\n## demand gpu resource\n#$ -l hostname={}\n\n## <= 1h is short queue, <= 6h is middle queue, <= 48 h is long queue\n#$ -q {}.*\n\n## the maximum memory usage of this job, (below 4G does not make much sense)\n#$ -l h_vmem={}G\n\n## stderr and stdout are merged together to stdout\n#$ -j y\n\n# logging directory. preferrably on your scratch\n#$ -o {}\n\n# if you need to export custom libs, you can do that here\nsource /usr/Setpath.sh\n\n# call your calculation executable, redirect output\n{}\n'
    return template.format(name, hostname, queue, memory, out_dir, script_to_execute)
def init_appid_apikey(appid_user, apikey_user):
    """Store API credentials in the module-level ``appid``/``apikey`` globals
    so other functions in this module can use them without passing them
    around."""
    global appid, apikey
    appid, apikey = appid_user, apikey_user
def test_5_digits_suffix_version_new():
    """tag_arxiv must wrap a new-style 5-digit arXiv id in <cds.ARXIV> tags,
    dropping the trailing version suffix (v9)."""
    line = u'{any prefix}1310.12345v9 [physics.ins-det]{any postfix}'
    tagged = tag_arxiv(line)
    expected = u'{any prefix}<cds.ARXIV>arXiv:1310.12345 [physics.ins-det]</cds.ARXIV>{any postfix}'
    assert (tagged.strip(': ') == expected)
def load_text_from_file(path: Union[(Path, str)], text_name: str='', open_text: bool=False) -> None:
    """Load a file into Blender's text blocks under *text_name*.

    Creates a new text block when none with that name exists; otherwise
    overwrites the existing block's contents. With *open_text*, the block
    is also shown in every open TEXT_EDITOR area.
    """
    path = zpy.files.verify_path(path)
    existing = bpy.data.texts.get(text_name, None)
    if existing is None:
        new_text = bpy.data.texts.load(str(path), internal=True)
        new_text.name = text_name
    else:
        # Replace the contents of the already-registered text block.
        bpy.data.texts[text_name].from_string(path.read_text())
    if open_text:
        for area in bpy.context.screen.areas:
            if area.type == 'TEXT_EDITOR':
                area.spaces[0].text = bpy.data.texts[text_name]
def test_module_converter_convert_dummy_net_copy_weights(dummy_net_constructor, mode_types):
    """For every conversion mode, convert the dummy net with
    copy_weights=True and check that (a) exactly one layer of each expected
    type was replaced and (b) the conv/fc weights survived the conversion
    numerically intact."""
    for mode in mode_types:
        net = dummy_net_constructor()
        expected_counts = {str(type(net.conv1)): 1, str(type(net.fc)): 1}
        conv_w = net.conv1.weight.data
        fc_w = net.fc.weight.data
        converter = ModuleConverter(mode=mode)
        # DFA needs the network's output dimensionality for feedback weights.
        out_dim = 10 if mode == 'dfa' else None
        converted = converter.convert(net, copy_weights=True, output_dim=out_dim)
        for layer_name, n_replaced in converter.replaced_layers_counts.items():
            assert (expected_counts[layer_name] == n_replaced)
        np.testing.assert_array_almost_equal(conv_w, converted.conv1.weight.data)
        np.testing.assert_array_almost_equal(fc_w, converted.fc.weight.data)
class ValidationDatapoint(utils.JsonSerializable):
    """Aggregated validation counters for one generation run, paired with
    the originating GenerationDatapoint.

    NOTE(review): the annotated fields and the positional constructor calls
    below strongly suggest this was a ``@dataclass`` whose decorator (and the
    ``@classmethod`` decorators on ``from_json``/``zero``) were lost during
    formatting — restore them before use.
    """

    # Number of generated samples that parsed / compiled / passed tests.
    n_parse_success: int
    n_comp_success: int
    n_test_success: int
    # Wall-clock time spent validating, in seconds (presumably; confirm units).
    total_time_consumed: float
    gen_datapoint: GenerationDatapoint

    def compilable_by_parsable(self) -> float:
        # Unguarded division: raises ZeroDivisionError when nothing parsed.
        return (self.n_comp_success / self.n_parse_success)

    def plausible_by_parsable(self) -> float:
        return (self.n_test_success / self.n_parse_success)

    def unique_compilation_rate(self) -> float:
        # Denominator: unique samples that were neither pruned nor unfinished.
        return utils.safe_div(self.n_comp_success, ((self.gen_datapoint.n_unique - self.gen_datapoint.n_pruned) - self.gen_datapoint.n_unfinished))

    def unique_plausible_rate(self) -> float:
        return utils.safe_div(self.n_test_success, ((self.gen_datapoint.n_unique - self.gen_datapoint.n_pruned) - self.gen_datapoint.n_unfinished))

    def compilation_rate(self) -> float:
        return (self.n_comp_success / self.gen_datapoint.n_total)

    def plausible_rate(self) -> float:
        return (self.n_test_success / self.gen_datapoint.n_total)

    def to_json(self) -> Any:
        """Serialize to a plain dict (gen_datapoint serialized recursively)."""
        return {'n_parse_success': self.n_parse_success, 'n_comp_success': self.n_comp_success, 'n_test_success': self.n_test_success, 'total_time_consumed': self.total_time_consumed, 'gen_datapoint': self.gen_datapoint.to_json()}

    def from_json(cls, d: dict) -> 'ValidationDatapoint':
        """Inverse of to_json. NOTE(review): takes ``cls`` — likely a stripped
        @classmethod."""
        return ValidationDatapoint(int(d['n_parse_success']), int(d['n_comp_success']), int(d['n_test_success']), float(d['total_time_consumed']), GenerationDatapoint.from_json(d['gen_datapoint']))

    def __add__(self, other: 'ValidationDatapoint') -> 'ValidationDatapoint':
        """Field-wise sum, used to aggregate datapoints across runs."""
        return ValidationDatapoint((self.n_parse_success + other.n_parse_success), (self.n_comp_success + other.n_comp_success), (self.n_test_success + other.n_test_success), (self.total_time_consumed + other.total_time_consumed), (self.gen_datapoint + other.gen_datapoint))

    def zero(cls) -> 'ValidationDatapoint':
        """Additive identity for __add__. NOTE(review): takes ``cls`` —
        likely a stripped @classmethod."""
        return ValidationDatapoint(0, 0, 0, 0.0, GenerationDatapoint.zero())
def get_args():
    """Parse command-line arguments for the SoccerNetV2 dataset parser.

    Positional: label_dir, frame_dir. Optional: -o/--out_dir.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('label_dir', type=str, help='Path to the SoccerNetV2 labels')
    ap.add_argument('frame_dir', type=str, help='Path to extracted video frames')
    ap.add_argument('-o', '--out_dir', type=str, help='Path to output parsed dataset')
    parsed = ap.parse_args()
    return parsed
# NOTE(review): the source line began with the bare fragment "_model" — almost
# certainly a truncated "@register_model" decorator (timm model registry) lost
# during formatting; restore it before use.
def gluon_senet154(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Construct the Gluon SENet-154 variant of ResNet.

    Bottleneck blocks [3, 8, 36, 3], cardinality 64, base width 4, deep stem,
    SE attention in every block. Optionally loads pretrained weights.
    """
    default_cfg = default_cfgs['gluon_senet154']
    block_args = dict(attn_layer=SEModule)
    model = ResNet(Bottleneck, [3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', down_kernel_size=3, block_reduce_first=2, num_classes=num_classes, in_chans=in_chans, block_args=block_args, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def parse_resume_step_from_filename(filename):
    """Extract the training-step number from a checkpoint filename.

    Checkpoint files are named like ``model123456.pt`` / ``opt123456.pt``.
    Returns the integer step, or 0 when the name does not end in
    ``<digits>.pt``.

    The previous implementation sliced a fixed 6-character window
    (``filename[-9:-3]``), which silently truncated steps with more than
    six digits and raised ``ValueError`` on names without digits before
    ``.pt``; this version accepts any digit count and degrades to 0.
    """
    import re  # local import keeps the snippet self-contained
    match = re.search(r'(\d+)\.pt$', filename)
    return int(match.group(1)) if match else 0
def collate_eval(batch):
    """Collate (index, image, label) samples for the eval dataloader.

    Indices stay a plain Python list; images and labels are stacked into
    batched tensors.
    """
    indices = [sample[0] for sample in batch]
    images = torch.stack([sample[1] for sample in batch])
    labels = torch.stack([sample[2] for sample in batch])
    return (indices, images, labels)
class StitchWidget(DOMWidget):
    """Jupyter widget embedding user-supplied HTML (``srcdoc``) in an iframe,
    with a simple kernel<->client string-message channel.

    All traitlets are tagged ``sync=True`` so they mirror between the Python
    kernel and the JavaScript front end.
    """

    # Front-end model/view registration (names must match the JS package).
    _model_name = Unicode('StitchModel').tag(sync=True)
    _model_module = Unicode(module_name).tag(sync=True)
    _model_module_version = Unicode(module_version).tag(sync=True)
    _view_name = Unicode('StitchView').tag(sync=True)
    _view_module = Unicode(module_name).tag(sync=True)
    _view_module_version = Unicode(module_version).tag(sync=True)
    # Message channels: kernel -> client and client -> kernel.
    kernelmsg = Unicode('').tag(sync=True)
    clientmsg = Unicode('').tag(sync=True)
    # HTML document rendered inside the iframe; set by the user.
    srcdoc = Unicode('<p>srcdoc should be defined by the user</p>').tag(sync=True)
    # Initial iframe geometry/border (CSS strings).
    initial_height = Unicode('1px').tag(sync=True)
    initial_width = Unicode('1px').tag(sync=True)
    initial_border = Unicode('0').tag(sync=True)
def get_host_info():
    """Run every registered host-info gatherer and collect the results.

    Gatherers that raise ``IgnoreHostInfo`` are skipped silently; all other
    exceptions propagate.
    """
    collected = {}
    for key, gatherer in host_info_gatherers.items():
        try:
            collected[key] = gatherer()
        except IgnoreHostInfo:
            # Gatherer opted out for this host.
            pass
    return collected
# NOTE(review): the source line began with the bare fragment
# "_register_to_config" — almost certainly a truncated
# "@flax_register_to_config" decorator lost during formatting; restore it
# before use.
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """Flax ControlNet: the down/mid half of a Stable-Diffusion UNet plus
    zero-initialized 1x1 "controlnet" convolutions whose outputs are added
    as residuals into a frozen UNet during guided generation."""

    # --- config fields (dataclass-style nn.Module attributes) ---
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = ('CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'DownBlock2D')
    only_cross_attention: Union[(bool, Tuple[bool])] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[(int, Tuple[int])] = 8
    # Takes precedence over attention_head_dim when set (see setup()).
    num_attention_heads: Optional[Union[(int, Tuple[int])]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    # 'rgb' or 'bgr'; bgr conditioning images are channel-flipped in __call__.
    controlnet_conditioning_channel_order: str = 'rgb'
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        """Initialize parameters with dummy NCHW inputs; conditioning image
        is 8x the latent sample size (the VAE downsampling factor)."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, (self.sample_size * 8), (self.sample_size * 8))
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        (params_rng, dropout_rng) = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)['params']

    def setup(self):
        """Build conv_in, time embedding, conditioning embedding, the down
        blocks with their zero-init controlnet convs, and the mid block."""
        block_out_channels = self.block_out_channels
        time_embed_dim = (block_out_channels[0] * 4)
        # Fall back to attention_head_dim when num_attention_heads is unset.
        num_attention_heads = (self.num_attention_heads or self.attention_head_dim)
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels)
        # Broadcast scalar per-block settings to one value per down block.
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = ((only_cross_attention,) * len(self.down_block_types))
        if isinstance(num_attention_heads, int):
            num_attention_heads = ((num_attention_heads,) * len(self.down_block_types))
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        # All controlnet convs are zero-initialized so the net starts as an
        # identity residual.
        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)
        for (i, down_block_type) in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = (i == (len(block_out_channels) - 1))
            if (down_block_type == 'CrossAttnDownBlock2D'):
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=(not is_final_block), use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=(not is_final_block), dtype=self.dtype)
            down_blocks.append(down_block)
            # One zero-init conv per resnet layer of the down block...
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)
            # ...plus one for the downsampler of every non-final block.
            if (not is_final_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        mid_block_channel = block_out_channels[(- 1)]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[(- 1)], use_linear_projection=self.use_linear_projection, dtype=self.dtype)
        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float=1.0, return_dict: bool=True, train: bool=False) -> Union[(FlaxControlNetOutput, Tuple)]:
        """Run the ControlNet and return the scaled down-block and mid-block
        residuals (as FlaxControlNetOutput unless return_dict=False).

        ``sample`` and ``controlnet_cond`` are NCHW and transposed to NHWC
        internally (Flax convolution convention).
        """
        channel_order = self.controlnet_conditioning_channel_order
        if (channel_order == 'bgr'):
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
        # Normalize timesteps to a 1-D array.
        if (not isinstance(timesteps, jnp.ndarray)):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif (isinstance(timesteps, jnp.ndarray) and (len(timesteps.shape) == 0)):
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        # Inject the conditioning right after conv_in.
        sample += controlnet_cond
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                (sample, res_samples) = down_block(sample, t_emb, encoder_hidden_states, deterministic=(not train))
            else:
                (sample, res_samples) = down_block(sample, t_emb, deterministic=(not train))
            down_block_res_samples += res_samples
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=(not train))
        # Project every residual through its zero-initialized 1x1 conv.
        controlnet_down_block_res_samples = ()
        for (down_block_res_sample, controlnet_block) in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        down_block_res_samples = [(sample * conditioning_scale) for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if (not return_dict):
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
# NOTE(review): the source line began with the bare fragment "_torch" — most
# likely a truncated "@require_torch" decorator lost during formatting; restore
# it before use.
class TrainerCallbackTest(unittest.TestCase):
    """Tests for transformers Trainer callback registration and the sequence
    of callback events fired during training/evaluation."""

    def setUp(self):
        # Fresh temp dir per test for TrainingArguments output.
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        """Build a Trainer over the tiny regression model/dataset fixtures;
        extra kwargs flow into TrainingArguments."""
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, **kwargs)
        return Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)

    def check_callbacks_equality(self, cbs1, cbs2):
        """Assert two callback lists are equal, treating a class and an
        instance of that class as equivalent; order-insensitive (sorted by
        class name)."""
        self.assertEqual(len(cbs1), len(cbs2))
        cbs1 = list(sorted(cbs1, key=(lambda cb: (cb.__name__ if isinstance(cb, type) else cb.__class__.__name__))))
        cbs2 = list(sorted(cbs2, key=(lambda cb: (cb.__name__ if isinstance(cb, type) else cb.__class__.__name__))))
        for (cb1, cb2) in zip(cbs1, cbs2):
            if (isinstance(cb1, type) and isinstance(cb2, type)):
                self.assertEqual(cb1, cb2)
            elif (isinstance(cb1, type) and (not isinstance(cb2, type))):
                self.assertEqual(cb1, cb2.__class__)
            elif ((not isinstance(cb1, type)) and isinstance(cb2, type)):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        """Reconstruct the exact event sequence the trainer should fire,
        from its logging/eval/save step settings."""
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        # NOTE(review): named train_dl_len but measured on the *eval*
        # dataloader — works here because the fixtures use equal train/eval
        # lengths; confirm intent.
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ((['on_prediction_step'] * len(trainer.get_eval_dataloader())) + ['on_log', 'on_evaluate'])
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('on_epoch_begin')
            for _ in range(train_dl_len):
                step += 1
                expected_events += ['on_step_begin', 'on_step_end']
                if ((step % trainer.args.logging_steps) == 0):
                    expected_events.append('on_log')
                if ((trainer.args.evaluation_strategy == EvaluationStrategy.STEPS) and ((step % trainer.args.eval_steps) == 0)):
                    expected_events += evaluation_events.copy()
                if ((step % trainer.args.save_steps) == 0):
                    expected_events.append('on_save')
            expected_events.append('on_epoch_end')
            if (trainer.args.evaluation_strategy == EvaluationStrategy.EPOCH):
                expected_events += evaluation_events.copy()
        expected_events += ['on_log', 'on_train_end']
        return expected_events

    def test_init_callback(self):
        """Default callbacks, extra callbacks, and the tqdm/printer swap."""
        trainer = self.get_trainer()
        expected_callbacks = (DEFAULT_CALLBACKS.copy() + [ProgressCallback])
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # disable_tqdm swaps ProgressCallback for PrinterCallback.
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = (DEFAULT_CALLBACKS.copy() + [PrinterCallback])
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        """remove/pop/add callbacks both by class and by instance."""
        expected_callbacks = (DEFAULT_CALLBACKS.copy() + [ProgressCallback])
        trainer = self.get_trainer()
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # Same operations, but passing instances instead of classes.
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        """Recorded event sequences must match get_expected_events under
        various logging/save/eval settings; duplicate callbacks warn."""
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it
        # is not the focus here.
        warnings.simplefilter(action='ignore', category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        # MyTestTrainerCallback is second-to-last in the handler list.
        events = trainer.callback_handler.callbacks[(- 2)].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[(- 2)].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[(- 2)].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[(- 2)].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='epoch')
        trainer.train()
        events = trainer.callback_handler.callbacks[(- 2)].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[(- 2)].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # Registering the same callback class twice should warn.
        with unittest.mock.patch('transformers.trainer_callback.logger.warn') as warn_mock:
            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback, MyTestTrainerCallback])
        assert (str(MyTestTrainerCallback) in warn_mock.call_args[0][0])
def bottleNeck(nin, nmid): return nn.Sequential(nn.BatchNorm2d(nin), nn.ReLU(), nn.Conv2d(nin, nmid, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(nmid), nn.ReLU(), nn.Conv2d(nmid, nmid, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(nmid), nn.ReLU(), nn.Conv2d(nmid, (nmid * 4), kernel_size=1, stride=1, padding=0)) self.resBlock = nn.Sequential() def forward(self, input): out = self.resBlock(input) return (out + input)
class Accuracy(nn.Module):
    """nn.Module wrapper around the functional ``accuracy`` metric.

    Storing the top-k configuration on the module lets accuracy be used as a
    drop-in criterion object.
    """

    def __init__(self, topk=(1,)):
        super().__init__()
        # Tuple of k values forwarded to the functional metric on every call.
        self.topk = topk

    def forward(self, pred, target):
        # Delegate straight to the functional helper with the configured ks.
        return accuracy(pred, target, self.topk)
class NNBase(nn.Module):
    """Base policy network with an optional single-layer GRU.

    When ``recurrent`` is True a GRU maps ``recurrent_input_size`` features to
    ``hidden_size`` (orthogonal weight init, zero biases).
    """

    def __init__(self, recurrent: bool, recurrent_input_size: int, hidden_size: int):
        super().__init__()
        self._hidden_size = hidden_size
        self._recurrent = recurrent
        if recurrent:
            self.gru = nn.GRU(recurrent_input_size, hidden_size)
            # Orthogonal init for weight matrices, zeros for biases.
            for (name, param) in self.gru.named_parameters():
                if ('bias' in name):
                    nn.init.constant_(param, 0)
                elif ('weight' in name):
                    nn.init.orthogonal_(param)

    def is_recurrent(self) -> bool:
        # Whether the GRU path is active.
        return self._recurrent

    def recurrent_hidden_state_size(self) -> int:
        # Size of the recurrent hidden state; 1 is a placeholder when non-recurrent.
        if self._recurrent:
            return self._hidden_size
        return 1

    def output_size(self):
        # Feature dimension produced by this base.
        return self._hidden_size

    def _forward_gru(self, x, hxs, masks):
        """Run the GRU, resetting hidden state wherever ``masks`` is zero.

        Two layouts are supported:
        * x.size(0) == hxs.size(0): one timestep for N parallel processes.
        * otherwise: x is a flattened (T*N, feat) rollout; it is reshaped to
          (T, N, feat) and processed in segments split at timesteps where any
          process has mask 0 (episode boundary), so hidden-state resets only
          happen between segments.
        """
        if (x.size(0) == hxs.size(0)):
            # Single step: zero-out hidden states of finished episodes, step once.
            (x, hxs) = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
            x = x.squeeze(0)
            hxs = hxs.squeeze(0)
        else:
            # Recover (T, N) from the flattened batch.
            N = hxs.size(0)
            T = int((x.size(0) / N))
            x = x.view(T, N, x.size(1))
            masks = masks.view(T, N)
            # Timesteps (beyond t=0) at which any process has a zero mask.
            has_zeros = (masks[1:] == 0.0).any(dim=(- 1)).nonzero().squeeze().cpu()
            if (has_zeros.dim() == 0):
                # 0-d tensor: a single boundary; +1 shifts from masks[1:] to masks indexing.
                has_zeros = [(has_zeros.item() + 1)]
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()
            # Bracket with the endpoints so consecutive pairs delimit segments.
            has_zeros = (([0] + has_zeros) + [T])
            hxs = hxs.unsqueeze(0)
            outputs = []
            for i in range((len(has_zeros) - 1)):
                # One GRU call per uninterrupted segment; the mask at the segment
                # start resets hidden states of processes that just finished.
                start_idx = has_zeros[i]
                end_idx = has_zeros[(i + 1)]
                (rnn_scores, hxs) = self.gru(x[start_idx:end_idx], (hxs * masks[start_idx].view(1, (- 1), 1)))
                outputs.append(rnn_scores)
            # Restore the flattened (T*N, feat) layout.
            x = torch.cat(outputs, dim=0)
            x = x.view((T * N), (- 1))
            hxs = hxs.squeeze(0)
        return (x, hxs)
class _scaledL2(Function):
    """Autograd bridge to the CUDA scaled-L2 kernels in ``lib.gpu``.

    GPU-only: the CPU path is unimplemented.
    """

    @staticmethod  # FIX: ctx-style forward/backward must be static methods.
    def forward(ctx, X, C, S):
        if X.is_cuda:
            SL = lib.gpu.scaled_l2_forward(X, C, S)
        else:
            # FIX: was `raise NotImplemented`, which raises TypeError (NotImplemented
            # is not an exception); NotImplementedError is the intended signal.
            raise NotImplementedError('scaled_l2 has no CPU implementation')
        ctx.save_for_backward(X, C, S, SL)
        return SL

    @staticmethod
    def backward(ctx, gradSL):
        # FIX: ctx.saved_variables is deprecated/removed; saved_tensors is the
        # supported accessor for tensors stashed via save_for_backward.
        (X, C, S, SL) = ctx.saved_tensors
        if X.is_cuda:
            (gradX, gradC, gradS) = lib.gpu.scaled_l2_backward(gradSL, X, C, S, SL)
        else:
            raise NotImplementedError('scaled_l2 has no CPU implementation')
        return (gradX, gradC, gradS)
@_dataset_obj('svhn2mnist')  # NOTE(review): leading '@' restored — the bare call in this copy looks like a mangled registration decorator; confirm.
class Svhn2MNIST(CycleGANDataset):
    """CycleGAN-translated SVHN->MNIST images (fake-B outputs); train split only."""

    def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
        if (not train):
            # No held-out split exists; expose an empty dataset instead of failing.
            print('No test set for svhn2mnist.')
            self.image_paths = []
        else:
            super(Svhn2MNIST, self).__init__(root, '*_fake_B.png', transform=transform, target_transform=target_transform, download=download)
def EfficientNetB7(include_top=True, input_tensor=None, input_shape=None, pooling=None, classes=1000, dropout_rate=0.5, drop_connect_rate=0.2, **kwargs):
    """Build EfficientNet-B7 (width coeff 2.0, depth coeff 3.1, 600px default resolution).

    All keyword arguments are forwarded unchanged to the generic ``EfficientNet`` builder.
    """
    return EfficientNet(2.0, 3.1, 600, model_name='efficientnet-b7', include_top=include_top, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, dropout_rate=dropout_rate, drop_connect_rate=drop_connect_rate, **kwargs)
def inspect_all_records(valid_list: list, invalid_list: list, sym_list: list, tree: 'ArithExpTree', sign: str) -> bool:
    """Check a candidate expression tree against valid/invalid records.

    For every record in ``valid_list`` the expression must satisfy
    ``tree.evaluate(args) <sign> 0`` (args are the record values at the
    positions in ``sym_list``); otherwise returns False.  For '==' that alone
    suffices.  For other signs, at least one ``invalid_list`` record must
    VIOLATE the predicate for the result to be True.

    Raises:
        ValueError: if ``sign`` is not a supported comparison operator.
    """
    # FIX: replaced `eval(f'... {sign} 0')` (code injection via `sign`, and slow)
    # with an explicit comparator table.
    comparators = {
        '==': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '<': lambda a, b: a < b,
        '<=': lambda a, b: a <= b,
        '>': lambda a, b: a > b,
        '>=': lambda a, b: a >= b,
    }
    try:
        compare = comparators[sign]
    except KeyError:
        raise ValueError('unsupported comparison sign: {!r}'.format(sign))
    for inputs in valid_list:
        args = [inputs[arg_num] for arg_num in sym_list]
        if not compare(tree.evaluate(args), 0):
            return False
    if sign == '==':
        return True
    for inputs in invalid_list:
        args = [inputs[arg_num] for arg_num in sym_list]
        if not compare(tree.evaluate(args), 0):
            return True
    return False
class GPTSanJapaneseForConditionalGeneration(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    Instantiating it raises a helpful error via ``requires_backends`` instead of
    an ImportError at module import.
    """
    # Backends required for the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_digits_cosine_greedi_ll_sparse():
    """GreeDi (lazy/lazy sub-optimizers) on the sparse precomputed digits matrix
    should reproduce the reference selection.

    Only the first two picks/gains are checked; presumably later selections can
    reorder under ties — TODO confirm.
    """
    model = SaturatedCoverageSelection(100, 'precomputed', optimizer='greedi', optimizer_kwds={'optimizer1': 'lazy', 'optimizer2': 'lazy'}, random_state=0)
    model.fit(X_digits_cosine_sparse)
    assert_array_equal(model.ranking[:2], digits_cosine_greedi_ranking[:2])
    assert_array_almost_equal(model.gains[:2], digits_cosine_greedi_gains[:2], 4)
def mkdirs(paths):
    """Create one directory or each directory in a list via the ``mkdir`` helper.

    Args:
        paths: a single path or a list of paths.
    """
    # FIX: dropped the dead `and not isinstance(paths, str)` clause — a list can
    # never also be a str, so the second test could never change the outcome.
    if isinstance(paths, list):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)
class ImageNetBase(Dataset):
    """Base class for ImageNet-style datasets backed by a filelist on disk.

    Subclasses implement ``_prepare`` and are expected to set ``self.root``,
    ``self.datadir``, ``self.txt_filelist`` and ``self.random_crop`` before
    ``_load`` runs.  # NOTE(review): inferred from attribute usage below — confirm against subclasses.
    """

    def __init__(self, config=None):
        self.config = (config or OmegaConf.create())
        # FIX: idiomatic isinstance check (was `type(self.config) == dict`);
        # OmegaConf containers are converted to plain dicts for uniform access.
        if not isinstance(self.config, dict):
            self.config = OmegaConf.to_container(self.config)
        self.keep_orig_class_label = self.config.get('keep_orig_class_label', False)
        # When False, _load exposes raw paths instead of decoded images.
        self.process_images = True
        self._prepare()
        self._prepare_synset_to_human()
        self._prepare_idx_to_synset()
        self._prepare_human_to_integer_label()
        self._load()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]

    def _prepare(self):
        # Subclass hook: locate/download the data and set the path attributes.
        raise NotImplementedError()

    def _filter_relpaths(self, relpaths):
        """Drop blacklisted files and, when `sub_indices` is configured, keep only those synsets."""
        # NOTE(review): this filename looks truncated in this copy of the source
        # (upstream blacklists a full 'nXXXXXXXX_9591.JPEG' id) — verify.
        ignore = set(['n_9591.JPEG'])
        relpaths = [rpath for rpath in relpaths if (not (rpath.split('/')[(- 1)] in ignore))]
        if ('sub_indices' in self.config):
            indices = str_to_indices(self.config['sub_indices'])
            synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn)
            self.synset2idx = synset2idx(path_to_yaml=self.idx2syn)
            files = []
            for rpath in relpaths:
                syn = rpath.split('/')[0]
                if (syn in synsets):
                    files.append(rpath)
            return files
        else:
            return relpaths

    def _prepare_synset_to_human(self):
        # Expected byte size of the mapping file, used as a cheap integrity check.
        SIZE = 2655750
        # TODO(review): the download URL string literal was lost in this copy of
        # the file (it was left unterminated) — restore it before use.
        URL = ''
        self.human_dict = os.path.join(self.root, 'synset_human.txt')
        if ((not os.path.exists(self.human_dict)) or (not (os.path.getsize(self.human_dict) == SIZE))):
            download(URL, self.human_dict)

    def _prepare_idx_to_synset(self):
        # TODO(review): download URL literal lost in this copy — restore.
        URL = ''
        self.idx2syn = os.path.join(self.root, 'index_synset.yaml')
        if (not os.path.exists(self.idx2syn)):
            download(URL, self.idx2syn)

    def _prepare_human_to_integer_label(self):
        # TODO(review): download URL literal lost in this copy — restore.
        URL = ''
        self.human2integer = os.path.join(self.root, 'imagenet1000_clsidx_to_labels.txt')
        if (not os.path.exists(self.human2integer)):
            download(URL, self.human2integer)
        with open(self.human2integer, 'r') as f:
            lines = f.read().splitlines()
            assert (len(lines) == 1000)
            self.human2integer_dict = dict()
            for line in lines:
                # Each line is "<int index>: <label>".
                (value, key) = line.split(':')
                self.human2integer_dict[key] = int(value)

    def _load(self):
        """Read and filter the filelist, then build label arrays and the dataset."""
        with open(self.txt_filelist, 'r') as f:
            self.relpaths = f.read().splitlines()
        l1 = len(self.relpaths)
        self.relpaths = self._filter_relpaths(self.relpaths)
        print('Removed {} files from filelist during filtering.'.format((l1 - len(self.relpaths))))
        self.synsets = [p.split('/')[0] for p in self.relpaths]
        self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]
        unique_synsets = np.unique(self.synsets)
        # Dense 0..K-1 class ids in sorted-synset order.
        class_dict = dict(((synset, i) for (i, synset) in enumerate(unique_synsets)))
        if (not self.keep_orig_class_label):
            self.class_labels = [class_dict[s] for s in self.synsets]
        else:
            self.class_labels = [self.synset2idx[s] for s in self.synsets]
        with open(self.human_dict, 'r') as f:
            human_dict = f.read().splitlines()
            human_dict = dict((line.split(maxsplit=1) for line in human_dict))
        self.human_labels = [human_dict[s] for s in self.synsets]
        labels = {'relpath': np.array(self.relpaths), 'synsets': np.array(self.synsets), 'class_label': np.array(self.class_labels), 'human_label': np.array(self.human_labels)}
        if self.process_images:
            self.size = retrieve(self.config, 'size', default=256)
            self.data = ImagePaths(self.abspaths, labels=labels, size=self.size, random_crop=self.random_crop)
        else:
            self.data = self.abspaths
class TestTridentRoIHead(TestCase):
    """Unit tests for the TridentNet RoI head."""

    def setUp(self):
        register_all_modules()
        self.roi_head_cfg = get_roi_head_cfg('tridentnet/tridentnet_r50-caffe_1x_coco.py')

    def test_init(self):
        roi_head = MODELS.build(self.roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)
        self.assertTrue(roi_head.with_shared_head)

    def test_trident_roi_head_predict(self):
        if (not torch.cuda.is_available()):
            # FIX: `return unittest.skip(...)` only returned a decorator object and
            # reported the test as PASSED; skipTest() records a real skip.
            self.skipTest('test requires GPU and torch+cuda')
        roi_head_cfg = copy.deepcopy(self.roi_head_cfg)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        s = 256
        # One feature map per extractor stride, halving resolution each level.
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(torch.rand(1, 1024, (s // (2 ** (i + 2))), (s // (2 ** (i + 2)))).to(device='cuda'))
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(batch_size=1, image_shapes=image_shapes, num_items=[0], num_classes=4, with_mask=True, device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(image_shapes=image_shapes, num_proposals=100, device='cuda')
        roi_head.predict(feats, proposals_list, batch_data_samples)
        # Also exercise inference with all trident branches enabled.
        roi_head_cfg.test_branch_idx = (- 1)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        roi_head.predict(feats, proposals_list, batch_data_samples)
class ImageFileTrain(ImageFile):
    """Training-split file lister for image matting.

    Keeps only foreground names that also have an alpha matte (via the base
    class's ``_get_valid_names``); all backgrounds are used.  Exposes absolute
    path lists ``alpha``, ``fg`` and ``bg``.
    """

    def __init__(self, alpha_dir='train_alpha', fg_dir='train_fg', bg_dir='train_bg', alpha_ext='.jpg', fg_ext='.jpg', bg_ext='.jpg'):
        super(ImageFileTrain, self).__init__(phase='train')
        self.alpha_dir = alpha_dir
        self.fg_dir = fg_dir
        self.bg_dir = bg_dir
        self.alpha_ext = alpha_ext
        self.fg_ext = fg_ext
        self.bg_ext = bg_ext
        self.logger.debug('Load Training Images From Folders')
        # Foregrounds valid only when a matching alpha file exists.
        self.valid_fg_list = self._get_valid_names(self.fg_dir, self.alpha_dir)
        # Backgrounds need no pairing; take every file (extension stripped).
        self.valid_bg_list = [os.path.splitext(name)[0] for name in os.listdir(self.bg_dir)]
        self.alpha = self._list_abspath(self.alpha_dir, self.alpha_ext, self.valid_fg_list)
        self.fg = self._list_abspath(self.fg_dir, self.fg_ext, self.valid_fg_list)
        self.bg = self._list_abspath(self.bg_dir, self.bg_ext, self.valid_bg_list)

    def __len__(self):
        # Dataset length follows the number of paired alpha mattes.
        return len(self.alpha)
def upsampleSum(x, conv, filters=128, ratio=0.5, activation=ACTIVATION, name=None):
    """2x nearest-neighbour upsample of ``x`` blended with a 1x1 conv+BN projection of ``conv``.

    Returns (1 - ratio) * upsampled(x) + ratio * conv2DBatchNorm(conv).
    ``activation`` is accepted for interface compatibility but unused here.
    """
    with tf.name_scope(name):
        x_up = tf.keras.layers.UpSampling2D(size=(2, 2), interpolation='nearest')(x)
        # FIX: `'upsum' + name` raised TypeError for the default name=None;
        # fall back to '' so the default yields the layer name 'upsum'.
        p = (((1.0 - ratio) * x_up) + (ratio * conv2DBatchNorm(conv, filters=filters, kernel_size=(1, 1), name=('upsum' + (name or '')))))
        return p
def test_detector(args):
    """Run the trained keypoint detector over every frame of a sequence and save keypoints.

    Reads .txt point clouds from <data_dir>/sequences/<test_seq>/velodyne_txt and
    writes one keypoints-with-sigmas file per frame into <save_dir>/keypoints.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    model = Detector(args)
    model = model.cuda()
    model.load_state_dict(torch.load(args.model_path))
    model.eval()
    velodyne_dir = os.path.join(args.data_dir, 'sequences', args.test_seq, 'velodyne_txt')
    velodyne_names = os.listdir(velodyne_dir)
    # Sort to process frames in temporal order.
    velodyne_names = sorted(velodyne_names)
    for filename in velodyne_names:
        filepath = os.path.join(velodyne_dir, filename)
        kp_path = os.path.join(args.save_dir, 'keypoints', filename)
        # Point coordinates and surface normals — presumably (npoints, 3) each; TODO confirm get_pointcloud contract.
        (pc, sn) = get_pointcloud(filepath, args.npoints)
        feature = torch.cat((pc, sn), dim=(- 1))
        feature = feature.unsqueeze(0)
        feature = feature.cuda()
        startT = datetime.datetime.now()
        (kp, sigmas, _, _) = model(feature)
        endT = datetime.datetime.now()
        # NOTE(review): .microseconds is only the sub-second component of the delta
        # (use total_seconds() for true latency); value is currently unused anyway.
        computation_time = (endT - startT).microseconds
        # One row per keypoint: xyz followed by its predicted uncertainty.
        kp_sigmas = torch.cat((kp, sigmas.unsqueeze(1)), dim=1)
        kp_sigmas = kp_sigmas.squeeze().cpu().detach().numpy().transpose()
        np.savetxt(kp_path, kp_sigmas)
class UniNorm2d(_UniNorm):
    """2-D specialisation of _UniNorm: accepts only (N, C, H, W) inputs."""

    def _check_input_dim(self, input):
        # The 2d variant normalises 4-D activations exclusively.
        ndim = input.dim()
        if ndim != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(ndim))
def prepare_dataset(voxel_size):
    """Load the Open3D demo ICP pair, deliberately misalign the source, and
    compute downsampled clouds plus FPFH features for both.

    Returns:
        (source, target, source_down, target_down, source_fpfh, target_fpfh)
    """
    print(':: Load two point clouds and disturb initial pose.')
    demo_paths = o3d.data.DemoICPPointClouds().paths
    source = o3d.io.read_point_cloud(demo_paths[0])
    target = o3d.io.read_point_cloud(demo_paths[1])
    # Axis-permutation transform: rotates the source so registration is non-trivial.
    trans_init = np.asarray([
        [0.0, 0.0, 1.0, 0.0],
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ])
    source.transform(trans_init)
    # Visualise the initial (misaligned) configuration.
    draw_registration_result(source, target, np.identity(4))
    source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
    target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
    return source, target, source_down, target_down, source_fpfh, target_fpfh
class LiltPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    Instantiating it raises a helpful error via ``requires_backends`` instead of
    an ImportError at module import.
    """
    # Backends required for the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def load_n2d(encoder_id, manifold_id):
    """Reconstruct an ``n2d`` object from a pickled manifold plus a saved Keras encoder.

    Args:
        encoder_id: path to the saved encoder model.
        manifold_id: path to the pickled manifold learner.
    """
    # FIX: use a context manager so the file handle is closed deterministically
    # (the original `pickle.load(open(...))` leaked it).
    # NOTE: pickle.load on untrusted files can execute arbitrary code — only load trusted artifacts.
    with open(manifold_id, 'rb') as fh:
        man = pickle.load(fh)
    out = n2d(10, man)
    out.encoder = load_model(encoder_id, compile=False)
    return out
def parse_args():
    """Parse CLI options for the markdown link checker.

    Returns:
        argparse.Namespace with num_threads, http_proxy and out.
    """
    parser = argparse.ArgumentParser(description='Goes through all the inline-links in markdown files and reports the breakages')
    parser.add_argument('--num-threads', type=int, default=100, help='Number of processes to confirm the link')
    # NOTE(review): this argument was garbled in the source copy
    # ("parser.add_argument('-- type=str, help=' proxy')"); reconstructed as an
    # HTTP-proxy option — confirm the original flag name and help text.
    parser.add_argument('--http-proxy', type=str, help='HTTP proxy')
    parser.add_argument('--out', type=str, default='link_reports.txt', help='output path of reports')
    args = parser.parse_args()
    return args
class SoftEntropy(nn.Module):
    """Soft cross-entropy between student logits and (detached) teacher logits.

    loss = mean over batch of -sum_c softmax(targets)_c * log_softmax(inputs)_c.
    The teacher distribution is detached so no gradient flows into ``targets``.
    NOTE(review): the LogSoftmax module is moved to CUDA at construction time,
    so this class requires a GPU as written — confirm that is intentional.
    """

    def __init__(self):
        super(SoftEntropy, self).__init__()
        self.logsoftmax = nn.LogSoftmax(dim=1).cuda()

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        # mean(0) averages over the batch, sum() totals over classes.
        loss = ((- F.softmax(targets, dim=1).detach()) * log_probs).mean(0).sum()
        return loss
def rouge_n(eval_sentences, ref_sentences, n=2):
    """ROUGE-N: mean n-gram-overlap F1 between hypothesis and reference sentences.

    Args:
        eval_sentences: hypothesis token sequences.
        ref_sentences: reference token sequences (paired positionally).
        n: n-gram order.

    Returns:
        np.float32 mean F1 over all sentence pairs.
    """
    scores = []
    for hyp, ref in zip(eval_sentences, ref_sentences):
        hyp_ngrams = _get_ngrams(n, hyp)
        ref_ngrams = _get_ngrams(n, ref)
        overlap = len(hyp_ngrams.intersection(ref_ngrams))
        # Guard the zero-denominator cases explicitly.
        precision = (float(overlap) / len(hyp_ngrams)) if len(hyp_ngrams) != 0 else 0.0
        recall = (float(overlap) / len(ref_ngrams)) if len(ref_ngrams) != 0 else 0.0
        # Smoothed F1: epsilon keeps the division defined when both are zero.
        scores.append((2.0 * ((precision * recall) / ((precision + recall) + 1e-08))))
    return np.mean(scores, dtype=np.float32)
def test_minesweeper__does_not_smoke(minesweeper_env: Minesweeper) -> None:
    """Smoke test: the Minesweeper environment passes the generic env checker without raising."""
    check_env_does_not_smoke(env=minesweeper_env)
def sum_logits(args, mask=None, name=None):
    """Sum each tensor over its last axis, add the results, optionally apply exp_mask.

    Args:
        args: a tensor or a non-empty sequence of tensors (all same rank).
        mask: optional mask forwarded to ``exp_mask``.
        name: optional name scope (defaults to 'sum_logits').

    Raises:
        ValueError: when args is None or an empty sequence.
    """
    with tf.name_scope((name or 'sum_logits')):
        # Reject missing/empty inputs up front.
        if (args is None) or (nest.is_sequence(args) and (not args)):
            raise ValueError('`args` must be specified')
        if not nest.is_sequence(args):
            args = [args]
        # Reduce over the trailing axis of each tensor, then add them all up.
        last_axis = len(args[0].get_shape()) - 1
        logits = sum(tf.reduce_sum(arg, last_axis) for arg in args)
        if mask is not None:
            logits = exp_mask(logits, mask)
        return logits
@_init_args  # NOTE(review): leading '@' restored — this decorator appears to assign the __init__ args (config, experiments_prefix, logfile_name) onto self, which the methods below rely on; confirm its definition.
class experiment():
    """Manages an experiment directory (named after non-default CLI options) and file logging.

    Use as a context manager: ``with experiment(cfg, prefix) as exp: ...``.
    """

    def __init__(self, config, experiments_prefix, logfile_name='log'):
        # Snapshot parser defaults so experiment_dir can detect non-default settings.
        all_defaults = {}
        for key in vars(config):
            all_defaults[key] = get_base_parser().get_default(key)
        self.default_config = all_defaults
        config.resume = False
        if not config.debug:
            # Resume when a directory for this configuration already exists.
            if os.path.isdir(self.experiment_dir):
                print('log exists: {}'.format(self.experiment_dir))
                config.resume = True
            print(config)
            self._makedir()

    def _makedir(self):
        os.makedirs(self.experiment_dir, exist_ok=True)

    @property  # FIX: restored @property — experiment_dir is read as an attribute throughout (__init__, _makedir, ...).
    def experiment_dir(self):
        """Directory name derived from the model_configs options that differ from their defaults."""
        if self.config.debug:
            return './'
        else:
            # Rebuild the parser's argument groups to know which options belong
            # to 'model_configs' / 'model_configs_show'.
            arg_g = dict()
            for group in get_base_parser()._action_groups:
                group_d = {a.dest: self.default_config.get(a.dest, None) for a in group._group_actions}
                arg_g[group.title] = argparse.Namespace(**group_d)
            identifier = ''
            # Non-default model options contribute "<name><value>" fragments...
            for (key, value) in sorted(vars(arg_g['model_configs']).items()):
                if getattr(self.config, key) != value:
                    identifier += (key + str(getattr(self.config, key)))
            # ...while 'show' options are always included.
            for (key, value) in sorted(vars(arg_g['model_configs_show']).items()):
                identifier += (key + str(getattr(self.config, key)))
            if identifier == '':
                identifier = 'all_defaults'
            return os.path.join(self.experiments_prefix, identifier)

    @property  # FIX: restored @property — log_file is read as an attribute in __enter__.
    def log_file(self):
        return os.path.join(self.experiment_dir, self.logfile_name)

    def register_directory(self, dirname):
        """Create <experiment_dir>/<dirname> and expose it as an attribute of the same name."""
        directory = os.path.join(self.experiment_dir, dirname)
        os.makedirs(directory, exist_ok=True)
        setattr(self, dirname, directory)

    def _register_existing_directories(self):
        # Mirror every existing subdirectory as an attribute.
        for item in os.listdir(self.experiment_dir):
            fullpath = os.path.join(self.experiment_dir, item)
            if os.path.isdir(fullpath):
                setattr(self, item, fullpath)

    def __enter__(self):
        if self.config.debug:
            # Debug: log to stderr at DEBUG level.
            logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m-%d %H:%M')
        else:
            print('log saving to', self.log_file)
            logging.basicConfig(filename=self.log_file, filemode='a+', level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m-%d %H:%M')
        self.log = logging.getLogger()
        self.start_time = time.time()
        return self

    def __exit__(self, *args):
        logging.shutdown()

    def elapsed_time(self):
        # Hours since __enter__.
        return (time.time() - self.start_time) / 3600
def test_multiplicity(precision='d'):
    """Compute the multiplicity of a known solution of a small polynomial system.

    Args:
        precision: 'd' (double), 'dd' (double double) or 'qd' (quad double).

    Returns:
        The computed multiplicity.  # FIX: the original computed `mul` but discarded it.

    Raises:
        ValueError: for an unsupported precision level (was a silent print).
    """
    from phcpy.solutions import make_solution
    pols = ['x**2+y-3;', 'x+0.125*y**2-1.5;']
    sol = make_solution(['x', 'y'], [1, 2])
    if precision == 'd':
        mul = standard_multiplicity(pols, sol, verbose=True)
    elif precision == 'dd':
        mul = dobldobl_multiplicity(pols, sol, verbose=True)
    elif precision == 'qd':
        mul = quaddobl_multiplicity(pols, sol, verbose=True)
    else:
        raise ValueError('wrong level of precision: {!r}'.format(precision))
    return mul
def main(_):
    """Multi-clone (slim.deploy) training for a MobileNet-based anchor detector.

    Builds the input pipeline, encodes ground truth against the anchor grid,
    clones the model+losses across devices, wires summaries/optimizer, and
    hands everything to ``slim.learning.train``.
    """
    if (not FLAGS.dataset_dir):
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        # Device/replica layout used for model cloning below.
        deploy_config = model_deploy.DeploymentConfig(num_clones=FLAGS.num_clones, clone_on_cpu=FLAGS.clone_on_cpu, replica_id=FLAGS.task, num_replicas=FLAGS.worker_replicas, num_ps_tasks=FLAGS.num_ps_tasks)
        with tf.device(deploy_config.variables_device()):
            global_step = slim.create_global_step()
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
        network_fn = nets_factory.get_network_fn(FLAGS.model_name, num_classes=(dataset.num_classes - FLAGS.labels_offset), weight_decay=FLAGS.weight_decay, is_training=True, width_multiplier=FLAGS.width_multiplier)
        preprocessing_name = (FLAGS.preprocessing_name or FLAGS.model_name)
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(preprocessing_name, is_training=True)
        with tf.device(deploy_config.inputs_device()):
            # Read, preprocess, and encode annotations against the anchor grid,
            # then batch and prefetch for the clones.
            provider = slim.dataset_data_provider.DatasetDataProvider(dataset, num_readers=FLAGS.num_readers, common_queue_capacity=(20 * FLAGS.batch_size), common_queue_min=(10 * FLAGS.batch_size))
            [image, img_shape, gt_labels, gt_bboxes] = provider.get(['image', 'shape', 'object/label', 'object/bbox'])
            (image, gt_labels, gt_bboxes) = image_preprocessing_fn(image, config.IMG_HEIGHT, config.IMG_WIDTH, labels=gt_labels, bboxes=gt_bboxes)
            anchors = tf.convert_to_tensor(config.ANCHOR_SHAPE, dtype=tf.float32)
            (input_mask, labels_input, box_delta_input, box_input) = encode_annos(gt_labels, gt_bboxes, anchors, config.NUM_CLASSES)
            (images, b_input_mask, b_labels_input, b_box_delta_input, b_box_input) = tf.train.batch([image, input_mask, labels_input, box_delta_input, box_input], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, capacity=(5 * FLAGS.batch_size))
            batch_queue = slim.prefetch_queue.prefetch_queue([images, b_input_mask, b_labels_input, b_box_delta_input, b_box_input], capacity=(2 * deploy_config.num_clones))

        def clone_fn(batch_queue):
            # Per-clone graph: backbone -> dropout -> conv prediction head -> losses.
            (images, b_input_mask, b_labels_input, b_box_delta_input, b_box_input) = batch_queue.dequeue()
            anchors = tf.convert_to_tensor(config.ANCHOR_SHAPE, dtype=tf.float32)
            end_points = network_fn(images)
            end_points['viz_images'] = images
            conv_ds_14 = end_points['MobileNet/conv_ds_14/depthwise_conv']
            dropout = slim.dropout(conv_ds_14, keep_prob=0.5, is_training=True)
            # Per anchor: class scores (+1 confidence) and 4 box deltas.
            num_output = (config.NUM_ANCHORS * ((config.NUM_CLASSES + 1) + 4))
            predict = slim.conv2d(dropout, num_output, kernel_size=(3, 3), stride=1, padding='SAME', activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=0.0001), scope='MobileNet/conv_predict')
            with tf.name_scope('Interpre_prediction') as scope:
                # Decode raw head outputs into class probs, confidences, IoUs and boxes.
                (pred_box_delta, pred_class_probs, pred_conf, ious, det_probs, det_boxes, det_class) = interpre_prediction(predict, b_input_mask, anchors, b_box_input)
                end_points['viz_det_probs'] = det_probs
                end_points['viz_det_boxes'] = det_boxes
                end_points['viz_det_class'] = det_class
            with tf.name_scope('Losses') as scope:
                losses(b_input_mask, b_labels_input, ious, b_box_delta_input, pred_class_probs, pred_conf, pred_box_delta)
            return end_points

        # Collect summaries created so far, then build the clones.
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
        clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
        first_clone_scope = deploy_config.clone_scope(0)
        # Update ops (e.g. batch-norm moving averages) from the first clone only.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
        end_points = clones[0].outputs
        for end_point in end_points:
            # Histogram/sparsity summaries for activations (viz tensors excluded).
            if (end_point not in ['viz_images', 'viz_det_probs', 'viz_det_boxes', 'viz_det_class']):
                x = end_points[end_point]
                summaries.add(tf.summary.histogram(('activations/' + end_point), x))
                summaries.add(tf.summary.scalar(('sparsity/' + end_point), tf.nn.zero_fraction(x)))
        for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
            summaries.add(tf.summary.scalar(('losses/%s' % loss.op.name), loss))
        for variable in slim.get_model_variables():
            summaries.add(tf.summary.histogram(variable.op.name, variable))
        # Optional exponential moving average of model variables.
        if FLAGS.moving_average_decay:
            moving_average_variables = slim.get_model_variables()
            variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay, global_step)
        else:
            (moving_average_variables, variable_averages) = (None, None)
        with tf.device(deploy_config.optimizer_device()):
            learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
            optimizer = _configure_optimizer(learning_rate)
            summaries.add(tf.summary.scalar('learning_rate', learning_rate))
        if FLAGS.sync_replicas:
            # Synchronous multi-worker training; EMA handled inside the sync optimizer.
            optimizer = tf.train.SyncReplicasOptimizer(opt=optimizer, replicas_to_aggregate=FLAGS.replicas_to_aggregate, variable_averages=variable_averages, variables_to_average=moving_average_variables, replica_id=tf.constant(FLAGS.task, tf.int32, shape=()), total_num_replicas=FLAGS.worker_replicas)
        elif FLAGS.moving_average_decay:
            update_ops.append(variable_averages.apply(moving_average_variables))
        variables_to_train = _get_variables_to_train()
        # Average gradients across clones and apply them.
        (total_loss, clones_gradients) = model_deploy.optimize_clones(clones, optimizer, var_list=variables_to_train)
        summaries.add(tf.summary.scalar('total_loss', total_loss))
        grad_updates = optimizer.apply_gradients(clones_gradients, global_step=global_step)
        update_ops.append(grad_updates)
        update_op = tf.group(*update_ops)
        # train_tensor evaluates total_loss only after all update ops have run.
        train_tensor = control_flow_ops.with_dependencies([update_op], total_loss, name='train_op')
        summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))
        summary_op = tf.summary.merge(list(summaries), name='summary_op')
        slim.learning.train(train_tensor, logdir=FLAGS.train_dir, master=FLAGS.master, is_chief=(FLAGS.task == 0), init_fn=_get_init_fn(), summary_op=summary_op, number_of_steps=FLAGS.max_number_of_steps, log_every_n_steps=FLAGS.log_every_n_steps, save_summaries_secs=FLAGS.save_summaries_secs, save_interval_secs=FLAGS.save_interval_secs, sync_optimizer=(optimizer if FLAGS.sync_replicas else None))
def main():
    """Score generated speech continuations against ground-truth text via BLEU.

    Matches ASR transcripts of generated continuations to their prompts (via the
    manifest line number embedded in each transcript's utterance id), restricts
    to the shortest prompts, and reports the median sentence BLEU-2.
    """
    warnings.filterwarnings('ignore')
    args = get_args()
    with open(args.prompts_description, 'r') as fin:
        original_continuations = json.loads(fin.read())
    # (sequence_name, duration) pairs; all durations must be >= 6 units.
    sequence2length = [(k, v[0]) for (k, v) in original_continuations.items()]
    assert all(((float(v) >= 6.0) for (_, v) in sequence2length))
    sequence2length.sort(key=(lambda x: x[1]))
    # Evaluate only the N shortest prompts.
    to_take = set((v[0] for v in sequence2length[:args.take_shortest]))
    with open(args.manifest, 'r') as fin:
        fin.readline()
        # Map manifest line number -> sequence name (prefix before '__').
        linenum2file = dict([(i, l.split('__')[0]) for (i, l) in enumerate(fin)])
    max_files = max(linenum2file.keys())
    continuations = defaultdict(list)
    mean_length_after = 0
    n_examples = 0
    with open(args.asr_transcript, 'r') as fin:
        for line in fin:
            n_examples += 1
            line = line.split()
            # Last token carries the utterance id, e.g. '(...-<linenum>)'.
            sequence_id = int(line[(- 1)].split('-')[1][:(- 1)])
            assert (sequence_id <= max_files)
            sequence_name = linenum2file[sequence_id]
            # Keep the words only (drop the trailing id token).
            continuations[sequence_name].append(line[:(- 1)])
            mean_length_after += len(line)
    mean_length_after /= n_examples
    print(f'Mean length of continuations, in words: {mean_length_after}')
    metric_values = []
    mean_ground_truth_words = 0
    n_examples = 0
    n_candidates = 0
    for (k, candidates) in continuations.items():
        if (k not in to_take):
            continue
        n_examples += 1
        ground_truth = original_continuations[k][1].split()
        n_candidates += len(candidates)
        # Sentence BLEU with 1/2-gram weights, geometric averaging, no brevity penalty.
        bleu = sentence_bleu(candidates, ground_truth, weights=(0.5, 0.5), no_length_penalty=True, averaging_mode='geometric')
        mean_ground_truth_words += len(ground_truth)
        metric_values.append(bleu)
    n = len(metric_values)
    print(f'Median BLEU over {n} examples: {np.median(metric_values)} +- {(np.std(metric_values) / np.sqrt(n))}')
class OA(nn.Module):
    """Offset-based alignment: predicts 2D offset fields to warp two feature maps
    onto each other before fusing them by summation.

    ``delta_gen1`` warps the (upsampled) high-level stage, ``delta_gen2`` warps
    the low-level stage; either generator can be disabled.  The last conv of
    each generator is zero-initialised so the module starts as plain bilinear
    fusion (zero offsets).
    """

    def __init__(self, features, disable_gen1=False, disable_gen2=False, bn=True, relu=False, refl_pad=True):
        super().__init__()
        self.disable_gen1 = disable_gen1
        self.disable_gen2 = disable_gen2
        # Activation / normalisation choices for the offset generators.
        if relu:
            act_func = nn.ReLU
        else:
            act_func = nn.ELU
        if bn:
            bn_func = nn.BatchNorm2d
        else:
            bn_func = nn.Identity
        if refl_pad:
            # Conv3x3 is a project wrapper; presumably reflection-padded (flag name) — confirm.
            if (not self.disable_gen1):
                self.delta_gen1 = nn.Sequential(nn.Conv2d((features * 2), features, kernel_size=1, bias=False), bn_func(features), act_func(), Conv3x3(features, 2, bias=False))
                # Zero-init the offset head: initial warp is the identity.
                self.delta_gen1[3].conv.weight.data.zero_()
            if (not self.disable_gen2):
                self.delta_gen2 = nn.Sequential(nn.Conv2d((features * 2), features, kernel_size=1, bias=False), bn_func(features), act_func(), Conv3x3(features, 2, bias=False))
                self.delta_gen2[3].conv.weight.data.zero_()
        else:
            if (not self.disable_gen1):
                self.delta_gen1 = nn.Sequential(nn.Conv2d((features * 2), features, kernel_size=1, bias=False), bn_func(features), act_func(), nn.Conv2d(features, 2, kernel_size=3, padding=1, bias=False))
                self.delta_gen1[3].weight.data.zero_()
            if (not self.disable_gen2):
                self.delta_gen2 = nn.Sequential(nn.Conv2d((features * 2), features, kernel_size=1, bias=False), bn_func(features), act_func(), nn.Conv2d(features, 2, kernel_size=3, padding=1, bias=False))
                self.delta_gen2[3].weight.data.zero_()

    def bilinear_interpolate_torch_gridsample(self, input, size, delta=0):
        """Bilinearly sample ``input`` on a regular (out_h, out_w) grid shifted by ``delta``.

        ``delta`` is an (N, 2, H, W) offset field in pixel units; it is converted
        to grid_sample's normalised [-1, 1] coordinates via ``norm``.
        """
        (out_h, out_w) = size
        (n, c, h, w) = input.shape
        s = 2.0
        # Pixel-offset -> normalised-coordinate scale per axis (x then y).
        norm = torch.tensor([[[[((out_w - 1) / s), ((out_h - 1) / s)]]]]).type_as(input).to(input.device)
        # Base sampling grid covering the output uniformly in [-1, 1].
        w_list = torch.linspace((- 1.0), 1.0, out_h).view((- 1), 1).repeat(1, out_w)
        h_list = torch.linspace((- 1.0), 1.0, out_w).repeat(out_h, 1)
        grid = torch.cat((h_list.unsqueeze(2), w_list.unsqueeze(2)), 2)
        grid = grid.repeat(n, 1, 1, 1).type_as(input).to(input.device)
        # Shift the grid by the (normalised) predicted offsets.
        grid = (grid + (delta.permute(0, 2, 3, 1) / norm))
        output = F.grid_sample(input, grid, align_corners=True)
        return output

    def forward(self, high_stage, low_stage):
        """Upsample high_stage to low_stage's spatial size, warp each stage by its
        predicted offsets, and return (fused sum, warped high stage, offsets dict)."""
        delta_dict = {}
        (h, w) = (low_stage.size(2), low_stage.size(3))
        high_stage = F.interpolate(input=high_stage, size=(h, w), mode='bilinear', align_corners=True)
        # Offsets are predicted from the concatenation of both stages.
        concat = torch.cat((low_stage, high_stage), 1)
        if (not self.disable_gen1):
            delta1 = self.delta_gen1(concat)
            high_stage = self.bilinear_interpolate_torch_gridsample(high_stage, (h, w), delta1)
            delta_dict[1] = delta1
        if (not self.disable_gen2):
            delta2 = self.delta_gen2(concat)
            low_stage = self.bilinear_interpolate_torch_gridsample(low_stage, (h, w), delta2)
            delta_dict[2] = delta2
        fuse_high_stage = (high_stage + low_stage)
        return (fuse_high_stage, high_stage, delta_dict)
def aggregate_graph(g_intermediate: nx.DiGraph, city: str, cutoff: int=9) -> nx.Graph:
    """Collapse the intermediate directed graph into an undirected coarse-cell graph.

    Nodes of length 3 are treated as 'coarse' nodes.  # NOTE(review): assumes
    nodes are tuples whose first two components identify a grid cell — confirm.
    For each coarse node, reversed edges to its predecessors are temporarily
    added so a short forward search (<= cutoff hops) can reach neighbouring
    coarse nodes; an undirected edge between the two cells (node[:2]) is then
    recorded.  ``g_intermediate`` is mutated during iteration but restored
    before returning.
    """
    g_aggr = nx.Graph()
    coarse_nodes = [n for n in g_intermediate.nodes if (len(n) == 3)]
    for source in coarse_nodes:
        # Temporarily make predecessors reachable in the forward direction.
        outgoing_edges = [(source, n_fine) for n_fine in g_intermediate.predecessors(source)]
        g_intermediate.add_edges_from(outgoing_edges)
        for target in nx.single_source_shortest_path(g_intermediate, source=source, cutoff=cutoff):
            if ((len(target) == 3) and (source != target)):
                g_aggr.add_edge(source[:2], target[:2])
                try:
                    # Sanity check: aggregated neighbours must be grid-adjacent
                    # (coordinates differ by at most 1).
                    assert ((source[0] - target[0]) in [(- 1), 0, 1]), f'{city} {source}-{target}'
                    assert ((source[1] - target[1]) in [(- 1), 0, 1]), f'{city} {source}-{target}'
                except AssertionError as e:
                    # Dump the offending path for debugging before re-raising.
                    print(nx.shortest_path(g_intermediate, source, target))
                    raise e
        # Undo the temporary reverse edges before the next coarse node.
        g_intermediate.remove_edges_from(outgoing_edges)
    return g_aggr
def ReadFileSL(x_axis, tthread, batchInterval, NUM_ITEMS, deposit_ratio, key_skewness, overlap_ratio, abort_ratio, txn_length, isCyclic, complexity):
    """Read throughput numbers for the OP_NS and OP_BFS schedulers at each batch interval.

    Args:
        x_axis: batch-interval values to sweep.
        batchInterval: unused; kept for interface compatibility (the original
            shadowed it with the loop variable).
        (remaining args are forwarded to ``getPathSL`` to locate result files)

    Returns:
        [ns_throughputs, bfs_throughputs], one float per x_axis entry.
    """
    def _throughputs(scheduler):
        # One throughput sample per interval; files are closed deterministically
        # (FIX: the original leaked every `open(...).readlines()` handle).
        vals = []
        for interval in x_axis:
            input_events = tthread * interval
            path = getPathSL(scheduler, input_events, tthread, NUM_ITEMS, deposit_ratio, key_skewness, overlap_ratio, abort_ratio, txn_length, isCyclic, complexity)
            with open(path) as fh:
                first_line = fh.readline()
            # First line has the form "<label>: <value>".
            vals.append(float(first_line.split(': ')[1]))
        return vals

    # FIX: the two near-identical loops collapsed into one helper.
    y = [_throughputs('OP_NS'), _throughputs('OP_BFS')]
    print(y)
    return y
class nnUNetTrainerV2_DA2(nnUNetTrainerV2):
    """Trainer variant with heavier data augmentation (DA2)."""

    def setup_DA_params(self):
        super().setup_DA_params()
        # Allow each axis to be scaled independently instead of isotropically.
        self.data_aug_params['independent_scale_factor_for_each_axis'] = True
        # Per-axis rotation probability: 0.5 in 3D, always (1) in 2D.
        if self.threeD:
            self.data_aug_params['rotation_p_per_axis'] = 0.5
        else:
            self.data_aug_params['rotation_p_per_axis'] = 1
        # Enable additive brightness augmentation.
        self.data_aug_params['do_additive_brightness'] = True
def fetch_data(data, count, idx_batch, vocab_size, params, labels=None):
    """Assemble a dense bag-of-words minibatch from sparse document dicts.

    Args:
        data: per-document {word_id: frequency} dicts.
        count: per-document token counts.
        idx_batch: document ids for this batch; -1 marks a padding slot.
        vocab_size: width of the dense rows.
        params: needs .multilabel and .num_labels only when `labels` is given.
        labels: optional per-document labels.

    Returns:
        (data_batch, count_batch, mask) — plus label_batch when `labels` is given.
        mask[i] is 1.0 for real documents, 0.0 for padding slots.
    """
    batch_size = len(idx_batch)
    data_batch = np.zeros((batch_size, vocab_size))
    if labels:
        if params.multilabel:
            # FIX: build one independent row per example; the original
            # `[row] * batch_size` made every row alias the same list.
            label_batch = [[0] * params.num_labels for _ in range(batch_size)]
        else:
            label_batch = (- np.ones(batch_size, dtype=np.int32))
    count_batch = []
    mask = np.zeros(batch_size)
    # (removed unused `indices`/`values` locals from the original)
    for (i, doc_id) in enumerate(idx_batch):
        if (doc_id != (- 1)):
            if labels:
                label_batch[i] = labels[doc_id]
            # Scatter the sparse term frequencies into the dense row.
            for (word_id, freq) in data[doc_id].items():
                data_batch[(i, word_id)] = freq
            count_batch.append(count[doc_id])
            mask[i] = 1.0
        else:
            # Padding slot: zero row, zero count, mask stays 0.
            count_batch.append(0)
    if labels:
        return (data_batch, count_batch, mask, label_batch)
    return (data_batch, count_batch, mask)
class Slice(Sentence):
    """A Sentence that remembers its word offset within a larger sequence."""

    def __init__(self, *args, **kwargs):
        # Extract our own keyword before delegating the rest to Sentence.
        self._start = kwargs.pop('start', 0)
        Sentence.__init__(self, *args, **kwargs)

    def start(self):
        """Index of this slice's first word in the parent sequence."""
        return self._start

    def stop(self):
        """Index one past this slice's last word in the parent sequence."""
        return self._start + len(self.words)
@register_model  # NOTE(review): the decorator was garbled to '_model' in this copy; restored per the timm registry convention — confirm against the original file.
def tresnet_m_448(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """TResNet-M variant trained/evaluated at 448x448 input resolution."""
    default_cfg = default_cfgs['tresnet_m_448']
    model = TResNet(layers=[3, 4, 11, 3], num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def init(name: str, sim: str=None, dataset: str=None, config: Dict=None, api_key: str=None) -> None:
    """Initialise the module-level logger and Experiment, then create it remotely.

    Raises:
        PermissionError: when no api_key is supplied.
    """
    if api_key is None:
        raise PermissionError('please input zpy api_key')
    global logger, experiment
    # Module-level logger shared by the rest of the package.
    logger = logging.getLogger(__name__)
    new_experiment = Experiment(name=name, sim=sim, dataset=dataset, config=config, api_key=api_key)
    experiment = new_experiment
    # Register the experiment with the backend.
    new_experiment._create()
def test_uniform_dequantizer_returns_correct_shape():
    """Forward and inverse passes of UniformDequantizer preserve the input shape
    and produce one log-determinant per batch element (module-level `x` fixture)."""
    (init_fun, bijector_info) = UniformDequantizer([1, 3, 4])
    (params, forward_fun, inverse_fun) = init_fun(random.PRNGKey(0), x.shape[(- 1)])
    conditions = jnp.zeros((3, 1))
    (fwd_outputs, fwd_log_det) = forward_fun(params, x, conditions=conditions)
    assert (fwd_outputs.shape == x.shape)
    # One log-det per example, i.e. batch-shaped.
    assert (fwd_log_det.shape == x.shape[:1])
    (inv_outputs, inv_log_det) = inverse_fun(params, x, conditions=conditions)
    assert (inv_outputs.shape == x.shape)
    assert (inv_log_det.shape == x.shape[:1])
def test_setattr():
    """Attribute and item writes on Config land in _cfg_dict and read back both ways."""
    cfg = Config()
    cfg.item1 = [1, 2]
    cfg.item2 = {'a': 0}
    cfg['item5'] = {'a': {'b': None}}
    # Every write is visible through the backing dict...
    for key, expected in (('item1', [1, 2]), ('item2', {'a': 0}), ('item5', {'a': {'b': None}})):
        assert cfg._cfg_dict[key] == expected
    # ...and through (nested) attribute access.
    assert cfg.item1 == [1, 2]
    assert cfg.item2.a == 0
    assert cfg.item5.a.b is None
class PoissonProcess(PointProcess):
    """Homogeneous Poisson process with a single trainable ``intensity`` parameter."""

    def __init__(self, intensity: float, seed=43):
        super().__init__(seed)
        self.init_params(intensity)

    def init_params(self, intensity: float):
        """Validate and register the intensity as a trainable torch Parameter."""
        if (not (intensity > 0)):
            raise ValueError('parameters must be positive.')
        self.params = ParameterDict()
        self.params['intensity'] = Parameter(torch.tensor(float(intensity)), requires_grad=True)

    def conditional_intensity(self, time: float, history: EventSeq) -> torch.Tensor:
        # Constant by definition of a homogeneous Poisson process.
        return self.params['intensity']

    def event_impact(self):
        # Past events never change the intensity.
        return 0

    def sample(self, history: EventSeq, **kwargs) -> EventSeq:
        """Append one sampled event time to the history."""
        return history.append(self.sample_candidate(history))

    def sample_seq(self, history: EventSeq, end_time: float, max_event=float('inf'), **kwargs) -> EventSeq:
        """Sample events forward until end_time (or max_event events), extending history in place."""
        n_sampled_event = 0
        while True:
            candidate_time_stamp = self.sample_candidate(history)
            if (candidate_time_stamp > end_time):
                break
            history.append(candidate_time_stamp)
            n_sampled_event += 1
            if (n_sampled_event >= max_event):
                break
        if (max_event == float('inf')):
            # Only extend the observation window when sampling ran to end_time
            # (i.e. was not truncated by max_event).
            history.obs_period[1] = end_time
        return history

    def sample_candidate(self, history: EventSeq) -> float:
        # Next arrival = current observation end + exponential inter-arrival time.
        # NOTE(review): assumes history.append keeps obs_period[1] current — confirm EventSeq semantics.
        return (history.obs_period[1] + self.rng.exponential(scale=(1.0 / float(self.params['intensity']))))

    def neg_ll(self, history: EventSeq, **kwargs) -> torch.Tensor:
        """Negative log-likelihood: -N*log(lambda) + lambda*T for N events over window length T."""
        return (((- len(history)) * torch.log(self.params['intensity'])) + ((history.obs_period[1] - history.obs_period[0]) * self.params['intensity']))
def compare_graphs_undirected(true_graph, estimated_graph):
    """Compare an estimated graph against the true graph as undirected adjacency.

    Both inputs are square weighted adjacency matrices; an edge exists where
    the entry is non-zero. Each graph is symmetrized and reduced to its upper
    triangle so each undirected edge is counted once.

    Returns [tp, fp, tn, fn] where:
      tp - undirected edges present in both graphs,
      fp - undirected edges only in the estimate,
      tn - upper-triangle positions empty in both,
      fn - directed true-edge count minus tp (kept from the original formula).
    """
    true_graph = np.asarray(true_graph)
    estimated_graph = np.asarray(estimated_graph)
    # Directed non-zero count, as in the original implementation.
    num_edges = int(np.count_nonzero(true_graph))
    # Binarize, symmetrize, and keep the upper triangle (vectorized instead of
    # the original O(n^2) Python list comprehensions).
    tam = (true_graph != 0.0).astype(int)
    tam_undir = np.triu(((tam + tam.T) != 0).astype(int))
    eam = (estimated_graph != 0.0).astype(int)
    eam_undir = np.triu(((eam + eam.T) > 0).astype(int))
    combined = tam_undir + eam_undir
    tp = int(np.count_nonzero(combined == 2))
    fp = int(np.count_nonzero((tam_undir - eam_undir) < 0))
    tn = int(np.count_nonzero(combined == 0))
    fn = num_edges - tp
    return [tp, fp, tn, fn]
def stack_states(rssm_states: list, dim):
    """Stack a sequence of RSSMState objects field-wise along `dim` into one RSSMState."""
    def _stack(field):
        # Collect one field from every state and stack along the requested dim.
        return torch.stack([state_item for state_item in (getattr(s, field) for s in rssm_states)], dim=dim)
    return RSSMState(_stack('mean'), _stack('std'), _stack('stoch'), _stack('deter'))
def generate_trees(source, tree_reader=ptb_read_tree, max_sents=(- 1), return_empty=False, allow_empty_labels=False, allow_empty_words=False):
    """Lazily yield parse trees read from a filename or an open file-like object.

    A reader result of 'Empty' yields None (placeholder) and keeps reading; a
    reader result of None ends iteration. When max_sents > 0, at most that many
    real trees are yielded.
    """
    if type(source) == type(''):
        # A plain string is treated as a path to open; anything else is used as-is.
        source = open(source)
    n_yielded = 0
    while True:
        tree = tree_reader(source, return_empty, allow_empty_labels, allow_empty_words)
        if tree == 'Empty':
            # Empty parse placeholder: emit None but continue reading.
            yield None
        elif tree is None:
            # Reader exhausted.
            break
        else:
            yield tree
            n_yielded += 1
            if 0 < max_sents <= n_yielded:
                break
class SupervisedTrainer(object):
    """Generic supervised training loop driven by global `tt.arg` configuration.

    Handles optimizer/criterion defaults, checkpoint resume, per-step logging,
    and periodic model saving.
    """

    def __init__(self, model, data_loader, optimizer=None, criterion=None):
        # Global step counter; restored from a checkpoint in run().
        self.global_step = 0
        self.model = model.to(tt.arg.device)
        self.data_loader = data_loader
        # Fall back to Adam / cross-entropy when not supplied by the caller.
        self.optimizer = (optimizer or optim.Adam(model.parameters()))
        self.criterion = (criterion or nn.CrossEntropyLoss())

    def train(self, inputs):
        """Run one optimization step on a single (x, y) batch and log loss/accuracy."""
        (x, y) = inputs
        if tt.arg.cuda:
            # Wrap per-call for multi-GPU forward; gradients still flow to self.model.
            z = nn.DataParallel(self.model)(x)
        else:
            z = self.model(x)
        loss = self.criterion(z, y)
        acc = tt.accuracy(z, y)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        tt.log_scalar('loss', loss, self.global_step)
        tt.log_scalar('acc', acc, self.global_step)

    def epoch(self, ep_no=None):
        # Hook for subclasses: called once at the end of every epoch.
        pass

    def run(self):
        """Train for tt.arg.epoch epochs, resuming from the last saved checkpoint."""
        tt.arg.experiment = (tt.arg.experiment or self.model.__class__.__name__.lower())
        self.global_step = self.model.load_model()
        # Derive (epoch, step-within-epoch) from the restored global step.
        (epoch, min_step) = divmod(self.global_step, len(self.data_loader))
        while (epoch < (tt.arg.epoch or 1)):
            epoch += 1
            for (step, inputs) in enumerate(self.data_loader, (min_step + 1)):
                if (step > len(self.data_loader)):
                    break
                self.global_step += 1
                # Re-apply the (possibly updated) global learning rate every step.
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = tt.arg.lr
                if (type(inputs) in [list, tuple]):
                    self.train([tt.var(d) for d in inputs])
                else:
                    self.train(tt.var(inputs))
                tt.log_weight(self.model, global_step=self.global_step)
                tt.log_gradient(self.model, global_step=self.global_step)
                tt.log_step(epoch=epoch, global_step=self.global_step, max_epoch=(tt.arg.epoch or 1), max_step=len(self.data_loader))
                # Periodic (non-forced) save; save_model decides the interval.
                self.model.save_model(self.global_step)
            self.epoch(epoch)
        # Final forced save at the end of training.
        self.model.save_model(self.global_step, force=True)
def train_and_eval(model, train_loader, eval_loader, tb_log, ckpt_dir, log_f):
    """Train `model` for args.epochs with step-decayed LR; periodically evaluate and checkpoint."""
    model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    def lr_lbmd(cur_epoch):
        # Multiply the decay factor once for every decay milestone already passed,
        # clamped so the effective LR never drops below args.lr_clip.
        cur_decay = 1
        for decay_step in args.decay_step_list:
            if (cur_epoch >= decay_step):
                cur_decay = (cur_decay * args.lr_decay)
        return max(cur_decay, (args.lr_clip / args.lr))
    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
    total_it = 0
    for epoch in range(1, (args.epochs + 1)):
        # NOTE(review): scheduler stepped before the epoch's optimizer steps —
        # pre-1.1 PyTorch ordering; confirm intended with the project's torch version.
        lr_scheduler.step(epoch)
        total_it = train_one_epoch(model, train_loader, optimizer, epoch, lr_scheduler, total_it, tb_log, log_f)
        if ((epoch % args.ckpt_save_interval) == 0):
            with torch.no_grad():
                avg_iou = eval_one_epoch(model, eval_loader, epoch, tb_log, log_f)
            ckpt_name = os.path.join(ckpt_dir, ('checkpoint_epoch_%d' % epoch))
            save_checkpoint(model, epoch, ckpt_name)
def _create_learning_rate(learning_rate_config, global_summaries, global_step):
    """Build a learning rate (constant or exponentially decayed) from the proto config.

    Adds a 'Learning_Rate' scalar summary to `global_summaries` and raises
    ValueError for unsupported learning-rate oneof types.
    """
    learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
    learning_rate = None
    if (learning_rate_type == 'constant_learning_rate'):
        learning_rate = learning_rate_config.constant_learning_rate.learning_rate
    elif (learning_rate_type == 'exponential_decay_learning_rate'):
        decay_config = learning_rate_config.exponential_decay_learning_rate
        learning_rate = tf.train.exponential_decay(
            decay_config.initial_learning_rate,
            global_step,
            decay_config.decay_steps,
            decay_config.decay_factor,
            staircase=decay_config.staircase)
    if (learning_rate is None):
        raise ValueError(('Learning_rate %s not supported.' % learning_rate_type))
    global_summaries.add(tf.summary.scalar('Learning_Rate', learning_rate))
    return learning_rate
def get_model_file(model_name, local_model_store_dir_path=os.path.join('~', '.torch', 'models')):
    """Return the local path to a pretrained model file, downloading it if needed.

    Looks up (error tag, sha1, release tag) for `model_name`, checks whether a
    sha1-valid copy already exists under `local_model_store_dir_path`, and
    otherwise downloads the zipped weights from the imgclsmob GitHub release,
    extracts them, and verifies the hash.

    Raises ValueError when the downloaded file fails the sha1 check.
    """
    (error, sha1_hash, repo_release_tag) = get_model_name_suffix_data(model_name)
    # File names embed the reported error metric and a short hash for uniqueness.
    short_sha1 = sha1_hash[:8]
    file_name = '{name}-{error}-{short_sha1}.pth'.format(name=model_name, error=error, short_sha1=short_sha1)
    local_model_store_dir_path = os.path.expanduser(local_model_store_dir_path)
    file_path = os.path.join(local_model_store_dir_path, file_name)
    if os.path.exists(file_path):
        if _check_sha1(file_path, sha1_hash):
            # Cached copy is valid: no download needed.
            return file_path
        else:
            logging.warning('Mismatch in the content of model file detected. Downloading again.')
    else:
        logging.info('Model file not found. Downloading to {}.'.format(file_path))
    if (not os.path.exists(local_model_store_dir_path)):
        os.makedirs(local_model_store_dir_path)
    # Release assets are zipped; download next to the target and extract in place.
    zip_file_path = (file_path + '.zip')
    _download(url='{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip'.format(repo_url=imgclsmob_repo_url, repo_release_tag=repo_release_tag, file_name=file_name), path=zip_file_path, overwrite=True)
    with zipfile.ZipFile(zip_file_path) as zf:
        zf.extractall(local_model_store_dir_path)
    os.remove(zip_file_path)
    if _check_sha1(file_path, sha1_hash):
        return file_path
    else:
        raise ValueError('Downloaded file has different hash. Please try again.')
class PPO_BLIP():
    """PPO trainer for a quantized (BLIP) continual-learning actor-critic.

    Adds Fisher-information estimation and per-layer bit re-allocation on top
    of standard clipped-PPO updates.
    """

    def __init__(self, actor_critic, clip_param, ppo_epoch, num_mini_batch, value_loss_coef, entropy_coef, lr=None, eps=None, max_grad_norm=None, use_clipped_value_loss=True):
        self.actor_critic = actor_critic
        self.clip_param = clip_param  # PPO ratio-clipping epsilon
        self.ppo_epoch = ppo_epoch  # passes over each rollout per update
        self.num_mini_batch = num_mini_batch
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        self.use_clipped_value_loss = use_clipped_value_loss
        self.lr = lr
        self.eps = eps
        # Number of tasks for which Fisher/EWC statistics have been accumulated.
        self.EWC_task_count = 0

    def renew_optimizer(self):
        # Rebuild the optimizer (e.g. after switching to a new task).
        self.optimizer = optim.Adam(self.actor_critic.parameters(), lr=self.lr, eps=self.eps)

    def update(self, rollouts, task_num):
        """Run one PPO update over `rollouts`; returns averaged (value, action, entropy) losses."""
        advantages = (rollouts.returns[:(- 1)] - rollouts.value_preds[:(- 1)])
        # Normalize advantages for numerical stability.
        advantages = ((advantages - advantages.mean()) / (advantages.std() + 1e-05))
        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        for e in range(self.ppo_epoch):
            if self.actor_critic.is_recurrent:
                data_generator = rollouts.recurrent_generator(advantages, self.num_mini_batch)
            else:
                data_generator = rollouts.feed_forward_generator(advantages, self.num_mini_batch)
            for sample in data_generator:
                (obs_batch, recurrent_hidden_states_batch, actions_batch, value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ) = sample
                (values, action_log_probs, dist_entropy, _) = self.actor_critic.evaluate_actions(obs_batch, recurrent_hidden_states_batch, masks_batch, actions_batch, task_num)
                # Importance ratio between the current and behavior policies.
                ratio = torch.exp((action_log_probs - old_action_log_probs_batch))
                surr1 = (ratio * adv_targ)
                surr2 = (torch.clamp(ratio, (1.0 - self.clip_param), (1.0 + self.clip_param)) * adv_targ)
                # Clipped surrogate objective (PPO).
                action_loss = (- torch.min(surr1, surr2).mean())
                if self.use_clipped_value_loss:
                    # Clip the value update around the old predictions; take the worse loss.
                    value_pred_clipped = (value_preds_batch + (values - value_preds_batch).clamp((- self.clip_param), self.clip_param))
                    value_losses = (values - return_batch).pow(2)
                    value_losses_clipped = (value_pred_clipped - return_batch).pow(2)
                    value_loss = (0.5 * torch.max(value_losses, value_losses_clipped).mean())
                else:
                    value_loss = (0.5 * (return_batch - values).pow(2).mean())
                self.optimizer.zero_grad()
                (((value_loss * self.value_loss_coef) + action_loss) - (dist_entropy * self.entropy_coef)).backward()
                nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                # Keep quantized layers within their representable range after each step.
                for m in self.actor_critic.features.modules():
                    if (isinstance(m, Conv2d_Q) or isinstance(m, Linear_Q)):
                        m.clipping()
                value_loss_epoch += value_loss.item()
                action_loss_epoch += action_loss.item()
                dist_entropy_epoch += dist_entropy.item()
        num_updates = (self.ppo_epoch * self.num_mini_batch)
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        return (value_loss_epoch, action_loss_epoch, dist_entropy_epoch)

    def ng_post_processing(self, rollouts, task_id):
        """After finishing a task: estimate Fisher info, then re-allocate bits and sync weights."""
        self.estimate_fisher(rollouts, task_id)
        for m in self.actor_critic.features.modules():
            if (isinstance(m, Conv2d_Q) or isinstance(m, Linear_Q)):
                # C = 0.5/ln(2) converts nats to bits in the information-based bit allocation.
                m.update_bits(task=task_id, C=(0.5 / math.log(2)))
                m.sync_weight()
                m.update_fisher(task=task_id)

    def estimate_fisher(self, rollouts, task_num):
        """Estimate empirical Fisher information of quantized layers from rollout data."""
        advantages = (rollouts.returns[:(- 1)] - rollouts.value_preds[:(- 1)])
        advantages = ((advantages - advantages.mean()) / (advantages.std() + 1e-05))

        def _save_state(module, input, results):
            # Forward hook: remember the layer input activation.
            module._state = input[0].clone()

        def _save_costate(module, grad_input, grad_output):
            # Backward hook: remember the gradient w.r.t. the layer output.
            module._costate = grad_output[0].clone()
        for m in self.actor_critic.features.modules():
            if (isinstance(m, Linear_Q) or isinstance(m, Conv2d_Q)):
                m.handle_forward = m.register_forward_hook(_save_state)
                m.handle_backward = m.register_backward_hook(_save_costate)
        self.actor_critic.eval()
        total_data = 0
        num_round = 1
        for _ in range(num_round):
            if self.actor_critic.is_recurrent:
                data_generator = rollouts.recurrent_generator(advantages, 32)
            else:
                data_generator = rollouts.feed_forward_generator(advantages, 32)
            for sample in data_generator:
                (obs_batch, recurrent_hidden_states_batch, _, _, _, masks_batch, _, _) = sample
                batch_size_t = obs_batch.shape[0]
                total_data += batch_size_t
                self.actor_critic.zero_grad()
                (actor_features, _) = self.actor_critic.features(obs_batch, recurrent_hidden_states_batch, masks_batch)
                batch_action_dist = self.actor_critic.dist[task_num](actor_features)
                # Fisher uses log-prob gradients of actions sampled from the current policy.
                sampled_actions = batch_action_dist.sample()
                sampled_action_log_probs = batch_action_dist.log_probs(sampled_actions)
                (- sampled_action_log_probs.mean()).backward()
                update_fisher_exact(self.actor_critic)
                self.actor_critic.zero_grad()
        for m in self.actor_critic.features.modules():
            if (isinstance(m, Linear_Q) or isinstance(m, Conv2d_Q)):
                # Average the accumulated Fisher over all samples and detach the hooks.
                m.Fisher_w /= total_data
                if (m.bias is not None):
                    m.Fisher_b /= total_data
                m.handle_forward.remove()
                m.handle_backward.remove()
        self.actor_critic.train()
class BertweetTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for BERTweet, with optional tweet normalization.

    Restores literals garbled in this copy (per the upstream HuggingFace
    BertweetTokenizer): the '@' user-handle handling, the 'http'/'www' URL
    check, the curly-quote/ellipsis punctuation map, and the '@@ ' BPE
    continuation marker in `bpe` and `convert_tokens_to_string`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, normalization=False, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
        super().__init__(max_len=128, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
        try:
            # `emoji` is optional; without it emojis are left untouched.
            from emoji import demojize
            self.demojizer = demojize
        except ImportError:
            logger.warning('emoji is not installed, thus not converting emoticons or emojis into text. Please install emoji: pip3 install emoji')
            self.demojizer = None
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        # Special tokens occupy the first four vocabulary ids (fairseq convention).
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:(- 1)]
        merges = [tuple(merge.split()[:(- 1)]) for merge in merges]
        # Earlier merges have lower rank (higher priority).
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
        self.normalization = normalization
        self.tweetPreprocessor = TweetTokenizer()
        # Map curly apostrophe and ellipsis to their ASCII equivalents.
        self.special_puncts = {'\u2019': "'", '\u2026': '...'}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add <s> ... </s> (and </s></s> between pairs) around the token ids."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """BERTweet does not use token types: the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])

    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to `token`, returning '@@ '-joined sub-units."""
        if (token in self.cache):
            return self.cache[token]
        word = tuple(token)
        # Mark the end of the word so end-of-word merges differ from inner merges.
        word = tuple((list(word[:(- 1)]) + [(word[(- 1)] + '</w>')]))
        pairs = get_pairs(word)
        if (not pairs):
            return token
        while True:
            # Greedily apply the lowest-ranked (earliest learned) merge.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        # Join sub-units with the '@@ ' continuation marker and drop the '</w>' suffix.
        word = '@@ '.join(word)
        word = word[:(- 4)]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` on whitespace and BPE-encode each chunk."""
        if self.normalization:
            text = self.normalizeTweet(text)
        split_tokens = []
        words = re.findall('\\S+\\n?', text)
        for token in words:
            split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens

    def normalizeTweet(self, tweet):
        """Normalize a raw tweet: punctuation, tokenization, contractions, a.m./p.m."""
        for punct in self.special_puncts:
            tweet = tweet.replace(punct, self.special_puncts[punct])
        tokens = self.tweetPreprocessor.tokenize(tweet)
        normTweet = ' '.join([self.normalizeToken(token) for token in tokens])
        # Standardize contraction spacing.
        normTweet = normTweet.replace('cannot ', 'can not ').replace("n't ", " n't ").replace("n 't ", " n't ").replace("ca n't", "can't").replace("ai n't", "ain't")
        normTweet = normTweet.replace("'m ", " 'm ").replace("'re ", " 're ").replace("'s ", " 's ").replace("'ll ", " 'll ").replace("'d ", " 'd ").replace("'ve ", " 've ")
        normTweet = normTweet.replace(' p . m .', ' p.m.').replace(' p . m ', ' p.m ').replace(' a . m .', ' a.m.').replace(' a . m ', ' a.m ')
        return ' '.join(normTweet.split())

    def normalizeToken(self, token):
        """Map user handles to @USER, URLs to HTTPURL, and demojize single emojis."""
        lowercased_token = token.lower()
        if token.startswith('@'):
            return '@USER'
        elif (lowercased_token.startswith('http') or lowercased_token.startswith('www')):
            return 'HTTPURL'
        elif (len(token) == 1):
            if (token in self.special_puncts):
                return self.special_puncts[token]
            if (self.demojizer is not None):
                return self.demojizer(token)
            else:
                return token
        else:
            return token

    def _convert_token_to_id(self, token):
        """Token string -> id, falling back to the unknown token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Id -> token string, falling back to the unknown token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Undo BPE: remove '@@ ' continuation markers and rejoin with spaces."""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory):
        """Copy the vocab and merges files into `save_directory`; returns their paths."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        out_merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])
        # Only copy when source and destination differ.
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        if (os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file)):
            copyfile(self.merges_file, out_merge_file)
        return (out_vocab_file, out_merge_file)

    def add_from_file(self, f):
        """Load a fairseq-style '<token> <count>' dictionary file (or open handle) into the encoder."""
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(' ')
            if (idx == (- 1)):
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
def query(string, service=GOOGLE, **kwargs):
    """Search `string` with the engine named by `service`.

    The kwargs 'license', 'throttle' and 'language' are passed to the engine
    constructor; all remaining kwargs go to its search() method.

    Raises SearchEngineError for an unknown service name. (The original
    implementation detected unknown services by catching UnboundLocalError
    around the whole search call, which would also swallow UnboundLocalError
    raised inside the engine itself; the explicit lookup below avoids that.)
    """
    service = service.lower()
    engine = None
    # Alias table replaces the original chain of independent `if` statements.
    for engine_cls, aliases in (
            (Google, (GOOGLE, 'google', 'g')),
            (Yahoo, (YAHOO, 'yahoo', 'y!')),
            (Bing, (BING, 'bing')),
            (DuckDuckGo, (DUCKDUCKGO, 'duckduckgo', 'ddg')),
            (Twitter, (TWITTER, 'twitter', 'tw')),
            (Facebook, (FACEBOOK, 'facebook', 'fb')),
            (Wikipedia, (WIKIPEDIA, 'wikipedia', 'wp')),
            (Wikia, (WIKIA, 'wikia')),
            (DBPedia, (DBPEDIA, 'dbpedia', 'dbp')),
            (Flickr, (FLICKR, 'flickr'))):
        if (service in aliases):
            engine = engine_cls
    if (engine is None):
        raise SearchEngineError(("unknown search engine '%s'" % service))
    # Constructor-only options are separated from search() options.
    kw = {}
    for a in ('license', 'throttle', 'language'):
        if (a in kwargs):
            kw[a] = kwargs.pop(a)
    return engine(kw).search(string, **kwargs)
# NOTE(review): the bare `_config` below looks like the tail of a truncated
# decorator (e.g. sacred's `@ex.named_config` / `@experiment.config`) lost in
# extraction — confirm against the original file.
_config
def merge_mlp():
    """Config override: use an MLP merge operator in the LifelongSidetuneNetwork learner.

    `cfg` is intentionally a local: config-capturing decorators (e.g. sacred)
    read function locals rather than a return value.
    """
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'merge_method': 'merge_operators.MLP'}}}
def train(args, model, train_loader, optimizer, epoch):
    """Train `model` for one epoch with gradient accumulation and optional fp16 (apex amp)."""
    model.train()
    iteration = 0
    loss_cum = 0
    with tqdm(enumerate(train_loader), total=len(train_loader)) as t:
        for (idx, batch) in t:
            iteration += 1
            (conv_seq, label_seq, sentence_index, token_type_seq, input_mask) = batch
            # Move every tensor to the target device; sentence_index is a list of tensors.
            (conv_seq, label_seq, sentence_index, token_type_seq, input_mask) = (conv_seq.to(args.device), label_seq.to(args.device), [s.to(args.device) for s in sentence_index], token_type_seq.to(args.device), input_mask.to(args.device))
            (loss, loss_gen, loss_plan, loss_bow) = model(conv_seq, label_seq, sentence_index, token_type_seq, input_mask)
            # Scale so accumulated gradients average over the accumulation window.
            loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                # apex amp handles loss scaling for mixed precision.
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            if ((iteration % args.gradient_accumulation_steps) == 0):
                # Step only once per accumulation window, clipping the right parameter set.
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
                optimizer.step()
                optimizer.zero_grad()
            loss_cum += loss.item()
            t.set_description(('Epoch %i' % epoch))
            # Progress bar shows the running mean of the (scaled) loss.
            t.set_postfix(loss=(loss_cum / (idx + 1)))
def parse_args():
    """Parse CLI arguments for GLUE-style text-classification fine-tuning.

    Supports training/quantization/benchmark modes (Intel Extension for
    Transformers). Requires either --task_name or a train/validation file;
    csv/json data files are validated by extension.
    """
    parser = argparse.ArgumentParser(description='Finetune a transformers model on a text classification task')
    # Data source: either a named GLUE task or explicit csv/json files.
    parser.add_argument('--task_name', type=str, default=None, help='The name of the glue task to train on.', choices=list(task_to_keys.keys()))
    parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
    parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument('--max_length', type=int, default=128, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_lengh` is passed.')
    parser.add_argument('--pad_to_max_length', action='store_true', help='If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.')
    parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True)
    parser.add_argument('--use_slow_tokenizer', action='store_true', help='If passed, will use a slow tokenizer (not backed by the Tokenizers library).')
    # Optimization hyperparameters.
    parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
    parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
    parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
    parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
    parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
    parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
    parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
    # Hub publishing options.
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
    parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
    parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
    # Quantization / benchmarking options (Intel Extension for Transformers).
    parser.add_argument('--tune', action='store_true', help='tune a best model with Intel Extension for Transformers.')
    parser.add_argument('--quantization_approach', type=str, default='PostTrainingStatic', help='Quantization approach. Supported approach are PostTrainingStatic, PostTrainingDynamic and QuantizationAwareTraining.')
    parser.add_argument('--metric_name', type=str, default=None, help='Metric name used for the tuning strategy.')
    parser.add_argument('--is_relative', type=bool, default=True, help='Metric tolerance model, expected to be relative or absolute.')
    parser.add_argument('--perf_tol', type=float, default=0.01, help='Performance tolerance when optimizing the model.')
    parser.add_argument('--benchmark', action='store_true', help='run benchmark.')
    parser.add_argument('--int8', action='store_true', help='run benchmark with int8 model.')
    parser.add_argument('--accuracy_only', action='store_true', help='Whether to only test accuracy for model tuned by Neural Compressor.')
    parser.add_argument('-i', '--iter', default=0, type=int, help='For accuracy measurement only.')
    parser.add_argument('-w', '--warmup_iter', default=1, type=int, help='For benchmark measurement only.')
    args = parser.parse_args()
    # Validate the data-source combination and file extensions.
    if ((args.task_name is None) and (args.train_file is None) and (args.validation_file is None)):
        raise ValueError('Need either a task name or a training/validation file.')
    else:
        if (args.train_file is not None):
            extension = args.train_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
        if (args.validation_file is not None):
            extension = args.validation_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
    if args.push_to_hub:
        assert (args.output_dir is not None), 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.'
    return args
def SEResNeXt101(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=False, stride_size=2, init_filters=64, repetitions=(3, 4, 23, 3), **kwargs):
    """Build the SE-ResNeXt-101 architecture by delegating to the generic SENet constructor."""
    senet_kwargs = dict(
        input_shape=input_shape,
        input_tensor=input_tensor,
        include_top=include_top,
        classes=classes,
        weights=weights,
        stride_size=stride_size,
        init_filters=init_filters,
        repetitions=repetitions,
    )
    # Extra kwargs are forwarded untouched; duplicates still raise TypeError as before.
    return SENet(MODELS_PARAMS['seresnext101'], **senet_kwargs, **kwargs)
class BERTScorer():
    """BERTScore: embedding-based similarity (P, R, F1) between candidate and reference texts."""

    # Gaussian width for the optional length-difference penalty applied to F1.
    PENALTY_SIGMA = 6.0

    def __init__(self, refs=None, model_type=None, num_layers=None, verbose=False, idf=False, batch_size=16, nthreads=2, all_layers=False, lang=None, rescale_with_baseline=False, penalty=False):
        assert ((lang is not None) or (model_type is not None)), 'Either lang or model_type should be specified'
        if rescale_with_baseline:
            assert (lang is not None), 'Need to specify Language when rescaling with baseline'
        if (model_type is None):
            # Pick a default model (and layer count) from the language tables.
            lang = lang.lower()
            model_type = lang2model[lang]
        if (num_layers is None):
            num_layers = model2layers[model_type]
        if model_type.startswith('scibert'):
            # scibert weights are fetched/cached separately from the HF hub.
            tokenizer = AutoTokenizer.from_pretrained(cache_scibert(model_type))
        else:
            tokenizer = AutoTokenizer.from_pretrained(model_type)
        model = get_model(model_type, num_layers, all_layers)
        if (not idf):
            # Uniform weighting; special tokens are zeroed so they never contribute.
            idf_dict = defaultdict((lambda : 1.0))
            idf_dict[tokenizer.sep_token_id] = 0
            idf_dict[tokenizer.cls_token_id] = 0
        elif isinstance(idf, dict):
            if verbose:
                print('using predefined IDF dict...')
            idf_dict = idf
        else:
            # Build IDF statistics from the provided reference corpus.
            if verbose:
                print('preparing IDF dict...')
            start = time.perf_counter()
            idf_dict = get_idf_dict(refs, tokenizer, nthreads=nthreads)
            if verbose:
                print('done in {:.2f} seconds'.format((time.perf_counter() - start)))
        self.batch_size = batch_size
        self.verbose = verbose
        self.all_layers = all_layers
        self.penalty = penalty
        self.tokenizer = tokenizer
        self.model = model
        self.idf_dict = idf_dict
        self.device = 'cpu'
        self.baselines = None
        if rescale_with_baseline:
            # NOTE(review): hard-coded (P, R, F) baseline values — presumably tied to
            # the default model/language; confirm against the baseline tables.
            self.baselines = torch.tensor([0.6666, 0.6666, 0.6662])

    def cuda(self):
        """Move scoring to the first CUDA device; returns self for chaining."""
        self.device = 'cuda:0'
        self.model.cuda()
        return self

    def score(self, cands, refs):
        """Score candidate sentences against references; returns a (P, R, F1) tensor triple."""
        assert (len(cands) == len(refs))
        if self.verbose:
            print('calculating scores...')
            start = time.perf_counter()
        all_preds = bert_cos_score_idf(self.model, refs, cands, self.tokenizer, self.idf_dict, verbose=self.verbose, device=self.device, batch_size=self.batch_size, all_layers=self.all_layers).cpu()
        if (self.baselines is not None):
            # Linear rescaling so random pairings score near 0 and perfect matches near 1.
            all_preds = ((all_preds - self.baselines) / (1 - self.baselines))
        out = (all_preds[(..., 0)], all_preds[(..., 1)], all_preds[(..., 2)])
        if self.penalty:
            # Penalize F1 by a Gaussian of the token-length difference between cand and ref.
            for (idx, (cand, ref)) in enumerate(zip(cands, refs)):
                toks1 = self.tokenizer.tokenize(cand)
                toks2 = self.tokenizer.tokenize(ref)
                penalty = (np.e ** ((- ((len(toks1) - len(toks2)) ** 2)) / (2 * (self.PENALTY_SIGMA ** 2))))
                out[(- 1)][idx] *= penalty
        if self.verbose:
            time_diff = (time.perf_counter() - start)
            print(f'done in {time_diff:.2f} seconds, {(len(refs) / time_diff):.2f} sentences/sec')
        return out
def get_coo_indexes(lil):
    """Convert a list-of-lists sparse representation to COO-style index lists.

    Each element of `lil` is either a list of column indices or a single
    scalar column index for that row. Returns (rows, cols) parallel lists.
    """
    rows = []
    cols = []
    for i, el in enumerate(lil):
        # Wrap scalars so a row may be given as a single column index.
        # (isinstance replaces the original `type(el) != list`, so list
        # subclasses are treated as lists too.)
        if not isinstance(el, list):
            el = [el]
        for j in el:
            rows.append(i)
            cols.append(j)
    return (rows, cols)
def get_scores(trainer, problems):
    """Run one validation epoch and return timing-annotated (metrics, logging).

    When running on multiple devices, metrics are reduced across devices and
    logging is fetched to host; logging['score_matrix'] is concatenated along
    the last axis. (The original had a second, unreachable duplicate
    `return` statement, removed here.)
    """
    t = time.time()
    (metrics, logging) = trainer.run_validation_epoch(trainer.training_state, problems)
    # Block on async device work so the wall-clock measurement is meaningful.
    jax.tree_map((lambda x: x.block_until_ready()), metrics)
    metrics['total_time'] = (time.time() - t)
    if (trainer.config.num_devices > 1):
        metrics = reduce_from_devices(metrics, axis=0)
        logging = fetch_from_devices(logging, as_numpy=True)
    logging['score_matrix'] = np.concatenate(logging['score_matrix'], axis=(- 1))
    return (metrics, logging)
# NOTE(review): the bare `_torch` below looks like the tail of a truncated
# decorator (likely transformers' `@require_torch`) lost in extraction —
# confirm against the original file.
_torch
class TestConversionUtils(unittest.TestCase):
    """Tests for Marian/OPUS model-name conversion helpers."""

    def test_renaming_multilingual(self):
        # '+'-joined Chinese-variant groups collapse to the single code 'ZH';
        # other '+' pairs become '_'-joined; simple names pass through unchanged.
        old_names = ['opus-mt-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi', 'opus-mt-cmn+cn-fi', 'opus-mt-en-de', 'opus-mt-en-de']
        expected = ['opus-mt-ZH-fi', 'opus-mt-cmn_cn-fi', 'opus-mt-en-de', 'opus-mt-en-de']
        self.assertListEqual(expected, [convert_opus_name_to_hf_name(x) for x in old_names])

    def test_undoing_renaming(self):
        # Round-trip: HF-style names map back to the original OPUS group names.
        hf_names = ['opus-mt-ZH-fi', 'opus-mt-cmn_cn-fi', 'opus-mt-en-de', 'opus-mt-en-de']
        converted_opus_names = [convert_hf_name_to_opus_name(x) for x in hf_names]
        expected_opus_names = ['cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi', 'cmn+cn-fi', 'en-de', 'en-de']
        self.assertListEqual(expected_opus_names, converted_opus_names)
def get_poi_xy():
    """Load POI coordinates from the tab-separated file 'poi2_xy'.

    Each line is '<poi_id>\\t<x>\\t<y>'. Returns a dict mapping int poi_id to
    [x, y] floats.

    Raises FileNotFoundError when the file is missing and ValueError on
    malformed lines (same as before), but the file handle is now always
    closed via a context manager instead of an explicit close().
    """
    poi2xy = {}
    # Stream line-by-line instead of readlines(): same result, no full-file buffer.
    with open('poi2_xy') as f_i:
        for line in f_i:
            fields = line.strip().split('\t')
            poi2xy[int(fields[0])] = [float(fields[1]), float(fields[2])]
    return poi2xy
class CAPEval(object):
    """SentEval-style transfer task: Coreference Arc Prediction (binary classification)."""

    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : Coreference Arc Prediction binary Classification*****')
        self.seed = seed
        logging.debug('***** Task path: {}*****\n\n'.format(taskpath))
        train = self.loadFile(os.path.join(taskpath, 'train.txt'))
        valid = self.loadFile(os.path.join(taskpath, 'dev.txt'))
        test = self.loadFile(os.path.join(taskpath, 'test.txt'))
        # Flat list of all word sequences (both spans, all splits) for vocabulary prep.
        self.samples = ((((([item[0][0] for item in train] + [item[1][0] for item in train]) + [item[0][0] for item in valid]) + [item[1][0] for item in valid]) + [item[0][0] for item in test]) + [item[1][0] for item in test])
        self.data = {'train': train, 'valid': valid, 'test': test}

    def do_prepare(self, params, prepare):
        return prepare(params, self.samples)

    def loadFile(self, fpath):
        """Parse a file of blank-line-separated items: two '<sentence>\\t<start>\\t<end>' lines then a label line."""
        data = []
        item = []
        i = 1
        with codecs.open(fpath, 'rb', 'utf-8') as fin:
            for line in fin:
                if (line.strip() == ''):
                    # Blank line terminates the current item.
                    data.append(item)
                    item = []
                    i = 0
                elif (i < 3):
                    # One of the two mention-span lines: sentence plus span offsets.
                    (sentence, start, end) = line.strip().split('\t')
                    start = int(start)
                    end = int(end)
                    words = sentence.split()
                    item.append([words, start, end])
                else:
                    # Third line of the item is the integer label.
                    item.append(int(line.strip()))
                i += 1
        return data

    def run(self, params, batcher):
        """Encode both spans per item, build pairwise features, and fit a SplitClassifier."""
        (self.X, self.y) = ({}, {})
        for key in self.data:
            if (key not in self.X):
                self.X[key] = []
            if (key not in self.y):
                self.y[key] = []
            # Append None as a span placeholder expected by the batcher.
            input1 = [(item[0] + [None]) for item in self.data[key]]
            input2 = [(item[1] + [None]) for item in self.data[key]]
            labels = np.array([item[2] for item in self.data[key]])
            enc_input = []
            n_labels = len(labels)
            for ii in range(0, n_labels, params.batch_size):
                batch1 = input1[ii:(ii + params.batch_size)]
                batch2 = input2[ii:(ii + params.batch_size)]
                if ((len(batch1) == len(batch2)) and (len(batch1) > 0)):
                    (enc1, _) = batcher(params, batch1)
                    (enc2, _) = batcher(params, batch2)
                    # Standard sentence-pair features: [u, v, u*v, |u-v|].
                    enc_input.append(np.hstack((enc1, enc2, (enc1 * enc2), np.abs((enc1 - enc2)))))
                if (((ii * params.batch_size) % (20000 * params.batch_size)) == 0):
                    logging.info(('PROGRESS (encoding): %.2f%%' % ((100 * ii) / n_labels)))
            self.X[key] = np.vstack(enc_input)
            self.y[key] = labels
        config = {'nclasses': 2, 'seed': self.seed, 'usepytorch': params.usepytorch, 'cudaEfficient': True, 'nhid': params.nhid, 'noreg': True}
        config_classifier = copy.deepcopy(params.classifier)
        config_classifier['max_epoch'] = 15
        config_classifier['epoch_size'] = 1
        config['classifier'] = config_classifier
        clf = SplitClassifier(self.X, self.y, config)
        (devacc, testacc) = clf.run()
        logging.debug('Dev acc : {0} Test acc : {1} for PreCo\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc, 'ndev': len(self.data['valid']), 'ntest': len(self.data['test'])}
class TFBytesDataset(TFDataset):
    """TFDataset backed by an RDD of raw strings/bytes, materialized on the JVM side."""

    def get_num_partitions(self):
        return self.train_rdd.getNumPartitions()

    def __init__(self, string_rdd, batch_size, batch_per_thread, hard_code_batch_size=False, validation_string_rdd=None, sequential_order=False, shuffle=True):
        import tensorflow as tf
        # Single scalar string tensor per record.
        tensor_structure = (TensorMeta(dtype=tf.string, shape=(), name='input'),)
        super(TFBytesDataset, self).__init__(tensor_structure, batch_size, batch_per_thread, hard_code_batch_size)
        self.train_rdd = string_rdd
        self.validation_rdd = validation_string_rdd
        self.sequential_order = sequential_order
        self.shuffle = shuffle

    def _get_prediction_data(self):
        # Prediction and evaluation both batch the training RDD with batch_per_thread.
        jvalue = callZooFunc('float', 'createMiniBatchRDDFromStringRDD', self.train_rdd, self.batch_per_thread)
        rdd = jvalue.value().toJavaRDD()
        return rdd

    def _get_evaluation_data(self):
        jvalue = callZooFunc('float', 'createMiniBatchRDDFromStringRDD', self.train_rdd, self.batch_per_thread)
        rdd = jvalue.value().toJavaRDD()
        return rdd

    def _get_training_data(self):
        # Training uses a FeatureSet so ordering/shuffling is controlled JVM-side.
        jvalue = callZooFunc('float', 'createMiniBatchFeatureSetFromStringRDD', self.train_rdd, self.batch_size, self.sequential_order, self.shuffle)
        fs = FeatureSet(jvalue)
        return fs

    def _get_validation_data(self):
        # Returns None when no validation RDD was supplied.
        if (self.validation_rdd is not None):
            jvalue = callZooFunc('float', 'createMiniBatchFeatureSetFromStringRDD', self.validation_rdd, self.batch_size, self.sequential_order, self.shuffle)
            fs = FeatureSet(jvalue)
            return fs
        return None
def parse_sum_group_component(component, line, line_buffer):
    """Parse a sum-group component's ``<Sizes>`` attribute from *line*.

    Consumes the component token and the ``<Sizes>`` token, then normalizes
    the remaining bracketed, space-separated size list into a comma-separated
    string. Returns a dict mapping ``'<Sizes>'`` to that string.
    """
    remainder = consume_token(component, line)
    remainder = consume_token('<Sizes>', remainder)
    # '[a b c]' -> 'a,b,c'
    sizes_csv = remainder.strip().strip('[]').strip().replace(' ', ',')
    return {'<Sizes>': sizes_csv}
# Bug fix: the decorator head was mangled to a bare "(scope='module')",
# which is a syntax error; restore the pytest fixture decorator.
@pytest.fixture(scope='module')
def lapicque_reset_none_instance():
    """Module-scoped fixture: a Lapicque neuron with reset disabled."""
    return snn.Lapicque(beta=0.5, reset_mechanism='none')
def save_df(data: pd.DataFrame, filename: str) -> None:
    """Persist *data* to *filename*, choosing the format from the extension.

    Supports CSV (filename ending in 'csv') and Parquet (ending in
    'parquet'), case-insensitive.

    Raises:
        ValueError: if the filename matches neither supported format.
    """
    lowered = filename.lower()
    if lowered.endswith('csv'):
        data.to_csv(filename)
    elif lowered.endswith('parquet'):
        data.to_parquet(filename)
    else:
        # Bug fix: the original f-string contained a literal '(unknown)'
        # placeholder; report the actual filename so the error is actionable.
        raise ValueError(f'DataFrame filename {filename!r} has an unsupported extension (expected csv or parquet)')
def plasma_data_creator(meta_data, object_store_address, workers_per_node=1, batch_size=1):
    """Return a loader factory over a Plasma-object-store-backed dataset.

    The returned callable takes a (currently unused) ``config`` argument and
    builds a DataLoader. The dataset already yields batches, so DataLoader
    batching/shuffling/collation are all disabled.
    """
    def _make_loader(config):
        dataset = PlasmaNDArrayDataset(meta_data, object_store_address,
                                       workers_per_node, batch_size)
        return DataLoader(dataset, batch_size=None, shuffle=False,
                          collate_fn=None)
    return _make_loader
def main():
    """Aggregate per-image 3D world-coordinate predictions into a JSON file.

    Loads the .npz archive named by ``flags.in_path``, groups the predicted
    coordinates by decoded image path, and dumps the mapping to
    ``flags.out_path``.
    """
    flags = initialize()
    logging.debug(f'Loading from {flags.in_path}')
    archive = np.load(flags.in_path, allow_pickle=True)
    results_3d = {}
    pairs = zip(archive['image_path'], archive['coords3d_pred_world'])
    for raw_path, coords in pairs:
        # Paths are stored as bytes in the archive.
        key = raw_path.decode('utf8')
        results_3d.setdefault(key, []).append(coords.tolist())
    logging.info(f'Writing to file {flags.out_path}')
    util.dump_json(results_3d, flags.out_path)
def load_image(image_file: Union[(PurePath, str)], target_size: Tuple[(int, int)]=None, grayscale: bool=False, img_formats: List[str]=IMG_FORMATS) -> np.ndarray:
    """Load and preprocess an image, returning None on any failure.

    Images whose format is not in *img_formats* are rejected; non-RGB images
    are normalized to RGB before preprocessing.
    """
    try:
        img = Image.open(image_file)
        if img.format not in img_formats:
            logger.warning(f'Invalid image format {img.format}!')
            return None
        if img.mode != 'RGB':
            # Route through RGBA so transparency is flattened consistently.
            img = img.convert('RGBA').convert('RGB')
        return preprocess_image(img, target_size=target_size,
                                grayscale=grayscale)
    except Exception as e:
        # Any decode/preprocess error is treated as an invalid image.
        logger.warning(f'''Invalid image file {image_file}: {e}''')
        return None
def _cast_if_autocast_enabled(*args): if (not torch.is_autocast_enabled()): return args else: return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
def step(params, X, y, opt_state):
    """Run one optimizer step and return (new_params, new_state, loss)."""
    loss_value = mll_loss(params, X, y)
    grad_value = dloss(params, X, y)
    # Step index 0 is passed unconditionally, matching the caller's usage.
    new_state = opt_update(0, grad_value, opt_state)
    new_params = get_params(new_state)
    return (new_params, new_state, loss_value)
class MPTTSModelForCausalLM(TSModelForCausalLM):
    """TSModelForCausalLM variant for MPT-style models.

    Synthesizes an empty KV cache on the first forward pass when
    ``self.use_cache`` is set, and normalizes the traced model's output into
    a ``CausalLMOutputWithPast``.
    """

    def _empty_past(self, batch_size):
        """Build a zero-length past_key_values tuple from the model config."""
        cfg = self.normalized_config
        head_dim = cfg.hidden_size // cfg.num_attention_heads
        shape = [batch_size, cfg.num_attention_heads, 0, head_dim]
        key = torch.empty(size=shape)
        value = torch.empty(size=shape)
        if self.model_dtype is not None:
            key = key.to(self.model_dtype)
            value = value.to(self.model_dtype)
        # The same empty tensors are shared across all layers.
        return tuple((key, value) for _ in range(cfg.num_layers))

    def forward(self, input_ids: torch.LongTensor=None,
                attention_mask: Optional[torch.FloatTensor]=None,
                past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None,
                **kwargs) -> CausalLMOutputWithPast:
        if attention_mask is None:
            # Default: attend over every position.
            attention_mask = torch.ones_like(input_ids)
        model_inputs = {'input_ids': input_ids,
                        'attention_mask': attention_mask}
        if self.use_cache:
            if past_key_values is None:
                past_key_values = self._empty_past(input_ids.shape[0])
            model_inputs['past_key_values'] = past_key_values
        raw = self.model(**model_inputs)
        if isinstance(raw, tuple):
            return CausalLMOutputWithPast(
                logits=raw[0],
                past_key_values=(raw[1] if self.use_cache else None))
        return CausalLMOutputWithPast(
            logits=raw['logits'],
            past_key_values=(raw['past_key_values'] if self.use_cache
                             else None))
def set_degree_of_denominator(deg, vrblvl=0):
    """Set the denominator degree via parameter slot 3.

    A positive *vrblvl* prints a trace line before delegating to
    ``set_parameter_value``; the delegate's result is returned unchanged.
    """
    if vrblvl > 0:
        print('in set_degree_of_denominator, deg :', deg)
    return set_parameter_value(3, deg, vrblvl)
class LRN2D(ZooKerasLayer):
    """Local Response Normalization layer over 2D inputs (Zoo Keras-style).

    Numeric hyperparameters are coerced to float before being handed to the
    JVM-side constructor; *input_shape* is converted to a list when given.
    """

    def __init__(self, alpha=0.0001, k=1.0, beta=0.75, n=5, dim_ordering='th',
                 input_shape=None, **kwargs):
        shape = list(input_shape) if input_shape else None
        super(LRN2D, self).__init__(None, float(alpha), float(k), float(beta),
                                    n, dim_ordering, shape, **kwargs)
# Bug fix: the decorator was mangled to a bare "_arg_scope" token (a syntax
# error at module level); restore TF slim's @add_arg_scope decorator.
# NOTE(review): confirm the original decorator spelling (add_arg_scope vs.
# slim.add_arg_scope) against the file's imports.
@add_arg_scope
def stack_blocks_dense_split(net, blocks, n_branches=1, split_at_block=3,
                             output_stride=None,
                             store_non_strided_activations=False,
                             outputs_collections=None):
    """Stack ResNet blocks, splitting into *n_branches* at *split_at_block*.

    Before the split, a single branch state is carried; at block index
    ``split_at_block`` every branch is forked from branch 0's running
    (stride, net, rate) state, and afterwards each branch advances
    independently. Returns the tuple of branch nets.

    Raises:
        ValueError: if the requested *output_stride* cannot be reached.
    """
    current_strides = [1]
    rates = [1]
    nets = [net]
    for (i_block, block) in enumerate(blocks):
        if (i_block == split_at_block):
            # Fan out: every branch starts from branch 0's state.
            (current_strides, nets, rates) = zip(*[
                resnet_block_fn(block, current_strides[0], nets[0],
                                output_stride, outputs_collections, rates[0],
                                store_non_strided_activations,
                                suffix(i_branch))
                for i_branch in range(n_branches)])
        else:
            # Advance each branch with its own stride/net/rate state.
            (current_strides, nets, rates) = zip(*[
                resnet_block_fn(block, current_stride, net, output_stride,
                                outputs_collections, rate,
                                store_non_strided_activations,
                                suffix(i_branch))
                for (i_branch, (net, current_stride, rate))
                in enumerate(zip(nets, current_strides, rates))])
    if ((output_stride is not None) and (current_strides[0] != output_stride)):
        raise ValueError('The target output_stride cannot be reached.')
    return nets
def resolve_rval_symbols(node: Union[(str, ast.AST)], should_update_usage_info: bool=True) -> Set[Symbol]:
    """Resolve the symbols used on the right-hand side of *node*.

    Accepts either source text (parsed to its first statement) or an AST
    node; assignment nodes are unwrapped to their value expression. When the
    dynamic resolver finds nothing, falls back to static resolution against
    the previous cell's counter and, optionally, records usage timestamps.
    """
    if isinstance(node, str):
        node = ast.parse(node).body[0]
    if isinstance(node, (ast.Assign, ast.AnnAssign, ast.AugAssign)):
        node = node.value
    symbols = ResolveRvalSymbols(should_update_usage_info)(node)
    if not symbols:
        # Static fallback: resolve against the previous cell (or -1 if none).
        prev_cell = cells().current_cell().prev_cell
        prev_ctr = (-1) if prev_cell is None else prev_cell.cell_ctr
        static_syms = static_resolve_rvals(node, cell_ctr=prev_ctr,
                                           scope=tracer().active_scope)
        if should_update_usage_info:
            Timestamp.update_usage_info(static_syms, used_node=node)
        symbols = {entry.sym for entry in static_syms}
    return symbols
def test_wrap_experiment_builds_git_archive_deleted_files():
    """A launch archive is built even when a committed file was deleted.

    Sets up a git repo whose tracked file is removed after the initial
    commit, runs the launcher from a subdirectory, and checks the resulting
    tar archive's location, size, and contents.
    """
    prefix = 'wrap_exp_test_builds_git_archive_deleted_files'
    exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
    _hard_rmtree(exp_path)
    expected_path = exp_path / 'test_exp' / 'launch_archive.tar.xz'
    with tempfile.TemporaryDirectory() as launcher_dir:
        launch_dir = pathlib.Path(launcher_dir)
        subprocess.check_call(('git', 'init'), cwd=launcher_dir)
        # Commit a file, then delete it so HEAD and the working tree differ.
        to_delete = launch_dir / 'to_delete.txt'
        to_delete.touch()
        subprocess.check_call(('git', 'add', str(to_delete)),
                              cwd=launcher_dir)
        subprocess.check_call(('git', '-c', 'user.name=Test User', '-c',
                               'user.email=', 'commit', '-m',
                               'Initial commit'), cwd=launcher_dir)
        to_delete.unlink()
        subdir = launch_dir / 'subdir'
        subdir.mkdir()
        launcher_path = launch_dir / 'subdir' / 'run_exp.py'
        (snapshot_dir, _) = _run_launcher(launcher_path, prefix)
        archive_path = os.path.join(snapshot_dir, 'launch_archive.tar.xz')
        assert expected_path.samefile(archive_path)
        assert expected_path.exists()
        # The archive must contain real content, not be an empty stub.
        assert expected_path.stat().st_size > 250
        listing = subprocess.check_output(
            ('tar', '--list', '--file', archive_path)).decode('utf-8').strip()
        assert 'subdir/run_exp.py' in listing
        assert 'test.txt' not in listing
class BACE(MoleculeCSVDataset):
    """BACE binary-classification dataset built from the DGL data mirror.

    Downloads and extracts bace.zip (cached between runs), parses the CSV,
    and keeps compound ids only for molecules that survived graph
    construction (``self.valid_ids``).
    """

    def __init__(self, smiles_to_graph=smiles_2_dgl, load=False,
                 log_every=1000, cache_file_path='./bace_dglgraph.bin',
                 n_jobs=1):
        self._url = 'dataset/bace.zip'
        root = get_download_dir()
        data_path = root + '/bace.zip'
        dir_path = root + '/bace'
        # overwrite=False: reuse a previously downloaded archive.
        download(_get_dgl_url(self._url), path=data_path, overwrite=False)
        extract_archive(data_path, dir_path)
        df = pd.read_csv(dir_path + '/bace.csv')
        super(BACE, self).__init__(df=df, smiles_to_graph=smiles_to_graph,
                                   smiles_column='mol',
                                   cache_file_path=cache_file_path,
                                   task_names=['Class'], load=load,
                                   log_every=log_every, init_mask=True,
                                   n_jobs=n_jobs)
        self.load_full = False
        all_ids = df['CID'].tolist()
        self.ids = [all_ids[i] for i in self.valid_ids]

    def __getitem__(self, item):
        """Return (smiles, graph, label, mask) — plus id when load_full."""
        record = (self.smiles[item], self.graphs[item], self.labels[item],
                  self.mask[item])
        if self.load_full:
            record = record + (self.ids[item],)
        return record
def load_model_from_config(config, ckpt):
    """Instantiate the configured model, load checkpoint weights, move to GPU.

    Loads the Lightning-style checkpoint at *ckpt*, applies its
    ``state_dict`` non-strictly (missing/unexpected keys tolerated), and
    returns the model in eval mode on CUDA.
    """
    print(f'Loading model from {ckpt}')
    checkpoint = torch.load(ckpt)
    state = checkpoint['state_dict']
    model = instantiate_from_config(config.model)
    # strict=False: ignore keys that don't line up with the instantiated model.
    (m, u) = model.load_state_dict(state, strict=False)
    model.cuda()
    model.eval()
    return model
def test_predict_2_classes():
    """LogisticRegression variants predict correctly on dense and sparse X."""
    # Each configuration is checked against both the dense and sparse inputs.
    for clf_kwargs in ({}, {'lambda_1': 0.001}, {'fit_intercept': False}):
        check_predictions(LogisticRegression(**clf_kwargs), X, Y1)
        check_predictions(LogisticRegression(**clf_kwargs), X_sp, Y1)