code
stringlengths
101
5.91M
class ChatMessage(BaseModel):
    """One message in a chat transcript, following the OpenAI chat schema.

    Carries the author's role, the optional message text, and an optional
    structured function-call payload.
    """

    # Who authored the message.
    role: Literal['user', 'assistant', 'system', 'function']
    # Message body; may be None (e.g. a pure function-call message).
    content: Optional[str]
    # Structured function-call payload, when the assistant invoked one.
    function_call: Optional[Dict] = None
class VisionTextDualEncoderProcessor():
    """Bundle an image feature extractor and a text tokenizer into a single
    processor for vision-text dual-encoder models.

    Image inputs are delegated to ``feature_extractor``, text inputs to
    ``tokenizer``; ``__call__`` can encode either modality or merge both.
    """

    def __init__(self, feature_extractor: FeatureExtractionMixin, tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast]):
        """Validate and store the two sub-processors.

        Raises:
            ValueError: if either argument has the wrong type.
        """
        if not isinstance(feature_extractor, FeatureExtractionMixin):
            raise ValueError(f'`feature_extractor` has to be of type {FeatureExtractionMixin.__class__}, but is {type(feature_extractor)}')
        if not isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)):
            raise ValueError(f'`tokenizer` has to be of type `PreTrainedTokenizer` or `PreTrainedTokenizerFast`, but is {type(tokenizer)}')
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer
        # Processor used by default when no modality is explicitly chosen.
        self.current_processor = self.feature_extractor

    def save_pretrained(self, save_directory):
        """Save both sub-processors (tagged with this processor class) to ``save_directory``."""
        self.feature_extractor._set_processor_class(self.__class__.__name__)
        self.feature_extractor.save_pretrained(save_directory)
        self.tokenizer._set_processor_class(self.__class__.__name__)
        self.tokenizer.save_pretrained(save_directory)

    # BUG FIX: this alternate constructor takes `cls` but was missing the
    # @classmethod decorator, so calling it on the class passed the path as
    # `cls` and broke `cls(...)` below.
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the processor from a pretrained checkpoint or hub name."""
        feature_extractor = AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
        tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
        return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Encode ``text`` and/or ``images``; at least one must be given.

        Returns the tokenizer encoding (optionally augmented with
        ``pixel_values``), or a BatchEncoding of image features when only
        images are passed.

        Raises:
            ValueError: if both ``text`` and ``images`` are None.
        """
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.feature_extractor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)
class Highway(Layer):
    """Highway network layer (Srivastava et al., 2015).

    Computes ``y = H(x) * T(x) + x * (1 - T(x))`` where H is an affine
    transform followed by `activation` and T is the "transform gate"
    followed by `transform_activation`.  Input must be 2-D
    ``(batch, features)``; output has the same shape.
    """

    def __init__(self, activation='relu', transform_activation='sigmoid', kernel_initializer='glorot_uniform', transform_initializer='glorot_uniform', bias_initializer='zeros', transform_bias_initializer=(- 2), kernel_regularizer=None, transform_regularizer=None, bias_regularizer=None, transform_bias_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
        # Resolve all string/callable specs through the Keras registries.
        self.activation = activations.get(activation)
        self.transform_activation = activations.get(transform_activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.transform_initializer = initializers.get(transform_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        # An int gate bias (default -2) becomes a Constant initializer,
        # biasing the gate towards the carry (identity) path early in training.
        if isinstance(transform_bias_initializer, int):
            self.transform_bias_initializer = Constant(value=transform_bias_initializer)
        else:
            self.transform_bias_initializer = initializers.get(transform_bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.transform_regularizer = regularizers.get(transform_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.transform_bias_regularizer = regularizers.get(transform_bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        super(Highway, self).__init__(**kwargs)

    def build(self, input_shape):
        # Only rank-2 inputs are supported; the layer is square (out dim == in dim).
        assert (len(input_shape) == 2)
        input_dim = input_shape[(- 1)]
        self.W = self.add_weight(shape=(input_dim, input_dim), name='{}_W'.format(self.name), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint)
        # NOTE(review): W_transform reuses kernel_constraint — there is no
        # dedicated transform constraint parameter; confirm this is intended.
        self.W_transform = self.add_weight(shape=(input_dim, input_dim), name='{}_W_transform'.format(self.name), initializer=self.transform_initializer, regularizer=self.transform_regularizer, constraint=self.kernel_constraint)
        self.bias = self.add_weight(shape=(input_dim,), name='{}_bias'.format(self.name), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint)
        # Gate bias has no constraint parameter at all.
        self.bias_transform = self.add_weight(shape=(input_dim,), name='{}_bias_transform'.format(self.name), initializer=self.transform_bias_initializer, regularizer=self.transform_bias_regularizer)
        self.built = True

    def call(self, x, mask=None):
        # H(x): transformed candidate; T(x): gate (in [0, 1] for sigmoid).
        x_h = self.activation((K.dot(x, self.W) + self.bias))
        x_trans = self.transform_activation((K.dot(x, self.W_transform) + self.bias_transform))
        # Blend the candidate and the identity path by the gate value.
        output = ((x_h * x_trans) + ((1 - x_trans) * x))
        return output

    def get_config(self):
        """Serialize constructor arguments so the layer round-trips through
        Keras model (de)serialization."""
        config = {'activation': activations.serialize(self.activation), 'transform_activation': activations.serialize(self.transform_activation), 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'transform_initializer': initializers.serialize(self.transform_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'transform_bias_initializer': initializers.serialize(self.transform_bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'transform_regularizer': regularizers.serialize(self.transform_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'transform_bias_regularizer': regularizers.serialize(self.transform_bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint)}
        base_config = super(Highway, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
def pyconvhgresnet50(pretrained=False, **kwargs):
    """Build a PyConvHG-ResNet-50 model.

    With ``pretrained=True``, the published checkpoint is downloaded into
    the local cache directory and loaded before the model is returned.
    """
    net = PyConvHGResNet(PyConvBlock, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return net
    # Ensure the cache directory exists before downloading the weights.
    os.makedirs(default_cache_path, exist_ok=True)
    checkpoint = download_from_url(model_urls['pyconvhgresnet50'], root=default_cache_path)
    net.load_state_dict(torch.load(checkpoint))
    return net
class BaseExecutor():
    """Base class for a distributed-TensorFlow task executor.

    Derives the cluster topology from the TF_CONFIG environment variable,
    starts this task's gRPC server, and assembles an estimator RunConfig
    whose session is wired to a reduced ("mini") cluster spec.
    """

    def __init__(self):
        # Full cluster spec as parsed from TF_CONFIG.
        self.cluster_spec = None
        # Reduced spec (ps + this chief/worker only); built in get_cluster_def.
        self.mini_cluster_spec = None
        self.task_id = None
        self.task_type = None
        # This task's "host:port"; filled from TF_CONFIG or after start_server.
        self.address: str = ''
        # Human-readable "<task_type>:<task_id>" label.
        self.role: str = ''

    def get_tf_config_from_env(self):
        """Return the parsed TF_CONFIG dict from the environment."""
        return get_tf_config()

    def get_cluster_info_by_master(self):
        # Hook for subclasses that learn the cluster from a master service
        # instead of TF_CONFIG; intentionally a no-op here.
        pass

    def get_cluster_info_by_tf_config(self):
        """Populate task type/id, role, cluster spec and own address from TF_CONFIG."""
        tf_config = self.get_tf_config_from_env()
        (task_type, task_id) = get_tf_config_task_type_and_index()
        self.task_type = task_type
        self.task_id = task_id
        self.role = ((task_type + ':') + str(task_id))
        self.cluster_spec = tf_config['cluster']
        # Own address is looked up from the cluster section of TF_CONFIG.
        self.address = tf_config['cluster'][task_type][task_id]
        logger.info('cluster spec is {} task_type is {} task_id is {} address is {}'.format(self.cluster_spec, self.task_type, self.task_id, self.address))

    def get_cluster_def(self, cluster_spec):
        """Build a ClusterDef containing all ps tasks plus ONLY this task.

        Side effect: fills self.mini_cluster_spec with the matching
        {ps, chief-or-worker} host lists.
        """
        mini_cluster_spec = {}
        ps_hosts = []
        worker_hosts = []
        cluster_def = cluster_pb2.ClusterDef()
        for job_name in cluster_spec:
            if (job_name == 'ps'):
                # All parameter servers are always included.
                job = cluster_def.job.add()
                job.name = job_name
                for (task_index, address) in enumerate(cluster_spec[job_name]):
                    job.tasks[task_index] = address
                    ps_hosts.append(address)
            elif (job_name == self.task_type):
                # Only this task's own job entry is added for non-ps roles.
                job = cluster_def.job.add()
                task_id = self.task_id
                if (job_name == TFConstants.Chief()):
                    job_name = 'chief'
                elif (job_name == TFConstants.Worker()):
                    # Workers are shifted by one so index 0 is left for the chief.
                    task_id = (self.task_id + 1)
                job.name = job_name
                job.tasks[task_id] = self.address
        if (self.task_type != 'ps'):
            worker_hosts.append(self.address)
        mini_cluster_spec['ps'] = ps_hosts
        if (self.task_type == TFConstants.Chief()):
            mini_cluster_spec['chief'] = worker_hosts
        else:
            mini_cluster_spec['worker'] = worker_hosts
        self.mini_cluster_spec = mini_cluster_spec
        logger.info('cluster def is:\n %s', cluster_def)
        return cluster_def

    def address_initiated(self):
        # True once this task knows its own host:port.
        return (self.address != '')

    def start_server(self):
        """Start this task's gRPC server (evaluators run without one).

        When no address was assigned, a local server is created and
        self.address is derived from the local hostname and the server's port.
        """
        if (self.task_type != TFConstants.Evaluator()):
            logger.info('starting server {}'.format(self.address))
            logger.info(self.address_initiated())
            if self.address_initiated():
                self.server = server_lib.Server({'localhost': [self.address]}, protocol='grpc')
                self.server.start()
            else:
                self.server = server_lib.Server.create_local_server()
                grpc_address = self.server.target
                hostname = socket.gethostname()
                ip = socket.gethostbyname(hostname)
                # Combine the resolved host IP with the server's chosen port.
                self.address = ((ip + ':') + grpc_address.split(':')[(- 1)])

    def get_config(self, cluster_spec):
        """Assemble a tf.estimator.RunConfig wired to the mini cluster spec.

        Mutates TF_CONFIG in the environment (environment='google') and
        overrides several private RunConfig fields to point the session at
        this task's gRPC server.
        """
        # NOTE(review): this first RunConfig is discarded — it is recreated
        # below after the session config is built.
        config = tf.estimator.RunConfig()
        tf_config = os.environ['TF_CONFIG']
        tf_config = json.loads(tf_config)
        tf_config['environment'] = 'google'
        os.environ['TF_CONFIG'] = json.dumps(tf_config)
        cluster_def = self.get_cluster_def(cluster_spec)
        session_config = tf.ConfigProto(cluster_def=cluster_def, gpu_options=tf.GPUOptions(allow_growth=True), allow_soft_placement=True, log_device_placement=False)
        config = tf.estimator.RunConfig()
        logger.info('Using _get_run_config : %s', str(vars(config)))
        # Propagate session state across the cluster spec (experimental flag).
        experimental_config = session_config.experimental
        experimental_config.share_session_state_in_clusterspec_propagation = True
        # The fields below are private RunConfig attributes, overridden
        # deliberately to force this topology onto the estimator.
        config._session_config = session_config
        config._is_chief = (self.task_type == TFConstants.Chief())
        config._keep_checkpoint_max = 20
        logger.info('mini cluster spec is {}'.format(self.mini_cluster_spec))
        config._cluster_spec = server_lib.ClusterSpec(self.mini_cluster_spec)
        config._task_id = self.task_id
        if (self.task_type == TFConstants.Worker()):
            # Same +1 shift as in get_cluster_def: slot 0 belongs to the chief.
            config._task_id = (self.task_id + 1)
        config._task_type = self.task_type
        if (self.task_type == TFConstants.Chief()):
            config._task_type = TFConstants.Chief()
        config._num_ps_replicas = len(self.mini_cluster_spec.get(TFConstants.PS(), {}))
        config._num_worker_replicas = 1
        # The session connects to this task's own gRPC server.
        config._master = ('grpc://' + self.address)
        config._protocol = 'grpc'
        config._log_step_count_steps = 10
        config._server_name = self.address
        return config
class ReparamLargeKernelConv(nn.Module):
    """RepLKNet-style large-kernel convolution with structural re-parameterization.

    During training it runs a large-kernel conv+BN branch (``lkb_origin``)
    plus an optional parallel small-kernel conv+BN branch (``small_conv``).
    ``merge_kernel`` fuses both branches into a single biased conv
    (``lkb_reparam``) for efficient inference.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, groups, small_kernel, small_kernel_merged=False):
        super(ReparamLargeKernelConv, self).__init__()
        self.kernel_size = kernel_size
        self.small_kernel = small_kernel
        # "Same" padding for an odd kernel size.
        padding = (kernel_size // 2)
        if small_kernel_merged:
            # Deploy mode: a single, already-fused conv with bias.
            self.lkb_reparam = get_conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=1, groups=groups, bias=True)
        else:
            self.lkb_origin = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=1, groups=groups)
            if (small_kernel is not None):
                assert (small_kernel <= kernel_size), 'The kernel size for re-param cannot be larger than the large kernel!'
                self.small_conv = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=small_kernel, stride=stride, padding=(small_kernel // 2), groups=groups, dilation=1)

    def forward(self, inputs):
        # After merge_kernel() only the fused conv exists; before it, the
        # small-kernel branch (if any) is summed onto the large-kernel output.
        if hasattr(self, 'lkb_reparam'):
            out = self.lkb_reparam(inputs)
        else:
            out = self.lkb_origin(inputs)
            if hasattr(self, 'small_conv'):
                out += self.small_conv(inputs)
        return out

    def get_equivalent_kernel_bias(self):
        """Return the (kernel, bias) of the single conv equivalent to both branches."""
        # Fold BN statistics into the large-kernel conv weights.
        (eq_k, eq_b) = fuse_bn(self.lkb_origin.conv, self.lkb_origin.bn)
        if hasattr(self, 'small_conv'):
            (small_k, small_b) = fuse_bn(self.small_conv.conv, self.small_conv.bn)
            eq_b += small_b
            # Zero-pad the small kernel on all four sides so it can be summed
            # into the centre of the large kernel.
            eq_k += nn.functional.pad(small_k, ([((self.kernel_size - self.small_kernel) // 2)] * 4))
        return (eq_k, eq_b)

    def merge_kernel(self):
        """Fuse the training-time branches into lkb_reparam for deployment."""
        (eq_k, eq_b) = self.get_equivalent_kernel_bias()
        self.lkb_reparam = get_conv2d(in_channels=self.lkb_origin.conv.in_channels, out_channels=self.lkb_origin.conv.out_channels, kernel_size=self.lkb_origin.conv.kernel_size, stride=self.lkb_origin.conv.stride, padding=self.lkb_origin.conv.padding, dilation=self.lkb_origin.conv.dilation, groups=self.lkb_origin.conv.groups, bias=True)
        self.lkb_reparam.weight.data = eq_k
        self.lkb_reparam.bias.data = eq_b
        # Drop the training-time branches so forward() takes the fused path.
        self.__delattr__('lkb_origin')
        if hasattr(self, 'small_conv'):
            self.__delattr__('small_conv')
def generate_all(all_monsters, all_groups, all_modifiers):
    # Enumerate every way to (a) partition the monsters evenly and assign the
    # parts to the named groups, and (b) partition the modifiers evenly
    # across the element types; pair every monster assignment with every
    # modifier assignment and split the shuffled result 50/50 into train/dev.
    # NOTE(review): brute force over full permutations — factorial cost;
    # presumably only used with small inputs.
    monster_groupings = set()
    monsters_per_group = (len(all_monsters) // len(all_groups))
    for monsters in itertools.permutations(all_monsters, len(all_monsters)):
        groups = []
        # Chunk the permutation into consecutive equal-sized groups; the
        # frozenset-of-frozensets dedupes order-equivalent partitions.
        for i in range(0, len(monsters), monsters_per_group):
            group = monsters[i:(i + monsters_per_group)]
            groups.append(frozenset(group))
        monster_groupings.add(frozenset(groups))
    monster_assignments = set()
    for groups in itertools.permutations(all_groups, len(all_groups)):
        for monster_grouping in monster_groupings:
            assignment = set()
            # Pair each named group with one monster part (as a sorted tuple
            # so the pairing is hashable and order-independent).
            for (g, mg) in zip(groups, monster_grouping):
                assignment.add((g, tuple(sorted(list(mg)))))
            monster_assignments.add(frozenset(assignment))
    modifier_groupings = set()
    modifiers_per_element = (len(all_modifiers) // len(ALL_TYPES))
    for modifiers in itertools.permutations(all_modifiers, len(all_modifiers)):
        groups = []
        for i in range(0, len(modifiers), modifiers_per_element):
            group = modifiers[i:(i + modifiers_per_element)]
            groups.append(frozenset(group))
        modifier_groupings.add(frozenset(groups))
    modifier_assignments = set()
    # Element types are identified by their index into ALL_TYPES.
    for elements in itertools.permutations(list(range(len(ALL_TYPES))), len(ALL_TYPES)):
        for modifier_grouping in modifier_groupings:
            assignment = []
            for (e, mg) in zip(elements, modifier_grouping):
                assignment.append((e, tuple(sorted(list(mg)))))
            modifier_assignments.add(frozenset(assignment))
    # Cartesian product of the two assignment families.
    all_assignments = []
    for m in monster_assignments:
        for mm in modifier_assignments:
            all_assignments.append((m, mm))
    # NOTE(review): sorting tuples of frozensets compares frozensets by the
    # subset relation (a partial order), so this sort does not produce a
    # canonical total order; combined with hash-randomized set iteration the
    # train/dev split may not be reproducible across runs — confirm whether
    # reproducibility is required here.
    all_assignments.sort()
    # Fixed-seed shuffle, then an even train/dev split.
    random.Random(0).shuffle(all_assignments)
    n = (len(all_assignments) // 2)
    train = all_assignments[:n]
    dev = all_assignments[n:]
    return (train, dev)
class MMIFrameScorer(PartialScorerInterface):
    """Frame-level LF-MMI partial scorer for beam search (k2-based).

    Scores hypothesis extensions by intersecting numerator graphs (compiled
    from the partial transcripts) and a denominator graph with the acoustic
    dense FSA, frame by frame.  Both scores are cross-checked against a
    ``step_intersect`` reference implementation.
    """

    def __init__(self, lang, device, idim, sos_id, rank, use_segment, char_list):
        """Load the lexicon, graph compiler, projection layer and MMI weights.

        Args:
            lang: path-like language directory (contains oov.txt, mmi_param.*).
            device: device for graph compilation.
            idim: encoder output dimension fed into the phone projection.
            sos_id: token id used as both SOS and EOS.
            rank: which mmi_param.<rank>.pth checkpoint to load.
            use_segment: unused here; kept for interface compatibility.
            char_list: id -> token string table for building transcripts.
        """
        self.lang = lang
        self.device = device
        self.lexicon = Lexicon(lang)
        # BUG FIX: the file handle was never closed; use a context manager.
        with open(self.lang / 'oov.txt') as f:
            self.oov = self.oovid = f.read().strip()
        self.graph_compiler = MmiTrainingGraphCompiler(self.lexicon, self.device, self.oov)
        self.phone_ids = self.lexicon.phone_symbols()
        # Project encoder features to phone posteriors (+1 for the blank/extra unit).
        self.lo = torch.nn.Linear(idim, len(self.phone_ids) + 1)
        self.lm_scores = None
        # Retry the (possibly racy) checkpoint load up to 10 times.
        # BUG FIX: the original loop had no `break`, so it reloaded the
        # weights 10 times even after success, and used a bare `except:`.
        for i in range(10):
            try:
                self.load_weight(rank)
                break
            except Exception:
                print(f'{i}-th trial to load MMI matrix weight but fail')
        self.P = create_bigram_phone_lm(self.phone_ids)
        self.P.set_scores_stochastic_(self.lm_scores)
        self.char_list = char_list
        self.eos = sos_id
        self.blank = 0
        # Effective -inf for masked-out (blank) hypotheses.
        self.logzero = -10000

    def load_weight(self, rank):
        """Load lm_scores and the projection layer from mmi_param.<rank>.pth."""
        ckpt_dict = torch.load(self.lang / f'mmi_param.{rank}.pth')
        # Frozen at inference time.
        for v in ckpt_dict.values():
            v.requires_grad = False
        self.lm_scores = ckpt_dict['lm_scores']
        lo_dict = {'weight': ckpt_dict['lo.1.weight'], 'bias': ckpt_dict['lo.1.bias']}
        self.lo.load_state_dict(lo_dict)

    def init_state(self, x):
        """Precompute per-frame denominator scores for one utterance.

        Args:
            x: encoder output for one utterance (assumes shape (T, idim) —
               it is unsqueezed to a batch of 1).

        Returns:
            (nnet_output, den_scores, prev_score) scorer state tuple.
        """
        x = x.unsqueeze(0)
        nnet_output = self.lo(x)
        # The denominator graph does not depend on the text; '<UNK>' is a placeholder.
        texts = ['<UNK>']
        (_, den) = self.graph_compiler.compile(texts, self.P, replicate_den=True)
        T = x.size()[1]
        # Denominator score for every suffix length t = T .. 1.
        den_scores = []
        for t in range(T, 0, -1):
            supervision = torch.Tensor([[0, 0, t]]).to(torch.int32)
            dense_fsa_vec = k2.DenseFsaVec(nnet_output, supervision)
            den_lats = k2.intersect_dense(den, dense_fsa_vec, output_beam=10.0)
            den_tot_scores = den_lats.get_tot_scores(log_semiring=True, use_double_scores=True)
            den_scores.append(den_tot_scores)
        den_scores = torch.cat(den_scores).unsqueeze(0)
        print('den_scores: ', den_scores)
        # Sanity check against the step-wise intersection implementation.
        supervision = torch.Tensor([[0, 0, T]]).to(torch.int32)
        dense_fsa_vec = k2.DenseFsaVec(nnet_output, supervision)
        den_scores_ = step_intersect(den, dense_fsa_vec)[0].unsqueeze(0)
        den_scores_ = torch.flip(den_scores_, [1])
        max_diff = torch.max(torch.abs(den_scores - den_scores_)).item()
        if abs(max_diff) > 0.02:
            print('denominator error: ', den_scores, den_scores_)
            raise ValueError
        prev_score = torch.Tensor([0]).to(torch.float32)
        return (nnet_output, den_scores, prev_score)

    def select_state(self, states, j):
        """Extract the state of hypothesis j (only prev_scores is per-hypothesis)."""
        (nnet_output_single, den_scores, prev_scores) = states
        return (nnet_output_single, den_scores, prev_scores[j])

    # BUG FIX: was `def score(**kargs)` (missing self), which raised
    # TypeError instead of NotImplementedError when called on an instance.
    def score(self, *args, **kwargs):
        """Full (non-partial) scoring is not supported by this scorer."""
        raise NotImplementedError

    def score_partial(self, y, next_tokens, state, hs_pad):
        """Score each candidate next token given prefix ``y``.

        Args:
            y: current hypothesis token ids (1-D tensor, starts with SOS).
            next_tokens: candidate next-token ids (1-D tensor, batch_size).
            state: (nnet_output, den_scores, prev_score) from init/select_state.
            hs_pad: unused encoder states (kept for interface compatibility).

        Returns:
            (tok_scores, new_state) where tok_scores are incremental MMI scores.
        """
        (nnet_output_single, den_scores, prev_score) = state
        T = nnet_output_single.size()[1]
        batch_size = len(next_tokens)
        # One "example" per (frame, candidate) pair.
        num_egs = T * batch_size
        supervision = torch.stack([torch.arange(num_egs), torch.zeros(num_egs), torch.arange(T, 0, -1).unsqueeze(1).repeat(1, batch_size).view(-1)], dim=1).to(torch.int32)
        nnet_output = nnet_output_single.repeat(num_egs, 1, 1)
        dense_fsa_vec = k2.DenseFsaVec(nnet_output, supervision)
        # Extend the prefix with every candidate token, replicated per frame.
        y = y.unsqueeze(0).repeat(num_egs, 1)
        next_tokens = next_tokens.unsqueeze(1).repeat(T, 1)
        ys = torch.cat([y, next_tokens], dim=1)
        # Skip the SOS token ([1:]) and strip any '<eos>' markers.
        texts = [' '.join([self.char_list[tid] for tid in text[1:]]) for text in ys]
        texts = [text.replace('<eos>', '').strip() for text in texts]
        (num_graphs, _) = self.graph_compiler.compile(texts, self.P, replicate_den=False)
        num_lats = k2.intersect_dense(num_graphs, dense_fsa_vec, output_beam=10.0)
        num_tot_scores = num_lats.get_tot_scores(log_semiring=True, use_double_scores=True)
        # Reshape to (batch, frame).
        num_tot_scores = num_tot_scores.view(T, batch_size).transpose(0, 1)
        # Cross-check against the step-wise intersection implementation.
        supervision = torch.stack([torch.arange(batch_size), torch.zeros(batch_size), (torch.ones(batch_size).int() * T)], dim=1).to(torch.int32)
        dense_fsa_vec = k2.DenseFsaVec(nnet_output, supervision)
        ys = ys[:batch_size]
        texts = [' '.join([self.char_list[tid] for tid in text[1:]]) for text in ys]
        texts = [text.replace('<eos>', '').strip() for text in texts]
        (num_graphs, _) = self.graph_compiler.compile(texts, self.P, replicate_den=False)
        num_tot_scores_ = torch.stack(step_intersect(num_graphs, dense_fsa_vec), dim=0)
        num_tot_scores_ = torch.flip(num_tot_scores_, [1])
        max_diff = torch.max(torch.abs(num_tot_scores - num_tot_scores_)).item()
        if abs(max_diff) > 0.02:
            print('numerator error: ', num_tot_scores, num_tot_scores_)
            raise ValueError
        # MMI score per frame, then log-sum over frames.
        tot_scores_frame = num_tot_scores - den_scores
        tot_scores = torch.logsumexp(tot_scores_frame, dim=-1)
        next_tokens = next_tokens.squeeze(1)[:batch_size]
        # EOS hypotheses take the full-utterance (first-frame) score.
        eos_pos = torch.where(next_tokens == self.eos)[0]
        if len(eos_pos) > 0:
            tot_scores[eos_pos] = tot_scores_frame[(eos_pos.item(), 0)]
        # Blank extensions are forbidden.
        blk_pos = torch.where(next_tokens == self.blank)[0]
        if len(blk_pos) > 0:
            tot_scores[blk_pos] = self.logzero
        # Incremental score relative to the previous partial score.
        tok_scores = tot_scores - prev_score
        state = (nnet_output_single, den_scores, tot_scores)
        return (tok_scores, state)

    def final_score(self, state):
        """No additional score at hypothesis end."""
        return 0
def add_arguments(parser):
    """Register one optional '--<name>' flag on ``parser`` per entry in ``names``.

    Each flag's type mirrors the type of its value in ``defaults``; the
    actual default is left as None so unset flags remain detectable.
    """
    for name in names:
        parser.add_argument(
            '--{}'.format(name),
            default=None,
            type=type(defaults[name]),
            help='%(default)s',
        )
def get_collator_func(tokenizer, max_length, input_text_type):
    """Build a DataLoader collate function around ``tokenizer``.

    The returned function accepts either a batch of raw strings or a batch
    of (id, string) tuples; the latter adds a ``text_ids`` tensor to the
    output dict alongside ``input_ids`` and ``attention_mask``.
    """
    # Only forward `input_text_type` to tokenizers whose __call__ accepts it.
    accepts_text_type = 'input_text_type' in inspect.getfullargspec(tokenizer.__call__)[0]
    extra_kwargs = {'input_text_type': input_text_type} if accepts_text_type else {}

    def collator_fn(batch):
        has_ids = isinstance(batch[0], tuple)
        if has_ids:
            ids = torch.LongTensor([item[0] for item in batch])
            texts = [item[1] for item in batch]
        else:
            assert isinstance(batch[0], str)
            texts = batch
        features = tokenizer(texts, padding=True, truncation=True, max_length=max_length, **extra_kwargs)
        out = {
            'input_ids': torch.LongTensor(features['input_ids']),
            'attention_mask': torch.LongTensor(features['attention_mask']),
        }
        if has_ids:
            out['text_ids'] = ids
        return out

    return collator_fn
def pairwise_distance_loss(anchor, positives, pairwise_squared_d_dists, d_max_squared, f_max_squared, distance_loss_name='distance_loss'):
    """Loss matching pairwise feature distances to target pairwise distances.

    Both distance matrices are scaled by their respective maxima, compared
    with a squared (or Huber, when the loss name contains 'huber')
    difference, and averaged over all axes.
    """
    features = tf.concat([anchor, positives], 1)
    pairwise_squared_f_dists = _pairwise_squared_distances(features)
    # Normalize each matrix by its maximum squared distance.
    d_max_copies = tf.fill(pairwise_squared_d_dists.get_shape(), d_max_squared)
    f_max_copies = tf.fill(pairwise_squared_f_dists.get_shape(), f_max_squared)
    scaled_d_dists = tf.div(pairwise_squared_d_dists, d_max_copies)
    scaled_f_dists = tf.div(pairwise_squared_f_dists, f_max_copies)
    if 'huber' in distance_loss_name:
        diffs = tf.losses.huber_loss(scaled_f_dists, scaled_d_dists, reduction=tf.losses.Reduction.NONE)
    else:
        diffs = tf.squared_difference(scaled_f_dists, scaled_d_dists)
    # Mean over pairs, then examples, then the batch.
    per_pair = tf.reduce_mean(diffs, axis=2)
    per_example = tf.reduce_mean(per_pair, axis=1)
    return tf.reduce_mean(per_example, axis=0)
class TestGaussianMLPPolicy(TfGraphTestCase):
    """Tests for GaussianMLPPolicy: construction, sampling, symbolic build,
    and pickling."""

    def test_invalid_env(self):
        # A discrete action space is unsupported; construction must fail.
        env = GarageEnv(DummyDiscreteEnv())
        with pytest.raises(ValueError):
            GaussianMLPPolicy(env_spec=env.spec)

    # BUG FIX: these decorators were bare `.parametrize(...)` expressions
    # (syntactically orphaned); restored the `@pytest.mark.` prefix.
    @pytest.mark.parametrize('obs_dim, action_dim', [((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2)), ((2, 2), (2, 2))])
    def test_get_action(self, obs_dim, action_dim):
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianMLPPolicy(env_spec=env.spec)
        env.reset()
        (obs, _, _, _) = env.step(1)
        (action, _) = policy.get_action(obs.flatten())
        assert env.action_space.contains(action)
        (actions, _) = policy.get_actions([obs.flatten(), obs.flatten(), obs.flatten()])
        for action in actions:
            assert env.action_space.contains(action)

    def test_get_action_dict_space(self):
        # Dict observation spaces must be handled transparently.
        env = GarageEnv(DummyDictEnv(obs_space_type='box', act_space_type='box'))
        policy = GaussianMLPPolicy(env_spec=env.spec)
        obs = env.reset()
        (action, _) = policy.get_action(obs)
        assert env.action_space.contains(action)
        (actions, _) = policy.get_actions([obs, obs])
        for action in actions:
            assert env.action_space.contains(action)

    @pytest.mark.parametrize('obs_dim, action_dim', [((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2)), ((2, 2), (2, 2))])
    def test_build(self, obs_dim, action_dim):
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianMLPPolicy(env_spec=env.spec)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32, shape=(None, None, policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        # The rebuilt symbolic graph must produce the same mean as the original.
        output1 = self.sess.run([policy.distribution.loc], feed_dict={policy.model.input: [[obs.flatten()]]})
        output2 = self.sess.run([dist_sym.loc], feed_dict={state_input: [[obs.flatten()]]})
        assert np.array_equal(output1, output2)

    @pytest.mark.parametrize('obs_dim, action_dim', [((1,), (1,)), ((1,), (2,)), ((2,), (2,)), ((1, 1), (1, 1)), ((1, 1), (2, 2)), ((2, 2), (2, 2))])
    def test_is_pickleable(self, obs_dim, action_dim):
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianMLPPolicy(env_spec=env.spec)
        obs = env.reset()
        # Perturb a weight so the check isn't trivially passing on fresh init.
        with tf.compat.v1.variable_scope('GaussianMLPPolicy/GaussianMLPModel', reuse=True):
            bias = tf.compat.v1.get_variable('dist_params/mean_network/hidden_0/bias')
        bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run([policy.distribution.loc, policy.distribution.stddev()], feed_dict={policy.model.input: [[obs.flatten()]]})
        p = pickle.dumps(policy)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            policy_pickled = pickle.loads(p)
            output2 = sess.run([policy_pickled.distribution.loc, policy_pickled.distribution.stddev()], feed_dict={policy_pickled.model.input: [[obs.flatten()]]})
            assert np.array_equal(output2, output1)
def test(cuda=False):
    """Compare `dpooling` against the `dpooling_torch` reference and benchmark both.

    Prints the maximum forward/gradient discrepancy between the two
    implementations, then the average forward/backward time for each.

    Args:
        cuda: run on GPU (inputs moved to CUDA and kernels synchronized).
    """
    import time  # BUG FIX: was imported twice mid-function

    torch.manual_seed(1234)
    pooling = 'sum'
    size = 500
    max_length = 100
    hidden_size = 128
    kernel_size = torch.randint(0, max_length, (size,))
    x = torch.rand(kernel_size.sum(), hidden_size)
    if cuda:
        x = x.cuda()
        kernel_size = kernel_size.cuda()
    x.requires_grad_()
    print('start')

    def _run(fn):
        # One forward/backward pass; returns (output data, grad wrt x).
        x.grad = None
        out = fn(x, kernel_size, pooling=pooling)
        out_data = out.data
        out.mean().backward()
        return out_data, x.grad.data

    def _benchmark(label, fn, n_iter=100):
        # Average forward/backward wall time over n_iter passes.
        forward = 0.0
        backward = 0.0
        for _ in range(n_iter):
            start = time.time()
            out = fn(x, kernel_size, pooling=pooling)
            if cuda:
                torch.cuda.synchronize()
            forward += time.time() - start
            out = out.mean()
            start = time.time()
            out.backward()
            if cuda:
                torch.cuda.synchronize()
            backward += time.time() - start
        print('{} Forward: {:.3f} ms | Backward {:.3f} ms'.format(label, (forward * 1000.0) / n_iter, (backward * 1000.0) / n_iter))

    out1, grad1 = _run(dpooling)
    out2, grad2 = _run(dpooling_torch)
    # Maximum element-wise discrepancy between the two implementations.
    print(torch.max(torch.abs(out1 - out2)))
    print(torch.max(torch.abs(grad1 - grad2)))

    # Originally this benchmark loop was copy-pasted twice; factored into _benchmark.
    _benchmark('Mine', dpooling)
    _benchmark('Pytorch', dpooling_torch)
def convert_param_to_skopt(param, name):
    """Translate one hyperparameter spec dict into a skopt search dimension.

    Supported ``param['type']`` values: BOOL, INT, STRING, FLOAT, FLOAT_EXP
    (log-uniform).  Raises ValueError for anything else.
    """
    from skopt.space import Real, Integer, Categorical
    kind = param['type']
    if kind == 'BOOL':
        return Categorical([False, True], name=name)
    if kind == 'INT':
        return Integer(low=param['min'], high=param['max'], name=name)
    if kind == 'STRING':
        return Categorical(param['options'], name=name)
    if kind == 'FLOAT':
        return Real(low=param['min'], high=param['max'], name=name)
    if kind == 'FLOAT_EXP':
        # Exponential range: sample log-uniformly in base 10.
        return Real(low=param['min'], high=param['max'], base=10, prior='log-uniform', name=name)
    raise ValueError("Didn't understand space {}.".format(param))
def train_mod(model, data, epochs=3):
    """Fit ``model`` on the train split of ``data`` and print test metrics.

    ``data`` is a 6-tuple (x_train, y_train, label_train, x_test, y_test,
    label_test); the label entries are unused here.
    """
    (x_train, y_train, label_train, x_test, y_test, label_test) = data
    model.fit(
        x_train,
        y_train,
        epochs=epochs,
        batch_size=600,
        validation_data=(x_test, y_test),
        verbose=1,
    )
    results = model.evaluate(x_test, y_test, verbose=0)
    print('Test score:', results[0])
    print('Test accuracy:', results[1])
class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
    """FP16 optimizer wrapper.

    Keeps the model's FP16 parameters alongside an FP32 master copy that the
    wrapped ``fp32_optimizer`` actually updates, with dynamic loss scaling
    (disabled for bf16, which has enough dynamic range).
    """

    def __init__(self, args, params, fp32_optimizer, fp32_params):
        super().__init__(args)
        self.fp16_params = params
        self.fp32_optimizer = fp32_optimizer
        self.fp32_params = fp32_params
        if getattr(args, 'fp16_scale_window', None) is None:
            if len(args.update_freq) > 1:
                raise ValueError('--fp16-scale-window must be given explicitly when using a custom --update-freq schedule')
            # Default window: 2**14 updates, shrunk by data-parallel size and
            # gradient accumulation so the scale adapts at a similar real rate.
            data_parallel_size = int(args.distributed_world_size / args.model_parallel_size)
            scale_window = int(((2 ** 14) / data_parallel_size) / args.update_freq[0])
        else:
            scale_window = args.fp16_scale_window
        if not getattr(args, 'bf16', False):
            self.scaler = DynamicLossScaler(init_scale=args.fp16_init_scale, scale_window=scale_window, tolerance=args.fp16_scale_tolerance, threshold=args.threshold_loss_scale, min_loss_scale=args.min_loss_scale)
        else:
            # bf16 does not need loss scaling.
            self.scaler = None

    # BUG FIX: this factory takes `cls` and calls `cls(...)`, but was missing
    # the @classmethod decorator, so `FP16Optimizer.build_optimizer(args, p)`
    # would misbind `args` as `cls`.
    @classmethod
    def build_optimizer(cls, args, params):
        """Build the FP32 master params and inner optimizer, then wrap them."""
        flatten = not getattr(args, 'fp16_no_flatten_grads', False)
        if getattr(args, 'bf16', False):
            flatten = False
        fp32_params = cls.build_fp32_params(params, flatten=flatten)
        if flatten:
            fp32_optimizer = optim.build_optimizer(args, [fp32_params])
        else:
            fp32_optimizer = optim.build_optimizer(args, fp32_params)
        if flatten and (not fp32_optimizer.supports_flat_params):
            raise RuntimeError('chosen optimizer does not support flat params, please set --fp16-no-flatten-grads')
        return cls(args, params, fp32_optimizer, fp32_params)

    # NOTE(review): upstream fairseq exposes `optimizer`/`optimizer_config`
    # as @property; kept as plain methods here to preserve this file's API.
    def optimizer(self):
        return self.fp32_optimizer.optimizer

    def optimizer_config(self):
        return self.fp32_optimizer.optimizer_config

    def get_lr(self):
        return self.fp32_optimizer.get_lr()

    def set_lr(self, lr):
        self.fp32_optimizer.set_lr(lr)
class Bottleneck(nn.Module):
    """ResNet bottleneck block, DeepLab variant.

    1x1 reduce -> 3x3 (optionally dilated) -> 1x1 expand, with a residual
    connection.  All BatchNorm parameters are frozen (requires_grad=False).
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation_=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
        for param in self.bn1.parameters():
            param.requires_grad = False
        # Widen padding with dilation (2 or 4) to preserve spatial size.
        padding = dilation_ if dilation_ in (2, 4) else 1
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=padding, bias=False, dilation=dilation_)
        self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
        for param in self.bn2.parameters():
            param.requires_grad = False
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, affine=affine_par)
        for param in self.bn3.parameters():
            param.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Project the skip connection when channel/stride shapes differ.
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        return self.relu(out)
def draw_fig3_barplots(dataframe):
    """Draw a 2x2 grid of per-layer barplots for the
    '<source>xtop1_distill_most_common' columns of ``dataframe``.

    The top row clears both axis labels; the bottom row keeps its x label.
    """
    metric = 'top1_distill_most_common'
    panels = [
        ('last_inp', 'Last Input'),
        ('cur_inp', 'Current Input'),
        ('cur_pred', 'Current Prediction'),
        ('next_pred', 'Next Prediction'),
    ]
    for idx, (source, title) in enumerate(panels):
        plt.subplot(2, 2, idx + 1)
        ax: Axes = sns.barplot(x='layer', y=f'{source}x{metric}', data=dataframe)
        ax.set_title(title)
        # Only the top row suppresses the x label.
        if idx < 2:
            ax.set_xlabel('')
        ax.set_ylabel('')
def data_load(filename, label):
    """Slice the raw signal in ``filename`` into CWT images with a shared label.

    Only the first tenth of the file is consumed; consecutive non-overlapping
    windows of ``signal_size`` samples are each converted with ``CWT``.

    Returns:
        (images, labels) lists of equal length.
    """
    signal = np.loadtxt(filename)
    images = []
    labels = []
    limit = signal.shape[0] / 10
    start = 0
    # Walk non-overlapping windows until the next one would pass the limit.
    while (start + signal_size) <= limit:
        segment = signal[start:start + signal_size]
        images.append(CWT(signal_size + 1, segment))
        labels.append(label)
        start += signal_size
    return (images, labels)
def compute_progress(dir, iter, run_opts):
    """Launch background jobs that log nnet3 model progress for ``iter``.

    Always logs info + progress against the previous iteration's model;
    every 10th (non-zero) iteration additionally dumps verbose progress and
    model info reports.
    """
    previous = '{0}/{1}.mdl'.format(dir, iter - 1)
    current = '{0}/{1}.mdl'.format(dir, iter)
    common_lib.background_command("{command} {dir}/log/progress.{iter}.log nnet3-am-info {model} '&&' nnet3-show-progress --use-gpu=no {prev_model} {model}\n ".format(command=run_opts.command, dir=dir, iter=iter, model=current, prev_model=previous))
    if (iter % 10 == 0) and iter > 0:
        # Verbose progress diff against the previous model.
        common_lib.background_command('{command} {dir}/log/full_progress.{iter}.log nnet3-show-progress --use-gpu=no --verbose=2 {prev_model} {model}\n '.format(command=run_opts.command, dir=dir, iter=iter, model=current, prev_model=previous))
        # Verbose structural info for the current model.
        common_lib.background_command('{command} {dir}/log/full_info.{iter}.log nnet3-info --verbose=2 {model}\n '.format(command=run_opts.command, dir=dir, iter=iter, model=current))
class RandomHorizontalFlip(object):
    """Randomly flip an image (with its target mask and RoIs) horizontally."""

    def __init__(self, prob=0.5):
        # Probability of applying the flip.
        self.prob = prob

    def __call__(self, image, target=None, rois=None):
        if random.random() >= self.prob:
            # No flip this time; pass everything through untouched.
            return (image, target, rois)
        flipped_image = F.hflip(image)
        flipped_target = None if target is None else target.transpose(0)
        flipped_rois = None if rois is None else rois.transpose(0)
        return (flipped_image, flipped_target, flipped_rois)
class Voulge(BasePolearm):
    """A voulge: a heavy iron polearm dealing 2d4 damage."""

    def __init__(self):
        super().__init__(
            'voulge',
            weight=125,
            damage=D.Dice.from_str('2d4'),
            material=M.Iron,
            hit=0,
        )
def get_img_txt_mappings(train_txt_dbs):
    """Build image/text/db lookup tables from each db's ``img2txts.json``.

    Each folder in ``train_txt_dbs`` must contain an ``img2txts.json``
    mapping image id -> list of text ids.  "set" below means the db folder
    path an image/text came from.

    Returns:
        (img2txt, txt2img, img2set, txt2set, set2img, set2txt) — the last
        two are defaultdict(list).
    """
    # BUG FIX/perf: the original opened and parsed every img2txts.json twice
    # (and never closed the handles); load each file exactly once.
    per_db = []
    for db_folder in train_txt_dbs:
        with open(os.path.join(db_folder, 'img2txts.json')) as f:
            per_db.append(json.load(f))
    # ChainMap gives earlier dbs priority on duplicate image ids, matching
    # the original behavior.
    train_img2txt = dict(ChainMap(*per_db))
    train_txt2img = {txt_id: img_id for img_id, txt_ids in train_img2txt.items() for txt_id in txt_ids}
    train_img2set = dict(ChainMap(*[{img_id: db_folder for img_id in db_map} for db_map, db_folder in zip(per_db, train_txt_dbs)]))
    train_txt2set = {txt_id: train_img2set[img_id] for txt_id, img_id in train_txt2img.items()}
    train_set2img = collections.defaultdict(list)
    train_set2txt = collections.defaultdict(list)
    for img_id, set_id in train_img2set.items():
        train_set2img[set_id].append(img_id)
        train_set2txt[set_id] += train_img2txt[img_id]
    return (train_img2txt, train_txt2img, train_img2set, train_txt2set, train_set2img, train_set2txt)
def lin_cka_dist(A, B):
    """Linear CKA distance between two representation matrices.

    A and B are (features x examples)-style matrices with the same number
    of columns.  Returns ``1 - ||B A^T||_F^2 / (||A A^T||_F ||B B^T||_F)``,
    which is 0 for identical (up to scale/rotation) representations.
    """
    # BUG FIX: the matrix products were written without the `@` operator
    # (e.g. `np.linalg.norm((B A.T), ...)`), which is a syntax error.
    similarity = np.linalg.norm(B @ A.T, ord='fro') ** 2
    normalization = np.linalg.norm(A @ A.T, ord='fro') * np.linalg.norm(B @ B.T, ord='fro')
    return 1 - similarity / normalization
def test_potential_paramunits_2d():
    """Check that 2D galpy potentials accept astropy-Quantity parameters.

    For each planar potential, builds one instance with unit-ful parameters
    and one with the equivalent dimensionless (internal-unit) parameters,
    then asserts both evaluate identically at a fixed (R, phi, t).
    """
    from galpy import potential
    from galpy.util import conversion
    # Arbitrary non-default unit conversions to make mismatches visible.
    (ro, vo) = (11.0, 180.0)
    # --- CosmphiDiskPotential, phib/phio parameterization ---
    pot = potential.CosmphiDiskPotential(amp=1.0, m=3, phib=(20.0 * units.deg), phio=((1290.0 * (units.km ** 2)) / (units.s ** 2)), r1=(8.0 * units.kpc), rb=(7.0 * units.kpc), ro=ro, vo=vo)
    pot_nounits = potential.CosmphiDiskPotential(amp=1.0, m=3, phib=((20.0 / 180.0) * numpy.pi), phio=(1290.0 / (vo ** 2.0)), r1=(8.0 / ro), rb=(7.0 / ro), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False) - pot_nounits(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False))) < (10.0 ** (- 8.0))), 'CosmphiDiskPotential w/ parameters w/ units does not behave as expected'
    # --- CosmphiDiskPotential, cp/sp parameterization ---
    pot = potential.CosmphiDiskPotential(amp=1.0, m=3, cp=((1000.0 * (units.km ** 2)) / (units.s ** 2.0)), sp=((300.0 * (units.km ** 2)) / (units.s ** 2.0)), r1=(8.0 * units.kpc), ro=ro, vo=vo)
    pot_nounits = potential.CosmphiDiskPotential(amp=1.0, m=3, cp=(1000.0 / (vo ** 2.0)), sp=(300.0 / (vo ** 2.0)), r1=(8.0 / ro), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False) - pot_nounits(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False))) < (10.0 ** (- 8.0))), 'CosmphiDiskPotential w/ parameters w/ units does not behave as expected'
    # --- EllipticalDiskPotential, phib/twophio parameterization ---
    pot = potential.EllipticalDiskPotential(amp=1.0, tform=(1.0 * units.Gyr), tsteady=(3.0 * units.Gyr), phib=(20.0 * units.deg), twophio=((1290.0 * (units.km ** 2)) / (units.s ** 2)), r1=(8.0 * units.kpc), ro=ro, vo=vo)
    pot_nounits = potential.EllipticalDiskPotential(amp=1.0, tform=(1.0 / conversion.time_in_Gyr(vo, ro)), tsteady=(3.0 / conversion.time_in_Gyr(vo, ro)), phib=((20.0 / 180.0) * numpy.pi), twophio=(1290.0 / (vo ** 2.0)), r1=(8.0 / ro), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False) - pot_nounits(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False))) < (10.0 ** (- 8.0))), 'EllipticalDiskPotential w/ parameters w/ units does not behave as expected'
    # --- EllipticalDiskPotential, cp/sp parameterization ---
    pot = potential.EllipticalDiskPotential(amp=1.0, tform=(1.0 * units.Gyr), tsteady=(3.0 * units.Gyr), cp=((1000.0 * (units.km ** 2)) / (units.s ** 2.0)), sp=((300.0 * (units.km ** 2)) / (units.s ** 2.0)), r1=(8.0 * units.kpc), ro=ro, vo=vo)
    pot_nounits = potential.EllipticalDiskPotential(amp=1.0, tform=(1.0 / conversion.time_in_Gyr(vo, ro)), tsteady=(3.0 / conversion.time_in_Gyr(vo, ro)), cp=(1000.0 / (vo ** 2.0)), sp=(300.0 / (vo ** 2.0)), r1=(8.0 / ro), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False) - pot_nounits(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False))) < (10.0 ** (- 8.0))), 'EllipticalDiskPotential w/ parameters w/ units does not behave as expected'
    # --- LopsidedDiskPotential, phib/phio parameterization ---
    pot = potential.LopsidedDiskPotential(amp=1.0, phib=(20.0 * units.deg), phio=((1290.0 * (units.km ** 2)) / (units.s ** 2)), r1=(8.0 * units.kpc), ro=ro, vo=vo)
    pot_nounits = potential.LopsidedDiskPotential(amp=1.0, phib=((20.0 / 180.0) * numpy.pi), phio=(1290.0 / (vo ** 2.0)), r1=(8.0 / ro), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False) - pot_nounits(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False))) < (10.0 ** (- 8.0))), 'LopsidedDiskPotential w/ parameters w/ units does not behave as expected'
    # --- LopsidedDiskPotential, cp/sp parameterization ---
    pot = potential.LopsidedDiskPotential(amp=1.0, cp=((1000.0 * (units.km ** 2)) / (units.s ** 2.0)), sp=((300.0 * (units.km ** 2)) / (units.s ** 2.0)), r1=(8.0 * units.kpc), ro=ro, vo=vo)
    pot_nounits = potential.LopsidedDiskPotential(amp=1.0, cp=(1000.0 / (vo ** 2.0)), sp=(300.0 / (vo ** 2.0)), r1=(8.0 / ro), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False) - pot_nounits(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False))) < (10.0 ** (- 8.0))), 'LopsidedDiskPotential w/ parameters w/ units does not behave as expected'
    # --- SteadyLogSpiralPotential with alpha ---
    pot = potential.SteadyLogSpiralPotential(amp=1.0, m=4, omegas=(((50.0 * units.km) / units.s) / units.kpc), A=((1700.0 * (units.km ** 2)) / (units.s ** 2)), gamma=(21.0 * units.deg), alpha=(- 9.0), ro=ro, vo=vo)
    pot_nounits = potential.SteadyLogSpiralPotential(amp=1.0, m=4, omegas=((50.0 * ro) / vo), A=(1700.0 / (vo ** 2.0)), gamma=((21.0 / 180.0) * numpy.pi), alpha=(- 9.0), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False) - pot_nounits(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False))) < (10.0 ** (- 8.0))), 'SteadyLogSpiralPotential w/ parameters w/ units does not behave as expected'
    # --- SteadyLogSpiralPotential with pitch angle p instead of alpha ---
    pot = potential.SteadyLogSpiralPotential(amp=1.0, m=4, omegas=(((50.0 * units.km) / units.s) / units.kpc), A=((1700.0 * (units.km ** 2)) / (units.s ** 2)), gamma=(21.0 * units.deg), p=(10.0 * units.deg), ro=ro, vo=vo)
    pot_nounits = potential.SteadyLogSpiralPotential(amp=1.0, m=4, omegas=((50.0 * ro) / vo), A=(1700.0 / (vo ** 2.0)), gamma=((21.0 / 180.0) * numpy.pi), p=((10.0 / 180.0) * numpy.pi), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False) - pot_nounits(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False))) < (10.0 ** (- 8.0))), 'SteadyLogSpiralPotential w/ parameters w/ units does not behave as expected'
    # --- TransientLogSpiralPotential with alpha ---
    pot = potential.TransientLogSpiralPotential(amp=1.0, m=4, omegas=(((50.0 * units.km) / units.s) / units.kpc), A=((1700.0 * (units.km ** 2)) / (units.s ** 2)), gamma=(21.0 * units.deg), alpha=(- 9.0), to=(2.0 * units.Gyr), sigma=(1.0 * units.Gyr), ro=ro, vo=vo)
    pot_nounits = potential.TransientLogSpiralPotential(amp=1.0, m=4, omegas=((50.0 * ro) / vo), A=(1700.0 / (vo ** 2.0)), gamma=((21.0 / 180.0) * numpy.pi), alpha=(- 9.0), to=(2.0 / conversion.time_in_Gyr(vo, ro)), sigma=(1.0 / conversion.time_in_Gyr(vo, ro)), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False) - pot_nounits(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False))) < (10.0 ** (- 8.0))), 'TransientLogSpiralPotential w/ parameters w/ units does not behave as expected'
    # --- TransientLogSpiralPotential with pitch angle p ---
    pot = potential.TransientLogSpiralPotential(amp=1.0, m=4, omegas=(((50.0 * units.km) / units.s) / units.kpc), A=((1700.0 * (units.km ** 2)) / (units.s ** 2)), gamma=(21.0 * units.deg), p=(10.0 * units.deg), to=(2.0 * units.Gyr), sigma=(1.0 * units.Gyr), ro=ro, vo=vo)
    pot_nounits = potential.TransientLogSpiralPotential(amp=1.0, m=4, omegas=((50.0 * ro) / vo), A=(1700.0 / (vo ** 2.0)), gamma=((21.0 / 180.0) * numpy.pi), p=((10.0 / 180.0) * numpy.pi), to=(2.0 / conversion.time_in_Gyr(vo, ro)), sigma=(1.0 / conversion.time_in_Gyr(vo, ro)), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False) - pot_nounits(1.5, phi=0.1, t=(2.0 / conversion.time_in_Gyr(vo, ro)), use_physical=False))) < (10.0 ** (- 8.0))), 'TransientLogSpiralPotential w/ parameters w/ units does not behave as expected'
    return None
def parse_fn_haus(data_path):
    """Load an image/label pair given as a single 'image_path,label_path' string.

    Label value 2 is merged into class 1 (binary foreground).

    Args:
        data_path: comma-separated pair of file paths.

    Returns:
        (image, mask, spacing) — numpy arrays for image and mask, plus the
        mask's voxel spacing from the ITK header.
    """
    path = data_path.split(',')
    image_path = path[0]
    label_path = path[1]
    itk_image = sitk.ReadImage(image_path)
    itk_mask = sitk.ReadImage(label_path)
    spacing = itk_mask.GetSpacing()
    image = sitk.GetArrayFromImage(itk_image)
    mask = sitk.GetArrayFromImage(itk_mask)
    # Collapse label 2 into label 1.
    mask[(mask == 2)] = 1
    # The previous version applied transpose([0, 1, 2]) — an identity
    # permutation (dead code) — to both arrays; removed.
    return (image, mask, spacing)
def load_pretrained_weights(model, weight_path):
    """Load pretrained weights from ``weight_path`` into ``model``.

    DataParallel-style ``module.`` prefixes are stripped from checkpoint
    keys.  Layers whose name or tensor shape does not match the model are
    skipped and reported; everything else is loaded in place.
    """
    checkpoint = load_checkpoint(weight_path)
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    model_dict = model.state_dict()
    new_state_dict = OrderedDict()
    matched_layers = []
    discarded_layers = []
    for name, param in state_dict.items():
        # Strip DataParallel's "module." prefix if present.
        key = name[7:] if name.startswith('module.') else name
        if key in model_dict and model_dict[key].size() == param.size():
            new_state_dict[key] = param
            matched_layers.append(key)
        else:
            discarded_layers.append(key)
    model_dict.update(new_state_dict)
    model.load_state_dict(model_dict)
    if not matched_layers:
        warnings.warn('The pretrained weights "{}" cannot be loaded, please check the key names manually (** ignored and continue **)'.format(weight_path))
    else:
        print('Successfully loaded pretrained weights from "{}"'.format(weight_path))
        if len(discarded_layers) > 0:
            print('** The following layers are discarded due to unmatched keys or layer size: {}'.format(discarded_layers))
def get_samples(prior_model, search_model, max, w_min):
    """Sample novel molecules from ``search_model`` with importance weights.

    Repeatedly draws latent batches, decodes them to SELFIES/SMILES,
    discards invalid molecules, duplicates, and molecules failing the
    SA/QED screens, and assigns each kept molecule the importance weight
    exp(logp_prior - logp_search), optionally clamped from below.

    Args:
        prior_model: model defining the prior generation probability.
        search_model: model being sampled from.
        max: maximum number of molecules to keep (also bounds the total
            number of candidates at 10 * max).
        w_min: lower clamp for the importance weights; values <= 0 disable
            clamping.

    Returns:
        (sample_selfies, weights): kept SMILES strings (canonical-unique)
        and their per-molecule weight tensors.
    """
    max_SA = 10   # drop molecules with synthetic-accessibility score above this
    min_QED = 0   # drop molecules with QED below this
    sample_selfies = []
    weights = []
    sample_can_smiles_set = set()
    tries = 0
    batch_size = 100
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    search_model.to(device)
    prior_model.to(device)
    # Stop once enough molecules are kept or 10x`max` candidates were tried.
    while (((tries * batch_size) < (10 * max)) and (len(sample_selfies) < max)):
        tries += 1
        samples_z = search_model.sample_z_prior(n_mols=batch_size)
        gen_seq = search_model.decode(samples_z)
        (_, sample_indices) = torch.max(gen_seq, dim=1)
        # Importance weight: prior probability over search probability.
        prior_prob = GenProb(sample_indices, samples_z, prior_model)
        search_prob = GenProb(sample_indices, samples_z, search_model)
        batch_weights = torch.exp((prior_prob - search_prob))
        if (0 < w_min):
            batch_weights = batch_weights.clamp(min=w_min)
        print('Mean of weights tensor: ', torch.mean(batch_weights).item())
        new_ones = 0
        filtered_sa = 0
        filtered_qed = 0
        batch_selfies = search_model.indices_to_smiles(sample_indices)
        for (i, s) in enumerate(batch_selfies):
            new_selfie = decoder(s)
            m = Chem.MolFromSmiles(new_selfie)
            if (m is None):
                continue  # invalid molecule
            can_smile = Chem.MolToSmiles(m)
            if (can_smile in sample_can_smiles_set):
                continue  # duplicate (canonical form already kept)
            # Property screens.  (The previous version also started an
            # unused time.perf_counter() here; that dead assignment and its
            # now-empty guard were removed.)
            if ((max_SA is not None) and (calculateScore(m) > max_SA)):
                filtered_sa += 1
                continue
            if ((min_QED is not None) and (Chem.QED.qed(m) < min_QED)):
                filtered_qed += 1
                continue
            new_ones += 1
            sample_can_smiles_set.add(can_smile)
            sample_selfies.append(new_selfie)
            weights.append(batch_weights[i])
        print(f'{tries} : ({new_ones} molecules sampled, {filtered_sa} discarded on sa, {filtered_qed} on qed )/{batch_size}')
    return (sample_selfies, weights)
def _update_focal_loss_alpha(configs, alpha):
    """Set the alpha balancing factor of the model's sigmoid focal loss.

    Raises:
        TypeError: if the configured classification loss is not
            ``weighted_sigmoid_focal``.
    """
    classification_loss = _get_classification_loss(configs['model'])
    loss_type = classification_loss.WhichOneof('classification_loss')
    if loss_type != 'weighted_sigmoid_focal':
        raise TypeError('Classification loss must be `weighted_sigmoid_focal`.')
    classification_loss.weighted_sigmoid_focal.alpha = alpha
def upsample(input):
    """Double the spatial resolution of ``input`` with bilinear interpolation.

    Uses ``F.interpolate``: ``F.upsample`` has been deprecated for years
    and merely forwards to ``interpolate`` with a warning — the semantics
    (scale_factor=2, bilinear, align_corners=False) are identical.
    """
    return F.interpolate(input, scale_factor=2, mode='bilinear', align_corners=False)
class Residual(BaseNetwork):
    """Residual block with GroupNorm: two 3x3 convs plus an identity/1x1 skip.

    NOTE(review): ``self.bn`` and ``self.conv3`` are constructed but never
    used in ``forward`` — possibly kept for checkpoint compatibility;
    confirm before removing.
    """

    def __init__(self, numIn, numOut):
        super(Residual, self).__init__()
        self.numIn = numIn    # input channel count
        self.numOut = numOut  # output channel count
        self.bn = nn.GroupNorm(32, self.numIn)  # unused in forward
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(self.numIn, self.numOut, bias=True, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.GroupNorm(32, self.numOut)
        self.conv2 = nn.Conv2d(self.numOut, self.numOut, bias=True, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.GroupNorm(32, self.numOut)
        self.conv3 = nn.Conv2d(self.numOut, self.numOut, bias=True, kernel_size=3, stride=1, padding=1)  # unused in forward
        if (self.numIn != self.numOut):
            # 1x1 projection so the skip path matches the output channels.
            self.conv4 = nn.Conv2d(self.numIn, self.numOut, bias=True, kernel_size=1)
        self.init_weights()

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if (self.numIn != self.numOut):
            residual = self.conv4(x)
        # No activation after bn2; the raw skip sum is returned.
        return (out + residual)
class AdapterTrainingArguments():
    """Argument container for parameter-efficient fine-tuning variants.

    Groups the switches for task adapters, hypercomplex (PHM) adapters,
    intrinsic-dimension training, prefix tuning, BitFit, and low-rank
    adapters.  Field semantics live in each field's ``metadata['help']``
    (surfaced by HfArgumentParser); comments below only group the fields.
    """
    # --- task adapters ---
    train_task_adapters: Optional[bool] = field(default=False, metadata={'help': 'If set, adds task adapters in the model.'})
    adapter_config_name: Optional[str] = field(default='adapter', metadata={'help': f'config name for the adapter layers, should be selected in {sorted(ADAPTER_CONFIG_MAPPING.keys())}.'})
    add_layer_norm_before_adapter: Optional[bool] = field(default=False, metadata={'help': 'whether to have layer-norm before adapter.'})
    add_layer_norm_after_adapter: Optional[bool] = field(default=True, metadata={'help': 'whether to have layer-norm after adapter.'})
    hidden_dim: Optional[int] = field(default=128, metadata={'help': 'defines the default hidden dimension for adapter layers.'})
    task_reduction_factor: Optional[int] = field(default=16, metadata={'help': 'defines the default reduction factor for adapter layers.'})
    non_linearity: Optional[str] = field(default='swish', metadata={'help': 'Defines nonlinearity for adapter layers.'})
    unfreeze_lm_head: bool = field(default=False, metadata={'help': 'If set unfreeze the last linear layer.'})
    unfreeze_layer_norms: bool = field(default=False, metadata={'help': 'If set, unfreezes the layer norms.'})
    task_adapter_layers_encoder: Optional[List[int]] = field(default=None, metadata={'help': 'Defines the layers idin which task adapters isadded in the encoder.'})
    task_adapter_layers_decoder: Optional[List[int]] = field(default=None, metadata={'help': 'Defines the layers idin which task adapters isadded in the decoder.'})
    task_adapter_in_decoder: Optional[bool] = field(default=True, metadata={'help': 'If set to false, do not includetask adapters in the decoder.'})
    # --- hypercomplex (PHM) adapters ---
    hypercomplex_adapters: Optional[bool] = field(default=False, metadata={'help': 'If set, uses the hypercomplex layersfor adapters.'})
    hypercomplex_division: Optional[int] = field(default=8, metadata={'help': 'Defines the number to divide the dimensionsof the linear layer by it.'})
    # --- intrinsic-dimension training ---
    intrinsic_model: Optional[bool] = field(default=False, metadata={'help': 'If set, computes all parameters of the model with an intrinsic vector.'})
    intrinsic_said: Optional[bool] = field(default=False, metadata={'help': 'If set, computes the SAID version of themodel with intrinsic vector.'})
    intrinsic_dim: Optional[int] = field(default=100, metadata={'help': 'Defines the intrinsic dimensionality.'})
    normalize_intrinsic_projections: Optional[bool] = field(default=False, metadata={'help': 'If set, normalizes the intrinsic projection matrices.'})
    intrinsic_projection: Optional[str] = field(default='fastfood', metadata={'help': 'Defines the type of projectionfor intrinsic adapters, it can be random or fastfood.'})
    # --- PHM rule / weight options ---
    learn_phm: Optional[bool] = field(default=True, metadata={'help': 'If set, learns the phm rules in Hypercomplex adapters.'})
    normalize_phm_weight: Optional[bool] = field(default=False, metadata={'help': 'Weather to normalize the weights ofthe PHM layer.'})
    intrinsic_layer_norms: Optional[bool] = field(default=False, metadata={'help': 'If selected, then in case of unfreezing layernorms for intrinsic_adapters case, it also adds the layernorms parameters inside the parameters given for the intrinsic projection, and if this is not set, those parameters are not projected with intrinsic vector.'})
    hypercomplex_nonlinearity: Optional[str] = field(default='glorot-uniform', metadata={'help': 'Defines the nonlinearity for the hypercomplex adapter layers.'})
    shared_phm_rule: Optional[bool] = field(default=False, metadata={'help': 'If set, uses a shared phm rules for all hypercomplex adapter layers.'})
    factorized_phm: Optional[bool] = field(default=False, metadata={'help': 'If set, it factorizes the weights for the W in hypercomplex adapters.'})
    shared_W_phm: Optional[bool] = field(default=False, metadata={'help': 'If set, shares the W in phm adapter layers between all adapters.'})
    factorized_phm_rule: Optional[bool] = field(default=False, metadata={'help': 'If set, it factorizes the shared weights for the W in hypercomplex adapters.'})
    phm_c_init: Optional[str] = field(default='normal', metadata={'help': 'Initialization for the phm rules.'})
    phm_rank: Optional[int] = field(default=1, metadata={'help': 'sets the rank for the phm decomposition.'})
    phm_init_range: Optional[float] = field(default=0.01, metadata={'help': 'defines the phm init range.'})
    # --- adapter placement ---
    add_adapter_in_feed_forward: Optional[bool] = field(default=True, metadata={'help': 'If set, adds adapters in the feedforward.'})
    add_adapter_in_self_attention: Optional[bool] = field(default=True, metadata={'help': 'If set, adds adapters in the selfattention'})
    # --- prefix tuning ---
    prefix_tuning: Optional[bool] = field(default=False, metadata={'help': 'If set, uses prefix tuning.'})
    prefix_dim: Optional[int] = field(default=100, metadata={'help': 'Specifies the prefix embedding dimension.'})
    init_prefix_from_vocab: Optional[bool] = field(default=False, metadata={'help': 'Initialize prefix from the tokens of pretrained t5-base model.'})
    kronecker_prod: Optional[bool] = field(default=False, metadata={'help': 'If set, compute the kronecker using another version.'})
    # --- BitFit ---
    bitfit: Optional[bool] = field(default=False, metadata={'help': 'If set, we train the bitfit model.'})
    freeze_bitfit_lm_head: Optional[bool] = field(default=False, metadata={'help': 'If set, freezes the classifier in bitfit.'})
    freeze_bitfit_lm_head_all: Optional[bool] = field(default=False, metadata={'help': 'If set, freezes the classifier in bitfit.'})
    # --- low-rank adapters ---
    low_rank_adapters: Optional[bool] = field(default=False, metadata={'help': 'If set, uses the low-rank adapters.'})
    low_rank_w_init: Optional[str] = field(default='glorot-uniform', metadata={'help': 'Defines the initialization for low-rank adapters.'})
    low_rank_rank: Optional[int] = field(default=1, metadata={'help': 'Defines the rank of low-rank adapters.'})
    # --- attention prefix ---
    attn_prefix: bool = field(default=False, metadata={'help': 'use attention predix model'})
    attn_method_name: Optional[str] = field(default='linear', metadata={'help': 'attention model for attn_prefix'})
    adapters_cur_training_task: Optional[str] = field(default=None, metadata={'help': 'currently training data name.'})
def display_results(res_dict, logger, title=''):
    """Pretty-print an uncertainty-evaluation results dict through ``logger``.

    Expects ``res_dict`` keyed by 'accuracy', 'avg_calibration',
    'adv_group_calibration', 'sharpness' and 'scoring_rule'; metric keys
    are mapped to display names via the module-level METRIC_NAMES table.
    """
    if (title != ''):
        logger.info(f''' {title}:''')
    logger.info(' \n Accuracy Metrics ')
    for (acc_metric, acc_val) in res_dict['accuracy'].items():
        logger.info('\t{:<13} {:.3f}'.format(METRIC_NAMES[acc_metric], acc_val))
    logger.info(' \n Average Calibration Metrics ')
    for (cali_metric, cali_val) in res_dict['avg_calibration'].items():
        logger.info('\t{:<37} {:.3f}'.format(METRIC_NAMES[cali_metric], cali_val))
    logger.info(' \n Adversarial Group Calibration Metrics ')
    # Adversarial-group calibration has its own multi-row layout helper.
    _display_adv_group_cal(res_dict['adv_group_calibration'], logger)
    logger.info(' \n Sharpness Metrics ')
    for (sharp_metric, sharp_val) in res_dict['sharpness'].items():
        logger.info('\t{:} {:.3f}'.format(METRIC_NAMES[sharp_metric], sharp_val))
    logger.info(' \n Scoring Rule Metrics ')
    for (sr_metric, sr_val) in res_dict['scoring_rule'].items():
        logger.info('\t{:<25} {:.3f}'.format(METRIC_NAMES[sr_metric], sr_val))
    logger.info(' \n')
def check_none(p: str) -> bool:
    """Return True if ``p`` represents "no value".

    Treats the literal strings 'None'/'none' and an actual ``None`` as
    empty.  (The ``str`` annotation is kept for interface compatibility
    even though callers may pass ``None`` itself.)
    """
    return p in ('None', 'none', None)
class Block(nn.Module):
    """Cascading residual block: one shared EResidualBlock applied three
    times, with 1x1 convs fusing the growing feature concatenation.

    NOTE(review): ``in_channels``/``out_channels`` are accepted but the
    widths are hard-coded to 64 — confirm callers always pass 64.
    """

    def __init__(self, in_channels, out_channels, group=1):
        super(Block, self).__init__()
        # Single residual block reused at every cascade stage (weight sharing
        # appears intentional: the same self.b1 is applied three times).
        self.b1 = ops.EResidualBlock(64, 64, group=group)
        # 1x1 fusion convs; their input width grows by 64 per stage.
        self.c1 = ops.BasicBlock((64 * 2), 64, 1, 1, 0)
        self.c2 = ops.BasicBlock((64 * 3), 64, 1, 1, 0)
        self.c3 = ops.BasicBlock((64 * 4), 64, 1, 1, 0)

    def forward(self, x):
        c0 = o0 = x
        b1 = self.b1(o0)
        c1 = torch.cat([c0, b1], dim=1)
        o1 = self.c1(c1)
        b2 = self.b1(o1)  # second application of the shared block
        c2 = torch.cat([c1, b2], dim=1)
        o2 = self.c2(c2)
        b3 = self.b1(o2)  # third application of the shared block
        c3 = torch.cat([c2, b3], dim=1)
        o3 = self.c3(c3)
        return o3
def get_parser(desc):
    """Build the CLI parser.

    ``--uniform`` switches sampling to uniform by storing ``False`` into
    ``args.importance_training`` (which defaults to ``True``).
    """
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument(
        '--uniform',
        dest='importance_training',
        action='store_false',
        help='Enable uniform sampling',
    )
    return parser
class ContrastiveLoss(cirloss.ContrastiveLoss):
    """Contrastive loss with summed (rather than averaged) reduction.

    Labels may arrive as a tensor or a list of tensors; lists are
    concatenated, and labels are moved to the input's device before
    delegating to the parent implementation.
    """

    def __init__(self, margin, eps):
        super().__init__(margin=margin, eps=eps)
        # Sum per-pair losses instead of averaging them.
        self.reduction = 'sum'

    def forward(self, x, label):
        target = torch.cat(label) if isinstance(label, list) else label
        return super().forward(x, target.to(x.device))
def id_to_useritemid(num_users, num_items):
    """Map global node ids to local user-/item-ids.

    Global ids ``0..num_users-1`` are users (identity mapping); ids
    ``num_users..num_users+num_items-1`` are items, mapped back to
    ``0..num_items-1``.
    """
    mapping = {uid: uid for uid in range(num_users)}
    mapping.update({num_users + iid: iid for iid in range(num_items)})
    return mapping
class cassieRLEnvWithFootForces(cassieRLEnvStepInPlaceWithFootForces):
    """Cassie locomotion env with per-episode randomized forward speed.

    The reference trajectory's forward progress (qpos[0]) is scaled by
    ``self.speed``; reset also randomizes the heading and starting phase.
    """

    def __init__(self):
        super().__init__()
        # Forward-speed multiplier for the reference trajectory; reset()
        # re-randomizes it in [0, 1] in 0.1 steps.
        self.speed = 1.0

    def get_kin_state(self):
        """Reference (qpos, qvel) at the current phase, scaled by speed."""
        pose = np.copy(self.trajectory.qpos[(self.phase * self.control_rate)])
        pose[0] *= self.speed
        # Advance x by one full trajectory cycle length per completed cycle.
        pose[0] += (((self.trajectory.qpos[(1681, 0)] - self.trajectory.qpos[(0, 0)]) * self.counter) * self.speed)
        pose[1] = 0
        vel = np.copy(self.trajectory.qvel[(self.phase * self.control_rate)])
        vel[0] *= self.speed
        return (pose, vel)

    def get_kin_next_state(self):
        """Reference (qpos, qvel) one phase ahead (wrapping at 28)."""
        phase = (self.phase + 1)
        if (phase >= 28):
            phase = 0
        pose = np.copy(self.trajectory.qpos[(phase * self.control_rate)])
        pose[0] *= self.speed
        vel = np.copy(self.trajectory.qvel[(phase * self.control_rate)])
        pose[0] += (((self.trajectory.qpos[(1681, 0)] - self.trajectory.qpos[(0, 0)]) * self.counter) * self.speed)
        pose[1] = 0
        vel[0] *= self.speed
        return (pose, vel)

    def reset(self):
        """Re-randomize speed/heading/phase, rebuild the sim, return state."""
        self.orientation = 0
        self.speed = (random.randint(0, 10) / 10)
        # Heading perturbed by up to +-0.1*pi around self.orientation.
        orientation = (self.orientation + ((random.randint((- 10), 10) * np.pi) / 100))
        quaternion = euler2quat(z=orientation, y=0, x=0)
        self.phase = random.randint(0, 27)
        self.time = 0
        self.counter = 0
        # Recreate the underlying C simulator from scratch.
        cassie_sim_free(self.sim.c)
        self.sim.c = cassie_sim_init()
        (qpos, qvel) = self.get_kin_state()
        qpos[3:7] = quaternion
        self.sim.set_qpos(qpos)
        self.sim.set_qvel(qvel)
        self.cassie_state = self.sim.step_pd(self.u)
        return self.get_state()

    def reset_for_normalization(self):
        # Normalization statistics use the same reset distribution.
        return self.reset()

    def reset_for_test(self):
        """Like reset(), but the phase range follows self.max_phase."""
        self.speed = (random.randint(0, 10) / 10)
        self.orientation = 0
        orientation = (self.orientation + ((random.randint((- 10), 10) * np.pi) / 100))
        quaternion = euler2quat(z=orientation, y=0, x=0)
        self.phase = random.randint(0, (self.max_phase - 1))
        self.time = 0
        self.counter = 0
        cassie_sim_free(self.sim.c)
        self.sim.c = cassie_sim_init()
        (qpos, qvel) = self.get_kin_state()
        qpos[3:7] = quaternion
        self.sim.set_qpos(qpos)
        self.sim.set_qvel(qvel)
        self.cassie_state = self.sim.step_pd(self.u)
        return self.get_state()

    def compute_reward(self):
        """Reward = weighted exp(-penalty) terms for joints/COM/orientation.

        NOTE(review): ``force_penalty`` in the final expression is never
        defined in this method — as written this raises NameError unless a
        global of that name exists; confirm the intended source.
        NOTE(review): ``spring_penalty`` is computed but not used in the
        total — confirm whether it should contribute.
        """
        (ref_pos, ref_vel) = self.get_kin_state()
        weight = [0.15, 0.15, 0.1, 0.05, 0.05, 0.15, 0.15, 0.1, 0.05, 0.05]
        joint_penalty = 0
        # Actuated joint / velocity indices in qpos / qvel.
        joint_index = [7, 8, 9, 14, 20, 21, 22, 23, 28, 34]
        vel_index = [6, 7, 8, 12, 18, 19, 20, 21, 25, 31]
        for i in range(10):
            error = (weight[i] * ((ref_pos[joint_index[i]] - self.sim.qpos()[joint_index[i]]) ** 2))
            joint_penalty += (error * 30)
        pelvis_pos = self.cassie_state.pelvis.position[:]
        # Desired planar position rotated into the commanded heading.
        desired_x = (ref_pos[0] * np.cos(self.orientation))
        desired_y = (ref_pos[1] * np.sin(self.orientation))
        com_penalty = ((((pelvis_pos[0] - desired_x) ** 2) + ((pelvis_pos[1] - desired_y) ** 2)) + ((pelvis_pos[2] - ref_pos[2]) ** 2))
        yaw = quat2yaw(self.sim.qpos()[3:7])
        orientation_penalty = (((self.sim.qpos()[4] ** 2) + (self.sim.qpos()[5] ** 2)) + ((yaw - self.orientation) ** 2))
        spring_penalty = ((self.sim.qpos()[15] ** 2) + (self.sim.qpos()[29] ** 2))
        spring_penalty *= 1000
        total_reward = ((((0.5 * np.exp((- joint_penalty))) + (0.3 * np.exp((- com_penalty)))) + (0.1 * np.exp(((- 30) * orientation_penalty)))) + (0.1 * np.exp((- force_penalty))))
        return total_reward
def char_swap(s):
    """Randomly substitute one transformable character class in ``s``.

    Scans the language's character groups in order; for the first character
    of ``s`` found in any group, every occurrence of that character is
    replaced by a random member of the same group (possibly itself).
    Returns ``s`` unchanged when nothing matches.
    """
    for group in char_transforms[LANGUAGE]:
        for ch in s:
            if ch in group:
                replacement = random.sample(group, 1)[0]
                return s.replace(ch, replacement)
    return s
def main(args=None):
    """Spin a ROS 2 node that logs the timestamp of each incoming message.

    Subscribes to topic 'topic' for RobotConfigurationData messages until
    interrupted, then shuts the node down.
    """
    rclpy.init(args=args)
    node = rclpy.create_node('rcd_sub')
    # NOTE(review): newer rclpy releases require a qos_profile (e.g. a depth
    # int) argument to create_subscription — confirm the targeted ROS 2
    # distribution accepts this 3-argument form.
    subscription = node.create_subscription(RobotConfigurationData, 'topic', (lambda msg: node.get_logger().info(('I heard: ' + str(msg.timestamp)))))
    # Bare reference keeps the subscription from being flagged as unused.
    subscription
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()
def do_train(cfg, model, resume=False):
    """Plain training loop: optimizer/scheduler setup, iteration loop with
    optional FP16, periodic eval, metric writing and checkpointing.

    Args:
        cfg: detectron2-style config node.
        model: model returning a dict of loss tensors from a data batch.
        resume: resume iteration counter/optimizer state from the last
            checkpoint instead of starting at 0.
    """
    model.train()
    if cfg.SOLVER.USE_CUSTOM_SOLVER:
        optimizer = build_custom_optimizer(cfg, model)
    else:
        # The default builder only supports this exact configuration.
        assert (cfg.SOLVER.OPTIMIZER == 'SGD')
        assert (cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE != 'full_model')
        assert (cfg.SOLVER.BACKBONE_MULTIPLIER == 1.0)
        optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    checkpointer = DetectionCheckpointer(model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler)
    # resume_or_load returns the stored iteration; +1 makes it the next one.
    start_iter = (checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get('iteration', (- 1)) + 1)
    if (not resume):
        start_iter = 0
    # TRAIN_ITER < 0 means "use the schedule's MAX_ITER".
    max_iter = (cfg.SOLVER.MAX_ITER if (cfg.SOLVER.TRAIN_ITER < 0) else cfg.SOLVER.TRAIN_ITER)
    periodic_checkpointer = PeriodicCheckpointer(checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter)
    # Metric writers only on the main process.
    writers = ([CommonMetricPrinter(max_iter), JSONWriter(os.path.join(cfg.OUTPUT_DIR, 'metrics.json')), TensorboardXWriter(cfg.OUTPUT_DIR)] if comm.is_main_process() else [])
    # NOTE(review): use_custom_mapper is assigned but never used below.
    use_custom_mapper = cfg.WITH_IMAGE_LABELS
    MapperClass = CustomDatasetMapper
    # Mapper choice: plain, DETR-style, or custom-augmentation mapper.
    mapper = (MapperClass(cfg, True) if (cfg.INPUT.CUSTOM_AUG == '') else (DetrDatasetMapper(cfg, True) if (cfg.INPUT.CUSTOM_AUG == 'DETR') else MapperClass(cfg, True, augmentations=build_custom_augmentation(cfg, True), recompute_boxes=(cfg.INPUT.CUSTOM_AUG == 'LSJ'))))
    if (cfg.DATALOADER.SAMPLER_TRAIN in ['TrainingSampler', 'RepeatFactorTrainingSampler']):
        data_loader = build_detection_train_loader(cfg, mapper=mapper)
    else:
        data_loader = build_custom_train_loader(cfg, mapper=mapper)
    if cfg.FP16:
        scaler = GradScaler()
    logger.info('Starting training from iteration {}'.format(start_iter))
    with EventStorage(start_iter) as storage:
        step_timer = Timer()
        data_timer = Timer()
        start_time = time.perf_counter()
        for (data, iteration) in zip(data_loader, range(start_iter, max_iter)):
            data_time = data_timer.seconds()
            storage.put_scalars(data_time=data_time)
            step_timer.reset()
            iteration = (iteration + 1)
            storage.step()
            loss_dict = model(data)
            # Backward pass uses the sum of ALL returned losses...
            losses = sum((loss for (k, loss) in loss_dict.items()))
            assert torch.isfinite(losses).all(), loss_dict
            loss_dict_reduced = {k: v.item() for (k, v) in comm.reduce_dict(loss_dict).items()}
            # ...while the logged total only sums keys containing 'loss'.
            losses_reduced = sum((loss for (key, loss) in loss_dict_reduced.items() if ('loss' in key)))
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
            optimizer.zero_grad()
            if cfg.FP16:
                # Mixed precision: scale, step, then update the scale factor.
                scaler.scale(losses).backward()
                scaler.step(optimizer)
                scaler.update()
            else:
                losses.backward()
                optimizer.step()
            storage.put_scalar('lr', optimizer.param_groups[0]['lr'], smoothing_hint=False)
            step_time = step_timer.seconds()
            storage.put_scalars(time=step_time)
            data_timer.reset()
            scheduler.step()
            # Periodic evaluation (skipped on the final iteration).
            if ((cfg.TEST.EVAL_PERIOD > 0) and ((iteration % cfg.TEST.EVAL_PERIOD) == 0) and (iteration != max_iter)):
                do_test(cfg, model)
                comm.synchronize()
            # Flush writers every 20 iters once warmed up, and at the end.
            if (((iteration - start_iter) > 5) and (((iteration % 20) == 0) or (iteration == max_iter))):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
        total_time = (time.perf_counter() - start_time)
        logger.info('Total training time: {}'.format(str(datetime.timedelta(seconds=int(total_time)))))
def inference(data_p, region, weights, output, gpu_id):
    """Run per-day inference for one region, single-model or ensemble.

    Sets up the output directory structure and test metadata, builds the
    data iterator, loads the model(s), and dispatches to the single-model
    or ensemble prediction routine depending on how many were loaded.
    """
    (metadata_path, out_path) = create_directory_structure(output, region, folder_name='inference')
    create_test_csv_json(data_p, region, metadata_path)
    (ds_iterator, test_dates, params) = get_data_iterator(region, data_p, metadata_path)
    models = load_models_and_weights(region, weights, device=gpu_id)
    data_params = params['data_params']
    if len(models) == 1:
        predictions_per_day(test_dates, models[0], ds_iterator, gpu_id, out_path, data_params)
    else:
        predictions_per_day_ensamble(test_dates, models, ds_iterator, gpu_id, out_path, data_params)
def convert_to_trainID(maskpath, out_mask_dir, is_train, clsID_to_trID=full_clsID_to_trID, suffix=''):
    """Remap class ids in a mask PNG to train ids and save the result.

    Pixels without a mapping stay at 255 (the ignore label); masks that end
    up entirely 255 are skipped without writing a file.
    """
    mask = np.array(Image.open(maskpath))
    remapped = np.full_like(mask, 255, dtype=np.uint8)
    for (clsID, trID) in clsID_to_trID.items():
        remapped[mask == clsID] = trID
    split_dir = ('train2017' + suffix) if is_train else ('val2017' + suffix)
    seg_filename = osp.join(out_mask_dir, split_dir, osp.basename(maskpath))
    unique_vals = np.unique(remapped)
    if len(unique_vals) == 1 and unique_vals[0] == 255:
        # Nothing but ignore pixels: skip writing.
        return
    Image.fromarray(remapped).save(seg_filename, 'PNG')
def iom(dt, gt):
    """Intersection over minimum area of two geometries ``dt`` and ``gt``."""
    areas = [area([dt]), area([gt])]
    intersection_area = area(merge([dt, gt], intersect=True))
    return intersection_area / np.min(areas)
def batch_cat(*x, axis=0):
    """Recursively concatenate nested tuples/dicts of tensors along ``axis``.

    Tuples are concatenated element-wise, dicts key-wise (preserving the
    mapping's class), and leaf values are joined with ``tf.concat``.
    """
    first = x[0]
    if isinstance(first, tuple):
        return tuple(batch_cat(*items, axis=axis) for items in zip(*x))
    if isinstance(first, dict):
        pairs = [(key, batch_cat(*[item[key] for item in x], axis=axis)) for key in first.keys()]
        return first.__class__(pairs)
    return tf.concat(x, axis=axis)
def _insert_recursively(dataset_dict: DatasetDict, data_dict: DatasetDict, insert_index: int):
    """Write ``data_dict`` into ``dataset_dict`` at ``insert_index``, mirroring structure.

    ndarray leaves receive the value at the given index; dict nodes must
    have identical key sets and are descended into.  Any other node type
    raises TypeError.
    """
    if isinstance(dataset_dict, np.ndarray):
        dataset_dict[insert_index] = data_dict
        return
    if isinstance(dataset_dict, dict):
        assert (dataset_dict.keys() == data_dict.keys())
        for key in dataset_dict:
            _insert_recursively(dataset_dict[key], data_dict[key], insert_index)
        return
    raise TypeError()
class ValueHead(nn.Module):
    """Scalar value head: dropout followed by a linear projection to 1."""

    def __init__(self, config, **kwargs):
        super().__init__()
        # Dropout probability: prefer the model config, fall back to kwargs
        # (default 0.1).  A falsy value disables dropout entirely.
        if hasattr(config, 'summary_dropout_prob'):
            drop_p = config.summary_dropout_prob
        else:
            drop_p = kwargs.pop('summary_dropout_prob', 0.1)
        self.dropout = nn.Dropout(drop_p) if drop_p else nn.Identity()
        # OPT-style configs expose word_embed_proj_dim; others hidden_size.
        if hasattr(config, 'word_embed_proj_dim'):
            in_dim = config.word_embed_proj_dim
        else:
            in_dim = config.hidden_size
        self.summary = nn.Linear(in_dim, 1)
        self.flatten = nn.Flatten()

    def forward(self, hidden_states):
        out = self.dropout(hidden_states)
        # Cast to the projection's dtype (e.g. fp16 weights vs fp32 input).
        if out.dtype != self.summary.weight.dtype:
            out = out.to(self.summary.weight.dtype)
        return self.summary(out)
def vae_val_step(input, test_samples: bool=False) -> Tuple[(dict, Tensor)]:
    """One VAE validation step: reconstruct, build anomaly maps and scores.

    Relies on enclosing-scope `model` and `config` (plus `ssim_map` and
    `gaussian_filter`).  Returns (anomaly_map, anomaly_score, input_recon)
    when ``test_samples`` is set, otherwise prepends the loss dict.
    """
    model.eval()
    with torch.no_grad():
        (input_recon, mu, logvar) = model(input)
        loss_dict = model.loss_function(input, input_recon, mu, logvar)
        if config.ssim_eval:
            # Structural-similarity based residual map.
            anomaly_map = ssim_map(input_recon, input)
            if config.gaussian_blur:
                # Blur on CPU via scipy, then move back to the device.
                anomaly_map = anomaly_map.cpu().numpy()
                for i in range(anomaly_map.shape[0]):
                    anomaly_map[i] = gaussian_filter(anomaly_map[i], sigma=config.sigma)
                anomaly_map = torch.from_numpy(anomaly_map).to(config.device)
        else:
            # Plain absolute reconstruction error, averaged over channels.
            anomaly_map = (input - input_recon).abs().mean(1, keepdim=True)
            if config.gaussian_blur:
                anomaly_map = anomaly_map.cpu().numpy()
                for i in range(anomaly_map.shape[0]):
                    anomaly_map[i] = gaussian_filter(anomaly_map[i], sigma=config.sigma)
                anomaly_map = torch.from_numpy(anomaly_map).to(config.device)
        if (config.modality == 'MRI'):
            # Restrict to the brain mask (above-minimum voxels) and score by
            # the max anomaly inside it.
            mask = torch.stack([(inp[0].unsqueeze(0) > inp[0].min()) for inp in input])
            anomaly_map *= mask
            anomaly_score = torch.tensor([map[(inp[0].unsqueeze(0) > inp[0].min())].max() for (map, inp) in zip(anomaly_map, input)])
        elif (config.modality == 'RF'):
            anomaly_score = torch.tensor([map.max() for map in anomaly_map])
        else:
            anomaly_score = torch.tensor([map.mean() for map in anomaly_map])
        if test_samples:
            return (anomaly_map, anomaly_score, input_recon)
        else:
            return (loss_dict, anomaly_map, anomaly_score, input_recon)
class Lang():
    """Vocabulary holder: bidirectional word/index maps plus usage counts.

    Indices 0-3 are reserved for the special tokens <pad>, <sos>, <eos>,
    <unk>; new words are appended from index 4 onward.
    """

    def __init__(self, name):
        self.name = name
        self.word2index = {'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3}
        self.word2count = {'<pad>': 0, '<sos>': 0, '<eos>': 0, '<unk>': 0}
        self.index2word = {0: '<pad>', 1: '<sos>', 2: '<eos>', 3: '<unk>'}
        self.n_words = 4

    def index_words(self, sentence):
        """Register every whitespace-split token of ``sentence``."""
        for token in sentence.split(' '):
            self.index_word(token)

    def index_cap_words(self, sentence):
        """Count tokens of an iterable of words without adding new ones."""
        for token in sentence:
            self.index_cap_word(token)

    def index_word(self, word):
        """Add ``word`` (next free index) or bump its count if known."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            idx = self.n_words
            self.word2index[word] = idx
            self.index2word[idx] = word
            self.word2count[word] = 1
            self.n_words = idx + 1

    def index_cap_word(self, word):
        """Increment ``word``'s count only if it is already known."""
        if word in self.word2index:
            self.word2count[word] += 1

    def reset_word2count(self):
        """Zero the count of every known word."""
        self.word2count = dict.fromkeys(self.word2index, 0)

    def enrich_word2count(self):
        """Ensure every known word has a count entry (created as 0)."""
        for word in self.word2index:
            self.word2count.setdefault(word, 0)

    def reset_ext_word2count(self):
        """Rebuild the count table from scratch, all zeros."""
        self.word2count = {word: 0 for word in self.word2index}

    def copy_dict(self, lang):
        """Share another Lang's tables (by reference) and zero the counts."""
        self.word2index = lang.word2index
        self.word2count = lang.word2count
        self.index2word = lang.index2word
        self.n_words = lang.n_words
        self.reset_word2count()

    def copy_ext_dict(self, ixtoword, wordtoix):
        """Adopt external index<->word tables; counts start at zero."""
        self.word2index = wordtoix
        self.index2word = ixtoword
        self.n_words = len(self.word2index)
        self.word2count = {}
        self.reset_ext_word2count()

    def set_word2index(self, word2index):
        self.word2index = word2index
        self.n_words = len(self.word2index)

    def set_index2word(self, index2word):
        self.index2word = index2word

    def increase_seos_count(self):
        """Bump counts for the sentence start/end markers."""
        self.word2count['<sos>'] += 1
        self.word2count['<eos>'] += 1
def parse_args():
    """Parse command-line options for MMDet detector testing."""
    parser = argparse.ArgumentParser(description='MMDet test detector')
    # Positional: config and checkpoint are mandatory.
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--gpus', type=int, default=1,
                        help='GPU number used for testing')
    parser.add_argument('--proc_per_gpu', type=int, default=1,
                        help='Number of processes per GPU')
    parser.add_argument('--out', help='output result file')
    parser.add_argument('--eval', type=str, nargs='+',
                        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
                        help='eval types')
    parser.add_argument('--show', action='store_true', help='show results')
    return parser.parse_args()
class SequentialEx(Module):
    """Sequential container whose sub-modules can see the original input.

    Before each layer runs, the running result gets an ``orig`` attribute
    pointing at the container's input (used e.g. by merge/residual layers);
    the attribute is cleared afterwards so tensors don't keep each other
    alive.

    NOTE(review): no explicit super().__init__() call -- presumably the
    custom ``Module`` base (fastai-style metaclass) handles registration;
    confirm.
    """

    def __init__(self, *layers):
        self.layers = nn.ModuleList(layers)

    def forward(self, x):
        res = x
        for l in self.layers:
            # Expose the container input to the layer, then drop the ref.
            res.orig = x
            nres = l(res)
            res.orig = None
            res = nres
        return res

    def __getitem__(self, i):
        return self.layers[i]

    def append(self, l):
        return self.layers.append(l)

    def extend(self, l):
        return self.layers.extend(l)

    def insert(self, i, l):
        return self.layers.insert(i, l)
def StandardNormal(d, device=torch.device('cuda:0')):
    """Return a d-dimensional standard normal as one multivariate event.

    Wrapping in Independent(..., 1) makes log_prob sum over the last dim.
    """
    loc = torch.zeros(d).to(device)
    scale = torch.ones(d).to(device)
    return Independent(Normal(loc, scale), 1)
class SiLU(nn.Module):
    """Sigmoid Linear Unit activation: x * sigmoid(x) (a.k.a. swish)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Elementwise x * sigmoid(x).
        return torch.sigmoid(x).mul(x)
def load_target_user_embedding(user_ebd_path):
    """Read a pre-computed user-embedding matrix from an .npy file."""
    return np.load(user_ebd_path)
def test_robot_warehouse__reset(robot_warehouse_env: RobotWarehouse) -> None:
    """Check that a jitted reset traces once and depends on the PRNG key."""
    chex.clear_trace_counter()
    # Fail if reset() retraces: it must be shape/dtype-stable across keys.
    reset_fn = jax.jit(chex.assert_max_traces(robot_warehouse_env.reset, n=1))
    (key1, key2) = (random.PRNGKey(0), random.PRNGKey(1))
    (state1, timestep1) = reset_fn(key1)
    (state2, timestep2) = reset_fn(key2)
    assert isinstance(timestep1, TimeStep)
    assert isinstance(state1, State)
    assert (state1.step_count == 0)
    # Grid has two layers of the configured env size (layer meaning, e.g.
    # agents vs shelves -- TODO confirm).
    assert (state1.grid.shape == (2, *robot_warehouse_env.grid_size))
    assert_is_jax_array_tree(state1)
    # Different keys must yield different randomness and layouts...
    assert (not jnp.all((state1.key == state2.key)))
    assert (not jnp.all((state1.grid == state2.grid)))
    # ...but identical deterministic fields.
    assert (state1.step_count == state2.step_count)
def get_files(config: Namespace, train: bool=True) -> Union[(List, Tuple[(List, ...)])]:
    """Collect CXR dataset file paths from split text files.

    Split filenames are composed from config.sex ('both' reads two files,
    otherwise one), config.AP_only and config.sup_devices. The first 200
    entries per file are reserved for evaluation; the rest are training.

    Returns:
        train=True: a flat list of training paths.
        train=False: (normal_paths, anomal_paths, normal_labels(0s),
        anomal_labels(1s)).

    NOTE(review): nesting reconstructed from collapsed whitespace -- the
    early 'if train: return' exits inside each sex branch; confirm.
    """
    ap = ('AP_' if config.AP_only else '')
    sup = ('sup_' if config.sup_devices else 'no_sup_')
    # --- normal-scan split files ---
    if (config.sex == 'both'):
        file_name = f'*_normal_train_{ap}{sup}'
        file = sorted(glob(os.path.join(config.datasets_dir, 'CXR/normal_splits', (file_name + '*.txt'))))
    else:
        file_name = f'{config.sex}_normal_train_{ap}{sup}'
        file = sorted(glob(os.path.join(config.datasets_dir, 'CXR/normal_splits', (file_name + '*.txt'))))
    paths1 = open(file[0]).read().splitlines()
    if (config.sex == 'both'):
        paths2 = open(file[1]).read().splitlines()
        # Truncate the longer list so both sexes contribute equally.
        if (len(paths1) > len(paths2)):
            paths1 = paths1[:len(paths2)]
        else:
            paths2 = paths2[:len(paths1)]
        for (idx, path) in enumerate(paths1):
            paths1[idx] = os.path.join(config.datasets_dir, 'CXR', path)
        for (idx, path) in enumerate(paths2):
            paths2[idx] = os.path.join(config.datasets_dir, 'CXR', path)
        if train:
            # Skip the first 200 per file (held out for evaluation).
            return (paths1[200:] + paths2[200:])
    else:
        for (idx, path) in enumerate(paths1):
            paths1[idx] = os.path.join(config.datasets_dir, 'CXR', path)
        if train:
            return paths1[200:]
    # --- evaluation: also load anomalous split files ---
    if (config.sex == 'both'):
        file_name = f'*_anomal_{config.pathology}_{ap}{sup}'
        file = sorted(glob(os.path.join(config.datasets_dir, 'CXR/anomal_splits', (file_name + '*.txt'))))
    else:
        file_name = f'{config.sex}_anomal_{config.pathology}_{ap}{sup}'
        file = sorted(glob(os.path.join(config.datasets_dir, 'CXR/anomal_splits', (file_name + '*.txt'))))
    anom_paths1 = open(file[0]).read().splitlines()
    if (config.sex == 'both'):
        anom_paths2 = open(file[1]).read().splitlines()
        if (len(anom_paths1) > len(anom_paths2)):
            anom_paths1 = anom_paths1[:len(anom_paths2)]
        else:
            anom_paths2 = anom_paths2[:len(anom_paths1)]
        for (idx, path) in enumerate(anom_paths1):
            anom_paths1[idx] = os.path.join(config.datasets_dir, 'CXR', path)
        for (idx, path) in enumerate(anom_paths2):
            anom_paths2[idx] = os.path.join(config.datasets_dir, 'CXR', path)
        normal_paths = (paths1[:200] + paths2[:200])
        anomal_paths = (anom_paths1 + anom_paths2)
        return (normal_paths, anomal_paths[:400], ([0] * len(normal_paths)), ([1] * 400))
    else:
        for (idx, path) in enumerate(anom_paths1):
            anom_paths1[idx] = os.path.join(config.datasets_dir, 'CXR', path)
        return (paths1[:200], anom_paths1[:200], ([0] * 200), ([1] * 200))
class Conv2d(Module):
    """Square-kernel 2D convolution over custom tensors.

    Only square kernels and uniform stride/padding are supported. The
    forward pass delegates to the input tensor's own ``conv2d`` method
    (a non-torch tensor type, e.g. an encrypted tensor -- TODO confirm).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super().__init__()
        # Collapse per-dimension arguments; all dimensions must agree.
        if isinstance(kernel_size, (list, tuple)):
            assert _all_the_same(kernel_size), 'only square kernels are supported'
            kernel_size = kernel_size[0]
        if isinstance(stride, (list, tuple)):
            assert _all_the_same(stride), 'stride must be the same in each dimension'
            stride = stride[0]
        if isinstance(padding, (list, tuple)):
            assert _all_the_same(padding), 'padding must be the same in each dimension'
            padding = padding[0]
        # Borrow torch's default initialisation via a throwaway module.
        pytorch_module = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias)
        self.register_parameter('weight', pytorch_module.weight)
        if bias:
            self.register_parameter('bias', pytorch_module.bias)
        self.stride = stride
        self.padding = padding

    def forward(self, x):
        x = x.conv2d(self.weight, stride=self.stride, padding=self.padding)
        if hasattr(self, 'bias'):
            # Broadcast the per-channel bias over both spatial dims.
            x = x.add(self.bias.unsqueeze((- 1)).unsqueeze((- 1)))
        return x

    # NOTE(review): no 'self'/'cls' parameter -- presumably a stripped
    # @staticmethod decorator; confirm against the original file.
    def from_onnx(parameters=None, attributes=None):
        """Build a Conv2d from ONNX node parameters and attributes."""
        if (parameters is None):
            parameters = {}
        if (attributes is None):
            attributes = {}
        assert _all_the_same(attributes['kernel_shape']), 'only square kernels are supported'
        assert _all_the_same(attributes['strides']), 'stride must be the same in each dimension'
        if ('pads' not in attributes):
            attributes['pads'] = [0, 0]
        if ('group' not in attributes):
            attributes['group'] = 1
        assert _all_the_same(attributes['pads']), 'padding must be the same in each dimension'
        assert (attributes['group'] == 1), 'group convolution not supported'
        assert all(((dilation == 1) for dilation in attributes['dilations'])), 'dilated convolutions not supported'
        # ONNX weight layout: (out_channels, in_channels, kH, kW).
        in_channels = parameters['weight'].size(1)
        out_channels = parameters['weight'].size(0)
        module = Conv2d(in_channels, out_channels, attributes['kernel_shape'][0], stride=attributes['strides'][0], padding=attributes['pads'][0], bias=('bias' in parameters))
        for (key, value) in parameters.items():
            module.set_parameter(key, value)
        return module
# NOTE(review): '_cache(maxsize=1)' sits directly above the def as a bare
# expression; it looks like a stripped '@' decorator (lru_cache-style
# memoisation of the detokenizer) -- confirm against the original file.
_cache(maxsize=1)
def get_detokenizer():
    """Build an English Moses detokenizer (import deferred to first call)."""
    from sacremoses import MosesDetokenizer
    detok = MosesDetokenizer(lang='en')
    return detok
def num_word_types(stanza_doc):
    """Count distinct lemmas in the first sentence of a Stanza document."""
    return len({word.lemma for word in stanza_doc.sentences[0].words})
def incremental_residual_det_loss(anchor, positives, negatives, margin, s_old, v_old, m_old, seen, dimensions=10, scale=False):
    """Determinant-style triplet loss over incrementally updated spectra.

    anchor: (B, 1, F); positives/negatives: (B, P, F) / (B, N, F).
    s_old / v_old / m_old are running decomposition statistics (values,
    vectors, mean) accumulated over `seen` samples -- exact semantics live
    in `incremental_s`; TODO confirm.

    Returns:
        (scalar mean loss over the batch, residuals reshaped to (-1, F)).
    """
    batches = int(anchor.get_shape()[0])
    # Broadcast the running statistics across the batch.
    s_old = tf.tile(tf.expand_dims(s_old, 0), [batches, 1])
    v_old = tf.tile(tf.expand_dims(v_old, 0), [batches, 1, 1])
    m_old = tf.tile(tf.expand_dims(m_old, 0), [batches, 1])
    m_old = tf.expand_dims(m_old, 1)
    num_pos = positives.get_shape()[1]
    num_neg = negatives.get_shape()[1]
    # Residuals of each positive/negative relative to the anchor.
    pos_features = tf.subtract(positives, tf.tile(anchor, [1, int(num_pos), 1]))
    neg_features = tf.subtract(negatives, tf.tile(anchor, [1, int(num_neg), 1]))
    residuals = tf.concat([pos_features, neg_features], 1)
    f_dim = int(residuals.get_shape()[2])
    incremental_pos = incremental_s(pos_features, s_old, v_old, m_old, seen)
    incremental_neg = incremental_s(neg_features, s_old, v_old, m_old, seen)
    num_s = int(incremental_pos.get_shape()[1])
    # Never take more components than are available (minus one).
    dimensions = tf.minimum(dimensions, (num_s - 1))
    if scale:
        # Normalise both spectra by the leading negative component.
        max_neg = tf.slice(incremental_neg, begin=[0, 0], size=[(- 1), 1])
        pos_s = tf.truediv(tf.slice(incremental_pos, begin=[0, 0], size=[(- 1), dimensions]), tf.tile(max_neg, [1, dimensions]))
        neg_s = tf.truediv(tf.slice(incremental_neg, begin=[0, 0], size=[(- 1), dimensions]), tf.tile(max_neg, [1, dimensions]))
    else:
        pos_s = tf.slice(incremental_pos, begin=[0, 0], size=[(- 1), dimensions])
        neg_s = tf.slice(incremental_neg, begin=[0, 0], size=[(- 1), dimensions])
    # Margin loss on the product of the leading components (a determinant
    # surrogate): push prod(pos) below prod(neg) by `margin`.
    losses = tf.add(tf.subtract(tf.reduce_prod(pos_s, axis=1), tf.reduce_prod(neg_s, axis=1)), margin)
    return (tf.reduce_mean(losses, axis=0), tf.reshape(residuals, [(- 1), f_dim]))
class AverageMeter(object):
    """Track a metric's latest value, running sum/count, and average."""

    def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
        self.name = name
        self.fmt = fmt  # format spec used by __str__, e.g. ':6.3f'
        self.summary_type = summary_type
        self.reset()

    def reset(self):
        """Zero all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the average."""
        self.val = val
        self.sum += (val * n)
        self.count += n
        self.avg = (self.sum / self.count)

    def all_reduce(self):
        """Sum `sum`/`count` across all distributed workers, refresh avg."""
        device = ('cuda' if torch.cuda.is_available() else 'cpu')
        total = torch.tensor([self.sum, self.count], dtype=torch.float32, device=device)
        dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False)
        (self.sum, self.count) = total.tolist()
        self.avg = (self.sum / self.count)

    def __str__(self):
        # Renders e.g. 'loss 0.123456 (0.234567)' for fmt=':f'.
        fmtstr = (((('{name} {val' + self.fmt) + '} ({avg') + self.fmt) + '})')
        return fmtstr.format(**self.__dict__)

    def summary(self):
        """One-line end-of-run summary selected by summary_type."""
        fmtstr = ''
        if (self.summary_type is Summary.NONE):
            fmtstr = ''
        elif (self.summary_type is Summary.AVERAGE):
            fmtstr = '{name} {avg:.3f}'
        elif (self.summary_type is Summary.SUM):
            fmtstr = '{name} {sum:.3f}'
        elif (self.summary_type is Summary.COUNT):
            fmtstr = '{name} {count:.3f}'
        else:
            raise ValueError(('invalid summary type %r' % self.summary_type))
        return fmtstr.format(**self.__dict__)
class TFMPNetForQuestionAnswering(metaclass=DummyObject):
    """Import-time placeholder used when TensorFlow is not installed.

    Instantiation raises an informative 'requires the tf backend' error
    instead of an ImportError at import time.
    """
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def load_csv(path='to_dock_shuffled.csv'):
    """Load SMILES strings, activity labels and affinities from a CSV.

    Expects columns 'smile', 'active' and 'affinity'.

    Returns:
        (smiles, actives, px50) as numpy arrays.
    """
    import pandas as pd
    frame = pd.read_csv(path)
    return (frame['smile'].values, frame['active'].values, frame['affinity'].values)
class DepthNetGAN_M(DepthNetGAN):
    """DepthNetGAN variant whose generator also predicts the 2x4 affine M.

    NOTE(review): statement nesting was reconstructed from collapsed
    whitespace; the generator/discriminator optimisation steps are placed
    under `if train:` / `if is_gan:` per the apparent intent -- confirm.
    """

    def run_on_instance(self, xy_keypts_src, z_keypts_src, xy_keypts_tgt, z_keypts_tgt, train, **kwargs):
        """One train/eval step on a batch of source/target 2D keypoints.

        Returns (losses dict of python floats, outputs dict of detached
        tensors: predicted z, reprojected target 2D, affine M).
        """
        is_gan = (not self.no_gan)
        if train:
            self.optim['g'].zero_grad()
            self.optim['d'].zero_grad()
        bs = xy_keypts_src.shape[0]
        # numpy (B, K, 2) -> torch (B, 2, K); z stays (B, K).
        (xy_keypts_src_torch, z_keypts_src_torch, xy_keypts_tgt_torch) = (torch.from_numpy(xy_keypts_src).transpose(1, 2).float(), torch.from_numpy(z_keypts_src).float(), torch.from_numpy(xy_keypts_tgt).transpose(1, 2).float())
        if self.use_cuda:
            xy_keypts_src_torch = xy_keypts_src_torch.cuda()
            z_keypts_src_torch = z_keypts_src_torch.cuda()
            xy_keypts_tgt_torch = xy_keypts_tgt_torch.cuda()
        # Generator consumes src and tgt 2D keypoints stacked on channels.
        net_out = self.g(torch.cat((xy_keypts_src_torch, xy_keypts_tgt_torch), dim=1))
        if (not ((type(net_out) == tuple) and (len(net_out) == 2))):
            raise Exception('Output of g needs to be a tuple of two elements!')
        (src_z_pred, m_pred) = net_out
        src_z_pred = src_z_pred.unsqueeze(1)
        m_rshp = m_pred.view((bs, 2, 4))
        # Homogeneous row for the 66 keypoints (66 = keypoint count).
        ones = torch.ones((bs, 1, 66)).float()
        if self.use_cuda:
            ones = ones.cuda()
        # rht stacks [x; y; z; 1]; z is ground truth or predicted.
        if kwargs['use_gt_z']:
            rht = torch.cat((xy_keypts_src_torch, z_keypts_src_torch.unsqueeze(1), ones), dim=1)
        else:
            rht = torch.cat((xy_keypts_src_torch, src_z_pred, ones), dim=1)
        # Reproject source into the target view with the predicted affine.
        rhs = torch.matmul(m_rshp, rht)
        if (not self.use_l1):
            l2_loss = torch.mean(((xy_keypts_tgt_torch - rhs) ** 2))
        else:
            l2_loss = torch.mean(torch.abs((xy_keypts_tgt_torch - rhs)))
        # Discriminator input: predicted depth conditioned on src 2D.
        src_z_pred_given_inp = torch.cat((src_z_pred, xy_keypts_src_torch), dim=1)
        g_loss = torch.FloatTensor([0.0])
        if train:
            (self.lamb * l2_loss).backward(retain_graph=True)
            if is_gan:
                g_loss = (- torch.mean(self.d(src_z_pred_given_inp)))
                # Update G on the adversarial term every update_g_every iters.
                if (((kwargs['iter'] - 1) % self.update_g_every) == 0):
                    g_loss.backward()
            self.optim['g'].step()
        d_loss_real = torch.FloatTensor([0.0])
        d_loss_fake = torch.FloatTensor([0.0])
        d_loss = torch.FloatTensor([0.0])
        if is_gan:
            if train:
                self.optim['d'].zero_grad()
            src_z_gt_given_inp = torch.cat((z_keypts_src_torch.unsqueeze(1), xy_keypts_src_torch), dim=1)
            d_real = self.d(src_z_gt_given_inp)
            d_fake = self.d(src_z_pred_given_inp.detach())
            d_loss_real = torch.mean(d_real)
            d_loss_fake = torch.mean(d_fake)
            # WGAN-style critic objective.
            d_loss = ((- d_loss_real) + d_loss_fake)
            if train:
                d_loss.backward()
                self.optim['d'].step()
            g_norm_x = torch.FloatTensor([0.0])
            if (train and (self.dnorm > 0.0)):
                # Gradient-norm penalty on real inputs.
                d_real_inp = src_z_gt_given_inp.detach()
                d_real_inp.requires_grad = True
                d_real_ = self.d(d_real_inp)
                g_norm_x = self.grad_norm(d_real_, d_real_inp)
                self.optim['d'].zero_grad()
                (g_norm_x * self.dnorm).backward()
                self.optim['d'].step()
        losses = {'l2': l2_loss.data.item()}
        if is_gan:
            losses['g_loss'] = g_loss.data.item()
            losses['d_loss'] = d_loss.data.item()
            losses['d_loss_real'] = d_loss_real.data.item()
            losses['d_loss_fake'] = d_loss_fake.data.item()
            if (self.dnorm > 0):
                losses['dnorm_x'] = g_norm_x.data.item()
        outputs = {'src_z_pred': src_z_pred.detach(), 'tgt_2d_pred': rhs.detach(), 'affine': m_rshp.detach()}
        return (losses, outputs)
def test_get_sign_orbit_array_list():
    """Sign-orbit over a 3-array list: all 2^3 = 8 +/- combinations.

    Per the expected values, array i alternates its sign with period 2^i
    along the new axis, and each orbit element's sign is the product of
    the three applied signs.
    """
    s1 = jnp.array([[[(- 2.4), 2.4], [1.6, 0.8]], [[0.3, (- 0.2)], [(- 10), 0]]])
    s2 = jnp.array([[[(- 2.9), 2.0], [1.2, 0.8], [0.3, 0.2]], [[0.3, (- 0.2)], [(- 10), 0], [(- 11), (- 1)]]])
    s3 = jnp.array([[[(- 0.3), 0.6]], [[(- 2), (- 10)]]])
    inputs = [s1, s2, s3]
    (syms, sym_signs) = sign_sym._get_sign_orbit_array_list(inputs, axis=(- 2))
    expected_syms = [jnp.stack([s1, (- s1), s1, (- s1), s1, (- s1), s1, (- s1)], axis=(- 2)), jnp.stack([s2, s2, (- s2), (- s2), s2, s2, (- s2), (- s2)], axis=(- 2)), jnp.stack([s3, s3, s3, s3, (- s3), (- s3), (- s3), (- s3)], axis=(- 2))]
    expected_sym_signs = jnp.array([1, (- 1), (- 1), 1, (- 1), 1, 1, (- 1)])
    assert_pytree_allclose(syms, expected_syms)
    np.testing.assert_allclose(sym_signs, expected_sym_signs)
def aggregate(table: Union[(pd.DataFrame, pd.Series)], cols_to_agg: Union[(list, None)]=None, aggregates: Union[(list, None)]=None, is_rename_cols: bool=True) -> Union[(pd.DataFrame, pd.Series)]:
    """Aggregate *table* over the index levels listed in *cols_to_agg*.

    Groups by every remaining index level and applies each function in
    *aggregates* to all data columns.

    Args:
        table: DataFrame or Series with a (Multi)Index.
        cols_to_agg: index level names to aggregate away; when None/empty,
            *table* is returned unchanged. (Was a mutable ``[]`` default --
            fixed to a None sentinel so the list is never shared between
            calls.)
        aggregates: aggregation function names; defaults to ['mean', 'sem'].
        is_rename_cols: flatten result columns to '<col>_<agg>' when True,
            otherwise keep only the original column names.

    Returns:
        The aggregated DataFrame (or the untouched input when there is
        nothing to aggregate).
    """
    # None sentinels instead of mutable default arguments.
    cols_to_agg = [] if cols_to_agg is None else cols_to_agg
    aggregates = ['mean', 'sem'] if aggregates is None else aggregates
    if (len(cols_to_agg) == 0):
        return table
    if isinstance(table, pd.Series):
        table = table.to_frame()
    # Group by every index level we are NOT aggregating away.
    new_idcs = [c for c in table.index.names if (c not in cols_to_agg)]
    table_agg = table.reset_index().groupby(by=new_idcs, dropna=False).agg(aggregates)
    if is_rename_cols:
        # ('x', 'mean') -> 'x_mean'; rstrip keeps plain names intact.
        table_agg.columns = ['_'.join(col).rstrip('_') for col in table_agg.columns.values]
    else:
        table_agg.columns = [col[0] for col in table_agg.columns.values]
    return table_agg
class TestCascadeRoIHead(TestCase):
    """Tests for the cascade RoI head built from an mmdet config file.

    NOTE(review): the bare list expressions above each test method look
    like stripped parameterized decorators (e.g.
    '@parameterized.expand([...])') feeding `cfg_file` -- confirm against
    the original file.
    """

    (['cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        """The built head must expose both bbox and mask branches."""
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        assert roi_head.with_bbox
        assert roi_head.with_mask

    (['cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'])
    def test_cascade_roi_head_loss(self, cfg_file):
        """Losses are non-negative; with zero GT, bbox/mask losses are 0."""
        if (not torch.cuda.is_available()):
            return unittest.skip('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{'img_shape': (s, s, 3), 'scale_factor': 1}]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # One random feature map per FPN stride, halving resolution each level.
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(torch.rand(1, 1, (s // (2 ** (i + 2))), (s // (2 ** (i + 2)))).to(device='cuda'))
        feats = tuple(feats)
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        # Case 1: one GT instance -> all losses present and >= 0.
        batch_data_samples = demo_mm_inputs(batch_size=1, image_shapes=[(3, s, s)], num_items=[1], num_classes=4, with_mask=True, device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for (name, value) in out.items():
            if ('loss' in name):
                self.assertGreaterEqual(value.sum(), 0, msg='loss should be non-zero')
        # Case 2: zero GT instances -> cls loss >= 0, bbox/mask losses == 0.
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(batch_size=1, image_shapes=[(3, s, s)], num_items=[0], num_classes=4, with_mask=True, device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for (name, value) in out.items():
            if ('loss_cls' in name):
                self.assertGreaterEqual(value.sum(), 0, msg='loss should be non-zero')
            elif (('loss_bbox' in name) or ('loss_mask' in name)):
                self.assertEqual(value.sum(), 0)
def sb_cnn_bn(x, is_training, config):
    """Prepend an input batch-normalisation layer to the SB-CNN core.

    NOTE(review): 'x.get_shape' is referenced without calling it here, so
    the print shows the bound method object rather than the shape --
    likely meant 'x.get_shape()'.
    """
    print(('Input: ' + str(x.get_shape)))
    # Add a trailing channels axis: (B, H, W) -> (B, H, W, 1).
    input_layer = tf.expand_dims(x, 3)
    print(input_layer.get_shape)
    # Normalise over the channel axis; `training` toggles batch statistics.
    bn_input = tf.layers.batch_normalization(input_layer, training=is_training, axis=(- 1))
    return sb_cnn_core(bn_input, is_training, config)
def vgg_fcn(num_classes=1000, pretrained=False, batch_norm=False, **kwargs):
    """Build a VGG-based FCN, optionally loading pretrained weights.

    NOTE(review): layers come from cfg['D'] while the pretrained URLs are
    'vgg19'/'vgg19_bn'; if cfg['D'] is the VGG-16 layout this is a depth
    mismatch that strict=False silently masks (mismatched weights are
    skipped) -- confirm which depth was intended.
    NOTE(review): init_weights=True alongside pretrained looks inverted
    relative to the usual torchvision convention (False when loading
    weights) -- confirm.
    """
    if pretrained:
        kwargs['init_weights'] = True
    model = VGG(make_layers(cfg['D'], batch_norm=batch_norm), num_classes, **kwargs)
    if pretrained:
        if batch_norm:
            pretrained_weights = model_zoo.load_url(model_urls['vgg19_bn'])
        else:
            pretrained_weights = model_zoo.load_url(model_urls['vgg19'])
        # strict=False: tolerate missing/unexpected keys (e.g. FCN head).
        model.load_state_dict(pretrained_weights, strict=False)
    return model
def main(argv=None):
    """Create the result directory tree mirroring model_dir, then evaluate.

    Re-creates the model_dir path components (from index 2 onward) under
    FLAGS.result_dir, making each level if missing, and calls evaluate()
    on the resulting directory.

    Args:
        argv: unused; present for tf.app.run-style entry points.
    """
    if (not os.path.exists(FLAGS.result_dir)):
        os.mkdir(FLAGS.result_dir)
    folddirlist = FLAGS.model_dir.split(os.sep)
    saveresultdir = FLAGS.result_dir
    # BUGFIX: 'xrange' is Python 2 only and raises NameError on Python 3;
    # 'range' is the drop-in replacement.
    for i in range(2, len(folddirlist)):
        saveresultdir = ((saveresultdir + os.sep) + folddirlist[i])
        if (not os.path.exists(saveresultdir)):
            os.mkdir(saveresultdir)
    evaluate(saveresultdir)
def freq_dist(importtext):
    """Token frequency distribution of *importtext*, most common first.

    Returns a list of (token, count) pairs covering every distinct token.
    """
    tokens = word_tokenize(importtext)
    dist = FreqDist(tokens)
    # len(dist) == number of distinct tokens, so this returns them all.
    return dist.most_common(len(dist))
def init_kaf_nn(layer_sizes, scale=0.01, rs=np.random.RandomState(0), dict_size=20, boundary=3.0):
    """Initialise the weights of a KAF (kernel activation function) network.

    Args:
        layer_sizes: layer widths, e.g. [in, hidden, out].
        scale: std-dev scale for linear weights and biases.
        rs: RandomState used for all draws (note: the default instance is
            shared across calls, so repeated default calls advance it).
        dict_size: number of kernel dictionary points per unit.
        boundary: dictionary spans [-boundary, boundary].

    Returns:
        (weights, (D, gamma)) where weights is a list of (W, b, alpha)
        triples and D has shape (1, 1, dict_size).
    """
    grid = np.linspace(-boundary, boundary, dict_size).reshape(-1, 1)
    step = grid[1, 0] - grid[0, 0]
    # Kernel bandwidth derived from the dictionary spacing.
    gamma = 0.5 / np.square(2 * step)
    grid = grid.reshape(1, 1, -1)
    weights = []
    # Draw order matches one (W, b, alpha) triple per consecutive pair.
    for n_in, n_out in zip(layer_sizes[:-1], layer_sizes[1:]):
        weights.append((rs.randn(n_in, n_out) * scale,
                        rs.randn(n_out) * scale,
                        rs.randn(1, n_out, dict_size) * 0.5))
    return (weights, (grid, gamma))
def training_params(is_gcloud=False, output_dir=None):
    """Assemble TrainingParams for progressive GAN training on CelebA-HQ.

    Args:
        is_gcloud: forwarded to dataset/train configuration.
        output_dir: experiment output dir; derived from this file when None.
    """
    if (not output_dir):
        output_dir = util.construct_experiment_output_dir(__file__)
    num_gpus = 1
    stop_after = 7
    # Batch size shrinks as the phase (resolution) grows to fit memory.
    dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
    imgs_per_phase = 384000
    # ~384k images per phase, but never fewer than 6000 steps.
    dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
    # The final trained phase (stop_after == 7) gets double the steps.
    dynamic_steps_per_phase[7] *= 2
    return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 8, 3: 8, 4: 8, 5: 8, 6: 8, 7: 0, 8: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='custom03'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
def benchmark_data(args):
    """Benchmark the detection train dataloader.

    Logs loader init time, startup latency of the first batch, sustained
    throughput over repeated 1000-iteration passes, and system RAM usage.
    """
    cfg = setup(args)
    timer = Timer()
    dataloader = build_detection_train_loader(cfg)
    logger.info('Initialize loader using {} seconds.'.format(timer.seconds()))
    timer.reset()
    itr = iter(dataloader)
    # First batches include worker spin-up; record the first separately.
    for i in range(10):
        next(itr)
        if (i == 0):
            startup_time = timer.seconds()
    timer = Timer()
    max_iter = 1000
    for _ in tqdm.trange(max_iter):
        next(itr)
    logger.info('{} iters ({} images) in {} seconds.'.format(max_iter, (max_iter * cfg.SOLVER.IMS_PER_BATCH), timer.seconds()))
    logger.info('Startup time: {} seconds'.format(startup_time))
    vram = psutil.virtual_memory()  # system RAM, despite the variable name
    logger.info('RAM Usage: {:.2f}/{:.2f} GB'.format(((vram.total - vram.available) / (1024 ** 3)), (vram.total / (1024 ** 3))))
    # Repeat to observe steady-state throughput variance across runs.
    for _ in range(10):
        timer = Timer()
        max_iter = 1000
        for _ in tqdm.trange(max_iter):
            next(itr)
        logger.info('{} iters ({} images) in {} seconds.'.format(max_iter, (max_iter * cfg.SOLVER.IMS_PER_BATCH), timer.seconds()))
def execute_experiment(args, train_probe, report_results):
    """Instantiate all probing-experiment components, then optionally
    train the probe and/or report its results.

    Args:
        args: experiment configuration passed to every factory.
        train_probe: run the training regimen when truthy.
        report_results: run result reporting when truthy.
    """
    # Resolve the concrete classes chosen by the configuration.
    dataset_class = choose_dataset_class(args)
    (task_class, reporter_class, loss_class) = choose_task_classes(args)
    probe_class = choose_probe_class(args)
    model_class = choose_model_class(args)

    # Build the experiment objects.
    task = task_class()
    expt_dataset = dataset_class(args, task)
    expt_reporter = reporter_class(args)
    expt_probe = probe_class(args)
    expt_model = model_class(args)
    expt_regimen = regimen.ProbeRegimen(args)
    expt_loss = loss_class(args)

    if train_probe:
        print('Training probe...')
        run_train_probe(args, expt_probe, expt_dataset, expt_model, expt_loss, expt_reporter, expt_regimen)
    if report_results:
        print('Reporting results of trained probe...')
        run_report_results(args, expt_probe, expt_dataset, expt_model, expt_loss, expt_reporter, expt_regimen)
def make_dot(var, params=None):
    """Render the autograd graph reachable from *var* as a graphviz Digraph.

    Args:
        var: output tensor; traversal starts at var.grad_fn.
        params: optional {name: tensor} used to label parameter leaf nodes.

    Returns:
        A graphviz Digraph (PNG format) after resize_graph() adjustment.
    """
    if (params is not None):
        assert all((isinstance(p, torch.Tensor) for p in params.values()))
        # id(tensor) -> name, so leaf variables can be labelled.
        param_map = {id(v): k for (k, v) in params.items()}
    node_attr = dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1', height='0.2')
    dot = Digraph(format='png', node_attr=node_attr, graph_attr={'size': '12,12'})
    seen = set()

    def size_to_str(size):
        # e.g. torch.Size([2, 3]) -> '(2, 3)'.
        return (('(' + ', '.join([('%d' % v) for v in size])) + ')')

    def add_nodes(var):
        """Depth-first walk over grad_fn nodes, deduped via `seen`."""
        if (var not in seen):
            if torch.is_tensor(var):
                dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
            elif hasattr(var, 'variable'):
                # Leaf accumulator node: label with the parameter name.
                u = var.variable
                name = (param_map[id(u)] if (params is not None) else '')
                node_name = ('%s\n %s' % (name, size_to_str(u.size())))
                dot.node(str(id(var)), node_name, fillcolor='lightblue')
            else:
                dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if (u[0] is not None):
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)

    add_nodes(var.grad_fn)
    resize_graph(dot)
    return dot
class FusedLeakyReLUFunctionBackward(Function):
    """Autograd backward pass for the fused bias + leaky-ReLU CUDA op.

    NOTE(review): forward/backward take `ctx` first but carry no
    @staticmethod here -- presumably stripped decorators (torch.autograd
    Functions require them); confirm against the original file.
    """

    def forward(ctx, grad_output, out, bias, negative_slope, scale):
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        empty = grad_output.new_empty(0)
        # Flags 3, 1 select the op's activation/gradient mode (semantics
        # live in the fused CUDA extension -- TODO confirm).
        grad_input = fused.fused_bias_act(grad_output.contiguous(), empty, out, 3, 1, negative_slope, scale)
        # Reduce over batch and all spatial dims to get the bias gradient.
        dim = [0]
        if (grad_input.ndim > 2):
            dim += list(range(2, grad_input.ndim))
        if bias:
            grad_bias = grad_input.sum(dim).detach()
        else:
            grad_bias = empty
        return (grad_input, grad_bias)

    def backward(ctx, gradgrad_input, gradgrad_bias):
        (out,) = ctx.saved_tensors
        gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale)
        # Only the grad_output input receives a second-order gradient.
        return (gradgrad_out, None, None, None, None)
def l1LossMask(pred, gt, mask):
    """Mean absolute error over entries where mask is 1.

    The divisor is clamped to at least 1, so an all-zero mask yields 0
    instead of NaN.
    """
    masked_abs_err = (pred - gt).abs() * mask
    return masked_abs_err.sum() / mask.sum().clamp(min=1)
def save_checkpoint(state, is_best, results_dir, filename='checkpoint.pth.tar'):
    """Persist a training checkpoint under *results_dir*.

    When *is_best* is True, the file is additionally copied to
    'model_best.pth.tar' in the same directory.
    """
    ckpt_path = os.path.join(results_dir, filename)
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, os.path.join(results_dir, 'model_best.pth.tar'))
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=(2 ** 0.5)):
    """Functional wrapper around the fused bias-add + leaky-ReLU autograd op.

    Args:
        input: input tensor.
        bias: per-channel bias added before the activation.
        negative_slope: leaky-ReLU slope for negative inputs.
        scale: output multiplier (sqrt(2) by default).
    """
    return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class VarFastLSTM(VarRNNBase):
    """Variational-dropout LSTM assembled from VarFastLSTMCell cells."""

    def __init__(self, *args, **kwargs):
        super(VarFastLSTM, self).__init__(VarFastLSTMCell, *args, **kwargs)
        # Marks this RNN as an LSTM for the base class (hidden state is an
        # (h, c) pair rather than a single tensor -- TODO confirm usage).
        self.lstm = True
# NOTE(review): bare '_module()' above the class looks like a stripped
# registry decorator (e.g. '@MODELS.register_module()') -- confirm.
_module()
class BFP(BaseModule):
    """Balanced Feature Pyramid (BFP).

    Gathers every pyramid level to the resolution of ``refine_level``,
    averages them, optionally refines the average ('conv' or 'non_local'),
    then scatters the refined feature back to every level as a residual.
    """

    def __init__(self, in_channels: int, num_levels: int, refine_level: int=2, refine_type: str=None, conv_cfg: OptConfigType=None, norm_cfg: OptConfigType=None, init_cfg: OptMultiConfig=dict(type='Xavier', layer='Conv2d', distribution='uniform')) -> None:
        super().__init__(init_cfg=init_cfg)
        assert (refine_type in [None, 'conv', 'non_local'])
        self.in_channels = in_channels
        self.num_levels = num_levels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Level whose spatial size is the gather/refine target.
        self.refine_level = refine_level
        self.refine_type = refine_type
        assert (0 <= self.refine_level < self.num_levels)
        if (self.refine_type == 'conv'):
            self.refine = ConvModule(self.in_channels, self.in_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
        elif (self.refine_type == 'non_local'):
            self.refine = NonLocal2d(self.in_channels, reduction=1, use_scale=False, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)

    def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:
        assert (len(inputs) == self.num_levels)
        # Step 1: resize every level to the refine level's spatial size
        # (pool finer levels down, upsample coarser levels).
        feats = []
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if (i < self.refine_level):
                gathered = F.adaptive_max_pool2d(inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)
        # Step 2: balance (average across levels) and optionally refine.
        bsf = (sum(feats) / len(feats))
        if (self.refine_type is not None):
            bsf = self.refine(bsf)
        # Step 3: scatter back to every level as a residual connection.
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if (i < self.refine_level):
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append((residual + inputs[i]))
        return tuple(outs)
def test_no_frames():
    """Closing a recorder that never captured frames must be a clean no-op."""
    env = BrokenRecordableEnv()
    rec = VideoRecorder(env)
    rec.close()
    assert rec.empty
    assert rec.functional
    # No output file should be left behind when nothing was recorded.
    assert (not os.path.exists(rec.path))
def create_voxel_off(path):
    """Export the voxelised point cloud stored under *path* as an OFF mesh.

    Reads the 'point_cloud' array from the .npz produced by the voxeliser
    (named from the module-level `args.res` / `args.num_points`) and writes
    a vertices-only OFF file alongside it.
    """
    stem = '/voxelized_point_cloud_{}res_{}points'.format(args.res, args.num_points)
    pc = np.load(path + stem + '.npz')['point_cloud']
    # Vertices-only mesh: no faces.
    trimesh.Trimesh(vertices=pc, faces=[]).export(path + stem + '.off')
    print('Finished: {}'.format(path))
class DistilBertPreTrainedModel():
    """Import-time placeholder used when PyTorch is unavailable.

    Instantiating it or calling from_pretrained raises a helpful
    'requires PyTorch' error instead of a bare ImportError.
    NOTE(review): from_pretrained is an instance method here; upstream
    dummy objects usually mark it @classmethod -- confirm.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
# NOTE(review): bare '_model' above the def looks like a stripped
# '@register_model'-style decorator -- confirm against the original file.
_model
def regnety_080(pretrained=False, **kwargs):
    """RegNetY-080 built via the shared RegNet constructor."""
    return _create_regnet('regnety_080', pretrained, **kwargs)
def test_extend_linestring() -> LineString:
    """extend_linestring stretches a 2-point segment beyond both endpoints.

    Vertical and horizontal segments are extended by 800 units per end;
    the diagonal case is extended 600 per axis per end (per the expected
    values -- the extension appears to be computed per-axis, TODO confirm
    the intended geometry).
    """
    # Vertical segment: y grows by +/-800.
    polyline = np.array([[4, 4], [4, 12]])
    ls = LineString(polyline)
    e_ls = synthetic_crosswalk_generator.extend_linestring(ls)
    assert np.allclose(np.array(e_ls.coords), np.array([[4.0, (- 796.0)], [4.0, 812.0]]))
    # Horizontal segment: x grows by +/-800.
    polyline = np.array([[4, 4], [12, 4]])
    ls = LineString(polyline)
    e_ls = synthetic_crosswalk_generator.extend_linestring(ls)
    assert np.allclose(np.array(e_ls.coords), np.array([[(- 796.0), 4.0], [812.0, 4.0]]))
    # Diagonal segment.
    polyline = np.array([[4, 4], [10, 10]])
    ls = LineString(polyline)
    e_ls = synthetic_crosswalk_generator.extend_linestring(ls)
    assert np.allclose(np.array(e_ls.coords), np.array([[(- 596.0), (- 596.0)], [610.0, 610.0]]))
class ROIPool(nn.Module):
    """Thin nn.Module wrapper around the functional roi_pool operation."""

    def __init__(self, output_size, spatial_scale):
        super(ROIPool, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    def forward(self, input, rois):
        # Cast both tensors to float before calling the pooling kernel.
        return roi_pool(input.float(), rois.float(), self.output_size, self.spatial_scale)

    def __repr__(self):
        return '{}(output_size={}, spatial_scale={})'.format(
            self.__class__.__name__, self.output_size, self.spatial_scale)
def check_java_version():
    """Infer the major Java version from the JAVA_HOME path.

    Returns:
        17 or 11 when JAVA_HOME mentions 'jdk-17' / 'jdk-11', otherwise
        None -- including when JAVA_HOME is unset, which previously raised
        ``TypeError: argument of type 'NoneType' is not iterable``.
    """
    # os.environ.get returns None when unset; fall back to '' so the
    # substring tests below are always safe.
    java_home = os.environ.get('JAVA_HOME') or ''
    if 'jdk-17' in java_home:
        return 17
    if 'jdk-11' in java_home:
        return 11
    # Explicit None for unrecognised (or missing) JDK paths, matching the
    # original implicit fall-through.
    return None
def get_random_pos_on_map(free_space_indices, map_, safe_dist, forbidden_zones=None):
    """Sample a collision-free random pose on an occupancy grid.

    Args:
        free_space_indices: (row_indices, col_indices) of free cells.
        map_: OccupancyGrid-like message with .info and .data.
        safe_dist: required clearance radius in meters.
        forbidden_zones: optional [(x, y, radius), ...] exclusion circles.

    Returns:
        A Pose at the sampled position with a fixed yaw of 1 rad.

    Raises:
        Exception: if 100 consecutive samples fail the clearance check.
    """

    def is_pos_valid(x_in_meters, y_in_meters):
        """True iff the candidate clears all forbidden zones and every grid
        cell within safe_dist is free (value 0)."""
        # Reject positions inside any zone inflated by safe_dist.
        for forbidden_zone in forbidden_zones:
            if ((((x_in_meters - forbidden_zone[0]) ** 2) + ((y_in_meters - forbidden_zone[1]) ** 2)) < ((forbidden_zone[2] + safe_dist) ** 2)):
                return False
        cell_radius = int((safe_dist / map_.info.resolution))
        # World coordinates -> grid indices.
        x_index = int(((x_in_meters - map_.info.origin.position.x) // map_.info.resolution))
        y_index = int(((y_in_meters - map_.info.origin.position.y) // map_.info.resolution))
        for i in range((x_index - cell_radius), (x_index + cell_radius), 1):
            for j in range((y_index - cell_radius), (y_index + cell_radius), 1):
                # Row-major flattening of the occupancy grid.
                index = ((j * map_.info.width) + i)
                if (index >= len(map_.data)):
                    return False
                try:
                    value = map_.data[index]
                except IndexError:
                    print(('IndexError: index: %d, map_length: %d' % (index, len(map_.data))))
                    return False
                if (value != 0):
                    return False
        return True

    assert ((len(free_space_indices) == 2) and (len(free_space_indices[0]) == len(free_space_indices[1]))), 'free_space_indices is not correctly setup'
    if (forbidden_zones is None):
        forbidden_zones = []
    n_freespace_cells = len(free_space_indices[0])
    pos_valid = False
    n_check_failed = 0
    (x_in_meters, y_in_meters) = (None, None)
    # Rejection-sample free cells until one passes the clearance test.
    while (not pos_valid):
        idx = random.randint(0, (n_freespace_cells - 1))
        (y_in_cells, x_in_cells) = (free_space_indices[0][idx], free_space_indices[1][idx])
        # Grid indices -> world coordinates (cell origin).
        y_in_meters = ((y_in_cells * map_.info.resolution) + map_.info.origin.position.y)
        x_in_meters = ((x_in_cells * map_.info.resolution) + map_.info.origin.position.x)
        pos_valid = is_pos_valid(x_in_meters, y_in_meters)
        if (not pos_valid):
            n_check_failed += 1
            if (n_check_failed > 100):
                raise Exception("cann't find any no-occupied space please check the map information")
    # Fixed yaw of 1 rad, static xyz Euler convention.
    q = quaternion_from_euler(0.0, 0.0, 1, axes='sxyz')
    p = Pose()
    p.position = Point(*[x_in_meters, y_in_meters, 0])
    p.orientation = Quaternion(*q)
    return p
def test_perf(pb_model_file, val_data):
    """Measure accuracy, batch throughput, and per-sample latency of a
    TF SavedModel.

    Args:
        pb_model_file: SavedModel directory path.
        val_data: [x_test_np, label_test] evaluation pair.

    Returns:
        (accuracy, throughput_fps, latency_ms).
    """
    [x_test_np, label_test] = val_data
    q_model = tf.saved_model.load(pb_model_file)
    x_test = tf.convert_to_tensor(x_test_np)
    infer = q_model.signatures['serving_default']
    # --- throughput: full-batch inference, first 20% of runs as warmup ---
    times = 10
    bt = 0
    warmup = int((times * 0.2))
    for i in range(times):
        if (i == warmup):
            bt = time.time()  # start timing once warmup runs are done
        res = infer(x_test)
    et = time.time()
    res = list(res.values())[0]
    accuracy = calc_accuracy(res, label_test)
    print('accuracy:', accuracy)
    throughput = ((len(x_test) * (times - warmup)) / (et - bt))
    print('max throughput(fps):', throughput)
    # --- latency: one pass, one sample at a time ---
    times = 1
    bt = 0
    warmup = int((times * 0.2))  # == 0, so timing starts immediately
    for i in range(times):
        if (i == warmup):
            bt = time.time()
        for i in range(len(x_test)):  # NOTE: shadows the outer loop index
            res = infer(tf.convert_to_tensor([x_test_np[i]]))
    et = time.time()
    latency = ((((et - bt) * 1000) / (times - warmup)) / len(x_test))
    print('latency(ms):', latency)
    return (accuracy, throughput, latency)
def dump_transformer_index(encoder_type, splits):
    """Dispatch index dumping to the encoder-specific implementation.

    Raises:
        NotImplementedError: for any unrecognised encoder_type.
    """
    # BERT-family encoders all share one index format.
    if encoder_type in ('bert', 'vlbert', 'MultiDicEncoder'):
        dump_bert_index(splits)
    elif encoder_type == 'gpt':
        dump_gpt_index(splits)
    else:
        raise NotImplementedError
def build_single_model_ui(models):
    """Build the single-model Gradio chat UI and wire up its event handlers.

    Args:
        models: list of model names for the (hidden) model selector dropdown;
            the first entry is preselected when non-empty.

    Returns:
        Tuple (state, model_selector, chatbot, textbox, send_btn, button_row,
        parameter_row) of the components a caller needs to show/manage.
    """
    # Header/footer HTML rendered above and below the chat area.
    notice_markdown = '\n<div class="title">\n<div style="\n color: #fff;\n">Large Language Model <p style="\n font-size: 0.8rem;\n">4th Gen Intel Xeon with Intel AMX</p></div>\n \n</div>\n'
    learn_more_markdown = '<div class="footer">\n <p>Powered by <a href=" style="text-decoration: underline;" target="_blank">Intel Extension for Transformers</a> and <a href=" style="text-decoration: underline;" target="_blank">Intel Extension for PyTorch</a>\n <img src=\' class=\'img-logo-right-style\'/></p>\n </div>\n <div class="acknowledgments">\n <p></p></div>\n \n '
    state = gr.State()  # per-session conversation state
    notice = gr.Markdown(notice_markdown, elem_id='notice_markdown')
    # Model picker row (kept hidden; typically only one model is served).
    with gr.Row(elem_id='model_selector_row', visible=False):
        model_selector = gr.Dropdown(choices=models, value=(models[0] if (len(models) > 0) else ''), interactive=True, show_label=False).style(container=False)
    chatbot = grChatbot(elem_id='chatbot', visible=False).style(height=550)
    # Input row: text box plus a send button.
    with gr.Row(elem_id='text-box-style'):
        with gr.Column(scale=20):
            textbox = gr.Textbox(show_label=False, placeholder='Enter text and press ENTER', visible=False).style(container=False)
        with gr.Column(scale=1, min_width=50):
            send_btn = gr.Button(value='Send', visible=False, elem_id='btn-send-style')
    # Generation parameter controls (collapsed accordion).
    with gr.Accordion('Parameters', open=False, visible=False, elem_id='btn-style') as parameter_row:
        temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.001, step=0.1, interactive=True, label='Temperature', visible=False)
        max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=1, interactive=True, label='Max output tokens')
        topk = gr.Slider(minimum=1, maximum=10, value=1, step=1, interactive=True, label='TOP K')
    # Feedback / history control buttons.
    with gr.Row(visible=False, elem_id='btn-style') as button_row:
        upvote_btn = gr.Button(value=' Upvote', interactive=False, visible=False, elem_id='btn-list-style')
        downvote_btn = gr.Button(value=' Downvote', interactive=False, visible=False, elem_id='btn-list-style')
        flag_btn = gr.Button(value=' Flag', interactive=False, visible=False, elem_id='btn-list-style')
        regenerate_btn = gr.Button(value=' Regenerate', interactive=False, elem_id='btn-list-style')
        clear_btn = gr.Button(value=' Clear history', interactive=False, elem_id='btn-list-style')
    gr.Markdown(learn_more_markdown)
    btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
    # Event wiring: voting/flagging record feedback for the last response.
    upvote_btn.click(upvote_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
    downvote_btn.click(downvote_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
    flag_btn.click(flag_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
    # NOTE(review): each .then(...) below passes only inputs/outputs — the
    # callback (first) argument appears to be missing, likely lost in
    # extraction of this file; confirm against the original source.
    regenerate_btn.click(regenerate, state, ([state, chatbot, textbox] + btn_list)).then(
        [state, model_selector, temperature, max_output_tokens, topk], ([state, chatbot] + btn_list))
    clear_btn.click(clear_history, None, ([state, chatbot, textbox] + btn_list))
    model_selector.change(clear_history, None, ([state, chatbot, textbox] + btn_list))
    textbox.submit(add_text, [state, textbox], ([state, chatbot, textbox] + btn_list)).then(
        [state, model_selector, temperature, max_output_tokens, topk], ([state, chatbot] + btn_list))
    send_btn.click(add_text, [state, textbox], ([state, chatbot, textbox] + btn_list)).then(
        [state, model_selector, temperature, max_output_tokens, topk], ([state, chatbot] + btn_list))
    return (state, model_selector, chatbot, textbox, send_btn, button_row, parameter_row)
def FilterPhoneticDecodingLexicon(args, phonetic_decoding_lexicon, stats):
    """Remove phonetic-decoding pronunciation candidates containing silence.

    Any pronunciation whose phone sequence includes a phone listed in
    ``args.silence_file_handle`` is dropped from the lexicon, and its
    soft-count entry (if any) is removed from ``stats``. A warning is printed
    to stderr for every rejected candidate.

    Args:
        args: object with a ``silence_file_handle`` iterable of lines, one
            silence phone per line.
        phonetic_decoding_lexicon: dict mapping word -> list of pronunciation
            strings (space-separated phones); mutated in place.
        stats: dict mapping (word, pron) -> soft count; mutated in place.

    Returns:
        The (phonetic_decoding_lexicon, stats) pair after filtering.
    """
    silphones = {line.strip() for line in args.silence_file_handle}
    rejected_candidates = set()
    for word, prons in phonetic_decoding_lexicon.items():
        for pron in prons:
            # A single silence phone anywhere in the pronunciation rejects it.
            if not any(phone in silphones for phone in pron.split()):
                continue
            # Pull the soft count out of stats (0 when absent) for the warning.
            count = stats.pop((word, pron), 0)
            rejected_candidates.add((word, pron))
            print('WARNING: removing the candidate pronunciation from phonetic-decoding: {0}: "{1}" whose soft-count from lattice-alignment is {2}, cause it contains at least one silence phone.'.format(word, pron, count), file=sys.stderr)
    # Deferred removal so the lexicon is not mutated while iterating it.
    for word, pron in rejected_candidates:
        phonetic_decoding_lexicon[word].remove(pron)
    return (phonetic_decoding_lexicon, stats)
def postChapter(heading: str) -> None:
    """Deprecated wrapper: emit a deprecation notice, then delegate to
    ``post.summaryChapter(heading)``."""
    message = "'postChapter(heading)' is deprecated. Use 'post.summaryChapter(heading)'."
    _deprecation(message)
    post.summaryChapter(heading)
def test_lazy_and_debug_fisher_inverse_match():
    """LAZY and DEBUG SR modes must produce matching Fisher-inverse gradients."""
    energy_grad, params, positions, _, log_psi_apply, mean_grad_fn = _setup_fisher()
    # Identical solver settings for both modes; only the mode differs.
    common = dict(damping=0.0, maxiter=None)
    lazy_fn = updates.sr.get_fisher_inverse_fn(
        log_psi_apply, mean_grad_fn, mode=updates.sr.SRMode.LAZY, **common)
    debug_fn = updates.sr.get_fisher_inverse_fn(
        log_psi_apply, mean_grad_fn, mode=updates.sr.SRMode.DEBUG, **common)
    result_lazy = lazy_fn(energy_grad, params, positions)
    result_debug = debug_fn(energy_grad, params, positions)
    np.testing.assert_allclose(result_lazy, result_debug, rtol=1e-06, atol=1e-06)
def spect_filter(spect0, spect_size=None):
    """Apply ``_spect_filter`` along the trailing (pre-last) axes of spect0.

    Args:
        spect0: input array-like passed through ``_spect_filter``.
        spect_size: None (no-op passthrough), an int (single axis), or a
            sequence of sizes — one per axis, mapped onto the axes
            ``-len(spect_size)-1 .. -2``.

    Returns:
        spect0 unchanged when spect_size is None, otherwise the filtered result.
    """
    if spect_size is None:
        return spect0
    if isinstance(spect_size, int):
        spect_size = [spect_size]
    # Pair each requested size with its axis, counted back from the end
    # (the last axis itself is never filtered).
    n = len(spect_size)
    for size, ax in zip(spect_size, range(-n - 1, -1)):
        spect0 = _spect_filter(spect0, size, axis=ax)
    return spect0
class FlashbaxBufferStore():
    """Persist and restore Flashbax trajectory-buffer state via Orbax checkpoints."""

    def __init__(self, dataset_path: str) -> None:
        """Create a checkpoint manager rooted at cwd/dataset_path.

        Only the most recent checkpoint is kept; the directory is created
        when missing.
        """
        checkpointer = orbax.checkpoint.PyTreeCheckpointer()
        manager_options = orbax.checkpoint.CheckpointManagerOptions(max_to_keep=1, create=True)
        target_dir = os.path.join(os.getcwd(), dataset_path)
        self._manager = orbax.checkpoint.CheckpointManager(
            target_dir, checkpointer, manager_options, metadata=None)

    def save(self, t, buffer_state):
        """Checkpoint ``buffer_state`` at step ``t`` (delegates to the manager)."""
        return self._manager.save(step=t, items=buffer_state)

    def restore_state(self):
        """Load the latest checkpoint and rebuild a TrajectoryBufferState.

        All restored leaves are converted back to jax arrays.
        """
        raw = self._manager.restore(self._manager.latest_step())
        experience = jax.tree_util.tree_map(jnp.asarray, raw['experience'])
        return TrajectoryBufferState(
            experience=experience,
            current_index=jnp.asarray(raw['current_index']),
            is_full=jnp.asarray(raw['is_full']),
        )
def faiss_search(faiss_index_pkl, item_dict_pkl, cluster_mode, batch_size=65536, k=200):
    """Build a per-partition batched faiss top-k similarity search function.

    Args:
        faiss_index_pkl: path to a pickled, serialized faiss index.
        item_dict_pkl: path to a pickled id -> item lookup dict.
        cluster_mode: when 'yarn', pickles are opened through Spark's
            filesystem layer (load_SPARK=True).
        batch_size: number of (seed_id, embedding) rows searched per faiss call.
        k: number of nearest neighbours returned per query.

    Returns:
        do_search: a generator function over a partition of (seed_id,
        embedding) rows that yields (seed_item, rec_item, rank, score)
        tuples — suitable for e.g. Spark's mapPartitions.
    """
    from bigdl.friesian.utils import SafePickle
    import faiss
    if (cluster_mode == 'yarn'):
        load_SPARK = True
    else:
        load_SPARK = False

    def do_search(partition):
        # Each partition deserializes its own copies of the index and the
        # id -> item table.
        with open_pickle(faiss_index_pkl, load_SPARK) as index_pkl:
            faiss_idx = faiss.deserialize_index(SafePickle.load(index_pkl))
        with open_pickle(item_dict_pkl, load_SPARK) as f:
            item_dict = SafePickle.load(f)
        buffer = []
        for record in partition:
            if (len(buffer) == batch_size):
                # Buffer full: run one batched search over the buffered rows,
                # then seed the NEXT buffer with the current (still
                # unprocessed) record before yielding results.
                s1 = time.time()
                seed_ids = [row[0] for row in buffer]
                embeddings = [row[1] for row in buffer]
                buffer = [record]
                q_vec = np.stack(embeddings).astype(np.float32)
                (similarity_array, idx_array) = faiss_idx.search(q_vec, k=k)
                e1 = time.time()
                print('Search time: ', (e1 - s1))
                for i in range(batch_size):
                    seed_idx = int(seed_ids[i])
                    seed_item = str(item_dict[seed_idx])
                    # One (seed, neighbour, rank, score) row per result, in
                    # faiss's returned (descending-similarity) order.
                    for (n, (score, rec_id)) in enumerate(zip(similarity_array[i], idx_array[i])):
                        rec_id = int(rec_id)
                        (yield (seed_item, str(item_dict[rec_id]), int(n), float(score)))
            else:
                buffer.append(record)
        # Flush the final partial batch (fewer than batch_size rows).
        remain_size = len(buffer)
        if (remain_size > 0):
            seed_ids = [row[0] for row in buffer]
            embeddings = [row[1] for row in buffer]
            q_vec = np.stack(embeddings).astype(np.float32)
            (similarity_array, idx_array) = faiss_idx.search(q_vec, k=k)
            for i in range(remain_size):
                seed_idx = int(seed_ids[i])
                seed_item = str(item_dict[seed_idx])
                for (n, (score, rec_id)) in enumerate(zip(similarity_array[i], idx_array[i])):
                    rec_id = int(rec_id)
                    (yield (seed_item, str(item_dict[rec_id]), int(n), float(score)))
    return do_search