code
stringlengths
101
5.91M
def data() -> Tuple[np.ndarray, np.ndarray]:
    """Sample a random regression dataset sized by the module-level CONFIG.

    Returns:
        A pair ``(X, Y)``: inputs of shape ``(num_data, input_dim)`` and
        targets of shape ``(num_data, output_dim)``, all entries drawn
        i.i.d. from a standard normal distribution.
    """
    inputs = np.random.randn(CONFIG.num_data, CONFIG.input_dim)
    targets = np.random.randn(CONFIG.num_data, CONFIG.output_dim)
    return inputs, targets
# FIX: the decorator was truncated to ".parametrize(...)" (invalid syntax);
# restored the standard "@pytest.mark.parametrize" form.
@pytest.mark.parametrize('test_input', [0, (- 1), 1, 4, None, bool, int, 1.5, 'None', True])
def test_initialize_bad_target(test_input):
    """fit() must raise ValueError when the classifier target is not a valid name."""
    _dn = BoostedRDNClassifier(target=test_input)
    (train, _) = load_toy_cancer()
    with pytest.raises(ValueError):
        _dn.fit(train)
def braycurtis(u, v, w=None):
    """Return the Bray-Curtis dissimilarity sum(|u-v|) / sum(|u+v|).

    Both vectors are validated first (v is coerced to float64); when a
    weight vector ``w`` is supplied, both the numerator and the
    denominator terms are scaled element-wise by it before summing.
    """
    u = _validate_vector(u)
    v = _validate_vector(v, dtype=np.float64)
    numerator = abs(u - v)
    denominator = abs(u + v)
    if w is not None:
        w = _validate_weights(w)
        numerator = w * numerator
        denominator = w * denominator
    return numerator.sum() / denominator.sum()
class TestArticle(unittest.TestCase):
    # Exercises documents.Document.from_xml on TimeML-annotated text:
    # publication date, tokenisation, per-token TIMEX3 dates and
    # per-sentence dates.

    def setUp(self):
        # spaCy English pipeline handed to Document.from_xml (presumably for
        # tokenisation/sentence splitting — TODO confirm against from_xml).
        nlp = spacy.load('en')
        # Article containing one TIME, one DATE and one DURATION TIMEX3 span.
        self.my_article = documents.Document.from_xml('2012-10-02', '<?xml version="1.0"?>\n<!DOCTYPE TimeML SYSTEM "TimeML.dtd">\n<TimeML>\nAt <TIMEX3 tid="t58" type="TIME" value="2009-05-29T13:28">1:28 pm</TIMEX3> on <TIMEX3 tid="t55" type="DATE" value="2009-05-29">29 May 2009</TIMEX3> , ghostofsichuan wrote : A friend , who as been in the U.S. went home to visit her parents in Xinjiang .\nHer father did not let her leave the house or have visitors for <TIMEX3 tid="t59" type="DURATION" value="P5D">five days</TIMEX3> and told her this was his social responsiblity .\n</TimeML>', nlp)
        # Minimal article whose only annotation is a month-granularity DATE.
        self.my_article2 = documents.Document.from_xml('2012-10-02', '<?xml version="1.0"?>\n<!DOCTYPE TimeML SYSTEM "TimeML.dtd">\n<TimeML>\n<TIMEX3 tid="t58" type="DATE" value="2012-09">last month</TIMEX3> I did something .\n</TimeML>', nlp)

    def test_publication_date(self):
        # The first from_xml argument becomes Document.publication_date.
        date = datetime.datetime.strptime('2012-10-02', '%Y-%m-%d').date()
        self.assertEqual(date, self.my_article.publication_date)

    def test_tokens(self):
        # Document tokens equal the whitespace-split source text with the
        # TIMEX3 markup stripped out.
        tokens = 'At 1:28 pm on 29 May 2009 , ghostofsichuan wrote : A friend , who as been in the U.S. went home to visit her parents in Xinjiang .\nHer father did not let her leave the house or have visitors for five days and told her this was his social responsiblity .'
        self.assertEqual(tokens.split(), [str(tok) for sent in self.my_article for tok in sent.tokens])

    def test_token_dates(self):
        # Tokens inside a TIMEX3 span carry that span's date; the remaining
        # 46 tokens (including the DURATION span) carry None.
        date1 = datetime.datetime.strptime('2009-05-29T13:28', '%Y-%m-%dT%H:%M').date()
        date2 = datetime.datetime.strptime('2009-05-29', '%Y-%m-%d').date()
        time_values = ([None, date1, date1, None, date2, date2, date2] + ([None] * 46))
        self.assertEqual(time_values, [tok.date for sent in self.my_article for tok in sent])

    def test_sent_dates(self):
        # First sentence mentions 2009-05-29, so it carries that date; the
        # second (no TIMEX3) falls back to the publication date; the
        # month-only value '2012-09' resolves to the first of the month.
        self.assertEqual(datetime.datetime.strptime('2009-05-29', '%Y-%m-%d').date(), self.my_article.sentences[0].date)
        self.assertEqual(datetime.datetime.strptime('2012-10-02', '%Y-%m-%d').date(), self.my_article.sentences[1].date)
        self.assertEqual(datetime.datetime.strptime('2012-09-01', '%Y-%m-%d').date(), self.my_article2.sentences[0].date)
class AutoTokenCostEstimator(TokenCostEstimator):
    """Routes token-cost estimation to an organization-specific estimator.

    Estimators are created lazily on first use and cached per organization;
    any organization without a dedicated estimator falls back to
    FreeTokenCostEstimator.
    """

    def __init__(self):
        # Cache of one estimator instance per organization name.
        self._token_cost_estimators: Dict[str, TokenCostEstimator] = {}

    def _get_estimator(self, organization: str) -> TokenCostEstimator:
        """Return (creating and caching on first use) the estimator for `organization`."""
        cached = self._token_cost_estimators.get(organization)
        if cached is not None:
            return cached
        factories = {
            'openai': OpenAITokenCostEstimator,
            'ai21': AI21TokenCostEstimator,
            'cohere': CohereTokenCostEstimator,
            'gooseai': GooseAITokenCostEstimator,
        }
        estimator = factories.get(organization, FreeTokenCostEstimator)()
        self._token_cost_estimators[organization] = estimator
        return estimator

    def estimate_tokens(self, request: Request, metric_service: MetricService) -> int:
        """Estimate the token usage of `request` via its host organization's estimator."""
        return self._get_estimator(request.model_host).estimate_tokens(request, metric_service)
class SchemaGuidedDST(object):
    """Schema-guided dialogue state tracker (TF1 graph-mode).

    Encodes the dialogue with BERT, builds a per-example vocabulary of
    [4 special tokens | dialogue tokens | schema-element embeddings], and
    decodes the dialogue state with an LSTM pointer decoder (teacher-forced
    at training time, beam search at inference time).
    """

    def __init__(self, bert_config, use_one_hot_embeddings):
        # BERT configuration and TPU-friendly embedding-lookup switch,
        # forwarded to modeling.BertModel in _encode_utterances.
        self._bert_config = bert_config
        self._use_one_hot_embeddings = use_one_hot_embeddings

    def define_model(self, features, is_training):
        """Build the encoder + pointer-decoder graph.

        Returns a dict with 'logits' (training) or 'predicted_ids'
        (inference, beam search).
        """
        # Encode the dialogue; keep the pooled vector, per-token outputs and
        # the raw word embeddings (used as decoder "dialogue token" vocab).
        (self._encoded_utterance, self._encoded_tokens, self.input_embedding) = self._encode_utterances(features, is_training)
        dialogue_token_embedding = self.input_embedding
        (_, max_seq_len, _) = dialogue_token_embedding.get_shape().as_list()
        # Schema-element embeddings supplied as precomputed features.
        noncat_slot_embedding = features['noncat_slot_emb']
        (_, max_num_noncat_slots, _) = noncat_slot_embedding.get_shape().as_list()
        cat_slot_embedding = features['cat_slot_emb']
        intent_embedding = features['intent_emb']
        (_, max_num_intent, _) = intent_embedding.get_shape().as_list()
        value_embedding = features['cat_slot_value_emb']
        (_, max_num_cat_slots, max_num_values, embedding_dim) = value_embedding.get_shape().as_list()
        batch_size = tf.shape(value_embedding)[0]
        # Flatten (slot, value) into one axis so values join the schema list.
        value_embedding = tf.reshape(value_embedding, [batch_size, (max_num_cat_slots * max_num_values), embedding_dim])
        self.dialogue_token_len = max_seq_len
        self.schema_len = (((max_num_intent + max_num_noncat_slots) + max_num_cat_slots) + (max_num_values * max_num_cat_slots))
        # Per-example decoder vocabulary: dialogue tokens followed by all
        # schema elements; 4 learned special-token embeddings are prepended.
        input = tf.concat([dialogue_token_embedding, noncat_slot_embedding, cat_slot_embedding, intent_embedding, value_embedding], axis=1)
        special_token_embedding = tf.get_variable('special_token_embedding', [4, embedding_dim], tf.float32, tf.contrib.layers.xavier_initializer())
        embedding_table = tf.concat([tf.tile(tf.expand_dims(special_token_embedding, axis=0), [batch_size, 1, 1]), input], axis=1)
        (_, self.vocab_size, _) = embedding_table.get_shape().as_list()
        output = features['output']
        dec_output_len = features['dec_output_len']
        # Decoder sequence budget; presumably 3 steps per categorical slot and
        # 4 per non-categorical slot plus delimiters — TODO confirm.
        max_decode_seq_len = (((2 + (3 * max_num_cat_slots)) + (4 * max_num_noncat_slots)) + 1)
        self.dec_output_mask = tf.sequence_mask(dec_output_len, max_decode_seq_len)
        # Teacher forcing: inputs are the target sequence shifted right by one.
        outputs_list = tf.unstack(output, axis=1)
        self.targets = tf.stack(outputs_list[1:], axis=1)
        dec_input_ids = tf.expand_dims(tf.stack(outputs_list[:(- 1)], axis=1), 2)
        decoder_inputs = []
        # Static Python int batch size is needed for the per-example gather
        # loop below and for the reshapes in get_logits_for_pointer.
        if is_training:
            self.batch_size_int = FLAGS.train_batch_size
        else:
            self.batch_size_int = FLAGS.predict_batch_size
        # Gather each example's decoder-input embeddings from its own table.
        for i in range(self.batch_size_int):
            embedding_table_i = tf.squeeze(tf.slice(embedding_table, [i, 0, 0], [1, self.vocab_size, embedding_dim]))
            dec_input_ids_i = tf.reshape(tf.slice(dec_input_ids, [i, 0, 0], [1, max_decode_seq_len, 1]), [max_decode_seq_len, 1])
            decoder_inputs.append(tf.gather_nd(embedding_table_i, dec_input_ids_i))
        decoder_inputs = tf.stack(decoder_inputs, axis=0)
        cell = tf.contrib.rnn.LSTMCell
        # Cross-attention in both directions between dialogue tokens and
        # schema elements; their concatenation forms the pointer vocabulary.
        schema_embedding = tf.concat([noncat_slot_embedding, cat_slot_embedding, intent_embedding, value_embedding], axis=1)
        schema_aware_dialogue_attention = self.get_dialogue_attention(self._encoded_tokens, schema_embedding)
        dialogue_aware_schema_attention = self.get_schema_attention(schema_embedding, self._encoded_tokens)
        # Only the last 2 of the 4 special tokens participate in decoding,
        # which is why decoded ids are offset by 2 elsewhere.
        decode_special_token_embedding = tf.slice(special_token_embedding, [2, 0], [2, embedding_dim])
        decode_special_token_embedding_reshape = tf.tile(tf.expand_dims(decode_special_token_embedding, axis=0), [batch_size, 1, 1])
        self.decode_vocab = tf.concat([decode_special_token_embedding_reshape, schema_aware_dialogue_attention, dialogue_aware_schema_attention], axis=1)
        if (not is_training):
            # Beam search needs the attention memories tiled beam_width times.
            schema_aware_dialogue_attention = tf.contrib.seq2seq.tile_batch(schema_aware_dialogue_attention, FLAGS.beam_width)
            dialogue_aware_schema_attention = tf.contrib.seq2seq.tile_batch(dialogue_aware_schema_attention, FLAGS.beam_width)
        pointer_cell = PointerWrapper(cell(FLAGS.decoder_hidden_dim), FLAGS.decoder_attention_dim, schema_aware_dialogue_attention, dialogue_aware_schema_attention)
        # Optional stacked decoder: plain LSTM layers below the pointer cell.
        if (FLAGS.decode_num_layer > 1):
            dec_cell = tf.contrib.rnn.MultiRNNCell(([cell(FLAGS.decoder_hidden_dim) for _ in range((FLAGS.decode_num_layer - 1))] + [pointer_cell]))
        else:
            dec_cell = pointer_cell
        final_outputs = {}
        # Pointer-scoring parameters used by get_logits_for_pointer.
        self.w_1 = tf.get_variable('W_1', [(FLAGS.decoder_hidden_dim * 2), FLAGS.decoder_hidden_dim], tf.float32, tf.contrib.layers.xavier_initializer())
        self.w_2 = tf.get_variable('W_2', [FLAGS.bert_dim, FLAGS.decoder_hidden_dim], tf.float32, tf.contrib.layers.xavier_initializer())
        self.v = tf.get_variable('v', [FLAGS.decoder_hidden_dim, 1], tf.float32, tf.contrib.layers.xavier_initializer())
        if is_training:
            # Teacher-forced decode; logits are padded with zeros up to
            # max_decode_seq_len so the loss mask lines up.
            self.cur_batch_max_len = tf.reduce_max(dec_output_len)
            helper = tf.contrib.seq2seq.TrainingHelper(decoder_inputs, dec_output_len)
            decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, dec_cell.zero_state(batch_size, tf.float32))
            (outputs, _, _) = tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True)
            rnn_output = outputs.rnn_output
            logits = self.get_logits_for_pointer(rnn_output)
            self.predicted_ids_with_logits = tf.nn.top_k(logits)
            logits = tf.concat([logits, tf.zeros([batch_size, (max_decode_seq_len - self.cur_batch_max_len), (self.vocab_size - 2)])], axis=1)
            final_outputs['logits'] = logits
        else:
            # Beam-search decode; the embedding table is tiled per beam.
            tile_embedding_table = tf.tile(tf.expand_dims(embedding_table, 1), [1, FLAGS.beam_width, 1, 1])

            def embedding_lookup(ids):
                # Shift decoded ids back into full-table indexing (+2 undoes
                # the dropped special tokens) and look up via one-hot matmul.
                ids = (ids + 2)
                one_hot_ids = tf.cast(tf.one_hot(ids, self.vocab_size), dtype=tf.float32)
                one_hot_ids = tf.expand_dims(one_hot_ids, (- 1))
                next_inputs = tf.reduce_sum((one_hot_ids * tile_embedding_table), axis=2)
                return next_inputs
            shifted_START_ID = (START_ID - 2)
            shifted_END_ID = (END_ID - 2)
            decoder = PointerBeamSearchDecoder(dec_cell, embedding_lookup, tf.tile([shifted_START_ID], [batch_size]), shifted_END_ID, dec_cell.zero_state((batch_size * FLAGS.beam_width), tf.float32), FLAGS.beam_width, output_layer=self.get_logits_for_pointer)
            (outputs, _, _) = tf.contrib.seq2seq.dynamic_decode(decoder, maximum_iterations=max_decode_seq_len)
            predicted_ids = outputs.predicted_ids
            # Reorder to (batch, beam, time).
            self.predicted_ids = tf.transpose(predicted_ids, [0, 2, 1])
            final_outputs['predicted_ids'] = self.predicted_ids
        return final_outputs

    def define_loss(self, features, outputs):
        """Masked softmax cross-entropy over the shifted pointer targets."""
        # Targets use the same -2 id shift as the decode vocabulary; masked
        # positions are zeroed.
        shifted_targets = ((self.targets - 2) * tf.cast(self.dec_output_mask, tf.int32))
        logits = outputs['logits']
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=shifted_targets, logits=logits)
        loss = (tf.reduce_sum((losses * tf.cast(self.dec_output_mask, tf.float32))) / FLAGS.train_batch_size)
        losses = {'total_loss': loss}
        # Log every component loss (currently just one) to TensorBoard.
        for (loss_name, loss) in losses.items():
            tf.summary.scalar(loss_name, loss)
        # Average of the component losses; equals total_loss with one entry.
        return (sum(losses.values()) / len(losses))

    def define_predictions(self, features, outputs):
        """Assemble the prediction dict; keeps only the top beam's sequence."""
        predictions = {'example_id': features['example_id'], 'service_id': features['service_id'], 'is_real_example': features['is_real_example']}
        predictions['noncat_alignment_start'] = features['noncat_alignment_start']
        predictions['noncat_alignment_end'] = features['noncat_alignment_end']
        predicted_ids = outputs['predicted_ids']
        # Beam 0 (highest scoring) only.
        predictions['predicted_seq_ids'] = tf.squeeze(tf.gather(predicted_ids, [0], axis=1))
        return predictions

    def _encode_utterances(self, features, is_training):
        """Run BERT over the utterance; returns (pooled, per-token, word-emb) outputs."""
        bert_encoder = modeling.BertModel(config=self._bert_config, is_training=is_training, input_ids=features['utt'], input_mask=features['utt_mask'], token_type_ids=features['utt_seg'], use_one_hot_embeddings=self._use_one_hot_embeddings)
        encoded_utterance = bert_encoder.get_pooled_output()
        encoded_tokens = bert_encoder.get_sequence_output()
        input_embbedding = bert_encoder.word_embedding_output
        # Dropout only applies when training (rate from FLAGS).
        encoded_utterance = tf.layers.dropout(encoded_utterance, rate=FLAGS.dropout_rate, training=is_training)
        encoded_tokens = tf.layers.dropout(encoded_tokens, rate=FLAGS.dropout_rate, training=is_training)
        return (encoded_utterance, encoded_tokens, input_embbedding)

    def get_logits_for_pointer(self, cell_outputs):
        """Additive-attention scores of decoder states against the decode vocab.

        Returns logits of shape (batch, decode_length, vocab_size - 2).
        """
        decode_length = tf.shape(cell_outputs)[1]
        # Broadcast every decoder state against every vocab entry via
        # tile + reshape, then score with v^T tanh(W1 s + W2 e).
        cell_outputs_reshape = tf.reshape(tf.tile(tf.matmul(cell_outputs, self.w_1), [1, 1, (self.vocab_size - 2)]), [self.batch_size_int, (decode_length * (self.vocab_size - 2)), FLAGS.decoder_hidden_dim])
        vocab_reshape = tf.tile(tf.matmul(self.decode_vocab, self.w_2), [1, decode_length, 1])
        logits = tf.reshape(tf.matmul(tf.tanh((cell_outputs_reshape + vocab_reshape)), self.v), [self.batch_size_int, decode_length, (self.vocab_size - 2)])
        return logits

    def get_dialogue_attention(self, dialogue, schema):
        """Additive attention of each dialogue token over the schema elements."""
        w_1_d = tf.get_variable('W_1_d', [FLAGS.bert_dim, FLAGS.bert_dim], tf.float32, tf.contrib.layers.xavier_initializer())
        w_2_d = tf.get_variable('W_2_d', [FLAGS.bert_dim, FLAGS.bert_dim], tf.float32, tf.contrib.layers.xavier_initializer())
        # All (token, schema-element) pairs via tile + reshape.
        dialogue_reshape = tf.reshape(tf.tile(tf.matmul(dialogue, w_1_d), [1, 1, self.schema_len]), [self.batch_size_int, (self.dialogue_token_len * self.schema_len), FLAGS.bert_dim])
        schema_reshape = tf.tile(tf.matmul(schema, w_2_d), [1, self.dialogue_token_len, 1])
        v_d = tf.get_variable('v_d', [FLAGS.bert_dim, 1], tf.float32, tf.contrib.layers.xavier_initializer())
        alpha = tf.nn.softmax(tf.reshape(tf.matmul(tf.tanh((dialogue_reshape + schema_reshape)), v_d), [self.batch_size_int, self.dialogue_token_len, self.schema_len]))
        # Weighted sum of schema embeddings per dialogue token.
        attention = tf.matmul(alpha, schema)
        return attention

    def get_schema_attention(self, schema, dialogue):
        """Additive attention of each schema element over the dialogue tokens (mirror of above)."""
        w_1_s = tf.get_variable('W_1_s', [FLAGS.bert_dim, FLAGS.bert_dim], tf.float32, tf.contrib.layers.xavier_initializer())
        w_2_s = tf.get_variable('W_2_s', [FLAGS.bert_dim, FLAGS.bert_dim], tf.float32, tf.contrib.layers.xavier_initializer())
        schema_reshape = tf.reshape(tf.tile(tf.matmul(schema, w_1_s), [1, 1, self.dialogue_token_len]), [self.batch_size_int, (self.schema_len * self.dialogue_token_len), FLAGS.bert_dim])
        dialogue_reshape = tf.tile(tf.matmul(dialogue, w_2_s), [1, self.schema_len, 1])
        v_s = tf.get_variable('v_s', [FLAGS.bert_dim, 1], tf.float32, tf.contrib.layers.xavier_initializer())
        alpha = tf.nn.softmax(tf.reshape(tf.matmul(tf.tanh((dialogue_reshape + schema_reshape)), v_s), [self.batch_size_int, self.schema_len, self.dialogue_token_len]))
        # Weighted sum of dialogue-token encodings per schema element.
        attention = tf.matmul(alpha, dialogue)
        return attention

    def _get_logits(self, element_embeddings, num_classes, name_scope):
        """Score schema elements against the pooled utterance with a 2-layer MLP."""
        (_, num_elements, embedding_dim) = element_embeddings.get_shape().as_list()
        utterance_proj = tf.keras.layers.Dense(units=embedding_dim, activation=modeling.gelu, name='{}_utterance_proj'.format(name_scope))
        utterance_embedding = utterance_proj(self._encoded_utterance)
        # Pair the (projected) utterance vector with every element embedding.
        repeat_utterance_embeddings = tf.tile(tf.expand_dims(utterance_embedding, axis=1), [1, num_elements, 1])
        utterance_element_emb = tf.concat([repeat_utterance_embeddings, element_embeddings], axis=2)
        layer_1 = tf.keras.layers.Dense(units=embedding_dim, activation=modeling.gelu, name='{}_projection_1'.format(name_scope))
        layer_2 = tf.keras.layers.Dense(units=num_classes, name='{}_projection_2'.format(name_scope))
        return layer_2(layer_1(utterance_element_emb))

    def _get_intents(self, features):
        """Intent logits with a learned NONE intent prepended; padding masked out."""
        intent_embeddings = features['intent_emb']
        (_, max_num_intents, embedding_dim) = intent_embeddings.get_shape().as_list()
        # Trainable "no active intent" embedding, prepended at index 0.
        null_intent_embedding = tf.get_variable('null_intent_embedding', shape=[1, 1, embedding_dim], initializer=tf.truncated_normal_initializer(stddev=0.02))
        batch_size = tf.shape(intent_embeddings)[0]
        repeated_null_intent_embedding = tf.tile(null_intent_embedding, [batch_size, 1, 1])
        intent_embeddings = tf.concat([repeated_null_intent_embedding, intent_embeddings], axis=1)
        logits = self._get_logits(intent_embeddings, 1, 'intents')
        logits = tf.squeeze(logits, axis=(- 1))
        # +1 accounts for the prepended null intent.
        mask = tf.sequence_mask((features['intent_num'] + 1), maxlen=(max_num_intents + 1))
        # Large negative fill so padded intents never win the softmax.
        negative_logits = (((- 0.7) * tf.ones_like(logits)) * logits.dtype.max)
        return tf.where(mask, logits, negative_logits)

    def _get_requested_slots(self, features):
        """Binary per-slot logits for whether a slot is requested."""
        slot_embeddings = features['req_slot_emb']
        logits = self._get_logits(slot_embeddings, 1, 'requested_slots')
        return tf.squeeze(logits, axis=(- 1))

    def _get_categorical_slot_goals(self, features):
        """Status logits (3-way) and per-value logits for categorical slots."""
        slot_embeddings = features['cat_slot_emb']
        status_logits = self._get_logits(slot_embeddings, 3, 'categorical_slot_status')
        value_embeddings = features['cat_slot_value_emb']
        (_, max_num_slots, max_num_values, embedding_dim) = value_embeddings.get_shape().as_list()
        # Fold (slot, value) into one axis so _get_logits can score all values.
        value_embeddings_reshaped = tf.reshape(value_embeddings, [(- 1), (max_num_slots * max_num_values), embedding_dim])
        value_logits = self._get_logits(value_embeddings_reshaped, 1, 'categorical_slot_values')
        value_logits = tf.reshape(value_logits, [(- 1), max_num_slots, max_num_values])
        # Mask out padded values per slot with large negative logits.
        mask = tf.sequence_mask(features['cat_slot_value_num'], maxlen=max_num_values)
        negative_logits = (((- 0.7) * tf.ones_like(value_logits)) * value_logits.dtype.max)
        value_logits = tf.where(mask, value_logits, negative_logits)
        return (status_logits, value_logits)

    def _get_noncategorical_slot_goals(self, features):
        """Status logits plus span start/end logits over utterance tokens."""
        slot_embeddings = features['noncat_slot_emb']
        max_num_slots = slot_embeddings.get_shape().as_list()[1]
        status_logits = self._get_logits(slot_embeddings, 3, 'noncategorical_slot_status')
        token_embeddings = self._encoded_tokens
        max_num_tokens = token_embeddings.get_shape().as_list()[1]
        # Pair every slot with every token: (batch, slots, tokens, 2*dim).
        tiled_token_embeddings = tf.tile(tf.expand_dims(token_embeddings, 1), [1, max_num_slots, 1, 1])
        tiled_slot_embeddings = tf.tile(tf.expand_dims(slot_embeddings, 2), [1, 1, max_num_tokens, 1])
        slot_token_embeddings = tf.concat([tiled_slot_embeddings, tiled_token_embeddings], axis=3)
        embedding_dim = slot_embeddings.get_shape().as_list()[(- 1)]
        layer_1 = tf.keras.layers.Dense(units=embedding_dim, activation=modeling.gelu, name='noncat_spans_layer_1')
        layer_2 = tf.keras.layers.Dense(units=2, name='noncat_spans_layer_2')
        # Channel 0 = span start score, channel 1 = span end score.
        span_logits = layer_2(layer_1(slot_token_embeddings))
        token_mask = features['utt_mask']
        token_mask = tf.cast(token_mask, tf.bool)
        tiled_token_mask = tf.tile(tf.expand_dims(tf.expand_dims(token_mask, 1), 3), [1, max_num_slots, 1, 2])
        # Padded tokens get large negative scores for both start and end.
        negative_logits = (((- 0.7) * tf.ones_like(span_logits)) * span_logits.dtype.max)
        span_logits = tf.where(tiled_token_mask, span_logits, negative_logits)
        (span_start_logits, span_end_logits) = tf.unstack(span_logits, axis=3)
        return (status_logits, span_start_logits, span_end_logits)
def cot() -> operations.GraphOfOperations:
    """Build the Chain-of-Thought pipeline graph.

    The graph generates a single response, scores it with the error
    counter, and finally compares it against the ground truth.
    """
    graph = operations.GraphOfOperations()
    pipeline = (
        operations.Generate(1, 1),
        operations.Score(1, False, utils.num_errors),
        operations.GroundTruth(utils.test_sorting),
    )
    for op in pipeline:
        graph.append_operation(op)
    return graph
def load_pretrained_model(model_name_or_path_or_checkpoint, *args, **kwargs):
    # Dispatch to the checkpoint loader when given an existing file path,
    # otherwise to the by-name/path loader.
    # NOTE(review): `args`/`kwargs` are passed as two positional values (a
    # tuple and a dict) rather than unpacked with */**. That is only correct
    # if the private loaders are declared as `f(name, args, kwargs)` —
    # verify their signatures; if they use *args/**kwargs this drops every
    # forwarded argument into the first two parameters.
    if PathManager.isfile(model_name_or_path_or_checkpoint):
        return _load_pretrained_checkpoint(model_name_or_path_or_checkpoint, args, kwargs)
    else:
        return _load_pretrained_model(model_name_or_path_or_checkpoint, args, kwargs)
def from_rank(n, rank):
    """Return the permutation of ``n`` elements having the given rank.

    Decodes ``rank`` into its factorial-base digits (the Lehmer code,
    least-significant digit last) and converts that code into a
    permutation of ``Permutations(n)``.
    """
    digits = [None] * n
    remaining = rank
    for radix in range(1, n + 1):
        digits[n - radix] = Integer(remaining % radix)
        remaining = int(remaining) // radix
    return from_lehmer_code(digits, Permutations(n))
def plotIsoFreqNSimpedance(ax, freq, array, flag, par='abs', colorbar=True, colorNorm='SymLog', cLevel=True, contour=True):
    """Plot one frequency slice of NSEM impedance data on `ax`.

    Args:
        ax: matplotlib Axes (must expose `.cax` when colorbar=True).
        freq: frequency to select from the record array.
        array: record array with 'freq', 'x', 'y' and the data field `flag`.
        flag: name of the impedance component field to plot.
        par: which part to plot — 'abs', 'real' or 'imag'.
        colorbar: draw a colorbar on ax.cax.
        colorNorm: 'SymLog' or 'Lin' color normalization (real/imag parts).
        cLevel: use fixed log-spaced contour levels instead of data-driven
            linear levels.
        contour: tricontourf when True, pcolor on a (50 m-cell) grid otherwise.

    Returns:
        The matplotlib contour/pcolor artist.
    """
    # Rows belonging to the requested frequency.
    indUniFreq = np.where((freq == array['freq']))
    (x, y) = (array['x'][indUniFreq], array['y'][indUniFreq])
    if (par == 'abs'):
        zPlot = np.abs(array[flag][indUniFreq])
        cmap = plt.get_cmap('OrRd_r')
        level = np.logspace(0, (- 5), 31)
        clevel = np.logspace(0, (- 4), 5)
        plotNorm = colors.LogNorm()
    elif (par == 'real'):
        zPlot = np.real(array[flag][indUniFreq])
        cmap = plt.get_cmap('RdYlBu')
        if cLevel:
            # Symmetric log-spaced levels around zero.
            level = np.concatenate(((- np.logspace(0, (- 10), 31)), np.logspace((- 10), 0, 31)))
            clevel = np.concatenate(((- np.logspace(0, (- 8), 5)), np.logspace((- 8), 0, 5)))
        else:
            level = np.linspace(zPlot.min(), zPlot.max(), 100)
            clevel = np.linspace(zPlot.min(), zPlot.max(), 10)
        if (colorNorm == 'SymLog'):
            plotNorm = colors.SymLogNorm(1e-10, linscale=2)
        else:
            plotNorm = colors.Normalize()
    elif (par == 'imag'):
        zPlot = np.imag(array[flag][indUniFreq])
        cmap = plt.get_cmap('RdYlBu')
        # FIX: the original also assigned `level`/`clevel` here before the
        # cLevel branch below, but both branches unconditionally overwrite
        # them — those dead stores are removed. plotNorm is kept as the
        # default for colorNorm values other than 'SymLog'/'Lin'.
        plotNorm = colors.SymLogNorm(1e-10, linscale=2)
        if cLevel:
            level = np.concatenate(((- np.logspace(0, (- 10), 31)), np.logspace((- 10), 0, 31)))
            clevel = np.concatenate(((- np.logspace(0, (- 8), 5)), np.logspace((- 8), 0, 5)))
        else:
            level = np.linspace(zPlot.min(), zPlot.max(), 100)
            clevel = np.linspace(zPlot.min(), zPlot.max(), 10)
            if (colorNorm == 'SymLog'):
                plotNorm = colors.SymLogNorm(1e-10, linscale=2)
            elif (colorNorm == 'Lin'):
                plotNorm = colors.Normalize()
    if contour:
        cs = ax.tricontourf(x, y, zPlot, levels=level, cmap=cmap, norm=plotNorm)
    else:
        # Regular-grid plot: pad the cell edges by 25 m on each side
        # (assumes a 50 m station spacing — TODO confirm).
        (uniX, uniY) = (np.unique(x), np.unique(y))
        (X, Y) = np.meshgrid(np.append((uniX - 25), (uniX[(- 1)] + 25)), np.append((uniY - 25), (uniY[(- 1)] + 25)))
        cs = ax.pcolor(X, Y, np.reshape(zPlot, (len(uniY), len(uniX))), cmap=cmap, norm=plotNorm)
    if colorbar:
        plt.colorbar(cs, cax=ax.cax, ticks=clevel, format='%1.2e')
    ax.set_title(((flag + ' ') + par), fontsize=8)
    return cs
def query_2_deepdb_sql(query: Query, table: Table, aggregate=True, split=False):
    """Render the query's predicates as a DeepDB-style SQL string over `table`.

    Range predicates (op '[]') become a BETWEEN clause, or a >=/<= pair when
    `split` is set; every other operator is emitted verbatim with its
    normalized literal. `aggregate` selects COUNT(*) instead of *.
    """
    clauses = []
    for col, pred in query.predicates.items():
        if pred is None:
            continue
        op, val = pred
        if op == '[]':
            bounds = table.columns[col].normalize(list(val))
            assert len(bounds) == 2, bounds
            if split:
                clauses.append(f'{col} >= {bounds[0]}')
                clauses.append(f'{col} <= {bounds[1]}')
            else:
                clauses.append(f'({col} between {bounds[0]} and {bounds[1]})')
        else:
            literal = table.columns[col].normalize(val).item()
            clauses.append(f'{col} {op} {literal}')
    selection = 'COUNT(*)' if aggregate else '*'
    return f"""SELECT {selection} FROM "{table.name}" WHERE {' AND '.join(clauses)}"""
def build_lmdb(save_path, metas, commit_interval=1000):
    """Write the images listed in `metas` into an LMDB database at `save_path`.

    Args:
        save_path: target LMDB directory path; must end with '.lmdb'.
            If it already exists the function prints a notice and returns.
        metas: non-empty sequence whose items' first element is an image
            filename (loaded via pil_loader; expected H x W x C).
        commit_interval: number of images written per transaction commit.

    Side effects: creates the parent directory if needed and pickles a
    {filename: "H_W_C"} shape index to meta_info.pkl inside the LMDB dir.

    Raises:
        ValueError: if save_path has the wrong suffix or metas is empty.
    """
    if not save_path.endswith('.lmdb'):
        raise ValueError("lmdb_save_path must end with 'lmdb'.")
    if osp.exists(save_path):
        print('Folder [{:s}] already exists.'.format(save_path))
        return
    # FIX: fail clearly on empty input instead of IndexError on metas[0].
    if not metas:
        raise ValueError('metas must contain at least one image entry.')
    # FIX: derive the parent directory portably instead of splitting on '/'.
    parent_dir = osp.dirname(save_path)
    if parent_dir and not osp.exists(parent_dir):
        os.makedirs(parent_dir)
    # Rough map-size estimate: first image's byte size times the image
    # count, with a 10x safety margin.
    data_size_per_img = cv2.imread(metas[0][0], cv2.IMREAD_UNCHANGED).nbytes
    data_size = data_size_per_img * len(metas)
    env = lmdb.open(save_path, map_size=data_size * 10)
    txn = env.begin(write=True)
    shape = dict()
    print('Building lmdb...')
    for i in tqdm(range(len(metas))):
        image_filename = metas[i][0]
        img = pil_loader(filename=image_filename)
        assert (img is not None) and (len(img.shape) == 3)
        # Keys are the ASCII filename; values are C-contiguous pixel buffers.
        txn.put(image_filename.encode('ascii'), img.copy(order='C'))
        shape[image_filename] = '{:d}_{:d}_{:d}'.format(img.shape[0], img.shape[1], img.shape[2])
        # FIX: commit every `commit_interval` images; the old `i % interval`
        # test also committed an empty transaction on the very first image.
        if (i + 1) % commit_interval == 0:
            txn.commit()
            txn = env.begin(write=True)
    pickle.dump(shape, open(os.path.join(save_path, 'meta_info.pkl'), 'wb'))
    txn.commit()
    env.close()
    print('Finish writing lmdb.')
def regression_suite(sebs_client: 'SeBS', experiment_config: dict, providers: Set[str], deployment_config: dict, benchmark_name: Optional[str]=None):
    """Assemble and concurrently run the regression tests for the requested providers.

    Args:
        sebs_client: client object attached to every selected test.
        experiment_config: experiment settings; runtime language/version are
            read from experiment_config['runtime'].
        providers: provider names to include ('aws', 'gcp', 'azure', 'openwhisk').
        deployment_config: stored into the module-global `cloud_config` for
            the test cases to read; must contain a key per selected provider.
        benchmark_name: if given, only tests whose name contains it run.

    Returns:
        True when at least one test failed (i.e. NOT all correct), False otherwise.
    """
    suite = unittest.TestSuite()
    global cloud_config
    cloud_config = deployment_config
    language = experiment_config['runtime']['language']
    language_version = experiment_config['runtime']['version']
    # FIX: the four copy-pasted per-provider if-blocks are collapsed into a
    # single table of (provider -> language -> TestCase class); iteration
    # order matches the original aws, gcp, azure, openwhisk sequence.
    provider_cases = {
        'aws': {'python': AWSTestSequencePython, 'nodejs': AWSTestSequenceNodejs},
        'gcp': {'python': GCPTestSequencePython, 'nodejs': GCPTestSequenceNodejs},
        'azure': {'python': AzureTestSequencePython, 'nodejs': AzureTestSequenceNodejs},
        'openwhisk': {'python': OpenWhiskTestSequencePython, 'nodejs': OpenWhiskTestSequenceNodejs},
    }
    for provider, cases in provider_cases.items():
        if provider not in providers:
            continue
        # Each selected provider must have deployment configuration.
        assert provider in cloud_config
        case = cases.get(language)
        if case is not None:
            suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(case))
    # Flatten the suite, filter unsupported/unselected benchmarks, and
    # attach the client and experiment config to each surviving test.
    tests = []
    for case in suite:
        for test in case:
            test_name = cast(unittest.TestCase, test)._testMethodName
            if not filter_out_benchmarks(test_name, test.deployment_name, language, language_version):
                print(f'Skip test {test_name} - not supported.')
                continue
            if (not benchmark_name) or (benchmark_name in test_name):
                test.client = sebs_client
                test.experiment_config = experiment_config
                tests.append(test)
            else:
                print(f'Skip test {test_name}')
    # Run everything concurrently and trace results.
    concurrent_suite = testtools.ConcurrentStreamTestSuite(lambda: ((test, None) for test in tests))
    result = TracingStreamResult()
    result.startTestRun()
    concurrent_suite.run(result)
    result.stopTestRun()
    # FIX: corrected the 'Succesfully' typo in the summary message.
    print(f'Successfully executed {len(result.success)} out of {len(tests)} functions')
    for suc in result.success:
        print(f'- {suc}')
    if len(result.failures):
        print(f'Failures when executing {len(result.failures)} out of {len(tests)} functions')
        for failure in result.failures:
            print(f'- {failure}')
    return (not result.all_correct)
def TranslateX(img, v, max_v, bias=0):
    """Translate `img` horizontally by a random-signed fraction of its width.

    The magnitude is derived from `v`/`max_v` via _float_parameter plus
    `bias`, its sign is flipped with probability 0.5, and the result is
    converted to whole pixels before the affine transform.
    """
    magnitude = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        magnitude = -magnitude
    shift = int(magnitude * img.size[0])
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, shift, 0, 1, 0))
class _ConvNd(Module):
    """Base class for N-dimensional convolution modules supporting complex weights.

    With ``complex_weights=True`` the weight (and bias) are single cfloat
    tensors; otherwise they are ParameterLists of [real, imag] float tensors.
    """

    __constants__ = ['stride', 'padding', 'dilation', 'groups', 'padding_mode', 'output_padding', 'in_channels', 'out_channels', 'kernel_size']
    __annotations__ = {'bias': Optional[torch.Tensor]}

    # NOTE(review): the annotation is `_in_channels` but __init__ assigns
    # `self.in_channels` — mirrors an upstream torch quirk; confirm intended.
    _in_channels: int
    out_channels: int
    kernel_size: Tuple[(int, ...)]
    stride: Tuple[(int, ...)]
    padding: Tuple[(int, ...)]
    dilation: Tuple[(int, ...)]
    transposed: bool
    output_padding: Tuple[(int, ...)]
    groups: int
    padding_mode: str
    complex_weights: bool
    weight: Union[(Tensor, Tuple[(Tensor, Tensor)])]
    bias: Optional[Union[(Tensor, Tuple[(Tensor, Tensor)])]]

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_1_t, stride: _size_1_t, padding: _size_1_t, dilation: _size_1_t, transposed: bool, output_padding: _size_1_t, groups: int, bias: bool, padding_mode: str, complex_weights=True) -> None:
        super(_ConvNd, self).__init__()
        # Grouped convolution requires both channel counts to split evenly.
        if ((in_channels % groups) != 0):
            raise ValueError('in_channels must be divisible by groups')
        if ((out_channels % groups) != 0):
            raise ValueError('out_channels must be divisible by groups')
        valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
        if (padding_mode not in valid_padding_modes):
            raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format(valid_padding_modes, padding_mode))
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        self.padding_mode = padding_mode
        self.complex_weights = complex_weights
        # Padding expanded to per-side form (as F.pad expects).
        self._reversed_padding_repeated_twice = _reverse_repeat_tuple(self.padding, 2)
        if complex_weights:
            # Single complex-valued weight tensor; transposed convs swap the
            # in/out channel axes.
            if transposed:
                self.weight = Parameter(torch.Tensor(in_channels, (out_channels // groups), *kernel_size).to(torch.cfloat))
            else:
                self.weight = Parameter(torch.Tensor(out_channels, (in_channels // groups), *kernel_size).to(torch.cfloat))
        else:
            # Separate real/imag float tensors kept in a ParameterList.
            if transposed:
                weight_real = Parameter(torch.Tensor(in_channels, (out_channels // groups), *kernel_size))
                weight_imag = Parameter(torch.Tensor(in_channels, (out_channels // groups), *kernel_size))
            else:
                weight_real = Parameter(torch.Tensor(out_channels, (in_channels // groups), *kernel_size))
                weight_imag = Parameter(torch.Tensor(out_channels, (in_channels // groups), *kernel_size))
            self.weight = ParameterList([weight_real, weight_imag])
        if bias:
            if complex_weights:
                self.bias = Parameter(torch.Tensor(out_channels).to(torch.cfloat))
            else:
                bias_real = Parameter(torch.Tensor(out_channels))
                bias_imag = Parameter(torch.Tensor(out_channels))
                self.bias = ParameterList([bias_real, bias_imag])
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def _reset_parameters(self, weight, bias) -> None:
        # Kaiming init for the weight; bias uniform in +-1/sqrt(fan_in).
        init.kaiming_uniform_(weight, a=math.sqrt(5))
        if (bias is not None):
            (fan_in, _) = init._calculate_fan_in_and_fan_out(weight)
            bound = (1 / math.sqrt(fan_in))
            init.uniform_(bias, (- bound), bound)

    def reset_parameters(self) -> None:
        # Split-real/imag storage initializes each component independently.
        if (type(self.weight) is ParameterList):
            self._reset_parameters(self.weight[0], (None if (self.bias is None) else self.bias[0]))
            self._reset_parameters(self.weight[1], (None if (self.bias is None) else self.bias[1]))
        else:
            self._reset_parameters(self.weight, self.bias)

    def extra_repr(self):
        # Only non-default settings are appended to the repr string.
        s = '{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
        if (self.padding != ((0,) * len(self.padding))):
            s += ', padding={padding}'
        if (self.dilation != ((1,) * len(self.dilation))):
            s += ', dilation={dilation}'
        if (self.output_padding != ((0,) * len(self.output_padding))):
            s += ', output_padding={output_padding}'
        if (self.groups != 1):
            s += ', groups={groups}'
        if (self.bias is None):
            s += ', bias=False'
        if (self.padding_mode != 'zeros'):
            s += ', padding_mode={padding_mode}'
        return s.format(**self.__dict__)

    def __setstate__(self, state):
        super(_ConvNd, self).__setstate__(state)
        # Backward compatibility for pickles created before padding_mode existed.
        if (not hasattr(self, 'padding_mode')):
            self.padding_mode = 'zeros'
class ConfigDictionary():
    """Attribute-style wrapper around a plain dict.

    Every key of the constructor mapping becomes an instance attribute; the
    original key set is remembered in ``_keys`` so that iteration,
    membership, length and equality consider only those keys.
    """

    def __init__(self, dictionary: dict=None):
        if dictionary is None:
            dictionary = dict()
        # Remember the configured keys before mirroring them as attributes.
        self._keys = set(dictionary)
        for key, value in dictionary.items():
            self.__setattr__(key, value)

    def __repr__(self):
        # Render the stored mapping, hiding the bookkeeping attribute.
        visible = self.__dict__.copy()
        del visible['_keys']
        return visible.__repr__()

    def copy(self):
        """Return an independent deep copy of this configuration."""
        return copy.deepcopy(self)

    def __getitem__(self, item):
        return self.__getattribute__(item)

    def __contains__(self, item) -> bool:
        return item in self._keys

    def __iter__(self) -> Iterable[str]:
        return iter(self._keys)

    def __len__(self) -> int:
        return len(self._keys)

    def items(self) -> Iterable[Tuple]:
        """Yield (key, value) pairs for the configured keys (lazy generator)."""
        return ((key, self[key]) for key in self)

    def __eq__(self, other):
        # Equal iff both sides expose the same key set with equal values.
        if len(self) != len(other):
            return False
        for key, value in other.items():
            if key not in self._keys or value != self[key]:
                return False
        return True
def test_quad_vec_pool():
    """quad_vec must accept both an integer `workers` count and a custom map callable."""
    from multiprocessing.dummy import Pool
    # Integer worker count: internal thread pool.
    f = _lorenzian
    (result, error) = quad_vec(f, (- np.inf), np.inf, norm='max', epsabs=0.0001, workers=4)
    assert_allclose(result, np.pi, rtol=0, atol=0.0001)
    # Caller-supplied map function from a dummy (thread) pool.
    with Pool(10) as pool:
        f = (lambda x: (1 / (1 + (x ** 2))))
        (result, error) = quad_vec(f, (- np.inf), np.inf, norm='max', epsabs=0.0001, workers=pool.map)
        assert_allclose(result, np.pi, rtol=0, atol=0.0001)
def figure4():
    """Generate the qualitative saliency montages (figures 4a-4f).

    Loads a LightCNN-29 v2 whitebox model, builds mate/nonmate/probe image
    triplets, and renders one montage per saliency method (none / EBP /
    contrastive EBP / truncated cEBP / weighted subtree), then a combined
    montage of the first subject of each.

    NOTE(review): relies on a hard-coded local checkpoint path and a cached
    pickle file — confirm both exist before running.
    """
    n_subjects = 10
    net = xfr.models.lightcnn.LightCNN_29Layers_v2(num_classes=80013)
    statedict = xfr.models.lightcnn.Load_Checkpoint('../models/LightCNN_29Layers_V2_checkpoint.pth.tar')
    net.load_state_dict(statedict)
    wb = xfr.models.whitebox.Whitebox(xfr.models.whitebox.WhiteboxLightCNN(net), ebp_subtree_mode='affineonly_with_prior', eps=1e-16, ebp_version=5)
    # Compute and cache the top-k nonmate statistics on first run only.
    if (not os.path.exists('_vggface2_topk_nonmates.pkl')):
        _vggface2_topk_nonmates(wb, topk=32)
    (matelist, nonmatelist, probelist) = _triplet_mate_frontalpose_nonmate_topk_probe_frontalpose()
    # Crop/normalize every image through the shared face detector.
    matelist = [f_detection(im).rgb() for im in matelist]
    nonmatelist = [f_detection(im).rgb() for im in nonmatelist]
    probelist = [[f_detection(im).rgb() for im in iml] for iml in probelist]
    # Keep a pristine copy: each montage call may mutate its probe list.
    probelist_clean = copy.deepcopy(probelist)
    # 4a: raw probes, no saliency overlay.
    probelist = copy.deepcopy(probelist_clean)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, ('figure4a_%d.jpg' % n_subjects), f_saliency=None)
    probelist_4a = copy.deepcopy(probelist)
    # 4b: excitation backprop saliency.
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = (lambda im: f_saliency_whitebox_ebp(wb, im))
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, ('figure4b_%d.jpg' % n_subjects), f_saliency=f_saliency)
    probelist_4b = copy.deepcopy(probelist)
    # 4c: contrastive EBP.
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = (lambda im: f_saliency_whitebox_cebp(wb, im))
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, ('figure4c_%d.jpg' % n_subjects), f_saliency=f_saliency)
    probelist_4c = copy.deepcopy(probelist)
    # 4d: truncated contrastive EBP.
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = (lambda im: f_saliency_whitebox_tcebp(wb, im))
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, ('figure4d_%d.jpg' % n_subjects), f_saliency=f_saliency)
    probelist_4d = copy.deepcopy(probelist)
    # 4e: weighted-subtree saliency.
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = (lambda im: f_saliency_whitebox_weighted_subtree(wb, im))
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, ('figure4e_%d.jpg' % n_subjects), f_saliency=f_saliency)
    probelist_4e = copy.deepcopy(probelist)
    # 4f: first subject of every method, side by side.
    probelist = copy.deepcopy(probelist_clean)
    matelist = ([matelist[0]] * n_subjects)
    probelist = (((([probelist_4a[0]] + [probelist_4b[0]]) + [probelist_4c[0]]) + [probelist_4d[0]]) + [probelist_4e[0]])
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, ('figure4f_%d.jpg' % n_subjects), f_saliency=None)
    print(('[eccv40.figure4]: Saving montage to "%s"' % f_montage))
class LegacyVersion(_BaseVersion):
    """Version object for strings that do not comply with PEP 440; ordering
    comes from the precomputed legacy comparison key."""

    def __init__(self, version):
        # Keep the raw string and the legacy sort key side by side.
        self._version = str(version)
        self._key = _legacy_cmpkey(self._version)

    def __str__(self):
        return self._version

    def __repr__(self):
        return '<LegacyVersion({0})>'.format(repr(str(self)))

    # NOTE(review): in upstream `packaging` these accessors are @property;
    # here they are plain methods — confirm callers invoke them with ().
    def public(self):
        # Legacy versions have no local segment, so public == full string.
        return self._version

    def base_version(self):
        return self._version

    def local(self):
        # Legacy versions never carry a local version segment.
        return None

    def is_prerelease(self):
        # Pre-/post-release semantics are undefined for legacy versions.
        return False

    def is_postrelease(self):
        return False
def generate_arch(task, net_type):
    """Randomly sample derived architectures for ``task`` until one falls
    under the parameter budget; return its architecture config string.

    NOTE(review): ``target_params = lower_than_target = False`` makes the
    budget test ``derived_params <= target_params + 1`` compare against
    False (== 0), i.e. a <= 1 MB budget — confirm this is intended.
    """
    update_cfg_from_cfg(search_cfg, cfg)
    # Per-task search config file and network input geometry (C, H, W).
    if (task == 'pde'):
        merge_cfg_from_file('configs/pde_search_cfg_resnet.yaml', cfg)
        input_shape = (3, 85, 85)
    elif (task == 'protein'):
        merge_cfg_from_file('configs/protein_search_cfg_resnet.yaml', cfg)
        input_shape = (57, 128, 128)
    elif (task == 'cosmic'):
        merge_cfg_from_file('configs/cosmic_search_cfg_resnet.yaml', cfg)
        input_shape = (1, 256, 256)
    else:
        raise NotImplementedError
    config = copy.deepcopy(cfg)
    pprint.pformat(config)
    # Resolve search space / arch generator / derived network classes by name.
    SearchSpace = importlib.import_module(('models.search_space_' + net_type)).Network
    ArchGenerater = importlib.import_module(('run_apis.derive_arch_' + net_type), __package__).ArchGenerate
    derivedNetwork = getattr(model_derived, ('%s_Net' % net_type.upper()))
    der_Net = (lambda net_config: derivedNetwork(net_config, task=task, config=config))
    target_params = lower_than_target = False
    while (not lower_than_target):
        # Sample a fresh supernet and derive one concrete architecture.
        config = copy.deepcopy(cfg)
        super_model = SearchSpace(config.optim.init_dim, task, config)
        arch_gener = ArchGenerater(super_model, config)
        (betas, head_alphas, stack_alphas) = super_model.display_arch_params()
        derived_arch = arch_gener.derive_archs(betas, head_alphas, stack_alphas)
        derived_arch_str = '|\n'.join(map(str, derived_arch))
        derived_model = der_Net(derived_arch_str)
        derived_flops = comp_multadds(derived_model, input_size=input_shape)
        derived_params = utils.count_parameters_in_MB(derived_model)
        if (derived_params <= (target_params + 1)):
            print('found arch!')
            lower_than_target = True
            print(('Derived Model Mult-Adds = %.2fMB' % derived_flops))
            print(('Derived Model Num Params = %.2fMB' % derived_params))
    print(derived_arch_str)
    return derived_arch_str
class ProjectiveConic_finite_field(ProjectiveConic_field, ProjectivePlaneCurve_finite_field):
    """Plane projective conic over a finite field."""

    def __init__(self, A, f):
        ProjectiveConic_field.__init__(self, A, f)

    def count_points(self, n):
        # A conic over GF(q) has q^i + 1 rational points over GF(q^i)
        # (it is isomorphic to P^1), for i = 1..n.
        F = self.base_ring()
        q = F.cardinality()
        return [((q ** i) + 1) for i in range(1, (n + 1))]

    def has_rational_point(self, point=False, read_cache=True, algorithm='default'):
        """Return True (always, over a finite field); with ``point=True``
        also return a rational point.

        Searches random x-coordinates, so it terminates with probability 1;
        a cached or singular point is used first when available.
        """
        if (not point):
            return True
        if read_cache:
            if (self._rational_point is not None):
                return (True, self._rational_point)
        B = self.base_ring()
        # A singular point is automatically rational — use it if one exists.
        (s, pt) = self.has_singular_point(point=True)
        if s:
            return (True, pt)
        while True:
            # Try random x until the conic, dehomogenized at z = 1, has a
            # root in the remaining variable Y.
            x = B.random_element()
            Y = PolynomialRing(B, 'Y').gen()
            r = self.defining_polynomial()([x, Y, 1]).roots()
            if r:
                return (True, self.point([x, r[0][0], B(1)]))
def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None):
    """Split template source ``s`` into chunks: plain text as bare strings,
    expression bodies as (text, position) tuples.

    Raises TemplateError for nested, unopened, or unclosed delimiters.
    """
    if (delimeters is None):
        delimeters = (Template.default_namespace['start_braces'], Template.default_namespace['end_braces'])
    in_expr = False
    chunks = []
    last = 0
    last_pos = ((line_offset + 1), 1)
    # One regex matching either the opening or the closing delimiter.
    token_re = re.compile(('%s|%s' % (re.escape(delimeters[0]), re.escape(delimeters[1]))))
    for match in token_re.finditer(s):
        expr = match.group(0)
        pos = find_position(s, match.end(), last, last_pos)
        if ((expr == delimeters[0]) and in_expr):
            raise TemplateError(('%s inside expression' % delimeters[0]), position=pos, name=name)
        elif ((expr == delimeters[1]) and (not in_expr)):
            raise TemplateError(('%s outside expression' % delimeters[1]), position=pos, name=name)
        if (expr == delimeters[0]):
            # Text before an opening delimiter becomes a plain chunk.
            part = s[last:match.start()]
            if part:
                chunks.append(part)
            in_expr = True
        else:
            # Closing delimiter: emit the expression with its start position.
            chunks.append((s[last:match.start()], last_pos))
            in_expr = False
        last = match.end()
        last_pos = pos
    if in_expr:
        raise TemplateError(('No %s to finish last expression' % delimeters[1]), name=name, position=last_pos)
    # Trailing literal text after the final delimiter.
    part = s[last:]
    if part:
        chunks.append(part)
    if trim_whitespace:
        chunks = trim_lex(chunks)
    return chunks
def load_config(save_config=True):
    """Parse gin flags into a Config; on host 0 also snapshot the merged gin
    config into the checkpoint directory (``config.gin`` on a fresh run,
    ``config_ft.gin`` when the directory already exists, i.e. on resume)."""
    gin.parse_config_files_and_bindings(flags.FLAGS.gin_configs, flags.FLAGS.gin_bindings, skip_unknown=True)
    config = Config()
    if (save_config and (jax.host_id() == 0)):
        if (not utils.isdir(config.checkpoint_dir)):
            # Fresh run: create the directory and write the base config.
            os.makedirs(config.checkpoint_dir)
            with open((config.checkpoint_dir + '/config.gin'), 'w') as f:
                f.write(gin.config_str())
        else:
            # Resuming: keep the original config.gin, write a separate copy.
            print('Resume training from {}.'.format(config.checkpoint_dir))
            with open((config.checkpoint_dir + '/config_ft.gin'), 'w') as f:
                f.write(gin.config_str())
    return config
def get_dynamic_defaults():
    """Fill in flag defaults that depend on other flags: derive the logdir
    from the dataset name when the user did not set one explicitly."""
    if (FLAGS.logdir is None):
        new_logdir = f'./runs/{FLAGS.dataset}'
        log.info(f'No logdir set, using default of {new_logdir}')
        FLAGS.logdir = new_logdir
# NOTE(review): the bare `_utils.test()` below appears to be a truncated
# decorator (likely `@test_utils.test()`) damaged in extraction — restore
# upstream before running.
_utils.test()
def test_pass_struct_mismatch():
    """Passing a struct value whose field types do not match the annotated
    struct type must raise TaichiRuntimeTypeError with a descriptive message."""
    sphere_type = ti.types.struct(center=ti.math.vec3, radius=float)
    circle_type = ti.types.struct(center=ti.math.vec2, radius=float)

    def foo(sphere: sphere_type):
        pass
    # A vec2 center cannot be converted to the expected vec3 field.
    with pytest.raises(ti.TaichiRuntimeTypeError, match="Argument <class 'taichi.lang.struct.Struct.* cannot be converted into required type <ti.StructType center=<taichi.lang.matrix.VectorType object at .*>, radius=f32, struct_methods={}>") as e:
        foo(circle_type(center=ti.math.vec2([1, 2]), radius=2.33))
def test_method_token_segments_pretrained_tokenizer_fast():
    """Text masker must split 'I ate a Cannoli' into word / word-piece
    segments via a fast tokenizer, rendering special tokens as ''."""
    AutoTokenizer = pytest.importorskip('transformers').AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased', use_fast=True)
    masker = shap.maskers.Text(tokenizer)
    test_text = 'I ate a Cannoli'
    (output_token_segments, _) = masker.token_segments(test_text)
    # [CLS]/[SEP] map to ''; 'Cannoli' splits into the pieces Can/no/li.
    correct_token_segments = ['', 'I ', 'ate ', 'a ', 'Can', 'no', 'li', '']
    assert (output_token_segments == correct_token_segments)
class UDADecorator(BaseSegmentor):
    """Base wrapper for unsupervised-domain-adaptation training.

    Builds the wrapped segmentor from ``cfg['model']`` and forwards every
    segmentor API call to it unchanged; UDA subclasses are expected to
    override ``forward_train``.
    """

    def __init__(self, **cfg):
        super(BaseSegmentor, self).__init__()
        self.model = build_segmentor(deepcopy(cfg['model']))
        self.train_cfg = cfg['model']['train_cfg']
        self.test_cfg = cfg['model']['test_cfg']
        self.num_classes = cfg['model']['decode_head']['num_classes']

    def get_model(self):
        # Unwrap (Distributed)DataParallel-style containers if present.
        return get_module(self.model)

    def extract_feat(self, img):
        return self.get_model().extract_feat(img)

    def encode_decode(self, img, img_metas):
        return self.get_model().encode_decode(img, img_metas)

    def forward_train(self, img, img_metas, gt_semantic_seg, target_img, target_img_metas, return_feat=False):
        # NOTE(review): target_img / target_img_metas are accepted but unused
        # here — presumably consumed by UDA subclasses; confirm.
        losses = self.get_model().forward_train(img, img_metas, gt_semantic_seg, return_feat=return_feat)
        return losses

    def inference(self, img, img_meta, rescale):
        return self.get_model().inference(img, img_meta, rescale)

    def simple_test(self, img, img_meta, rescale=True):
        return self.get_model().simple_test(img, img_meta, rescale)

    def aug_test(self, imgs, img_metas, rescale=True):
        return self.get_model().aug_test(imgs, img_metas, rescale)
class SAN(nn.Module):
    """Structured self-attentive sentence classifier: embedding -> BiLSTM ->
    r-headed self-attention -> flattened sentence matrix -> MLP scores.

    The forward pipeline was previously duplicated verbatim in ``forward``
    and ``_get_attention_weight``; it is now shared via a single helper.
    """

    def __init__(self, num_of_dim, vocab_size, embedding_size, r, lstm_hidden_dim=128, da=128, hidden_dim=256) -> None:
        super(SAN, self).__init__()
        self._embedding = nn.Embedding(vocab_size, embedding_size)
        self._bilstm = nn.LSTM(embedding_size, lstm_hidden_dim, batch_first=True, bidirectional=True)
        # Attention over the 2*hidden BiLSTM outputs, producing r heads.
        self._attention = SelfAttention((2 * lstm_hidden_dim), da, r)
        self._classifier = nn.Sequential(
            nn.Linear(((2 * lstm_hidden_dim) * r), hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, num_of_dim))

    def _score_and_attention(self, x):
        """Shared forward pass; returns (class scores, attention matrix)."""
        fmap = self._embedding(x)
        (outputs, hc) = self._bilstm(fmap)
        attn_mat = self._attention(outputs)
        # Sentence embedding matrix M = A @ H, flattened for the classifier.
        m = torch.bmm(attn_mat, outputs)
        flatten = m.view(m.size()[0], (- 1))
        score = self._classifier(flatten)
        return (score, attn_mat)

    def forward(self, x: torch.Tensor):
        (score, _) = self._score_and_attention(x)
        return score

    def _get_attention_weight(self, x):
        # Kept for callers that also need the attention map.
        return self._score_and_attention(x)
# NOTE(review): the call below appears to be a decorator whose leading '@'
# was lost in extraction (numpy's __array_function__ dispatcher pattern) —
# restore `@_function_from_c_func_and_dispatcher(...)` upstream.
_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
def unpackbits(a, axis=None, count=None, bitorder='big'):
    """Dispatcher for np.unpackbits: return only the relevant array argument
    for __array_function__ dispatch; the real work happens in the C
    implementation bound by the decorator."""
    return (a,)
class TransfoXLForSequenceClassification():
    """Import-time placeholder used when PyTorch is not installed: any use
    raises an informative error via ``requires_pytorch``."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): upstream dummy objects usually declare from_pretrained
    # as a classmethod; here it is instance-level — confirm intended.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def filter_desc_df_lm(desc):
    """Select the language-model summary columns from a describe-style
    DataFrame whose columns are a (metric, statistic) MultiIndex.

    Keeps mean/max/min/std for 'ppl' and 'total_time', in that order.
    """
    stats = ['mean', 'max', 'min', 'std']
    wanted = [(metric, stat) for metric in ['ppl', 'total_time'] for stat in stats]
    return desc[wanted]
# NOTE(review): the leading `.skip(...)` is a truncated decorator (likely
# `@pytest.mark.skip(...)`) damaged in extraction — restore upstream.
.skip(reason='This test is covered by the Xilinx tests.')
def test_hardware_axpy_double_pump(veclen=2):
    """Build and run the double-pumped AXPY RTL sample with a 300/600 MHz
    clock pair and compare the result against a*x + y."""
    with dace.config.set_temporary('compiler', 'xilinx', 'frequency', value='"0:300\\|1:600"'):
        # Import the sample SDFG builder directly from its file path.
        spec = importlib.util.spec_from_file_location('axpy', ((((Path(__file__).parent.parent.parent / 'samples') / 'fpga') / 'rtl') / 'axpy_double_pump.py'))
        axpy = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(axpy)
        N = dace.symbol('N')
        N.set(32)
        a = np.random.rand(1)[0].astype(np.float32)
        x = np.random.rand(N.get()).astype(np.float32)
        y = np.random.rand(N.get()).astype(np.float32)
        result = np.zeros((N.get(),)).astype(np.float32)
        sdfg = axpy.make_sdfg(veclen)
        sdfg(a=a, x=x, y=y, result=result, N=N)
        expected = ((a * x) + y)
        # Normalized residual against the reference computation.
        diff = (np.linalg.norm((expected - result)) / N.get())
        assert (diff <= 1e-05)
        return sdfg
def register_Ns3MeasurementReportHeader_methods(root_module, cls):
    """Auto-generated pybindgen registration for ns3::MeasurementReportHeader:
    copy/default constructors plus (de)serialization, print and message
    accessor methods. Do not hand-edit; regenerate from the ns-3 API scan."""
    cls.add_constructor([param('ns3::MeasurementReportHeader const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'bIterator')], is_virtual=True)
    cls.add_method('GetMessage', 'ns3::LteRrcSap::MeasurementReport', [], is_const=True)
    cls.add_method('PreSerialize', 'void', [], is_const=True, is_virtual=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('SetMessage', 'void', [param('ns3::LteRrcSap::MeasurementReport', 'msg')])
    return
class SExtInst(ConversionInst):
    """Sign-extension instruction: operand and result are both integers and
    the result must be at least as wide as the operand."""
    code = 'sext'

    def type_constraints(self, tcs):
        # Both sides must be integer types.
        tcs.integer(self)
        tcs.integer(self.arg)
        # Pin result/operand to the explicitly requested types...
        tcs.specific(self, self.ty)
        tcs.specific(self.arg, self.src_ty)
        # ...and require width(arg) <= width(result) for a valid sext.
        tcs.width_order(self.arg, self)
def _v1_compatible_scope_naming(scope):
    """Generator reproducing TF1-style scope naming (presumably wrapped by
    @contextlib.contextmanager at the call/definition site — confirm).

    With no scope, yields '' inside a default 'separable' variable scope;
    with an explicit scope, yields 'scope_' to be used as a name prefix.
    """
    if (scope is None):
        with tf.variable_scope(None, default_name='separable') as s, tf.name_scope(s.original_name_scope):
            (yield '')
    else:
        scope += '_'
        (yield scope)
class CheckpointConfig(FairseqDataclass):
    """Checkpoint save/load options (mirrors the legacy --save-dir /
    --restore-file / ... CLI flags)."""
    # --- locations ---
    save_dir: str = field(default='checkpoints', metadata={'help': 'path to save checkpoints'})
    restore_file: str = field(default='checkpoint_last.pt', metadata={'help': 'filename from which to load checkpoint (default: <save-dir>/checkpoint_last.pt'})
    finetune_from_model: Optional[str] = field(default=None, metadata={'help': 'finetune from a pretrained model; note that meters and lr scheduler will be reset'})
    # --- what state to reset when restoring ---
    reset_dataloader: bool = field(default=False, metadata={'help': 'if set, does not reload dataloader state from the checkpoint'})
    reset_lr_scheduler: bool = field(default=False, metadata={'help': 'if set, does not load lr scheduler state from the checkpoint'})
    reset_meters: bool = field(default=False, metadata={'help': 'if set, does not load meters from the checkpoint'})
    reset_optimizer: bool = field(default=False, metadata={'help': 'if set, does not load optimizer state from the checkpoint'})
    optimizer_overrides: str = field(default='{}', metadata={'help': 'a dictionary used to override optimizer args when loading a checkpoint'})
    # --- save cadence and retention ---
    save_interval: int = field(default=1, metadata={'help': 'save a checkpoint every N epochs'})
    save_interval_updates: int = field(default=0, metadata={'help': 'save a checkpoint (and validate) every N updates'})
    keep_interval_updates: int = field(default=(- 1), metadata={'help': 'keep the last N checkpoints saved with --save-interval-updates'})
    keep_last_epochs: int = field(default=(- 1), metadata={'help': 'keep last N epoch checkpoints'})
    keep_best_checkpoints: int = field(default=(- 1), metadata={'help': 'keep best N checkpoints based on scores'})
    # --- disabling particular checkpoint kinds ---
    no_save: bool = field(default=False, metadata={'help': "don't save models or checkpoints"})
    no_epoch_checkpoints: bool = field(default=False, metadata={'help': 'only store last and best checkpoints'})
    no_last_checkpoints: bool = field(default=False, metadata={'help': "don't store last checkpoints"})
    no_save_optimizer_state: bool = field(default=False, metadata={'help': "don't save optimizer-state as part of checkpoint"})
    # --- "best" checkpoint selection and early stopping ---
    best_checkpoint_metric: str = field(default='loss', metadata={'help': 'metric to use for saving "best" checkpoints'})
    maximize_best_checkpoint_metric: bool = field(default=False, metadata={'help': 'select the largest metric value for saving "best" checkpoints'})
    patience: int = field(default=(- 1), metadata={'help': "early stop training if valid performance doesn't improve for N consecutive validation runs; note that this is influenced by --validate-interval"})
    # --- sharding / distributed ---
    checkpoint_suffix: str = field(default='', metadata={'help': 'suffix to add to the checkpoint file name'})
    checkpoint_shard_count: int = field(default=1, metadata={'help': 'Number of shards containing the checkpoint - if the checkpoint is over 300GB, it is preferable to split it into shards to prevent OOM on CPU while loading the checkpoint'})
    load_checkpoint_on_all_dp_ranks: bool = field(default=False, metadata={'help': 'load checkpoints on all data parallel devices (default: only load on rank 0 and broadcast to other devices)'})
    # Interpolated from other config sections at resolution time.
    model_parallel_size: int = II('common.model_parallel_size')
    distributed_rank: int = II('distributed_training.distributed_rank')
class CrystalOfAlcovePathsElement(ElementWrapper):
    """Element of an alcove-path crystal, stored as a sorted tuple of folding
    positions (``self.value``).

    Fixes over the extracted original: ``_sign`` was missing its ``self``
    parameter (every call site uses ``self._sign(Beta)``), and two stray
    decorator remnants (``_in_parent_method`` / ``_method``) that broke the
    syntax were converted into review notes below.
    """

    def __iter__(self):
        return iter(self.value)

    def is_admissible(self):
        """Check the defining chain condition: each folding step must be a
        (quantum) Bruhat successor of the previous Weyl group element."""
        W = WeylGroup(self.parent()._R._cartan_type, prefix='s')
        s = W.simple_reflections()
        highest_weight_crystal = self.parent()._highest_weight_crystal
        if highest_weight_crystal:
            successors = 'bruhat_upper_covers'
        else:
            successors = 'quantum_bruhat_successors'
        w = W.one()
        for i in self:
            t = prod([s[j] for j in i.root.associated_reflection()])
            successor = (w * t)
            if (successor not in getattr(w, successors)()):
                return False
            w = successor
        return True

    def _latex_(self):
        return [(latex(i.root), i.height) for i in self.value]

    # NOTE(review): a decorator appears to have been lost in extraction here
    # (remnant token `_in_parent_method`) — restore upstream if needed.
    def integer_sequence(self):
        """Positions of this element's foldings within the lambda chain."""
        lambda_chain = self.parent()._R.lambda_chain()
        return [lambda_chain.index(j) for j in self.value]

    def phi(self, i):
        highest_weight_crystal = self.parent()._highest_weight_crystal
        (positions, gi) = self._gi(i)
        m = max(gi)
        # phi at i == 0 is only implemented in the highest-weight case.
        if ((not highest_weight_crystal) and (i == 0)):
            raise NotImplementedError
        M = ((Integer(m) / 2) - (Integer(1) / 2))
        return M

    def epsilon(self, i):
        # Count how many times e_i applies before reaching None.
        j = 0
        temp = self
        temp = temp.e(i)
        while (temp is not None):
            j += 1
            temp = temp.e(i)
        return j

    def weight(self):
        """Weight of the element in the weight lattice realization."""
        # NOTE(review): uses `self.parent().R` while the rest of the class
        # uses `self.parent()._R` — confirm which attribute exists.
        root_space = self.parent().R.root_space()
        weight = (- self.parent().weight)
        for i in self.value[::(- 1)]:
            root = root_space(i.root)
            weight = (((- i.height) * root) + weight.reflection(root))
        WLR = self.parent().weight_lattice_realization()
        if (self.cartan_type().is_affine() and self.parent()._highest_weight_crystal):
            wt = WLR._from_dict({i: Integer(c) for (i, c) in (- weight)}, remove_zeros=False)
            return wt
        La = WLR.fundamental_weights()
        wt = WLR.sum(((Integer(c) * La[i]) for (i, c) in (- weight)))
        if self.cartan_type().is_affine():
            assert (not self.parent()._highest_weight_crystal)
            wt -= (La[0] * wt.level())
        return wt

    def plot(self):
        """Plot the alcove walk with this element's foldings highlighted."""
        ct = self.parent()._R._cartan_type.dual()
        word = self.parent()._R.word()
        integer_sequence = self.integer_sequence()
        foldings = [False for i in word]
        for i in integer_sequence:
            foldings[i] = True
        affine_ambient_space = RootSystem(ct.affine()).ambient_space()
        return (affine_ambient_space.plot() + affine_ambient_space.plot_alcove_walk(word, foldings=foldings, labels=False))

    def _richcmp_(self, other, op):
        return richcmp(self.value, other.value, op)

    def __hash__(self):
        return hash(self.value)

    def _folding_data(self, i):
        """Signs of the i-root hyperplane crossings along the folded path;
        keyed by lambda-chain element, plus an 'infinity' sentinel."""
        Parent = self.parent()
        finite_cartan_type = Parent._finite_cartan_type
        J = list(self.value)
        R = Parent._R
        weight = Parent.weight
        signs = {}
        if (finite_cartan_type and (i == 0)):
            Beta = R._root_lattice.highest_root()
        elif (i in self.index_set()):
            Beta = R._root_lattice.simple_root(i)
        max_height_Beta = weight.scalar(Beta.associated_coroot())
        if (not J):
            for k in range(max_height_Beta):
                x = R(Beta, k)
                signs[x] = self._sign(Beta)
            signs['infinity'] = self._sign(Beta)
        else:
            for k in range(max_height_Beta):
                x = R(Beta, k)
                if (x <= J[0]):
                    signs[x] = self._sign(Beta)
            for j in range(len(J)):
                # Reflect Beta through each folding root in turn.
                Beta = Beta.reflection(J[j].root)
                sign_Beta = self._sign(Beta)
                max_height_Beta = weight.scalar((sign_Beta * Beta).associated_coroot())
                c1 = (J[j]._cmp_v[0] * max_height_Beta)
                if (j == (len(J) - 1)):
                    c2 = max_height_Beta
                else:
                    c2 = min(max_height_Beta, ((J[(j + 1)]._cmp_v[0] * max_height_Beta) + 1))
                for k in range(int(c1), int(c2)):
                    x = R((sign_Beta * Beta), k)
                    if (((j < (len(J) - 1)) and (J[j] < x <= J[(j + 1)])) or ((j == (len(J) - 1)) and (J[j] < x))):
                        signs[x] = sign_Beta
            signs['infinity'] = sign_Beta
        if (finite_cartan_type and (i == 0)):
            signs = {x: (- signs[x]) for x in signs}
        return signs

    def e(self, i):
        """Crystal raising operator; returns None when e_i is undefined."""
        Parent = self.parent()
        finite_cartan_type = Parent._finite_cartan_type
        J = list(self.value)
        (positions, gi) = self._gi(i)
        m = max(gi)
        # Last index attaining the maximum of gi.
        m_index = ((len(gi) - 1) - list(reversed(gi)).index(m))
        if (finite_cartan_type and (i == 0)):
            M = ((Integer(m) / 2) + (Integer(1) / 2))
        else:
            M = ((Integer(m) / 2) - (Integer(1) / 2))
        KR_test = (finite_cartan_type and (i == 0) and (m_index < (len(gi) - 1)))
        KR_test = (KR_test and (M >= 1))
        if ((((not finite_cartan_type) or (i != 0)) and (m_index < (len(gi) - 1))) or KR_test):
            J.remove(positions[m_index])
            if ((m_index + 1) < len(positions)):
                J.append(positions[(m_index + 1)])
            return_value = Parent(tuple(sorted(J)))
            try:
                return_value.i_string = (self.i_string + [['e', i]])
            except AttributeError:
                return_value.i_string = [['e', i]]
            return return_value
        else:
            return None

    # NOTE(review): a decorator appears to have been lost in extraction here
    # (remnant token `_method`) — restore upstream if needed.
    def _gi(self, i):
        """Sorted folding positions for i, and the running sign sums gi."""
        signs = self._folding_data(i)
        positions = sorted((x for x in signs if (x != 'infinity')))
        if (not positions):
            return (positions, [signs['infinity']])
        gi = [signs[positions[0]]]
        for j in range(1, len(positions)):
            gi.append(((gi[(j - 1)] + (signs[positions[(j - 1)]] * self._eps(positions[(j - 1)]))) + signs[positions[j]]))
        gi.append(((gi[(- 1)] + (signs[positions[(- 1)]] * self._eps(positions[(- 1)]))) + signs['infinity']))
        return (positions, gi)

    def f(self, i):
        """Crystal lowering operator; returns None when f_i is undefined."""
        Parent = self.parent()
        finite_cartan_type = Parent._finite_cartan_type
        J = list(self.value)
        (positions, gi) = self._gi(i)
        m = max(gi)
        # First index attaining the maximum of gi.
        m_index = gi.index(m)
        if (finite_cartan_type and (i == 0)):
            M = ((Integer(m) / 2) + (Integer(1) / 2))
        else:
            M = ((Integer(m) / 2) - (Integer(1) / 2))
        KR_test = (finite_cartan_type and (i == 0))
        KR_test = (KR_test and (M > 1))
        if ((((not finite_cartan_type) or (i != 0)) and (M > 0)) or KR_test):
            J.append(positions[(m_index - 1)])
            if (m_index < len(positions)):
                J.remove(positions[m_index])
            return_value = Parent(tuple(sorted(J)))
            try:
                return_value.i_string = (self.i_string + [['f', i]])
            except AttributeError:
                return_value.i_string = [['f', i]]
            return return_value
        else:
            return None

    def _sign(self, root):
        # BUGFIX: `self` was missing; every call site is `self._sign(Beta)`.
        if root.is_positive_root():
            return 1
        else:
            return (- 1)

    def _eps(self, root):
        # -1 when `root` is one of this element's foldings, else +1.
        if (root in self.value):
            return (- 1)
        else:
            return 1

    def path(self):
        """The sequence of Weyl group elements visited by the folded walk."""
        W = WeylGroup(self.parent()._R._cartan_type, prefix='s')
        s = W.simple_reflections()
        w = W.one()
        ret = [w]
        for i in self:
            ret.append((ret[(- 1)] * prod((s[j] for j in i.root.associated_reflection()))))
        return ret
class AudioArrayClip(AudioClip):
    """An audio clip backed by a numpy array of samples.

    Args:
        array: sample array, one row per frame; shape (n,) for mono or
            (n, nchannels) for multichannel audio.
        fps: frames (samples) per second.
    """

    def __init__(self, array, fps):
        Clip.__init__(self)
        self.array = array
        self.fps = fps
        self.duration = ((1.0 * len(array)) / fps)

        def make_frame(t):
            """Return the frame(s) at time(s) ``t``; zeros outside the clip."""
            if isinstance(t, np.ndarray):
                array_inds = (self.fps * t).astype(int)
                # BUGFIX: use >= 0 so the very first sample (index 0) is
                # included, matching the scalar branch's `i < 0` check below.
                in_array = ((array_inds >= 0) & (array_inds < len(self.array)))
                # NOTE(review): the zero buffer assumes 2 channels — confirm
                # behavior for mono (n,)-shaped arrays.
                result = np.zeros((len(t), 2))
                result[in_array] = self.array[array_inds[in_array]]
                return result
            else:
                i = int((self.fps * t))
                if ((i < 0) or (i >= len(self.array))):
                    # Out of range: silence with the same channel layout.
                    return (0 * self.array[0])
                else:
                    return self.array[i]
        self.make_frame = make_frame
        # Channel count inferred from the first rendered frame.
        self.nchannels = len(list(self.get_frame(0)))
def trace(func, example_inputs, optimize=None, check_trace=True, check_inputs=None, check_tolerance=1e-05, strict=True, _force_outplace=False, _module_class=None, _compilation_unit=_python_cu):
    """Trace ``func`` with ``example_inputs`` and return a compiled artifact.

    nn.Module inputs (and bound ``module.forward`` methods) are delegated to
    trace_module; already-scripted modules pass through unchanged. Optionally
    re-runs the trace on check inputs to detect data-dependent divergence.

    Raises AttributeError for bound module methods other than ``forward``.
    """
    if (not _enabled):
        return func
    if (optimize is not None):
        warnings.warn('`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead')
    # Already-compiled modules pass through untouched.
    if isinstance(func, torch.jit.ScriptModule):
        warnings.warn('The input to trace is already a ScriptModule, tracing it is a no-op. Returning the object as is.')
        return func
    # Whole nn.Module: trace its forward via trace_module.
    if isinstance(func, torch.nn.Module):
        return trace_module(func, {'forward': example_inputs}, None, check_trace, wrap_check_inputs(check_inputs), check_tolerance, strict, _force_outplace, _module_class)
    # Bound `module.forward`: trace the owning module instead.
    if (hasattr(func, '__self__') and isinstance(func.__self__, torch.nn.Module) and (func.__name__ == 'forward')):
        return trace_module(func.__self__, {'forward': example_inputs}, None, check_trace, wrap_check_inputs(check_inputs), check_tolerance, strict, _force_outplace, _module_class)
    # Normalize example inputs to a tuple (single Tensor/dict is wrapped).
    if isinstance(example_inputs, (torch.Tensor, dict)):
        example_inputs = (example_inputs,)
    elif (not isinstance(example_inputs, tuple)):
        example_inputs = tuple(example_inputs)
    var_lookup_fn = _create_interpreter_name_lookup_fn(0)
    # Any other bound module method is unsupported.
    if (hasattr(func, '__self__') and isinstance(func.__self__, torch.nn.Module)):
        raise AttributeError("trace doesn't support compiling individual module's functions.\nPlease use trace_module")
    name = _qualified_name(func)
    traced = torch._C._create_function_from_trace(name, func, example_inputs, var_lookup_fn, strict, _force_outplace)
    # Verify the traced graph generalizes, on check_inputs when provided,
    # else by re-running on the original example inputs.
    if check_trace:
        if (check_inputs is not None):
            _check_trace(check_inputs, func, traced, check_tolerance, strict, _force_outplace, False, _module_class)
        else:
            _check_trace([example_inputs], func, traced, check_tolerance, strict, _force_outplace, False, _module_class)
    return traced
def test_check_increasing_up_extreme():
    """A strictly increasing sequence must be reported as increasing, and no
    UserWarning (e.g. about the trend confidence) may be emitted."""
    samples = list(range(6))
    targets = list(range(6))
    with warnings.catch_warnings():
        # Escalate UserWarning to an error so a spurious warning fails here.
        warnings.simplefilter('error', UserWarning)
        assert check_increasing(samples, targets)
class NllbTokenizer(metaclass=DummyObject):
    """Import-time placeholder: the real NllbTokenizer needs the
    sentencepiece backend; any use raises a helpful error instead."""
    # Backends that must be installed for the real class to be importable.
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])
class NiftiSegmentationLabelList(NiftiImageList):
    """Label list for NIfTI segmentation targets; tracks the class
    vocabulary and rebuilds NiftiImage objects from raw tensors."""
    _processor = SegmentationProcessor

    def __init__(self, items: Iterator, classes: Collection=None, **kwargs):
        super().__init__(items, **kwargs)
        # Propagate `classes` through fastai's new()/copy machinery.
        self.copy_new.append('classes')
        (self.classes, self.loss_func) = (classes, None)

    def reconstruct(self, t: Tensor):
        # Wrap the raw tensor back into an ANTs image + NiftiImage pair.
        obj = ants.from_numpy(t.numpy())
        path = self.path
        return NiftiImage(t, obj, path)
class SliceParameter(message.Message):
    """Generated protocol-buffer message for SliceParameter; the concrete
    fields come entirely from the bound DESCRIPTOR. Do not hand-edit."""
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SLICEPARAMETER
class ProtocolWrapper(gym.Wrapper):
    """Env wrapper that applies an evaluation protocol's interventions at
    episode start and after every step, retrying rejected interventions
    with freshly sampled ones up to 5 times."""

    def __init__(self, env, protocol):
        super(ProtocolWrapper, self).__init__(env)
        self.protocol = protocol
        self.env.add_wrapper_info({'evaluation_environment': self.protocol.get_name()})
        self._elapsed_episodes = 0
        self._elapsed_timesteps = 0

    def step(self, action):
        """Step the env, then apply the protocol intervention (if any) for
        the current (episode, timestep) pair."""
        (observation, reward, done, info) = self.env.step(action)
        self._elapsed_timesteps += 1
        invalid_interventions = 0
        # BUGFIX: pass the step counter as `timestep` (the original passed
        # self._elapsed_episodes for both arguments).
        interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_timesteps)
        if (interventions_dict is not None):
            (success_signal, observation) = self.env.do_intervention(interventions_dict=interventions_dict)
            while ((not success_signal) and (invalid_interventions < 5)):
                invalid_interventions += 1
                interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_timesteps)
                if (interventions_dict is not None):
                    (success_signal, observation) = self.env.do_intervention(interventions_dict=interventions_dict)
                else:
                    break
        return (observation, reward, done, info)

    def reset(self):
        """Reset the env and apply the protocol's episode-start intervention
        (timestep 0), with the same bounded retry loop."""
        self._elapsed_episodes += 1
        self._elapsed_timesteps = 0
        invalid_interventions = 0
        observation = self.env.reset()
        interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)
        if (interventions_dict is not None):
            (success_signal, observation) = self.env.do_intervention(interventions_dict)
            while ((not success_signal) and (invalid_interventions < 5)):
                invalid_interventions += 1
                interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0)
                if (interventions_dict is not None):
                    (success_signal, observation) = self.env.do_intervention(interventions_dict)
                else:
                    break
        return observation
def test_merge_indexed_categorical():
    """ak.concatenate must refuse to merge categorical arrays (currently
    unimplemented), even when one categorical sits inside a UnionArray."""
    # Categorical indexed record array; the extra 'ignore'/'drop' parameters
    # exercise parameter handling during the attempted merge.
    records = ak.contents.IndexedArray(
        ak.index.Index64([0, 2, 3]),
        ak.contents.RecordArray(
            [ak.contents.NumpyArray(np.array([4.0, 3.0, 1.0, 9.0, 8.0, 7.0], dtype=np.int64))],
            ['x'],
            parameters={'inner': 'bar', 'drop': 'this'}),
        parameters={'outer': 'foo', 'ignore': 'me', '__array__': 'categorical'})
    # Union of a plain numeric array and another categorical record array.
    union = ak.contents.UnionArray(
        ak.index.Index8([0, 0, 0, 1, 1, 1]),
        ak.index.Index64([0, 1, 2, 0, 1, 2]),
        [ak.contents.NumpyArray(np.arange(10, dtype=np.int64)),
         ak.contents.IndexedArray(
             ak.index.Index64([0, 1, 2]),
             ak.contents.RecordArray(
                 [ak.contents.NumpyArray(np.array([4.0, 3.0, 1.0], dtype=np.int64))],
                 ['x'],
                 parameters={'inner': 'bar'}),
             parameters={'outer': 'foo', '__array__': 'categorical'})])
    with pytest.raises(NotImplementedError, match='merging categorical arrays is currently not implemented'):
        ak.concatenate((union, records), highlevel=False)
def downsample_with_max_pooling(array):
    """Downsample ``array`` by a factor of 2 along every axis via max pooling.

    Generalized from the original hard-coded 3-D factor ``(2, 2, 2)`` to any
    dimensionality; 3-D inputs behave exactly as before. Axes are assumed to
    have even length (the strided sections must all share one shape).

    Args:
        array: numpy array with even-length axes.

    Returns:
        A new array of shape ``array.shape // 2`` holding per-window maxima.
    """
    factor = (2,) * array.ndim
    sections = []
    # Each offset picks one "corner" of every 2 x ... x 2 pooling window.
    for offset in np.ndindex(factor):
        part = array[tuple((np.s_[o::f] for (o, f) in zip(offset, factor)))]
        sections.append(part)
    # Fold the corner views together with an in-place elementwise maximum.
    output = sections[0].copy()
    for section in sections[1:]:
        np.maximum(output, section, output)
    return output
class TensorboardXWriter():
    """Event writer that forwards smoothed scalars from the global
    EventStorage into a TensorBoard SummaryWriter."""

    def __init__(self, log_dir: str, window_size: int=20, **kwargs):
        # window_size: smoothing window applied when reading from storage.
        self._window_size = window_size
        from torch.utils.tensorboard import SummaryWriter
        self._writer = SummaryWriter(log_dir, **kwargs)

    def write(self):
        storage = get_event_storage()
        # Emit every latest scalar (smoothed per its hint) at the current iter.
        for (k, v) in storage.latest_with_smoothing_hint(self._window_size).items():
            self._writer.add_scalar(k, v, storage.iter)

    def __del__(self):
        # Guard: __init__ may have raised before _writer was assigned.
        if hasattr(self, '_writer'):
            self._writer.close()
class OutputDiscriminator(nn.Module):
    """Fully-convolutional discriminator over (optionally softmaxed)
    segmentation output maps, operating at a fixed 224x224 resolution.

    Attribute names (conv1..conv5, upsample, leakyrelu) are preserved so
    existing state_dicts keep loading.
    """

    def __init__(self, in_channel=2, softmax=False, init=False):
        super(OutputDiscriminator, self).__init__()
        self._softmax = softmax
        channels = [64, 128, 256, 512, 1]
        # All inputs are resized to 224x224 before the conv stack.
        self.upsample = nn.UpsamplingBilinear2d(size=(224, 224))

        def _block(cin, cout):
            # Shared conv spec: 4x4 kernel, stride 2, padding 2, no bias.
            return nn.Conv2d(cin, cout, kernel_size=4, stride=2, padding=2, bias=False)

        self.conv1 = _block(in_channel, channels[0])
        self.conv2 = _block(channels[0], channels[1])
        self.conv3 = _block(channels[1], channels[2])
        self.conv4 = _block(channels[2], channels[3])
        self.conv5 = _block(channels[3], channels[4])
        self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
        if init:
            self._initialize_weights()

    def _initialize_weights(self):
        """Gaussian init (std 0.02) for every conv weight; zero any biases."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                module.weight.data.normal_(0.0, 0.02)
                if module.bias is not None:
                    module.bias.data.zero_()

    def forward(self, x):
        out = self.upsample(x)
        if self._softmax:
            out = F.softmax(out, dim=1)
        # Four leaky-ReLU conv stages, then the final 1-channel projection.
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
            out = self.leakyrelu(stage(out))
        return self.conv5(out)
def recover_formula_internal(prefix_tree):
    """Rebuild a propositional-calculus formula string from a prefix-tree
    node and return its canonical repr; parse failures surface as
    SyntaxError."""
    from .propcalc import formula as propcalc_formula
    if (len(prefix_tree) == 3):
        # Binary node [op, lhs, rhs] -> '(lhs op rhs)'.
        bool_formula = (((('(' + prefix_tree[1]) + prefix_tree[0]) + prefix_tree[2]) + ')')
    else:
        # Leaf / unary chain: concatenate the fragments directly.
        bool_formula = ''.join(prefix_tree)
    try:
        bool_formula = propcalc_formula(bool_formula)
    except (SyntaxError, NameError):
        raise SyntaxError
    return repr(bool_formula)
def options(opt):
    """waf options hook: expose --simu to point at the hexapod_dart_simu
    installation path."""
    opt.add_option('--simu', type='string', help='path to hexapod_dart_simu', dest='simu')
def is_tensor(x):
    """Return True if ``x`` is a torch.Tensor, a tf.Tensor, or a numpy
    ndarray.

    Framework modules are imported lazily and only when available, so this
    is safe to call in torch-only or tf-only environments.
    """
    if is_torch_available():
        import torch
        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(x, tf.Tensor):
            return True
    # numpy is an unconditional dependency here, so check it last.
    return isinstance(x, np.ndarray)
class ReductionParameter(_message.Message):
    """Generated protocol-buffer message for ReductionParameter; structure
    comes entirely from the bound DESCRIPTOR. Do not hand-edit."""
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _REDUCTIONPARAMETER
def get_bootstrap_dataset_config() -> CN:
    """Default config node for one bootstrapped (unlabeled) dataset:
    image loading, inference batching, data sampling and filtering."""
    _C = CN()
    _C.DATASET = ''
    # Fraction of the dataset consumed per bootstrap round.
    _C.RATIO = 0.1
    # Image loader (type-specific keys allowed).
    _C.IMAGE_LOADER = CN(new_allowed=True)
    _C.IMAGE_LOADER.TYPE = ''
    _C.IMAGE_LOADER.BATCH_SIZE = 4
    _C.IMAGE_LOADER.NUM_WORKERS = 4
    # Inference batching (input images vs. produced outputs).
    _C.INFERENCE = CN()
    _C.INFERENCE.INPUT_BATCH_SIZE = 4
    _C.INFERENCE.OUTPUT_BATCH_SIZE = 2
    # Data sampler / filter plug-ins, selected by TYPE.
    _C.DATA_SAMPLER = CN(new_allowed=True)
    _C.DATA_SAMPLER.TYPE = ''
    _C.FILTER = CN(new_allowed=True)
    _C.FILTER.TYPE = ''
    return _C
# NOTE(review): the leading `.operations('slow')` is a truncated decorator
# (likely `@pytest.mark.operations('slow')`) damaged in extraction.
.operations('slow')
def test_hypothesis_deadline(any_app, any_app_schema):
    """A custom hypothesis deadline must be honored: the slow endpoint is
    executed exactly once and the request reaches the app."""
    execute(any_app_schema, hypothesis_settings=hypothesis.settings(deadline=500))
    assert_incoming_requests_num(any_app, 1)
    assert_request(any_app, 0, 'GET', '/api/slow')
def get_fine_tuning_parameters(model, ft_begin_index):
    """Build per-parameter optimizer groups that freeze all layers below
    ``ft_begin_index``.

    Parameters whose names mention layer{ft_begin_index}..layer4 or fc keep
    the optimizer's learning rate; every other parameter gets lr=0.0.
    With ft_begin_index == 0 the whole model is returned trainable.
    """
    if ft_begin_index == 0:
        return model.parameters()
    trainable_tags = ['layer{}'.format(idx) for idx in range(ft_begin_index, 5)]
    trainable_tags.append('fc')
    groups = []
    for name, param in model.named_parameters():
        if any(tag in name for tag in trainable_tags):
            # Fine-tuned parameter: inherit the optimizer's base lr.
            groups.append({'params': param})
        else:
            # Frozen parameter: pinned to a zero learning rate.
            groups.append({'params': param, 'lr': 0.0})
    return groups
def get_std_fsa_1label_2times():
    """Build the standard CTC FSA for one label emitted twice: optional
    blanks, a run of label 1, at least one separating blank, a second run of
    label 1, then optional trailing blanks. States 3 and 4 are final."""
    fsa = Fsa()
    transitions = [
        (0, 0, BlankLabel),
        (0, 1, Label1),
        (1, 1, Label1),
        (1, 2, BlankLabel),
        (2, 2, BlankLabel),
        (2, 3, Label1),
        (3, 3, Label1),
        (3, 4, BlankLabel),
        (4, 4, BlankLabel),
    ]
    for src, dst, label in transitions:
        fsa.add_arc(src, dst, label)
    for final_state in (3, 4):
        fsa.add_final_state(final_state)
    return fsa
def get_all_fp_dtypes(include_half=True, include_bfloat16=True) -> List[torch.dtype]:
    """Return the floating-point torch dtypes, optionally with the 16-bit ones.

    float32/float64 are always included; float16 and bfloat16 are appended
    when their flags are set.
    """
    optional = [(include_half, torch.float16), (include_bfloat16, torch.bfloat16)]
    return [torch.float32, torch.float64] + [dt for wanted, dt in optional if wanted]
class Timer(object):
    """Accumulating wall-clock timer (tic/toc with running average).

    Fix: ``average_time`` is used as an attribute by ``toc`` and
    ``avg_time_str`` but was defined as a plain method, so ``toc(average=True)``
    returned a bound method and ``avg_time_str`` passed a method object into
    ``datetime.timedelta`` (TypeError). Restored as a ``@property`` — the
    usage throughout this class shows that was the intended contract.
    """

    def __init__(self):
        self.reset()

    @property
    def average_time(self):
        # Mean seconds per recorded interval; 0.0 before any measurement.
        return (self.total_time / self.calls) if (self.calls > 0) else 0.0

    def tic(self):
        """Mark the start of a timed section."""
        self.start_time = time.time()

    def toc(self, average=True):
        """Record time since ``tic``; return the running average or the last interval."""
        self.add(time.time() - self.start_time)
        if average:
            return self.average_time
        else:
            return self.diff

    def add(self, time_diff):
        """Fold an externally measured interval (seconds) into the statistics."""
        self.diff = time_diff
        self.total_time += self.diff
        self.calls += 1

    def reset(self):
        """Clear all accumulated state."""
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0

    def avg_time_str(self):
        """Return the average interval formatted as H:MM:SS[.ffffff]."""
        time_str = str(datetime.timedelta(seconds=self.average_time))
        return time_str
class SimpleConfig(BaseConfig):
    """Minimal example configuration with one scalar parameter and a time window.

    NOTE(review): field semantics (units, how BaseConfig consumes them) are not
    visible here — presumably seconds for the time fields; confirm upstream.
    """
    # Generic model/simulation parameter.
    param: float = 2.0
    # Start of the simulated time window.
    start_time: float = 0.0
    # End of the simulated time window.
    end_time: float = 5.0
def total_intersect_and_union(results, gt_seg_maps, num_classes, ignore_index, label_map=None, reduce_zero_label=False):
    """Accumulate per-class intersection/union/pred/label areas over a dataset.

    Fix: the default for ``label_map`` was the mutable ``dict()``; replaced
    with a ``None`` sentinel (behavior unchanged — the map was never mutated
    here, but the idiom is unsafe).

    Args:
        results: per-image predicted segmentation maps.
        gt_seg_maps: per-image ground-truth maps (same length as ``results``).
        num_classes: number of semantic classes.
        ignore_index: label value excluded from the statistics.
        label_map: optional remapping of ground-truth labels (default: identity).
        reduce_zero_label: whether class 0 is treated as "ignore" and labels shifted.

    Returns:
        Tuple of four float64 tensors of shape ``(num_classes,)``:
        (intersect, union, pred_label, label) areas summed over all images.
    """
    if label_map is None:
        label_map = dict()
    num_imgs = len(results)
    assert len(gt_seg_maps) == num_imgs
    total_area_intersect = torch.zeros((num_classes,), dtype=torch.float64)
    total_area_union = torch.zeros((num_classes,), dtype=torch.float64)
    total_area_pred_label = torch.zeros((num_classes,), dtype=torch.float64)
    total_area_label = torch.zeros((num_classes,), dtype=torch.float64)
    for i in range(num_imgs):
        # Per-image statistics come from the sibling helper intersect_and_union.
        (area_intersect, area_union, area_pred_label, area_label) = intersect_and_union(
            results[i], gt_seg_maps[i], num_classes, ignore_index, label_map, reduce_zero_label)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return (total_area_intersect, total_area_union, total_area_pred_label, total_area_label)
class RuleEG(Rule):
    """Edelman–Greene insertion rule (RSK variant for reduced words).

    NOTE(review): the insertion/reverse steps below are order- and
    mutation-sensitive (they modify ``r``/``row`` in place); semantics follow
    the Edelman–Greene bumping rule — confirm against the Sage growth-diagram
    framework this plugs into.
    """

    def insertion(self, j, r):
        # Bump ``j`` into row ``r`` (in place); return the bumped value,
        # or None when ``j`` is >= the last entry (append case handled by caller).
        if (r[(- 1)] <= j):
            return None
        y_pos = bisect_right(r, j)
        if ((r[y_pos] == (j + 1)) and (y_pos > 0) and (j == r[(y_pos - 1)])):
            # Special EG case: row contains both j and j+1 — bump j+1 without replacement.
            j += 1
        else:
            # Ordinary bump: swap j with the first strictly larger entry.
            (j, r[y_pos]) = (r[y_pos], j)
        return j

    def reverse_insertion(self, x, row):
        # Inverse bumping step: recover the value that was inserted into ``row``.
        y_pos = (bisect_left(row, x) - 1)
        if ((row[y_pos] == (x - 1)) and (y_pos < (len(row) - 1)) and (row[(y_pos + 1)] == x)):
            # Inverse of the special j/j+1 case above.
            x -= 1
        else:
            (x, row[y_pos]) = (row[y_pos], x)
        return x

    def _backward_format_output(self, lower_row, upper_row, output, p_is_standard, q_is_standard):
        # For standard Q and 'permutation' output, the recovered word is a
        # reduced word; build the permutation from it directly.
        if (q_is_standard and (output == 'permutation')):
            n = 0
            if list(lower_row):
                n = (max(list(lower_row)) + 1)
            from sage.combinat.permutation import Permutations
            return Permutations(n).from_reduced_word(list(lower_row))
        else:
            return super()._backward_format_output(lower_row, upper_row, output, p_is_standard, q_is_standard)
# NOTE(review): the two decorator lines below appear truncated — presumably
# `@pytest.mark.parametrize(...)`; confirm against the original test module.
.parametrize('time_threshold, user_answer, item_answer', [(datetime.strptime('06-01-2020', '%d-%m-%Y'), [[1, 1, 1, 1, 1, 3, 3, 3, 3, 3], []], [[1, 2, 3, 4, 5, 1, 5, 3, 1, 2], []])])
.parametrize('dataset_type', [pytest.param('spark_dataframe_test', marks=pytest.mark.spark), pytest.param('pandas_dataframe_test', marks=pytest.mark.core)])
def test_time_splitter_drop_users(time_threshold, user_answer, item_answer, dataset_type, request):
    """TimeSplitter with drop_cold_users=True removes post-threshold users from both splits."""
    dataframe = request.getfixturevalue(dataset_type)
    filtered_dataframe = TimeSplitter(time_threshold=time_threshold, query_column='user_id', drop_cold_users=True, drop_cold_items=False).split(dataframe)
    # Column extraction differs between the pandas and Spark fixtures.
    if (dataset_type == 'pandas_dataframe_test'):
        item_ids = _get_column_list_pandas(filtered_dataframe, 'item_id')
        user_ids = _get_column_list_pandas(filtered_dataframe, 'user_id')
    else:
        item_ids = _get_column_list(filtered_dataframe, 'item_id')
        user_ids = _get_column_list(filtered_dataframe, 'user_id')
    _check_assert(user_ids, item_ids, user_answer, item_answer)
def test_random_state_pickle():
    """A RandomState round-trips through pickle and reproduces its draws."""
    rs = RandomState(seed=0)
    drawn = rs.randint(5)
    # Serialize after the first draw and restore into a fresh object.
    restored = pickle.loads(pickle.dumps(rs))
    assert drawn == restored.randint(5)
def make_kmer_vector(seq_list, kmer_list, rev_kmer_list, k, upto, revcomp, normalize):
    """Compute a k-mer count (optionally normalized) feature vector per sequence.

    Args:
        seq_list: input sequences.
        kmer_list: all k-mers, grouped by length via the ``index`` boundaries.
        rev_kmer_list: canonical k-mer list used when ``revcomp`` is set.
        k: maximum k-mer length.
        upto: count all lengths 1..k (True) or only length k (False).
        revcomp: collapse each k-mer with its reverse complement (canonical form).
        normalize: divide counts by the per-length totals (rounded to 3 d.p.).

    Returns:
        List of per-sequence feature vectors.

    NOTE(review): the local ``sum`` shadows the builtin of the same name
    (it holds per-length count totals) — consider renaming.
    """
    if upto:
        index = make_index_upto_k(k)
        # One running total per k-mer length 1..k.
        sum = ([0] * k)
        len_k = k
    else:
        index = make_index(k)
        sum = [0]
        len_k = 1
    vector = []
    for seq in seq_list:
        kmer_count = {}
        for i in range(len_k):
            sum[i] = 0
            # index[i]..index[i+1] delimits the k-mers of length i+1.
            for j in range(index[i], index[(i + 1)]):
                kmer = kmer_list[j]
                temp_count = frequency(seq, kmer)
                if revcomp:
                    # Accumulate under the lexicographically smaller of the
                    # k-mer and its reverse complement (canonical key).
                    rev_kmer = find_revcomp(kmer, {})
                    if (kmer <= rev_kmer):
                        if (kmer not in kmer_count):
                            kmer_count[kmer] = 0
                        kmer_count[kmer] += temp_count
                    else:
                        if (rev_kmer not in kmer_count):
                            kmer_count[rev_kmer] = 0
                        kmer_count[rev_kmer] += temp_count
                else:
                    if (kmer not in kmer_count):
                        kmer_count[kmer] = 0
                    kmer_count[kmer] += temp_count
                sum[i] += temp_count
        if revcomp:
            temp_vec = [kmer_count[kmer] for kmer in rev_kmer_list]
        else:
            temp_vec = [kmer_count[kmer] for kmer in kmer_list]
        if normalize:
            i = 0
            if (not upto):
                # Single length: divide everything by that length's total.
                temp_vec = [round((float(e) / sum[i]), 3) for e in temp_vec]
            if upto:
                # Multiple lengths: advance ``i`` at each length boundary so
                # each entry is divided by its own length's total.
                if revcomp:
                    upto_index = make_index_upto_k_revcomp(k)
                else:
                    upto_index = make_index_upto_k(k)
                j = 0
                for e in temp_vec:
                    if (j >= upto_index[(i + 1)]):
                        i += 1
                    temp_vec[j] = round((float(e) / sum[i]), 3)
                    j += 1
        vector.append(temp_vec)
    return vector
def to_mido_time_signature(time_signature: TimeSignature) -> MetaMessage:
    """Convert a TimeSignature object into a mido ``time_signature`` MetaMessage."""
    fields = {
        'time': time_signature.time,
        'numerator': time_signature.numerator,
        'denominator': time_signature.denominator,
    }
    return MetaMessage('time_signature', **fields)
class SpanPadder(Padder):
    """Pad/collect constituency-parse spans into numpy arrays for batching.

    NOTE(review): indentation of the inner ``add`` closure was reconstructed;
    the child loops are placed inside ``span_length > 1`` because
    ``child_segment_idx`` references ``len(parent_span) - 1`` (the parent just
    appended) — confirm against the original file.
    """

    def __init__(self, vocab):
        super(SpanPadder, self).__init__()
        self.vocab = vocab
        # Index used for leaf children that carry no constituent label.
        self.null_idx = self.vocab['NULL']
        self.vocab_size = len(self.vocab)

    def __call__(self, contents, field_name, field_ele_dtype, dim: int):
        """Flatten a batch of parse trees into span tables.

        Returns a dict of arrays: parent spans, child spans, root spans,
        per-level hierarchical (unary-chain) spans, and the parent index of
        each child row.
        """
        parent_span = []
        child_span = []
        root_span = []
        child_segment_idx = []
        # level -> list of spans for collapsed unary chains ("A+B" labels).
        hierarical_span = defaultdict(list)
        for (b_idx, tree) in enumerate(contents):
            tree = tree.convert()

            def add(node, is_root=False):
                # Depth-first walk; records only internal nodes spanning > 1 token.
                if isinstance(node, InternalParseNode):
                    if (node.span_length > 1):
                        parent_span.append([b_idx, node.left, node.right, self.vocab[node.top_label]])
                        if is_root:
                            # Root also remembers its row index in parent_span.
                            root_span.append([b_idx, node.left, node.right, self.vocab[node.top_label], (len(parent_span) - 1)])
                        if (len(node.labels) > 1):
                            # Unary chain: emit one span per extra label level.
                            labels = node.label.split('+')
                            for level in range((len(labels) - 1)):
                                hierarical_span[level].append((b_idx, node.left, node.right, self.vocab[labels[(level + 1)]], (len(parent_span) - 1)))
                        for child in node.children:
                            # Each child row points back at the parent row just appended.
                            child_segment_idx.append((len(parent_span) - 1))
                            if isinstance(child, InternalParseNode):
                                child_span.append([b_idx, child.left, child.right, self.vocab[child.top_label]])
                            else:
                                # Leaf child: no label — use the NULL index.
                                child_span.append([b_idx, child.left, child.right, self.null_idx])
                        for child in node.children:
                            if isinstance(child, InternalParseNode):
                                add(child)
                            else:
                                pass

            add(tree, is_root=True)
        levels = list(hierarical_span.keys())
        levels.sort()
        h_spans = []
        for level in levels:
            h_spans.append(np.array(hierarical_span[level]))
        return {'parent_span': np.array(parent_span), 'child_span': np.array(child_span), 'root_span': np.array(root_span), 'hierarical_spans': h_spans, 'child_segment_idx': np.array(child_segment_idx)}
class CornerPoolPack(nn.Module):
    """CornerNet-style pooling block: two pooled conv branches fused with a
    1x1 shortcut, followed by GroupNorm, ReLU and an output conv."""

    def __init__(self, dim, pool1, pool2, conv_cfg=None, norm_cfg=None, first_kernel_size=3, kernel_size=3, corner_dim=128):
        super(CornerPoolPack, self).__init__()
        # Branch convs feeding the two directional corner pools.
        self.p1_conv1 = ConvModule(dim, corner_dim, first_kernel_size, stride=1, padding=((first_kernel_size - 1) // 2), conv_cfg=conv_cfg, norm_cfg=norm_cfg)
        self.p2_conv1 = ConvModule(dim, corner_dim, first_kernel_size, stride=1, padding=((first_kernel_size - 1) // 2), conv_cfg=conv_cfg, norm_cfg=norm_cfg)
        # Projection of the pooled sum back to `dim` channels.
        self.p_conv1 = nn.Conv2d(corner_dim, dim, 3, padding=1, bias=False)
        self.p_gn1 = nn.GroupNorm(num_groups=32, num_channels=dim)
        # 1x1 shortcut path from the raw input.
        self.conv1 = nn.Conv2d(dim, dim, 1, bias=False)
        self.gn1 = nn.GroupNorm(num_groups=32, num_channels=dim)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = ConvModule(dim, dim, kernel_size, stride=1, padding=((kernel_size - 1) // 2), conv_cfg=conv_cfg, norm_cfg=norm_cfg)
        self.pool1 = pool1
        self.pool2 = pool2

    def forward(self, x):
        """Pool two conv branches, fuse with a shortcut, and project out."""
        branch1 = self.pool1(self.p1_conv1(x))
        branch2 = self.pool2(self.p2_conv1(x))
        pooled = self.p_gn1(self.p_conv1(branch1 + branch2))
        shortcut = self.gn1(self.conv1(x))
        fused = self.relu1(pooled + shortcut)
        return self.conv2(fused)
def save_cache(features, cached_features_file):
    """Serialize tokenized features to a TFRecord cache file.

    Each feature contributes one Example with int64 lists for input ids,
    attention mask, segment ids and label ids. Progress is logged every
    5000 examples.
    """
    def _int64_feature(values):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

    writer = tf.io.TFRecordWriter(cached_features_file)
    for ex_index, feature in enumerate(features):
        if ex_index % 5000 == 0:
            logging.info('Writing example %d of %d' % (ex_index, len(features)))
        record = collections.OrderedDict()
        record['input_ids'] = _int64_feature(feature.input_ids)
        record['input_mask'] = _int64_feature(feature.input_mask)
        record['segment_ids'] = _int64_feature(feature.segment_ids)
        record['label_ids'] = _int64_feature(feature.label_ids)
        example = tf.train.Example(features=tf.train.Features(feature=record))
        writer.write(example.SerializeToString())
    writer.close()
def _compute_delta(log_moments, eps): min_delta = 1.0 for (moment_order, log_moment) in log_moments: if (moment_order == 0): continue if (math.isinf(log_moment) or math.isnan(log_moment)): sys.stderr.write(('The %d-th order is inf or Nan\n' % moment_order)) continue if (log_moment < (moment_order * eps)): min_delta = min(min_delta, math.exp((log_moment - (moment_order * eps)))) return min_delta
def _op_stats(net_def): type_count = {} for t in [op.type for op in net_def.op]: type_count[t] = (type_count.get(t, 0) + 1) type_count_list = sorted(type_count.items(), key=(lambda kv: kv[0])) type_count_list = sorted(type_count_list, key=(lambda kv: (- kv[1]))) return '\n'.join(('{:>4}x {}'.format(count, name) for (name, count) in type_count_list))
class TemplateError(Exception):
    """Base exception for template errors, with Python 2/3 unicode handling.

    NOTE(review): ``message`` is defined as a plain method in both branches,
    yet ``__unicode__`` reads ``self.message`` as an attribute — upstream this
    is almost certainly a ``@property`` whose decorator got stripped; confirm
    against the original (Jinja2-style) source.
    """
    if PY2:
        def __init__(self, message=None):
            # Store the message UTF-8-encoded so Exception holds bytes on Py2.
            if (message is not None):
                message = text_type(message).encode('utf-8')
            Exception.__init__(self, message)

        def message(self):
            # Decode the stored bytes back to text (replacing bad sequences).
            if self.args:
                message = self.args[0]
                if (message is not None):
                    return message.decode('utf-8', 'replace')

        def __unicode__(self):
            return (self.message or u'')
    else:
        def __init__(self, message=None):
            Exception.__init__(self, message)

        def message(self):
            if self.args:
                message = self.args[0]
                if (message is not None):
                    return message
def test_fft_function():
    """Legacy ``scipy.fft``/``scipy.ifft`` callables still work but warn.

    ``scipy.fft`` (the function) was deprecated in 1.5.0 and ``scipy.ifft``
    scheduled for removal in 2.0.0; the results must match ``scipy.fft.fft``.
    """
    np.random.seed(1234)
    import scipy
    x = (np.random.randn(10) + (1j * np.random.randn(10)))
    # Calling scipy.fft as a function warns but still computes the transform.
    with pytest.deprecated_call(match='1\\.5\\.0'):
        X = scipy.fft(x)
    with pytest.deprecated_call(match='2\\.0\\.0'):
        y = scipy.ifft(X)
    assert_allclose(y, x)
    # After importing the scipy.fft *module*, the deprecated callable must
    # keep working and agree with the module's fft.
    import scipy.fft
    assert_allclose(X, scipy.fft.fft(x))
    with pytest.deprecated_call(match='1\\.5\\.0'):
        X = scipy.fft(x)
    assert_allclose(X, scipy.fft.fft(x))
    with pytest.deprecated_call(match='2\\.0\\.0'):
        y = scipy.ifft(X)
    assert_allclose(y, x)
    # Same via `from scipy import fft` (binds the deprecated callable).
    from scipy import fft
    with pytest.deprecated_call(match='1\\.5\\.0'):
        X = fft(x)
    with pytest.deprecated_call(match='2\\.0\\.0'):
        y = scipy.ifft(X)
    assert_allclose(y, x)
def build_dict(imgs, wtoi, params):
    """Collect reference captions (as words and as token ids) and their n-gram
    document frequencies for CIDEr-style scoring.

    NOTE(review): ``params`` is subscripted (``params['split']``) but also
    probed with ``hasattr(params, 'bpe')`` — on a plain dict that hasattr is
    always False, so the BPE branch only fires for attribute-style configs;
    confirm which type callers pass.
    """
    wtoi['<eos>'] = 0
    count_imgs = 0
    refs_words = []
    refs_idxs = []
    for img in imgs:
        # Keep images from the requested split; 'train' also absorbs 'restval',
        # and 'all' keeps everything.
        if ((params['split'] == img['split']) or ((params['split'] == 'train') and (img['split'] == 'restval')) or (params['split'] == 'all')):
            ref_words = []
            ref_idxs = []
            for sent in img['sentences']:
                if hasattr(params, 'bpe'):
                    # Optionally re-tokenize with BPE before mapping to ids.
                    sent['tokens'] = params.bpe.segment(' '.join(sent['tokens'])).strip().split(' ')
                tmp_tokens = (sent['tokens'] + ['<eos>'])
                # Out-of-vocabulary tokens collapse to 'UNK'.
                tmp_tokens = [(_ if (_ in wtoi) else 'UNK') for _ in tmp_tokens]
                ref_words.append(' '.join(tmp_tokens))
                ref_idxs.append(' '.join([str(wtoi[_]) for _ in tmp_tokens]))
            refs_words.append(ref_words)
            refs_idxs.append(ref_idxs)
            count_imgs += 1
    print('total imgs:', count_imgs)
    (ngram_words, count_refs) = get_doc_freq(refs_words, params)
    (ngram_idxs, count_refs) = get_doc_freq(refs_idxs, params)
    print('count_refs:', count_refs)
    return (ngram_words, ngram_idxs, count_refs)
def read_csv(fname):
    """Load a CSV file into a DataFrame, skipping lines that start with '#'.

    The index is left as the default RangeIndex (``index_col=None``).
    """
    import pandas as pd
    frame = pd.read_csv(fname, index_col=None, comment='#')
    return frame
def create_model(opt):
    """Instantiate, initialize and report the model class named by ``opt.model``."""
    model_cls = find_model_using_name(opt.model)
    instance = model_cls()
    instance.initialize(opt)
    instance.print_networks()
    print('model [%s] was created' % instance.name())
    return instance
def job_fssdJ5q_opt(p, data_source, tr, te, r):
    """FSSD-opt job with J=5 test locations; thin wrapper over the J-parameterised runner."""
    return job_fssdJ1q_opt(p, data_source, tr, te, r, J=5)
def _seg_76(): return [(194755, 'M', u''), (194756, 'M', u''), (194757, 'M', u''), (194758, 'M', u''), (194759, 'M', u''), (194760, 'M', u''), (194761, 'M', u''), (194762, 'M', u''), (194763, 'M', u''), (194764, 'M', u''), (194765, 'M', u''), (194766, 'M', u''), (194767, 'M', u''), (194768, 'M', u''), (194769, 'M', u''), (194770, 'M', u''), (194771, 'M', u''), (194772, 'M', u''), (194773, 'M', u''), (194774, 'M', u''), (194775, 'M', u''), (194776, 'M', u''), (194777, 'M', u''), (194778, 'M', u''), (194779, 'M', u''), (194780, 'M', u''), (194781, 'M', u''), (194782, 'M', u''), (194783, 'M', u''), (194784, 'M', u''), (194785, 'M', u''), (194786, 'M', u''), (194787, 'M', u''), (194788, 'M', u''), (194789, 'M', u''), (194790, 'M', u''), (194791, 'M', u''), (194792, 'M', u''), (194793, 'M', u''), (194794, 'M', u''), (194795, 'M', u''), (194796, 'M', u''), (194797, 'M', u''), (194798, 'M', u''), (194799, 'M', u''), (194800, 'M', u''), (194801, 'M', u''), (194802, 'M', u''), (194803, 'M', u''), (194804, 'M', u''), (194805, 'M', u''), (194806, 'M', u''), (194807, 'M', u''), (194808, 'M', u''), (194809, 'M', u''), (194810, 'M', u''), (194811, 'M', u''), (194812, 'M', u''), (194813, 'M', u''), (194814, 'M', u''), (194815, 'M', u''), (194816, 'M', u''), (194817, 'M', u''), (194818, 'M', u''), (194819, 'M', u''), (194820, 'M', u''), (194821, 'M', u''), (194822, 'M', u''), (194823, 'M', u''), (194824, 'M', u''), (194825, 'M', u''), (194826, 'M', u''), (194827, 'M', u''), (194828, 'M', u''), (194829, 'M', u''), (194830, 'M', u''), (194831, 'M', u''), (194832, 'M', u''), (194833, 'M', u''), (194834, 'M', u''), (194835, 'M', u''), (194836, 'M', u''), (194837, 'M', u''), (194838, 'M', u''), (194839, 'M', u''), (194840, 'M', u''), (194841, 'M', u''), (194842, 'M', u''), (194843, 'M', u''), (194844, 'M', u''), (194845, 'M', u''), (194846, 'M', u''), (194847, 'X'), (194848, 'M', u''), (194849, 'M', u''), (194850, 'M', u''), (194851, 'M', u''), (194852, 'M', u''), (194853, 'M', u''), 
(194854, 'M', u'')]
def main():
    """Evaluate a bit-packed WRN-McDonnell checkpoint on the CIFAR test set.

    Fix: ``np.int`` (removed in NumPy 1.24) replaced with ``np.int64`` — same
    widening the old alias performed, and required before ``* 2 - 1`` so the
    unpacked uint8 bits do not underflow.
    """
    args = parse_args()
    num_classes = (10 if (args.dataset == 'CIFAR10') else 100)
    have_cuda = torch.cuda.is_available()

    def cast(x):
        # Move tensors/modules to GPU only when one is available.
        return (x.cuda() if have_cuda else x)

    checkpoint = torch.load(args.checkpoint)
    weights_unpacked = {}
    for (k, w) in checkpoint.items():
        if (w.dtype == torch.uint8):
            # Binary conv weights are stored bit-packed. Rescale unpacked
            # {-1,+1} values by a He-style std with fan-in = C*kh*kw*8 bits.
            scale = np.sqrt((2 / (((w.shape[1] * w.shape[2]) * w.shape[3]) * 8)))
            signed = ((np.unpackbits(w, axis=1).astype(np.int64) * 2) - 1)
            # k[7:] strips the 'module.' DataParallel prefix — assumption
            # based on the 7-char slice; confirm against the checkpoint keys.
            weights_unpacked[k[7:]] = (torch.from_numpy(signed).float() * scale)
        else:
            weights_unpacked[k[7:]] = w
    model = WRN_McDonnell(args.depth, args.width, num_classes)
    model.load_state_dict(weights_unpacked)
    model = cast(model)
    model.eval()
    class_acc = ClassErrorMeter(accuracy=True)
    for (inputs, targets) in tqdm(DataLoader(create_dataset(args, train=False), 256)):
        with torch.no_grad():
            class_acc.add(model(cast(inputs)).cpu(), targets)
    print(class_acc.value())
# NOTE(review): the decorator line below appears truncated — presumably
# `@pytest.mark.parametrize(...)`; confirm against the original test module.
.parametrize('sparse_container', (((CSC_CONTAINERS + CSR_CONTAINERS) + DOK_CONTAINERS) + LIL_CONTAINERS))
def test_silhouette_samples_euclidean_sparse(sparse_container):
    """silhouette_samples matches between dense and sparse precomputed distances."""
    X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T
    y = [0, 0, 0, 0, 1, 1, 1, 1]
    pdist_dense = pairwise_distances(X)
    pdist_sparse = sparse_container(pdist_dense)
    assert issparse(pdist_sparse)
    output_with_sparse_input = silhouette_samples(pdist_sparse, y)
    output_with_dense_input = silhouette_samples(pdist_dense, y)
    assert_allclose(output_with_sparse_input, output_with_dense_input)
def fcos_config():
    """Build a minimal FCOSHead (4 classes, 1 input channel) with gradients
    disabled, for testing/export."""
    nms_settings = dict(type='nms', iou_threshold=0.5)
    test_cfg = mmcv.Config(dict(
        deploy_nms_pre=0,
        min_bbox_size=0,
        score_thr=0.05,
        nms=nms_settings,
        max_per_img=100,
    ))
    head = FCOSHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
    head.requires_grad_(False)
    return head
def measure_layer(layer, *args):
    """Accumulate FLOP and parameter counts for ``layer`` into the module-level
    ``count_ops``/``count_params`` globals, once per input tensor in ``args``.

    Dispatches on the layer's class name; layers listed in the pass-through
    branches contribute parameters/ops of zero by design. Unknown layer types
    raise TypeError so new modules are not silently skipped.

    NOTE(review): the Conv2d/DeformConv output-size formula divides
    ``2*padding`` by ``dilation`` instead of the standard
    ``2*padding - dilation*(kernel-1)`` form — looks off for dilation > 1;
    confirm before relying on those counts.
    """
    global count_ops, count_params
    for x in args:
        delta_ops = 0
        delta_params = 0
        # multiply-add counted as a single op.
        multi_add = 1
        type_name = get_layer_info(layer)
        if (type_name in ['Conv2d']):
            out_h = int(((((x.size()[2] + ((2 * layer.padding[0]) / layer.dilation[0])) - layer.kernel_size[0]) / layer.stride[0]) + 1))
            out_w = int(((((x.size()[3] + ((2 * layer.padding[1]) / layer.dilation[1])) - layer.kernel_size[1]) / layer.stride[1]) + 1))
            delta_ops = (((((((layer.in_channels * layer.out_channels) * layer.kernel_size[0]) * layer.kernel_size[1]) * out_h) * out_w) / layer.groups) * multi_add)
            delta_params = get_layer_param(layer)
        elif (type_name in ['ConvTranspose2d']):
            (_, _, in_h, in_w) = x.size()
            out_h = int((((((in_h - 1) * layer.stride[0]) - (2 * layer.padding[0])) + layer.kernel_size[0]) + layer.output_padding[0]))
            out_w = int((((((in_w - 1) * layer.stride[1]) - (2 * layer.padding[1])) + layer.kernel_size[1]) + layer.output_padding[1]))
            delta_ops = (((((((layer.in_channels * layer.out_channels) * layer.kernel_size[0]) * layer.kernel_size[1]) * out_h) * out_w) / layer.groups) * multi_add)
            delta_params = get_layer_param(layer)
        elif (type_name in ['LearnedGroupConv']):
            # Composite layer: recurse into sub-modules, then count the inner
            # conv scaled down by the condensation factor.
            measure_layer(layer.relu, x)
            measure_layer(layer.norm, x)
            conv = layer.conv
            out_h = int(((((x.size()[2] + (2 * conv.padding[0])) - conv.kernel_size[0]) / conv.stride[0]) + 1))
            out_w = int(((((x.size()[3] + (2 * conv.padding[1])) - conv.kernel_size[1]) / conv.stride[1]) + 1))
            delta_ops = (((((((conv.in_channels * conv.out_channels) * conv.kernel_size[0]) * conv.kernel_size[1]) * out_h) * out_w) / layer.condense_factor) * multi_add)
            delta_params = (get_layer_param(conv) / layer.condense_factor)
        elif (type_name in ['ReLU', 'ReLU6']):
            delta_ops = x.numel()
            delta_params = get_layer_param(layer)
        elif (type_name in ['AvgPool2d', 'MaxPool2d']):
            # NOTE(review): both out_w and out_h are computed from x.size()[2]
            # (height) — for non-square inputs out_w looks wrong; confirm.
            in_w = x.size()[2]
            kernel_ops = (layer.kernel_size * layer.kernel_size)
            out_w = int(((((in_w + (2 * layer.padding)) - layer.kernel_size) / layer.stride) + 1))
            out_h = int(((((in_w + (2 * layer.padding)) - layer.kernel_size) / layer.stride) + 1))
            delta_ops = ((((x.size()[0] * x.size()[1]) * out_w) * out_h) * kernel_ops)
            delta_params = get_layer_param(layer)
        elif (type_name in ['LastLevelMaxPool']):
            pass
        elif (type_name in ['AdaptiveAvgPool2d']):
            delta_ops = (((x.size()[0] * x.size()[1]) * x.size()[2]) * x.size()[3])
            delta_params = get_layer_param(layer)
        elif (type_name in ['ZeroPad2d', 'RetinaNetPostProcessor']):
            pass
        elif (type_name in ['Linear']):
            weight_ops = (layer.weight.numel() * multi_add)
            bias_ops = layer.bias.numel()
            delta_ops = (x.size()[0] * (weight_ops + bias_ops))
            delta_params = get_layer_param(layer)
        elif (type_name in ['BatchNorm2d', 'Dropout2d', 'DropChannel', 'Dropout', 'FrozenBatchNorm2d', 'GroupNorm']):
            # Parameter-only layers: ops treated as negligible.
            delta_params = get_layer_param(layer)
        elif (type_name in ['SumTwo']):
            delta_ops = x.numel()
        elif (type_name in ['AggregateCell']):
            if (not layer.pre_transform):
                delta_ops = (2 * x.numel())
            else:
                measure_layer(layer.branch_1, x)
                measure_layer(layer.branch_2, x)
            delta_params = get_layer_param(layer)
        elif (type_name in ['Identity', 'Zero']):
            pass
        elif (type_name in ['Scale']):
            delta_params = get_layer_param(layer)
            delta_ops = x.numel()
        elif (type_name in ['FCOSPostProcessor', 'RPNPostProcessor', 'KeypointPostProcessor', 'ROIAlign', 'PostProcessor', 'KeypointRCNNPredictor', 'NaiveSyncBatchNorm', 'Upsample', 'Sequential']):
            # Containers / post-processing layers: counted elsewhere or ignored.
            pass
        elif (type_name in ['DeformConv']):
            # Offset prediction cost plus the deformable convolution itself.
            offset_conv = list(layer.parameters())[0]
            delta_ops = reduce(operator.mul, offset_conv.size(), (x.size()[2] * x.size()[3]))
            out_h = int(((((x.size()[2] + ((2 * layer.padding[0]) / layer.dilation[0])) - layer.kernel_size[0]) / layer.stride[0]) + 1))
            out_w = int(((((x.size()[3] + ((2 * layer.padding[1]) / layer.dilation[1])) - layer.kernel_size[1]) / layer.stride[1]) + 1))
            delta_ops += (((((((layer.in_channels * layer.out_channels) * layer.kernel_size[0]) * layer.kernel_size[1]) * out_h) * out_w) / layer.groups) * multi_add)
            delta_params = get_layer_param(layer)
        else:
            raise TypeError(('unknown layer type: %s' % type_name))
        count_ops += delta_ops
        count_params += delta_params
    return
class Mesh():
    """TensorFlow implementation of a rectangular MZI mesh (photonic unitary).

    Precomputes the MZI error tensors and per-layer permutations from the
    model, and converts phase settings into per-column transfer matrices.
    """

    def __init__(self, model: MeshModel):
        self.model = model
        (self.units, self.num_layers) = (self.model.units, self.model.num_layers)
        # Permutation pairing adjacent waveguides for off-diagonal terms.
        self.pairwise_perm_idx = pairwise_off_diag_permutation(self.units)
        # sin/cos error tensors (ss, cs, sc, cc) as complex TF constants.
        (ss, cs, sc, cc) = self.model.mzi_error_tensors
        (self.ss, self.cs, self.sc, self.cc) = (tf.constant(ss, dtype=TF_COMPLEX), tf.constant(cs, dtype=TF_COMPLEX), tf.constant(sc, dtype=TF_COMPLEX), tf.constant(cc, dtype=TF_COMPLEX))
        # One permutation layer per mesh column boundary (num_layers + 1).
        self.perm_layers = [PermutationLayer(self.model.perm_idx[layer]) for layer in range((self.num_layers + 1))]

    def mesh_layers(self, phases: MeshPhasesTensorflow) -> List[MeshVerticalLayer]:
        """Build the vertical (per-column) transfer layers for given phases.

        Combines internal/external phase-shift layers with the error tensors
        to form the 2x2 MZI scattering terms s11/s12/s21/s22, packed into
        diagonal and off-diagonal vectors per column. The Hadamard and
        beamsplitter conventions differ in signs and the 1j factors.
        """
        internal_psl = phases.internal_phase_shift_layers
        external_psl = phases.external_phase_shift_layers
        if self.model.hadamard:
            s11 = ((self.cc * internal_psl) + (self.ss * _roll_tensor(internal_psl, up=True)))
            s22 = _roll_tensor(((self.ss * internal_psl) + (self.cc * _roll_tensor(internal_psl, up=True))))
            s12 = _roll_tensor(((self.cs * internal_psl) - (self.sc * _roll_tensor(internal_psl, up=True))))
            s21 = ((self.sc * internal_psl) - (self.cs * _roll_tensor(internal_psl, up=True)))
        else:
            s11 = ((self.cc * internal_psl) - (self.ss * _roll_tensor(internal_psl, up=True)))
            s22 = _roll_tensor((((- self.ss) * internal_psl) + (self.cc * _roll_tensor(internal_psl, up=True))))
            s12 = (1j * _roll_tensor(((self.cs * internal_psl) + (self.sc * _roll_tensor(internal_psl, up=True)))))
            s21 = (1j * ((self.sc * internal_psl) + (self.cs * _roll_tensor(internal_psl, up=True))))
        diag_layers = ((external_psl * (s11 + s22)) / 2)
        off_diag_layers = ((_roll_tensor(external_psl) * (s21 + s12)) / 2)
        if (self.units % 2):
            # Odd meshes: the last waveguide passes through untouched.
            diag_layers = tf.concat((diag_layers[:(- 1)], tf.ones_like(diag_layers[(- 1):])), axis=0)
        (diag_layers, off_diag_layers) = (tf.transpose(diag_layers), tf.transpose(off_diag_layers))
        # First layer also carries the input permutation (perm_layers[0]).
        mesh_layers = [MeshVerticalLayer(self.pairwise_perm_idx, diag_layers[0], off_diag_layers[0], self.perm_layers[1], self.perm_layers[0])]
        for layer in range(1, self.num_layers):
            mesh_layers.append(MeshVerticalLayer(self.pairwise_perm_idx, diag_layers[layer], off_diag_layers[layer], self.perm_layers[(layer + 1)]))
        return mesh_layers
class StringConst(object):
    """A C-level string constant plus its cached Python-string representations.

    Used by the code generator: one C byte string may back several Python
    string objects (bytes/str/unicode, per encoding, interned or not), cached
    in ``py_strings`` keyed by their variant.
    """

    def __init__(self, cname, text, byte_string):
        self.cname = cname
        self.text = text
        # C-source-escaped form of the raw bytes.
        self.escaped_value = StringEncoding.escape_byte_string(byte_string)
        # Lazy cache: variant key -> PyStringConst.
        self.py_strings = None
        self.py_versions = []

    def add_py_version(self, version):
        # ``version`` falsy means "needed on both Python 2 and 3".
        if (not version):
            self.py_versions = [2, 3]
        elif (version not in self.py_versions):
            self.py_versions.append(version)

    def get_py_string_const(self, encoding, identifier=None, is_str=False, py3str_cstring=None):
        """Return (creating if needed) the PyStringConst for the given variant.

        The cache key is (is_str, is_unicode, normalized-encoding,
        py3str_cstring); ASCII-compatible encodings collapse to None.
        """
        py_strings = self.py_strings
        text = self.text
        is_str = bool((identifier or is_str))
        # Unicode constant only when no encoding is given and it's not a str.
        is_unicode = ((encoding is None) and (not is_str))
        if (encoding is None):
            encoding_key = None
        else:
            encoding = encoding.lower()
            if (encoding in ('utf8', 'utf-8', 'ascii', 'usascii', 'us-ascii')):
                # Default/ASCII-compatible encodings need no suffix.
                encoding = None
                encoding_key = None
            else:
                encoding_key = ''.join(find_alphanums(encoding))
        key = (is_str, is_unicode, encoding_key, py3str_cstring)
        if (py_strings is not None):
            try:
                return py_strings[key]
            except KeyError:
                pass
        else:
            self.py_strings = {}
        # Intern identifiers and identifier-like literals.
        if identifier:
            intern = True
        elif (identifier is None):
            if isinstance(text, bytes):
                intern = bool(possible_bytes_identifier(text))
            else:
                intern = bool(possible_unicode_identifier(text))
        else:
            intern = False
        if intern:
            prefix = Naming.interned_prefixes['str']
        else:
            prefix = Naming.py_const_prefix
        if encoding_key:
            encoding_prefix = ('_%s' % encoding_key)
        else:
            encoding_prefix = ''
        # Name encodes variant: s(tr)/u(nicode)/b(ytes) + encoding + base cname.
        pystring_cname = ('%s%s%s_%s' % (prefix, ((is_str and 's') or (is_unicode and 'u') or 'b'), encoding_prefix, self.cname[len(Naming.const_prefix):]))
        py_string = PyStringConst(pystring_cname, encoding, is_unicode, is_str, py3str_cstring, intern)
        self.py_strings[key] = py_string
        return py_string
class TensorIndex(ABC):
    """Abstract description of one level of a TACO-style tensor storage format.

    NOTE(review): the capability methods below have empty bodies and
    ``from_json`` takes ``cls`` without a visible ``@classmethod`` — upstream
    these are almost certainly ``@property @abstractmethod`` /
    ``@classmethod``; decorators appear stripped here. Confirm before use.
    """

    def iteration_type(self) -> TensorIterationTypes:
        # How this level is iterated (e.g. value vs position iteration).
        pass

    def locate(self) -> bool:
        # Whether random access (locate) is supported at this level.
        pass

    def assembly(self) -> TensorAssemblyType:
        # Supported assembly (insert/append) strategy.
        pass

    def full(self) -> bool:
        pass

    def ordered(self) -> bool:
        pass

    def unique(self) -> bool:
        pass

    def branchless(self) -> bool:
        pass

    def compact(self) -> bool:
        pass

    def fields(self, lvl: int, dummy_symbol: symbolic.SymExpr) -> Dict[(str, Data)]:
        # Data containers this level contributes to the storage descriptor.
        pass

    def to_json(self):
        attrs = serialize.all_properties_to_json(self)
        retdict = {'type': type(self).__name__, 'attributes': attrs}
        return retdict

    def from_json(cls, json_obj, context=None):
        # Dispatch on the serialized type name to the concrete index class.
        if (json_obj['type'] == 'TensorIndexDense'):
            self = TensorIndexDense.__new__(TensorIndexDense)
        elif (json_obj['type'] == 'TensorIndexCompressed'):
            self = TensorIndexCompressed.__new__(TensorIndexCompressed)
        elif (json_obj['type'] == 'TensorIndexSingleton'):
            self = TensorIndexSingleton.__new__(TensorIndexSingleton)
        elif (json_obj['type'] == 'TensorIndexRange'):
            self = TensorIndexRange.__new__(TensorIndexRange)
        elif (json_obj['type'] == 'TensorIndexOffset'):
            self = TensorIndexOffset.__new__(TensorIndexOffset)
        else:
            raise TypeError(f"Invalid data type, got: {json_obj['type']}")
        serialize.set_properties_from_json(self, json_obj['attributes'], context=context)
        return self
# NOTE(review): the line below looks like a truncated decorator — presumably
# `@numba.jit(nopython=False, fastmath=True, cache=True)`; confirm upstream.
(nopython=False, fastmath=True, cache=True)
def apply_bxmask(u_hat, mask):
    """Zero out spectral coefficients of ``u_hat`` where ``mask`` is 0, in place.

    The mask is broadcast against the trailing 1-4 axes of ``u_hat`` (explicit
    per-dimensionality loops keep the function numba-friendly); masks of other
    ranks fall back to plain elementwise multiplication. Returns ``u_hat``.
    A ``None`` mask leaves ``u_hat`` untouched.
    """
    if (mask is not None):
        N = mask.shape
        if (len(N) == 1):
            mask = np.broadcast_to(mask, u_hat.shape[(- 1)])
            for i in range(u_hat.shape[(- 1)]):
                if (mask[i] == 0):
                    u_hat[(..., i)] = 0
        elif (len(N) == 2):
            mask = np.broadcast_to(mask, u_hat.shape[(- 2):])
            for i in range(u_hat.shape[(- 2)]):
                for j in range(u_hat.shape[(- 1)]):
                    if (mask[(i, j)] == 0):
                        u_hat[(..., i, j)] = 0
        elif (len(N) == 3):
            mask = np.broadcast_to(mask, u_hat.shape[(- 3):])
            for i in range(u_hat.shape[(- 3)]):
                for j in range(u_hat.shape[(- 2)]):
                    for k in range(u_hat.shape[(- 1)]):
                        if (mask[(i, j, k)] == 0):
                            u_hat[(..., i, j, k)] = 0
        elif (len(N) == 4):
            mask = np.broadcast_to(mask, u_hat.shape[(- 4):])
            for i in range(u_hat.shape[(- 4)]):
                for j in range(u_hat.shape[(- 3)]):
                    for k in range(u_hat.shape[(- 2)]):
                        for l in range(u_hat.shape[(- 1)]):
                            if (mask[(i, j, k, l)] == 0):
                                u_hat[(..., i, j, k, l)] = 0
        else:
            # Rank > 4: rely on numpy broadcasting semantics directly.
            u_hat *= mask
    return u_hat
class FlaxSpeechEncoderDecoderModel(metaclass=DummyObject):
    """Import-time placeholder that errors helpfully when Flax is missing."""

    # Backends this dummy stands in for.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Raises with an installation hint for the missing backend.
        requires_backends(self, ['flax'])
def compute_reduced_graph(set_links):
    """Transitively reduce a link set, dropping links implied by a 2-hop path.

    A link (a, c) is removed when the transitive closure contains a -> b and
    b -> c for some intermediate b. Input order of the surviving
    (arg1, arg2, relation) triples is preserved.
    """
    node_of = utils.get_nodes(set_links)
    graph = TransitiveGraph(len(node_of))
    for src, dst, _relation in set_links:
        graph.add_edge(node_of[src], node_of[dst])
    closure = graph.transitive_closure()
    # Collect every pair reachable through an intermediate node.
    implied = set()
    for a, row in enumerate(closure):
        for b, reach_ab in enumerate(row):
            if a != b and reach_ab == 1:
                for c, reach_bc in enumerate(closure[b]):
                    if c != b and reach_bc == 1:
                        implied.add((a, c))
    return [link for link in set_links
            if (node_of[link[0]], node_of[link[1]]) not in implied]
def test_vid4_dataset():
    """SRVid4Dataset builds per-clip infos from the annotation file and
    averages metrics per clip ('clip') or per frame ('all'); invalid
    arguments raise."""
    root_path = (Path(__file__).parent.parent.parent / 'data')
    # Fake annotation: "<clip> <num_frames> <shape>" per line; file I/O mocked.
    txt_content = 'calendar 1 (320,480,3)\ncity 2 (320,480,3)\n'
    mocked_open_function = mock_open(read_data=txt_content)
    with patch('builtins.open', mocked_open_function):
        vid4_dataset = SRVid4Dataset(lq_folder=(root_path / 'lq'), gt_folder=(root_path / 'gt'), ann_file='fake_ann_file', num_input_frames=5, pipeline=[], scale=4, test_mode=False, metric_average_mode='clip', filename_tmpl='{:08d}')
        # One entry per frame: 1 for 'calendar', 2 for 'city'.
        assert (vid4_dataset.data_infos == [dict(lq_path=str((root_path / 'lq')), gt_path=str((root_path / 'gt')), key=osp.join('calendar', ''), num_input_frames=5, max_frame_num=1), dict(lq_path=str((root_path / 'lq')), gt_path=str((root_path / 'gt')), key=osp.join('city', ''), num_input_frames=5, max_frame_num=2), dict(lq_path=str((root_path / 'lq')), gt_path=str((root_path / 'gt')), key=osp.join('city', ''), num_input_frames=5, max_frame_num=2)])
        results = [{'eval_result': {'PSNR': 21, 'SSIM': 0.75}}, {'eval_result': {'PSNR': 22, 'SSIM': 0.8}}, {'eval_result': {'PSNR': 24, 'SSIM': 0.9}}]
        # 'clip' mode: mean of per-clip means -> (21 + (22+24)/2) / 2 = 22.
        eval_result = vid4_dataset.evaluate(results)
        np.testing.assert_almost_equal(eval_result['PSNR'], 22)
        np.testing.assert_almost_equal(eval_result['SSIM'], 0.8)
        vid4_dataset = SRVid4Dataset(lq_folder=(root_path / 'lq'), gt_folder=(root_path / 'gt'), ann_file='fake_ann_file', num_input_frames=5, pipeline=[], scale=4, test_mode=False, metric_average_mode='all', filename_tmpl='{:08d}')
        # 'all' mode: plain mean over frames -> 67/3 and 2.45/3.
        eval_result = vid4_dataset.evaluate(results)
        np.testing.assert_almost_equal(eval_result['PSNR'], 22.3333333)
        np.testing.assert_almost_equal(eval_result['SSIM'], 0.)
        # Invalid constructor/evaluate arguments must raise.
        with pytest.raises(AssertionError):
            # num_input_frames must be odd.
            SRVid4Dataset(lq_folder=root_path, gt_folder=root_path, ann_file='fake_ann_file', num_input_frames=6, pipeline=[], scale=4, test_mode=False)
        with pytest.raises(ValueError):
            # Unknown metric_average_mode.
            SRVid4Dataset(lq_folder=root_path, gt_folder=root_path, ann_file='fake_ann_file', num_input_frames=5, pipeline=[], scale=4, metric_average_mode='abc', test_mode=False)
        with pytest.raises(TypeError):
            # results must be a list.
            vid4_dataset.evaluate(results=5)
        with pytest.raises(AssertionError):
            # results length must match the dataset length.
            vid4_dataset.evaluate(results=[results[0]])
def get_filenames(data_root, task, sub_task, split=''):
    """Resolve dataset file paths for a CodeXGLUE-style task.

    Fix: an unrecognized ``task`` previously fell through to a confusing
    ``NameError`` on the undefined ``train_fn``; it now raises ``ValueError``.

    Args:
        data_root: root directory of the datasets.
        task: one of 'concode', 'summarize', 'refine*', 'translate*',
            'clone', 'defect'.
        sub_task: task-specific subdirectory/direction (e.g. language pair).
        split: 'train', 'dev' or 'test' to get a single path; any other value
            returns the (train, dev, test) triple.

    Returns:
        A single path string, or a (train_fn, dev_fn, test_fn) tuple.
        Paired-file tasks (refine/translate) return comma-joined src,tgt paths.

    Raises:
        ValueError: when ``task`` is not recognized.
    """
    if (task == 'concode'):
        data_dir = '{}/{}'.format(data_root, task)
        train_fn = '{}/train.json'.format(data_dir)
        dev_fn = '{}/dev.json'.format(data_dir)
        test_fn = '{}/test.json'.format(data_dir)
    elif (task == 'summarize'):
        data_dir = '{}/{}/{}'.format(data_root, task, sub_task)
        train_fn = '{}/train.jsonl'.format(data_dir)
        dev_fn = '{}/valid.jsonl'.format(data_dir)
        test_fn = '{}/test.jsonl'.format(data_dir)
    elif ('refine' in task):
        # Paired buggy/fixed files, comma-joined as "source,target".
        data_dir = '{}/{}/{}'.format(data_root, task, sub_task)
        train_fn = '{}/train.buggy-fixed.buggy,{}/train.buggy-fixed.fixed'.format(data_dir, data_dir)
        dev_fn = '{}/valid.buggy-fixed.buggy,{}/valid.buggy-fixed.fixed'.format(data_dir, data_dir)
        test_fn = '{}/test.buggy-fixed.buggy,{}/test.buggy-fixed.fixed'.format(data_dir, data_dir)
    elif ('translate' in task):
        # Direction decides which language is source vs target.
        data_dir = '{}/{}'.format(data_root, task)
        if (sub_task == 'cs-java'):
            train_fn = '{}/train.java-cs.txt.cs,{}/train.java-cs.txt.java'.format(data_dir, data_dir)
            dev_fn = '{}/valid.java-cs.txt.cs,{}/valid.java-cs.txt.java'.format(data_dir, data_dir)
            test_fn = '{}/test.java-cs.txt.cs,{}/test.java-cs.txt.java'.format(data_dir, data_dir)
        else:
            train_fn = '{}/train.java-cs.txt.java,{}/train.java-cs.txt.cs'.format(data_dir, data_dir)
            dev_fn = '{}/valid.java-cs.txt.java,{}/valid.java-cs.txt.cs'.format(data_dir, data_dir)
            test_fn = '{}/test.java-cs.txt.java,{}/test.java-cs.txt.cs'.format(data_dir, data_dir)
    elif (task == 'clone'):
        data_dir = '{}/{}'.format(data_root, task)
        train_fn = '{}/train.txt'.format(data_dir)
        dev_fn = '{}/valid.txt'.format(data_dir)
        test_fn = '{}/test.txt'.format(data_dir)
    elif (task == 'defect'):
        data_dir = '{}/{}'.format(data_root, task)
        train_fn = '{}/train.jsonl'.format(data_dir)
        dev_fn = '{}/valid.jsonl'.format(data_dir)
        test_fn = '{}/test.jsonl'.format(data_dir)
    else:
        raise ValueError('Unknown task: {}'.format(task))
    if (split == 'train'):
        return train_fn
    elif (split == 'dev'):
        return dev_fn
    elif (split == 'test'):
        return test_fn
    else:
        return (train_fn, dev_fn, test_fn)
def register_Ns3WaypointMobilityModel_methods(root_module, cls):
    """Register constructors and methods of ns3::WaypointMobilityModel on `cls`.

    Generated pybindgen registration code; `param` is the generator's helper
    describing one C++ parameter (type string, name). Statement order follows
    the generator's output and only comments were added in review.
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::WaypointMobilityModel const &', 'arg0')])
    cls.add_constructor([])
    # Public waypoint API.
    cls.add_method('AddWaypoint', 'void', [param('ns3::Waypoint const &', 'waypoint')])
    cls.add_method('EndMobility', 'void', [])
    cls.add_method('GetNextWaypoint', 'ns3::Waypoint', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('WaypointsLeft', 'uint32_t', [], is_const=True)
    # Private virtual overrides (inherited mobility-model hooks).
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('DoGetPosition', 'ns3::Vector', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetVelocity', 'ns3::Vector', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoSetPosition', 'void', [param('ns3::Vector const &', 'position')], visibility='private', is_virtual=True)
    cls.add_method('Update', 'void', [], is_const=True, visibility='private', is_virtual=True)
    return
# NOTE(review): the decorator name on the next line was lost in extraction —
# this tuple is the argument list of what is presumably
# @unittest.skipIf(condition, reason); confirm against the original file.
# As written it is a no-op expression statement.
((device_cc() < 90), 'Device compute capability is insufficient for SM90 tests.')
class GemmF16Sm90(unittest.TestCase):
    """Empty SM90 FP16 GEMM test case; test methods appear to be attached elsewhere."""
    pass
def main():
    """Run one worker of the olfaction case study.

    Parses a feature-ID range plus descriptor options, fits/loads a forest
    model for the chosen descriptor, computes p-values for the selected
    features in a process pool, applies Benjamini-Hochberg FDR control, and
    prints the discoveries as LaTeX table rows.
    """
    parser = argparse.ArgumentParser(description='Worker script for the case study.')
    # Odor descriptors a worker may target (choices for --descriptor).
    descriptors = ['Bakery', 'Sour', 'Intensity', 'Sweet', 'Burnt', 'Pleasantness', 'Fish', 'Fruit', 'Garlic', 'Spices', 'Cold', 'Acid', 'Warm', 'Musky', 'Sweaty', 'Ammonia', 'Decayed', 'Wood', 'Grass', 'Flower', 'Chemical']
    parser.add_argument('min_feature', type=int, help='Min Feature ID')
    parser.add_argument('max_feature', type=int, help='Max Feature ID')
    parser.add_argument('--descriptor', choices=descriptors, default='Bakery', help='The descriptor type to get p-values for.')
    parser.add_argument('--nthreads', type=int, default=4, help='Number of parallel workers to run.')
    parser.add_argument('--fdr_threshold', type=float, default=0.2, help='Target false discovery rate.')
    parser.add_argument('--importance_threshold', type=float, default=0.001, help='Minimum heuristic feature importance to make a feature test-worthy.')
    args = parser.parse_args()
    dargs = vars(args)  # NOTE(review): unused in the visible code — confirm before removing.
    # Keep torch single-threaded; parallelism comes from the process pool below.
    torch.set_num_threads(1)
    print('Loading data')
    # NOTE: rebinds `descriptors` to whatever load_olfaction returns.
    (X, Y, descriptors, target_features) = load_olfaction()
    features = X.columns
    print('Loading model')
    (x, y, forest_model) = load_or_fit_model(args.descriptor, X, Y)
    # Force each sub-model to run single-threaded inside pool workers.
    for m in forest_model.models:
        m.n_jobs = 1
    model_weights = get_model_weights(forest_model)
    print('Total: {}'.format(len(X.columns)))
    # One job per feature in [min_feature, max_feature] whose heuristic
    # importance clears the threshold (slice end is inclusive).
    jobs = [(target_feature, x, y, features, forest_model, model_weights, args.descriptor) for (target_feature, importance) in zip(X.columns[args.min_feature:(args.max_feature + 1)], model_weights[args.min_feature:(args.max_feature + 1)]) if (importance >= args.importance_threshold)]
    print('Running {} jobs'.format(len(jobs)))
    with Pool(args.nthreads, initializer=seed_fn) as pool:
        p_values = np.array(pool.map(run_parallel, jobs))
    # Benjamini-Hochberg selection at the requested FDR.
    discoveries = bh(p_values, args.fdr_threshold)
    discovery_genes = features[discoveries]
    discovery_p = p_values[discoveries]
    discovery_weights = model_weights[discoveries]
    # Report discoveries in descending model-weight order as LaTeX rows.
    order = np.argsort(discovery_weights)[::(- 1)]
    print('')
    print('Molecular Feature & Model Weight & $p$-value \\\\')
    for (g, w, p) in zip(discovery_genes[order], discovery_weights[order], discovery_p[order]):
        print('{} & {} & {} \\\\'.format(g, w, p))
class Convkxk(nn.Module):
    """Conv2d (bias-free) -> BatchNorm2d -> ReLU block.

    Convolution weights get He-style initialization scaled by fan-out;
    batch-norm affine parameters start as the identity (weight 1, bias 0).
    """

    def __init__(self, in_planes, out_planes, kernel_size=1, stride=1, padding=0):
        super(Convkxk, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(out_planes)
        self.relu = nn.ReLU(inplace=True)
        # Initialize every submodule: He init for convs, identity for BN.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return self.relu(out)
# NOTE(review): the decorator name on the next line was lost in extraction —
# presumably @register_processor(name=TOKENIZE); confirm against the original.
_processor(name=TOKENIZE)
class TokenizeProcessor(UDProcessor):
    """Pipeline processor that tokenizes raw text (and optionally splits sentences).

    Supports three modes: neural tokenization via a trained model, a
    pass-through 'pretokenized' mode, and delegation to a `_variant`
    tokenizer when one is configured.
    """
    # Annotations this processor provides / requires in the pipeline.
    PROVIDES_DEFAULT = set([TOKENIZE])
    REQUIRES_DEFAULT = set([])
    # Default cap on token length; longer tokens are replaced after prediction.
    MAX_SEQ_LENGTH_DEFAULT = 1000

    def _set_up_model(self, config, pipeline, device):
        """Load the tokenizer model (unless pretokenized) and validate the postprocessor."""
        if config.get('pretokenized'):
            self._trainer = None
        else:
            self._trainer = Trainer(model_file=config['model_path'], device=device)
        postprocessor = config.get('postprocessor')
        if (postprocessor and callable(postprocessor)):
            self._postprocessor = postprocessor
        elif (not postprocessor):
            self._postprocessor = None
        else:
            # Runtime string kept verbatim (including the 'recieved' typo).
            raise ValueError(("Tokenizer recieved 'postprocessor' option of unrecognized type; postprocessor must be callable. Got %s" % postprocessor))

    def process_pre_tokenized_text(self, input_src):
        """Build a document structure from already-tokenized input.

        `input_src` is either a string (one sentence per line, tokens
        whitespace-separated) or a list of token lists. Returns
        `(raw_text, document)` where raw_text joins tokens with single
        spaces and the document carries per-token char offsets.
        """
        document = []
        if isinstance(input_src, str):
            sentences = [sent.strip().split() for sent in input_src.strip().split('\n') if (len(sent.strip()) > 0)]
        elif isinstance(input_src, list):
            sentences = input_src
        # NOTE(review): any other input type leaves `sentences` unbound and
        # raises NameError below — confirm callers only pass str/list.
        idx = 0
        for sentence in sentences:
            sent = []
            for (token_id, token) in enumerate(sentence):
                # IDs are 1-based; offsets assume a single space between tokens.
                sent.append({doc.ID: ((token_id + 1),), doc.TEXT: token, doc.MISC: f'start_char={idx}|end_char={(idx + len(token))}'})
                idx += (len(token) + 1)
            document.append(sent)
        raw_text = ' '.join([' '.join(sentence) for sentence in sentences])
        return (raw_text, document)

    def process(self, document):
        """Tokenize one input (str or Document) and return a Document."""
        if (not (isinstance(document, str) or isinstance(document, doc.Document) or (self.config.get('pretokenized') or self.config.get('no_ssplit', False)))):
            raise ValueError(("If neither 'pretokenized' or 'no_ssplit' option is enabled, the input to the TokenizerProcessor must be a string or a Document object. Got %s" % str(type(document))))
        if isinstance(document, doc.Document):
            # Pretokenized Documents pass through untouched.
            if self.config.get('pretokenized'):
                return document
            document = document.text
        if self.config.get('pretokenized'):
            (raw_text, document) = self.process_pre_tokenized_text(document)
            return doc.Document(document, raw_text)
        # Delegate to an alternative tokenizer implementation if configured.
        if hasattr(self, '_variant'):
            return self._variant.process(document)
        raw_text = ('\n\n'.join(document) if isinstance(document, list) else document)
        max_seq_len = self.config.get('max_seqlen', TokenizeProcessor.MAX_SEQ_LENGTH_DEFAULT)
        batches = TokenizationDataset(self.config, input_text=raw_text, vocab=self.vocab, evaluation=True, dictionary=self.trainer.dictionary)
        with torch.no_grad():
            (_, _, _, document) = output_predictions(None, self.trainer, batches, self.vocab, None, max_seq_len, orig_text=raw_text, no_ssplit=self.config.get('no_ssplit', False), num_workers=self.config.get('num_workers', 0), postprocessor=self._postprocessor)
        # Guard against pathologically long tokens in the model output.
        for sentence in document:
            for token in sentence:
                if (len(token['text']) > max_seq_len):
                    token['text'] = TOKEN_TOO_LONG_REPLACEMENT
        return doc.Document(document, raw_text)

    def bulk_process(self, docs):
        """Tokenize many Documents in one model pass, then split the result back.

        Concatenates all texts with '\\n\\n', processes once, and reassigns
        sentences to their source documents by comparing token end offsets,
        shifting char offsets back into per-document coordinates.
        """
        if hasattr(self, '_variant'):
            return self._variant.bulk_process(docs)
        if self.config.get('pretokenized'):
            res = []
            for document in docs:
                (raw_text, document) = self.process_pre_tokenized_text(document.text)
                res.append(doc.Document(document, raw_text))
            return res
        combined_text = '\n\n'.join([thisdoc.text for thisdoc in docs])
        processed_combined = self.process(doc.Document([], text=combined_text))
        charoffset = 0
        sentst = senten = 0
        for thisdoc in docs:
            # Advance `senten` past every sentence whose last token still
            # falls within the current document's text span.
            while ((senten < len(processed_combined.sentences)) and ((processed_combined.sentences[senten].tokens[(- 1)].end_char - charoffset) <= len(thisdoc.text))):
                senten += 1
            sentences = processed_combined.sentences[sentst:senten]
            thisdoc.sentences = sentences
            for sent in sentences:
                # Re-parent and shift offsets into this document's coordinates.
                sent._doc = thisdoc
                for token in sent.tokens:
                    token._start_char -= charoffset
                    token._end_char -= charoffset
                    if token.words:
                        for word in token.words:
                            word._start_char -= charoffset
                            word._end_char -= charoffset
            thisdoc.num_tokens = sum((len(sent.tokens) for sent in sentences))
            thisdoc.num_words = sum((len(sent.words) for sent in sentences))
            sentst = senten
            # +2 for the '\n\n' separator inserted between documents.
            charoffset += (len(thisdoc.text) + 2)
        return docs
class TestDQN(TfGraphTestCase):
    """Integration tests training DQN on CartPole-v0 under a LocalTFRunner.

    NOTE(review): the "@pytest.mark" prefix of the ".large" decorators below
    was lost in extraction; nesting of the pickle test's with-blocks was
    reconstructed from the collapsed source — confirm against the original.
    """

    .large
    def test_dqn_cartpole(self):
        """Vanilla DQN (no double-Q) should reach avg return > 15."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            n_epochs = 10
            steps_per_epoch = 10
            sampler_batch_size = 500
            num_timesteps = ((n_epochs * steps_per_epoch) * sampler_batch_size)
            env = GarageEnv(gym.make('CartPole-v0'))
            replay_buffer = PathBuffer(capacity_in_transitions=int(10000.0))
            qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
            policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
            epilson_greedy_policy = EpsilonGreedyPolicy(env_spec=env.spec, policy=policy, total_timesteps=num_timesteps, max_epsilon=1.0, min_epsilon=0.02, decay_ratio=0.1)
            algo = DQN(env_spec=env.spec, policy=policy, qf=qf, exploration_policy=epilson_greedy_policy, replay_buffer=replay_buffer, qf_lr=0.0001, discount=1.0, min_buffer_size=int(1000.0), double_q=False, n_train_steps=500, steps_per_epoch=steps_per_epoch, target_network_update_freq=1, buffer_batch_size=32)
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
            assert (last_avg_ret > 15)
            env.close()

    .large
    def test_dqn_cartpole_double_q(self):
        """Same setup with double-Q learning enabled."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            n_epochs = 10
            steps_per_epoch = 10
            sampler_batch_size = 500
            num_timesteps = ((n_epochs * steps_per_epoch) * sampler_batch_size)
            env = GarageEnv(gym.make('CartPole-v0'))
            replay_buffer = PathBuffer(capacity_in_transitions=int(10000.0))
            qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
            policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
            epilson_greedy_policy = EpsilonGreedyPolicy(env_spec=env.spec, policy=policy, total_timesteps=num_timesteps, max_epsilon=1.0, min_epsilon=0.02, decay_ratio=0.1)
            algo = DQN(env_spec=env.spec, policy=policy, qf=qf, exploration_policy=epilson_greedy_policy, replay_buffer=replay_buffer, qf_lr=0.0001, discount=1.0, min_buffer_size=int(1000.0), double_q=True, n_train_steps=500, steps_per_epoch=steps_per_epoch, target_network_update_freq=1, buffer_batch_size=32)
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
            assert (last_avg_ret > 15)
            env.close()

    .large
    def test_dqn_cartpole_grad_clip(self):
        """Gradient clipping at 5.0; lower return bar (> 13)."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            n_epochs = 10
            steps_per_epoch = 10
            sampler_batch_size = 500
            num_timesteps = ((n_epochs * steps_per_epoch) * sampler_batch_size)
            env = GarageEnv(gym.make('CartPole-v0'))
            replay_buffer = PathBuffer(capacity_in_transitions=int(10000.0))
            qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
            policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
            epilson_greedy_policy = EpsilonGreedyPolicy(env_spec=env.spec, policy=policy, total_timesteps=num_timesteps, max_epsilon=1.0, min_epsilon=0.02, decay_ratio=0.1)
            algo = DQN(env_spec=env.spec, policy=policy, qf=qf, exploration_policy=epilson_greedy_policy, replay_buffer=replay_buffer, qf_lr=0.0001, discount=1.0, min_buffer_size=int(1000.0), double_q=False, n_train_steps=500, grad_norm_clipping=5.0, steps_per_epoch=steps_per_epoch, target_network_update_freq=1, buffer_batch_size=32)
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
            assert (last_avg_ret > 13)
            env.close()

    def test_dqn_cartpole_pickle(self):
        """Pickling/unpickling the algorithm must preserve Q-network weights."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            n_epochs = 10
            steps_per_epoch = 10
            sampler_batch_size = 500
            num_timesteps = ((n_epochs * steps_per_epoch) * sampler_batch_size)
            env = GarageEnv(gym.make('CartPole-v0'))
            replay_buffer = PathBuffer(capacity_in_transitions=int(10000.0))
            qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
            policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
            epilson_greedy_policy = EpsilonGreedyPolicy(env_spec=env.spec, policy=policy, total_timesteps=num_timesteps, max_epsilon=1.0, min_epsilon=0.02, decay_ratio=0.1)
            algo = DQN(env_spec=env.spec, policy=policy, qf=qf, exploration_policy=epilson_greedy_policy, replay_buffer=replay_buffer, qf_lr=0.0001, discount=1.0, min_buffer_size=int(1000.0), double_q=False, n_train_steps=500, grad_norm_clipping=5.0, steps_per_epoch=steps_per_epoch, target_network_update_freq=1, buffer_batch_size=32)
            runner.setup(algo, env)
            # Overwrite one bias with a known value so equality after
            # round-tripping is meaningful.
            with tf.compat.v1.variable_scope('DiscreteMLPQFunction/MLPModel/mlp/hidden_0', reuse=True):
                bias = tf.compat.v1.get_variable('bias')
                old_bias = tf.ones_like(bias).eval()
                bias.load(old_bias)
            h = pickle.dumps(algo)
            # Unpickle into a fresh graph and compare the restored bias.
            with tf.compat.v1.Session(graph=tf.Graph()):
                pickle.loads(h)
                with tf.compat.v1.variable_scope('DiscreteMLPQFunction/MLPModel/mlp/hidden_0', reuse=True):
                    new_bias = tf.compat.v1.get_variable('bias')
                    new_bias = new_bias.eval()
                    assert np.array_equal(old_bias, new_bias)
            env.close()
def add_preprocess_arguments(parser):
    """Attach preprocessing options to `parser`.

    Adds the three entity-form flags (encoder input, decoder input, decoder
    target), plus cache location/behavior and the vocab-mappings path.
    """
    entity_form_flags = [
        ('--entity-encoding-form', 'Input entity form to the encoder'),
        ('--entity-decoding-form', 'Input entity form to the decoder'),
        ('--entity-target-form', 'Output entity form to the decoder'),
    ]
    # The three entity-form flags share the same choices and default.
    for flag, help_text in entity_form_flags:
        parser.add_argument(flag, choices=['canonical', 'type'],
                            default='canonical', help=help_text)
    parser.add_argument('--cache', default='.cache',
                        help='Path to cache for preprocessed batches')
    parser.add_argument('--ignore-cache', action='store_true',
                        help='Ignore existing cache')
    parser.add_argument('--mappings', help='Path to vocab mappings')
class AlbertForMultipleChoice():
    """Placeholder used when PyTorch is not installed.

    Every entry point calls `requires_pytorch` (defined elsewhere in this
    file), which raises an informative error instead of failing with an
    opaque ImportError at import time.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): defined as an instance method here, so it can only be
    # reached on an instance (whose __init__ already raises) — confirm
    # whether this was meant to be a classmethod.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
class Partition22(nn.Module):
    """Auto-generated pipeline-parallel partition #22 of a T5 model.

    Wraps decoder blocks 4 (tail), 5 and 6 (through the FF `wo` projection)
    as a flat module whose submodules are pulled from a pre-traced `layers`
    dict keyed by scope strings. Generated code — statement order in
    `forward` encodes the traced dataflow and must not be changed; only
    comments were added in review.
    """
    # Traced scope names, in execution order; l_0..l_43 map onto these.
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/T5Block[4]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[4]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[4]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[4]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[4]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[4]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[4]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]']
    # No free parameters/buffers are assigned to this partition.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:22'):
        """Bind traced layers/tensors to this partition and move it to `device`."""
        super().__init__()
        # Register each traced layer under a stable generated name l_<idx>.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Six flat inputs are expected by forward (see unflatten below).
        self.input_structure = [1, 1, 1, 1, 1, 1]
        # Generated-name -> original-model-name mapping (used for state dicts).
        self.lookup = {'l_0': 'decoder.4.1.EncDecAttention.o', 'l_1': 'decoder.4.1.dropout', 'l_2': 'decoder.4.2.layer_norm', 'l_3': 'decoder.4.2.DenseReluDense.wi', 'l_4': 'decoder.4.2.DenseReluDense.dropout', 'l_5': 'decoder.4.2.DenseReluDense.wo', 'l_6': 'decoder.4.2.dropout', 'l_7': 'decoder.5.0.layer_norm', 'l_8': 'decoder.5.0.SelfAttention.q', 'l_9': 'decoder.5.0.SelfAttention.k', 'l_10': 'decoder.5.0.SelfAttention.v', 'l_11': 'decoder.5.0.SelfAttention.dropout', 'l_12': 'decoder.5.0.SelfAttention.o', 'l_13': 'decoder.5.0.dropout', 'l_14': 'decoder.5.1.layer_norm', 'l_15': 'decoder.5.1.EncDecAttention.q', 'l_16': 'decoder.5.1.EncDecAttention.k', 'l_17': 'decoder.5.1.EncDecAttention.v', 'l_18': 'decoder.5.1.EncDecAttention.dropout', 'l_19': 'decoder.5.1.EncDecAttention.o', 'l_20': 'decoder.5.1.dropout', 'l_21': 'decoder.5.2.layer_norm', 'l_22': 'decoder.5.2.DenseReluDense.wi', 'l_23': 'decoder.5.2.DenseReluDense.dropout', 'l_24': 'decoder.5.2.DenseReluDense.wo', 'l_25': 'decoder.5.2.dropout', 'l_26': 'decoder.6.0.layer_norm', 'l_27': 'decoder.6.0.SelfAttention.q', 'l_28': 'decoder.6.0.SelfAttention.k', 'l_29': 'decoder.6.0.SelfAttention.v', 'l_30': 'decoder.6.0.SelfAttention.dropout', 'l_31': 'decoder.6.0.SelfAttention.o', 'l_32': 'decoder.6.0.dropout', 'l_33': 'decoder.6.1.layer_norm', 'l_34': 'decoder.6.1.EncDecAttention.q', 'l_35': 'decoder.6.1.EncDecAttention.k', 'l_36': 'decoder.6.1.EncDecAttention.v', 'l_37': 'decoder.6.1.EncDecAttention.dropout', 'l_38': 'decoder.6.1.EncDecAttention.o', 'l_39': 'decoder.6.1.dropout', 'l_40': 'decoder.6.2.layer_norm', 'l_41': 'decoder.6.2.DenseReluDense.wi', 'l_42': 'decoder.6.2.DenseReluDense.dropout', 'l_43': 'decoder.6.2.DenseReluDense.wo'}
        self.to(self.device)

    def forward(self, *args):
        """Run the traced partition; x0..x5 are the pipeline's flat inputs.

        NOTE(review): the attention reshapes use hard-coded 32 heads x 128
        head-dim (hidden 4096); x1/x2 look like additive self/cross attention
        position-bias+mask tensors — confirm against the traced model.
        """
        (x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
        # Cross-attention key/value projections for blocks 5 and 6, computed
        # up front from the shared encoder states x0.
        t_0 = self.l_16(x0)
        t_1 = self.l_17(x0)
        t_2 = self.l_35(x0)
        t_3 = self.l_36(x0)
        # --- tail of decoder block 4: cross-attn output proj + FF ---
        t_4 = x5.view(x4, (- 1), 4096)
        t_4 = self.l_0(t_4)
        t_4 = self.l_1(t_4)
        t_4 = (x3 + t_4)
        t_5 = self.l_2(t_4)
        t_5 = self.l_3(t_5)
        t_5 = torch.nn.functional.relu(t_5, inplace=False)
        t_5 = self.l_4(t_5)
        t_5 = self.l_5(t_5)
        t_5 = self.l_6(t_5)
        t_5 = (t_4 + t_5)
        # --- decoder block 5: self-attention ---
        t_4 = self.l_7(t_5)
        t_6 = t_4.size()
        t_7 = self.l_8(t_4)
        t_8 = self.l_9(t_4)
        t_4 = self.l_10(t_4)
        t_6 = t_6[0]
        t_7 = t_7.view(t_6, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_8 = t_8.view(t_6, (- 1), 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_4 = t_4.view(t_6, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_7, t_8)
        t_8 += x1
        t_7 = t_8.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None)
        t_8 = t_7.type_as(t_8)
        t_8 = self.l_11(t_8)
        t_4 = torch.matmul(t_8, t_4)
        t_4 = t_4.transpose(1, 2)
        t_4 = t_4.contiguous()
        t_6 = t_4.view(t_6, (- 1), 4096)
        t_6 = self.l_12(t_6)
        t_6 = self.l_13(t_6)
        t_6 = (t_5 + t_6)
        # --- decoder block 5: cross-attention ---
        t_5 = self.l_14(t_6)
        t_4 = t_5.size()
        t_5 = self.l_15(t_5)
        t_4 = t_4[0]
        t_5 = t_5.view(t_4, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_0 = t_0.view(t_4, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_4, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_5, t_0)
        t_0 += x2
        t_5 = t_0.float()
        t_5 = torch.nn.functional.softmax(t_5, dim=(- 1), _stacklevel=3, dtype=None)
        t_0 = t_5.type_as(t_0)
        t_0 = self.l_18(t_0)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_4 = t_1.view(t_4, (- 1), 4096)
        t_4 = self.l_19(t_4)
        t_4 = self.l_20(t_4)
        t_4 = (t_6 + t_4)
        # --- decoder block 5: feed-forward ---
        t_6 = self.l_21(t_4)
        t_6 = self.l_22(t_6)
        t_6 = torch.nn.functional.relu(t_6, inplace=False)
        t_6 = self.l_23(t_6)
        t_6 = self.l_24(t_6)
        t_6 = self.l_25(t_6)
        t_6 = (t_4 + t_6)
        # --- decoder block 6: self-attention ---
        t_4 = self.l_26(t_6)
        t_1 = t_4.size()
        t_0 = self.l_27(t_4)
        t_5 = self.l_28(t_4)
        t_4 = self.l_29(t_4)
        t_1 = t_1[0]
        t_0 = t_0.view(t_1, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_5 = t_5.view(t_1, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.view(t_1, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_0, t_5)
        t_5 += x1
        t_0 = t_5.float()
        t_0 = torch.nn.functional.softmax(t_0, dim=(- 1), _stacklevel=3, dtype=None)
        t_5 = t_0.type_as(t_5)
        t_5 = self.l_30(t_5)
        t_4 = torch.matmul(t_5, t_4)
        t_4 = t_4.transpose(1, 2)
        t_4 = t_4.contiguous()
        t_1 = t_4.view(t_1, (- 1), 4096)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = (t_6 + t_1)
        # --- decoder block 6: cross-attention ---
        t_6 = self.l_33(t_1)
        t_4 = t_6.size()
        t_6 = self.l_34(t_6)
        t_4 = t_4[0]
        t_6 = t_6.view(t_4, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_4, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_4, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)
        t_2 += x2
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_6.type_as(t_2)
        t_2 = self.l_37(t_2)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_4 = t_3.view(t_4, (- 1), 4096)
        t_4 = self.l_38(t_4)
        t_4 = self.l_39(t_4)
        t_4 = (t_1 + t_4)
        # --- decoder block 6: feed-forward up to wo (rest in next partition) ---
        t_1 = self.l_40(t_4)
        t_1 = self.l_41(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_42(t_1)
        t_1 = self.l_43(t_1)
        return list(flatten((x0, x1, x2, t_4, t_1)))

    # Delegation to module-level helpers that translate generated names
    # (l_*/p_*/b_*) back to the original model's names via self.lookup.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
# NOTE(review): the decorator name on the next line was lost in extraction —
# presumably @register_model_architecture; confirm against the original file.
_model_architecture('transformer_lm', 'transformer_lm_gpt')
def transformer_lm_gpt(args):
    """Apply GPT-style defaults (768d embed, 3072d FFN, 12 layers/heads, GELU)."""
    gpt_defaults = (
        ('decoder_embed_dim', 768),
        ('decoder_ffn_embed_dim', 3072),
        ('decoder_layers', 12),
        ('decoder_attention_heads', 12),
        ('dropout', 0.1),
        ('attention_dropout', 0.1),
        ('activation_fn', 'gelu'),
    )
    # Fill in each hyper-parameter only when the caller has not set it.
    for attr_name, default in gpt_defaults:
        setattr(args, attr_name, getattr(args, attr_name, default))
    base_lm_architecture(args)
class NamedArgument():
    """Pairs an argument value with the keyword name it is bound to.

    Attributes:
        name: The keyword name.
        arg: The bound argument.
    """

    # `Argument` is quoted as a forward reference so the annotation is not
    # evaluated eagerly at definition time (avoids a NameError if Argument
    # is defined later or only imported for type checking).
    def __init__(self, name: str, arg: 'Argument'):
        self.name = name
        self.arg = arg

    def __repr__(self) -> str:
        # Debug-friendly representation; mirrors the constructor call.
        return 'NamedArgument(name={!r}, arg={!r})'.format(self.name, self.arg)