import torch.nn as nn
from torch.nn import init


class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()

    def _set_init_defaults(self, init_type, kwargs):
        # Per-initializer default hyperparameters; user-supplied kwargs take precedence.
        defaults = {
            'normal_': {'mean': 0.0, 'std': 0.2},
            'xavier_normal_': {'gain': 0.2},
            'xavier_uniform_': {'gain': 1.0},
            'kaiming_normal_': {'a': 0.0, 'mode': 'fan_in'},
            'orthogonal_': {'gain': 0.2},
        }
        for k, v in defaults.get(init_type, {}).items():
            kwargs.setdefault(k, v)
        return kwargs

    def init_weights(self, init_type='xavier_normal_', **kwargs):
        kwargs = self._set_init_defaults(init_type, kwargs)

        def init_func(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm2d') != -1:
                if hasattr(m, 'weight') and m.weight is not None:
                    init.normal_(m.weight.data, 1.0, kwargs.get('std', 0.2))
                if hasattr(m, 'bias') and m.bias is not None:
                    init.constant_(m.bias.data, 0.0)
            elif hasattr(m, 'weight') and (classname.startswith('Conv') or classname.startswith('Linear')):
                if init_type.lower() == 'default':
                    m.reset_parameters()
                elif hasattr(init, init_type):
                    getattr(init, init_type)(m.weight.data, **kwargs)
                else:
                    raise NotImplementedError(f'initialization method `{init_type}` is not implemented')
                if hasattr(m, 'bias') and m.bias is not None:
                    init.constant_(m.bias.data, 0.0)
            elif hasattr(m, 'reset_parameters'):
                m.reset_parameters()

        self.apply(init_func)
        # Let children with their own init_weights re-initialize themselves.
        for m in self.children():
            if hasattr(m, 'init_weights'):
                m.init_weights(init_type, **kwargs)

    def build_net(self):
        pass

    def forward(self):
        pass
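# A minimal usage sketch for the initializer above. `SimpleDiscriminator` and its
# layer sizes are hypothetical, invented here purely for illustration.
class SimpleDiscriminator(Discriminator):
    def __init__(self):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(3, 64, 4, stride=2, padding=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 1, 4),
        )

    def forward(self, x):
        return self.body(x)

d = SimpleDiscriminator()
d.init_weights('kaiming_normal_')        # picks up the kaiming defaults above
d.init_weights('orthogonal_', gain=0.5)  # explicit kwargs override the defaults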
class EvalHistory(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _EVALHISTORY
def clean_pl_pesel(
    df: Union[pd.DataFrame, dd.DataFrame],
    column: str,
    output_format: str = 'standard',
    inplace: bool = False,
    errors: str = 'coerce',
    progress: bool = True,
) -> pd.DataFrame:
    if output_format not in {'compact', 'standard', 'birthdate', 'gender'}:
        raise ValueError(
            f'output_format {output_format} is invalid. '
            'It needs to be "compact", "standard", "birthdate" or "gender".'
        )
    df = to_dask(df)
    # Each partition maps every value to a tuple of cleaning results.
    df['clean_code_tup'] = df[column].map_partitions(
        lambda srs: [_format(x, output_format, errors) for x in srs], meta=object
    )
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': f'{column}_clean'})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean')
        df = df.rename(columns={column: f'{column}_clean'})
    with ProgressBar(minimum=1, disable=not progress):
        df = df.compute()
    return df
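# Usage sketch for the cleaner above. It assumes the helpers it calls (`to_dask`,
# `_format`, `ProgressBar`) are in scope; the sample values are illustrative only.
import pandas as pd

df = pd.DataFrame({'pesel': ['44051401359', 'not-a-pesel', None]})
cleaned = clean_pl_pesel(df, 'pesel', output_format='standard')
print(cleaned[['pesel', 'pesel_clean']])  # invalid entries become NaN with errors='coerce'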
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_radius_graph(dtype, device):
    x = tensor([[-1, -1], [-1, +1], [+1, +1], [+1, -1]], dtype, device)

    edge_index = radius_graph(x, r=2.5, flow='target_to_source')
    assert to_set(edge_index) == {(0, 1), (0, 3), (1, 0), (1, 2), (2, 1), (2, 3), (3, 0), (3, 2)}

    edge_index = radius_graph(x, r=2.5, flow='source_to_target')
    assert to_set(edge_index) == {(1, 0), (3, 0), (0, 1), (2, 1), (1, 2), (3, 2), (0, 3), (2, 3)}

    jit = torch.jit.script(radius_graph)
    edge_index = jit(x, r=2.5, flow='source_to_target')
    assert to_set(edge_index) == {(1, 0), (3, 0), (0, 1), (2, 1), (1, 2), (3, 2), (0, 3), (2, 3)}
class TestDB(unittest.TestCase):
    def testPicklable(self):
        s = schema.Struct(
            ('field1', schema.Scalar(dtype=np.int32)),
            ('field2', schema.List(schema.Scalar(dtype=str))),
        )
        s2 = pickle.loads(pickle.dumps(s))
        for r in (s, s2):
            self.assertTrue(isinstance(r.field1, schema.Scalar))
            self.assertTrue(isinstance(r.field2, schema.List))
            self.assertTrue(getattr(r, 'non_existent', None) is None)

    def testListSubclassClone(self):
        class Subclass(schema.List):
            pass
        s = Subclass(schema.Scalar())
        clone = s.clone()
        self.assertIsInstance(clone, Subclass)
        self.assertEqual(s, clone)
        self.assertIsNot(clone, s)

    def testListWithEvictedSubclassClone(self):
        class Subclass(schema.ListWithEvicted):
            pass
        s = Subclass(schema.Scalar())
        clone = s.clone()
        self.assertIsInstance(clone, Subclass)
        self.assertEqual(s, clone)
        self.assertIsNot(clone, s)

    def testStructSubclassClone(self):
        class Subclass(schema.Struct):
            pass
        s = Subclass(('a', schema.Scalar()))
        clone = s.clone()
        self.assertIsInstance(clone, Subclass)
        self.assertEqual(s, clone)
        self.assertIsNot(clone, s)

    def testNormalizeField(self):
        s = schema.Struct(('field1', np.int32), ('field2', str))
        self.assertEqual(
            s,
            schema.Struct(
                ('field1', schema.Scalar(dtype=np.int32)),
                ('field2', schema.Scalar(dtype=str)),
            ),
        )

    def testTuple(self):
        s = schema.Tuple(np.int32, str, np.float32)
        s2 = schema.Struct(
            ('field_0', schema.Scalar(dtype=np.int32)),
            ('field_1', schema.Scalar(dtype=str)),
            ('field_2', schema.Scalar(dtype=np.float32)),
        )
        self.assertEqual(s, s2)
        self.assertEqual(s[0], schema.Scalar(dtype=np.int32))
        self.assertEqual(s[1], schema.Scalar(dtype=str))
        self.assertEqual(s[2], schema.Scalar(dtype=np.float32))
        self.assertEqual(
            s[2, 0],
            schema.Struct(
                ('field_2', schema.Scalar(dtype=np.float32)),
                ('field_0', schema.Scalar(dtype=np.int32)),
            ),
        )
        for i, (v1, v2) in enumerate(zip(s, s2)):
            self.assertEqual(v1, v2)
            self.assertEqual(s[i], v1)
            self.assertEqual(s2[i], v1)

    def testRawTuple(self):
        s = schema.RawTuple(2)
        self.assertEqual(
            s, schema.Struct(('field_0', schema.Scalar()), ('field_1', schema.Scalar()))
        )
        self.assertEqual(s[0], schema.Scalar())
        self.assertEqual(s[1], schema.Scalar())

    def testStructIndexing(self):
        s = schema.Struct(
            ('field1', schema.Scalar(dtype=np.int32)),
            ('field2', schema.List(schema.Scalar(dtype=str))),
            ('field3', schema.Struct()),
        )
        self.assertEqual(s['field2'], s.field2)
        self.assertEqual(s['field2'], schema.List(schema.Scalar(dtype=str)))
        self.assertEqual(s['field3'], schema.Struct())
        self.assertEqual(
            s['field2', 'field1'],
            schema.Struct(
                ('field2', schema.List(schema.Scalar(dtype=str))),
                ('field1', schema.Scalar(dtype=np.int32)),
            ),
        )

    def testListInStructIndexing(self):
        a = schema.List(schema.Scalar(dtype=str))
        s = schema.Struct(('field1', schema.Scalar(dtype=np.int32)), ('field2', a))
        self.assertEqual(s['field2:lengths'], a.lengths)
        self.assertEqual(s['field2:values'], a.items)
        with self.assertRaises(KeyError):
            s['fields2:items:non_existent']
        with self.assertRaises(KeyError):
            s['fields2:non_existent']

    def testListWithEvictedInStructIndexing(self):
        a = schema.ListWithEvicted(schema.Scalar(dtype=str))
        s = schema.Struct(('field1', schema.Scalar(dtype=np.int32)), ('field2', a))
        self.assertEqual(s['field2:lengths'], a.lengths)
        self.assertEqual(s['field2:values'], a.items)
        self.assertEqual(s['field2:_evicted_values'], a._evicted_values)
        with self.assertRaises(KeyError):
            s['fields2:items:non_existent']
        with self.assertRaises(KeyError):
            s['fields2:non_existent']

    def testMapInStructIndexing(self):
        a = schema.Map(schema.Scalar(dtype=np.int32), schema.Scalar(dtype=np.float32))
        s = schema.Struct(('field1', schema.Scalar(dtype=np.int32)), ('field2', a))
        self.assertEqual(s['field2:values:keys'], a.keys)
        self.assertEqual(s['field2:values:values'], a.values)
        with self.assertRaises(KeyError):
            s['fields2:keys:non_existent']

    def testPreservesMetadata(self):
        s = schema.Struct(
            ('a', schema.Scalar(np.float32)),
            ('b', schema.Scalar(np.int32, metadata=schema.Metadata(categorical_limit=5))),
            ('c', schema.List(schema.Scalar(np.int32, metadata=schema.Metadata(categorical_limit=6)))),
        )
        s.c.lengths.set_metadata(schema.Metadata(categorical_limit=7))
        self.assertEqual(None, s.a.metadata)
        self.assertEqual(5, s.b.metadata.categorical_limit)
        self.assertEqual(6, s.c.value.metadata.categorical_limit)
        self.assertEqual(7, s.c.lengths.metadata.categorical_limit)
        sc = s.clone()
        self.assertEqual(None, sc.a.metadata)
        self.assertEqual(5, sc.b.metadata.categorical_limit)
        self.assertEqual(6, sc.c.value.metadata.categorical_limit)
        self.assertEqual(7, sc.c.lengths.metadata.categorical_limit)
        sv = schema.from_blob_list(
            s, [np.array([3.4]), np.array([2]), np.array([3]), np.array([1, 2, 3])]
        )
        self.assertEqual(None, sv.a.metadata)
        self.assertEqual(5, sv.b.metadata.categorical_limit)
        self.assertEqual(6, sv.c.value.metadata.categorical_limit)
        self.assertEqual(7, sv.c.lengths.metadata.categorical_limit)

    def testDupField(self):
        with self.assertRaises(ValueError):
            schema.Struct(('a', schema.Scalar()), ('a', schema.Scalar()))

    def testAssignToField(self):
        with self.assertRaises(TypeError):
            s = schema.Struct(('a', schema.Scalar()))
            s.a = schema.Scalar()

    def testPreservesEmptyFields(self):
        s = schema.Struct(('a', schema.Scalar(np.float32)), ('b', schema.Struct()))
        sc = s.clone()
        self.assertIn('a', sc.fields)
        self.assertIn('b', sc.fields)
        sv = schema.from_blob_list(s, [np.array([3.4])])
        self.assertIn('a', sv.fields)
        self.assertIn('b', sv.fields)
        self.assertEqual(0, len(sv.b.fields))

    def testStructSubtraction(self):
        s1 = schema.Struct(('a', schema.Scalar()), ('b', schema.Scalar()), ('c', schema.Scalar()))
        s2 = schema.Struct(('b', schema.Scalar()))
        s = s1 - s2
        self.assertEqual(['a', 'c'], s.field_names())
        s3 = schema.Struct(('a', schema.Scalar()))
        s = s1 - s3
        self.assertEqual(['b', 'c'], s.field_names())
        with self.assertRaises(TypeError):
            s1 - schema.Scalar()

    def testStructNestedSubtraction(self):
        s1 = schema.Struct(
            ('a', schema.Scalar()),
            ('b', schema.Struct(
                ('c', schema.Scalar()),
                ('d', schema.Scalar()),
                ('e', schema.Scalar()),
                ('f', schema.Scalar()),
            )),
        )
        s2 = schema.Struct(('b', schema.Struct(('d', schema.Scalar()), ('e', schema.Scalar()))))
        s = s1 - s2
        self.assertEqual(['a', 'b:c', 'b:f'], s.field_names())

    def testStructAddition(self):
        s1 = schema.Struct(('a', schema.Scalar()))
        s2 = schema.Struct(('b', schema.Scalar()))
        s = s1 + s2
        self.assertIn('a', s.fields)
        self.assertIn('b', s.fields)
        with self.assertRaises(TypeError):
            s1 + s1
        with self.assertRaises(TypeError):
            s1 + schema.Scalar()

    def testStructNestedAddition(self):
        s1 = schema.Struct(('a', schema.Scalar()), ('b', schema.Struct(('c', schema.Scalar()))))
        s2 = schema.Struct(('b', schema.Struct(('d', schema.Scalar()))))
        s = s1 + s2
        self.assertEqual(['a', 'b:c', 'b:d'], s.field_names())
        s3 = schema.Struct(('b', schema.Scalar()))
        with self.assertRaises(TypeError):
            s = s1 + s3

    def testGetFieldByNestedName(self):
        st = schema.Struct(
            ('a', schema.Scalar()),
            ('b', schema.Struct(('c', schema.Struct(('d', schema.Scalar()))))),
        )
        self.assertRaises(KeyError, st.__getitem__, '')
        self.assertRaises(KeyError, st.__getitem__, 'x')
        self.assertRaises(KeyError, st.__getitem__, 'x:y')
        self.assertRaises(KeyError, st.__getitem__, 'b:c:x')
        a = st['a']
        self.assertTrue(isinstance(a, schema.Scalar))
        bc = st['b:c']
        self.assertIn('d', bc.fields)
        bcd = st['b:c:d']
        self.assertTrue(isinstance(bcd, schema.Scalar))

    def testAddFieldByNestedName(self):
        f_a = schema.Scalar(blob=core.BlobReference('blob1'))
        f_b = schema.Struct(('c', schema.Struct(('d', schema.Scalar(blob=core.BlobReference('blob2'))))))
        f_x = schema.Struct(('x', schema.Scalar(blob=core.BlobReference('blob3'))))
        with self.assertRaises(TypeError):
            st = schema.Struct(('a', f_a), ('b', f_b), ('b:c:d', f_x))
        with self.assertRaises(TypeError):
            st = schema.Struct(('a', f_a), ('b', f_b), ('b:c:d:e', f_x))
        st = schema.Struct(('a', f_a), ('b', f_b), ('e:f', f_x))
        self.assertEqual(['a', 'b:c:d', 'e:f:x'], st.field_names())
        self.assertEqual(['blob1', 'blob2', 'blob3'], st.field_blobs())
        st = schema.Struct(('a', f_a), ('b:c:e', f_x), ('b', f_b))
        self.assertEqual(['a', 'b:c:e:x', 'b:c:d'], st.field_names())
        self.assertEqual(['blob1', 'blob3', 'blob2'], st.field_blobs())
        st = schema.Struct(('a:a1', f_a), ('b:b1', f_b), ('a', f_x))
        self.assertEqual(['a:a1', 'a:x', 'b:b1:c:d'], st.field_names())
        self.assertEqual(['blob1', 'blob3', 'blob2'], st.field_blobs())

    def testContains(self):
        st = schema.Struct(
            ('a', schema.Scalar()),
            ('b', schema.Struct(('c', schema.Struct(('d', schema.Scalar()))))),
        )
        self.assertTrue('a' in st)
        self.assertTrue('b:c' in st)
        self.assertTrue('b:c:d' in st)
        self.assertFalse('' in st)
        self.assertFalse('x' in st)
        self.assertFalse('b:c:x' in st)
        self.assertFalse('b:c:d:x' in st)

    def testFromEmptyColumnList(self):
        st = schema.Struct()
        columns = st.field_names()
        rec = schema.from_column_list(col_names=columns)
        self.assertEqual(rec, schema.Struct())

    def testFromColumnList(self):
        st = schema.Struct(
            ('a', schema.Scalar()),
            ('b', schema.List(schema.Scalar())),
            ('c', schema.Map(schema.Scalar(), schema.Scalar())),
        )
        columns = st.field_names()
        # The result must not depend on the order of the columns.
        for _ in range(10):
            some_blobs = [core.BlobReference('blob:' + x) for x in columns]
            rec = schema.from_column_list(columns, col_blobs=some_blobs)
            self.assertTrue(rec.has_blobs())
            self.assertEqual(sorted(st.field_names()), sorted(rec.field_names()))
            self.assertEqual(
                [str(blob) for blob in rec.field_blobs()],
                [str('blob:' + name) for name in rec.field_names()],
            )
            random.shuffle(columns)

    def testStructGet(self):
        net = core.Net('test_net')
        s1 = schema.NewRecord(net, schema.Scalar(np.float32))
        s2 = schema.NewRecord(net, schema.Scalar(np.float32))
        t = schema.Tuple(s1, s2)
        assert t.get('field_0', None) == s1
        assert t.get('field_1', None) == s2
        assert t.get('field_2', None) is None

    def testScalarForVoidType(self):
        s0_good = schema.Scalar((None, (2,)))
        with self.assertRaises(TypeError):
            s0_bad = schema.Scalar((np.void, (2,)))
        s1_good = schema.Scalar(np.void)
        s2_good = schema.Scalar(None)
        assert s1_good == s2_good

    def testScalarShape(self):
        s0 = schema.Scalar(np.int32)
        self.assertEqual(s0.field_type().shape, ())
        s1_good = schema.Scalar((np.int32, 5))
        self.assertEqual(s1_good.field_type().shape, (5,))
        with self.assertRaises(ValueError):
            s1_bad = schema.Scalar((np.int32, -1))
        s1_hard = schema.Scalar((np.int32, 1))
        self.assertEqual(s1_hard.field_type().shape, (1,))
        s2 = schema.Scalar((np.int32, (2, 3)))
        self.assertEqual(s2.field_type().shape, (2, 3))

    def testDtypeForCoreType(self):
        dtype = schema.dtype_for_core_type(core.DataType.FLOAT16)
        self.assertEqual(dtype, np.float16)
        with self.assertRaises(TypeError):
            schema.dtype_for_core_type(100)
class GoogleMapSearchAddressBook(VirtualFunctionTool):
    name = 'GoogleMapSearchAddressBook'
    summary = 'Search for locations in the address book.'
    parameters: List[ArgParameter] = [
        {
            'name': 'keywords',
            'type': 'string',
            'description': 'The keywords to search for locations in the address book.',
            'required': True,
        }
    ]
    returns: List[ArgReturn] = [
        {
            'name': 'addresses',
            'type': 'array',
            'description': (
                "The addresses found in the address book. Each address object contains "
                "'location_address' (string, the address of the location in the format of "
                "'street address, city, zip code'), 'name' (string, the name of the location), "
                "'note' (string, the note of the location)."
            ),
        }
    ]
    exceptions: List[ArgException] = [
        {'name': 'InvalidRequestException', 'description': "The 'keywords' argument is empty."}
    ]
class TestThresholdSelection(unittest.TestCase):
    def test_no_clipping_function(self):
        x = np.random.randn(10, 10, 10)
        dummy = 0
        ml = power_of_two_selection_tensor(
            x, dummy, n_bits=8, quant_error_method=qc.QuantizationErrorMethod.NOCLIPPING
        )[THRESHOLD]
        self.assertTrue(ml > np.max(np.abs(x)))

    def test_mse_from_histogram(self):
        hc = HistogramCollector()
        for _ in range(10):
            x = np.random.randn(10, 10, 10)
            hc.update(x)
        b, c = hc.get_histogram()
        dummy = 0
        _mse_error_histogram(b, c, dummy, 8)
class Encoder(nn.Module):
    def __init__(self, nc, ndf, hidden_size):
        super(Encoder, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(nc, ndf, kernel_size=3, stride=1, padding=1), nn.ELU(True)
        )
        self.conv2 = conv_block(ndf, ndf)
        self.conv3 = conv_block(ndf, ndf * 2)
        self.conv4 = conv_block(ndf * 2, ndf * 3)
        self.conv5 = conv_block(ndf * 3, ndf * 4)
        self.encode = nn.Conv2d(ndf * 4, hidden_size, kernel_size=8, stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.encode(x)
        return x
class CustomModel(torch.nn.Module):
    def __init__(self, embedding_dim=128, rnn_size=256, layers=2, output_dim=1000, return_hidden=False):
        super().__init__()
        self.return_hidden = return_hidden
        self.reshape = False
        self.embedding = sb.nnet.embedding.Embedding(
            num_embeddings=output_dim, embedding_dim=embedding_dim
        )
        self.rnn = torch.nn.LSTM(
            input_size=embedding_dim, hidden_size=rnn_size, bidirectional=False, num_layers=layers
        )
        self.out = sb.nnet.linear.Linear(input_size=rnn_size, n_neurons=output_dim)
        self.log_softmax = sb.nnet.activations.Softmax(apply_log=True)

    def forward(self, x, hx=None):
        x = self.embedding(x)
        if len(x.shape) == 2:
            x = x.unsqueeze(dim=1)
            self.reshape = True
        # The LSTM expects (time, batch, features).
        x = x.transpose(0, 1)
        x, hidden = self.rnn(x, hx)
        x = x.transpose(0, 1)
        x = self.out(x)
        x = self.log_softmax(x)
        if self.reshape:
            x = x.squeeze(dim=1)
        if self.return_hidden:
            return x, hidden
        return x
class XLMWithLMHeadModel:
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def train(model, optimizer, data):
    model.train()
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
class LibFuzzerModel(BaseModel):
    seed = peewee.CharField()
    output = peewee.CharField()
    group = peewee.CharField()
    program = peewee.CharField()
    argument = peewee.CharField()
    thread = peewee.IntegerField()
    pid = peewee.IntegerField()
class Edge(object):
    def __init__(self, source_node: BaseNode, sink_node: BaseNode, source_index: int, sink_index: int):
        self.source_node = source_node
        self.sink_node = sink_node
        self.source_index = source_index
        self.sink_index = sink_index

    def get_attributes(self) -> Dict[str, Any]:
        return {EDGE_SOURCE_INDEX: self.source_index, EDGE_SINK_INDEX: self.sink_index}

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, Edge):
            return (
                other.sink_node == self.sink_node
                and other.source_node == self.source_node
                and other.source_index == self.source_index
                and other.sink_index == self.sink_index
            )
        return False

    def __repr__(self) -> str:
        return f'{self.source_node.name}:{self.source_index} -> {self.sink_node.name}:{self.sink_index}'
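# A small sketch of the equality semantics above, using a hypothetical stub node
# type; the real `BaseNode` presumably carries more state than just a name.
class StubNode:
    def __init__(self, name):
        self.name = name

a, b = StubNode('a'), StubNode('b')
e1 = Edge(a, b, source_index=0, sink_index=1)
e2 = Edge(a, b, source_index=0, sink_index=1)
e3 = Edge(a, b, source_index=0, sink_index=2)
print(e1 == e2)  # True: same endpoints and indices
print(e1 == e3)  # False: sink_index differs
print(e1)        # a:0 -> b:1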
class sCW_sBC_reg(atomic_reg):
    OP_NAME = 'sCW&sBC'
    # Bit-field layout: (name, type, bit width).
    _fields_ = [
        ('cmd_short', ctypes.c_uint64, 1),
        ('op_code', ctypes.c_uint64, 16),
        ('cmd_id_dep', ctypes.c_uint64, 23),
        ('dbg_mode', ctypes.c_uint64, 1),
        ('tsk_typ', ctypes.c_uint64, 4),
        ('tsk_eu_typ', ctypes.c_uint64, 5),
        ('opt_res0_prec', ctypes.c_uint64, 3),
        ('rsvd0', ctypes.c_uint64, 6),
        ('pwr_step', ctypes.c_uint64, 4),
        ('intr_en', ctypes.c_uint64, 1),
        ('res0_n', ctypes.c_uint64, 16),
        ('res0_c', ctypes.c_uint64, 16),
        ('res0_h', ctypes.c_uint64, 16),
        ('res0_w', ctypes.c_uint64, 16),
        ('opd0_c', ctypes.c_uint64, 16),
        ('opd0_w', ctypes.c_uint64, 16),
        ('rsvd1', ctypes.c_uint64, 32),
        ('res0_addr', ctypes.c_uint64, 32),
        ('opd0_addr', ctypes.c_uint64, 32),
    ]

    cmd_short: int
    op_code: int
    cmd_id_dep: int
    dbg_mode: int
    tsk_typ: int
    tsk_eu_typ: int
    opt_res0_prec: int
    rsvd0: int
    pwr_step: int
    intr_en: int
    res0_n: int
    res0_c: int
    res0_h: int
    res0_w: int
    opd0_c: int
    opd0_w: int
    rsvd1: int
    res0_addr: int
    opd0_addr: int

    length: int = 256
class ExponentialMovingAverage(InvertibleTransformBase):
    def __init__(self, alpha: float, normalize: bool = True, p: float = 0.95, ci: bool = False):
        super().__init__()
        self.alpha = alpha
        self.normalize = normalize
        self.p = p
        self.ci = ci

    def requires_inversion_state(self):
        return False

    def train(self, time_series: TimeSeries):
        pass

    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        new_vars = OrderedDict()
        for name, var in time_series.items():
            emw = var.to_pd().ewm(alpha=self.alpha, adjust=self.normalize)
            ema = emw.mean()
            new_vars[name] = UnivariateTimeSeries.from_pd(ema)
            if self.ci:
                ems = emw.std()
                ems.iloc[0] = ems.iloc[1]  # the first rolling std is NaN; backfill positionally
                new_vars[f'{name}_lb'] = UnivariateTimeSeries.from_pd(
                    ema + norm.ppf(0.5 * (1 - self.p)) * ems
                )
                new_vars[f'{name}_ub'] = UnivariateTimeSeries.from_pd(
                    ema + norm.ppf(0.5 * (1 + self.p)) * ems
                )
        ret = TimeSeries(new_vars, check_aligned=False)
        ret._is_aligned = time_series.is_aligned
        return ret

    def _invert(self, time_series: TimeSeries) -> TimeSeries:
        new_vars = OrderedDict()
        for name, var in time_series.items():
            if isinstance(name, str) and (name.endswith('_lb') or name.endswith('_ub')):
                continue
            t, y = var.index, var.np_values
            if self.normalize:
                # Undo the adjusted (normalized) EMA weights before inverting the recurrence.
                weights = 1 - (1 - self.alpha) ** np.arange(1, len(y) + 1)
                y = y * weights / self.alpha
                x = y[1:] - (1 - self.alpha) * y[:-1]
            else:
                x = (y[1:] - (1 - self.alpha) * y[:-1]) / self.alpha
            x = np.concatenate((y[:1], x))
            new_vars[name] = UnivariateTimeSeries(t, x)
        ret = TimeSeries(new_vars, check_aligned=False)
        ret._is_aligned = time_series.is_aligned
        return ret
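# To see why `_invert` recovers the input, here is a minimal self-contained numpy
# sketch of the unnormalized (adjust=False) case: the EMA recurrence is
# s_t = alpha * x_t + (1 - alpha) * s_{t-1} with s_0 = x_0, so it can be solved
# for x_t as (s_t - (1 - alpha) * s_{t-1}) / alpha.
import numpy as np

alpha = 0.3
x = np.random.randn(50)

# Forward pass: unnormalized EMA with s[0] = x[0].
s = np.empty_like(x)
s[0] = x[0]
for t in range(1, len(x)):
    s[t] = alpha * x[t] + (1 - alpha) * s[t - 1]

# Inverse pass: solve the recurrence for x, exactly as in `_invert` above.
x_rec = np.concatenate((s[:1], (s[1:] - (1 - alpha) * s[:-1]) / alpha))
assert np.allclose(x, x_rec)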
import hashlib
import json


def compute_md5(cfg: dict) -> str:
    # Sort keys so that logically equal configs hash identically.
    return hashlib.md5(json.dumps(cfg, sort_keys=True).encode('utf-8')).hexdigest()
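# A quick check of the property this relies on: `sort_keys=True` makes the
# digest independent of dict insertion order.
a = {'lr': 0.1, 'epochs': 10}
b = {'epochs': 10, 'lr': 0.1}
assert compute_md5(a) == compute_md5(b)
print(compute_md5(a))  # a stable 32-character hex digest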
class TFOPTModel(metaclass=DummyObject):
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token='[CLS]',
        cls_token_segment_id=1,
        sep_token='[SEP]',
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info('Writing example %d of %d', ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Only the first sub-token of a word carries the real label.
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: max_seq_length - special_tokens_count]
                label_ids = label_ids[: max_seq_length - special_tokens_count]
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = [pad_token] * padding_length + input_ids
                input_mask = [0 if mask_padding_with_zero else 1] * padding_length + input_mask
                segment_ids = [pad_token_segment_id] * padding_length + segment_ids
                label_ids = [pad_token_label_id] * padding_length + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info('*** Example ***')
                logger.info('guid: %s', example.guid)
                logger.info('tokens: %s', ' '.join(str(x) for x in tokens))
                logger.info('input_ids: %s', ' '.join(str(x) for x in input_ids))
                logger.info('input_mask: %s', ' '.join(str(x) for x in input_mask))
                logger.info('segment_ids: %s', ' '.join(str(x) for x in segment_ids))
                logger.info('label_ids: %s', ' '.join(str(x) for x in label_ids))
            if 'token_type_ids' not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids,
                    attention_mask=input_mask,
                    token_type_ids=segment_ids,
                    label_ids=label_ids,
                )
            )
        return features
def calc_gap(theta_true, theta_pred, simplify=True):
    gap = theta_true - theta_pred
    if simplify:
        gap = gap.simplify()
    return gap
def set_emotion_in_speaker(emotion_ids, input_ids, bos, eos, speaker1, speaker2, pad):
    # Keep emotion ids only at special-token positions; pad everywhere else.
    special_token_ids_list = [bos, eos, speaker1, speaker2]
    new_emotion_ids = []
    for i, emotion in enumerate(emotion_ids):
        if input_ids[i] in special_token_ids_list:
            new_emotion_ids.append(emotion)
        else:
            new_emotion_ids.append(pad)
    return new_emotion_ids
class Scale(Transform):
    def __init__(self, scale_factor, output_sz=None, shift=None):
        super().__init__(output_sz, shift)
        self.scale_factor = scale_factor

    def __call__(self, image):
        if isinstance(image, torch.Tensor):
            h_orig, w_orig = image.shape[2:]
            if h_orig != w_orig:
                raise NotImplementedError
            # Resize, adjusting so the new size has the same parity as the original.
            h_new = round(h_orig / self.scale_factor)
            h_new += (h_new - h_orig) % 2
            w_new = round(w_orig / self.scale_factor)
            w_new += (w_new - w_orig) % 2
            image_resized = F.interpolate(image, [h_new, w_new], mode='bilinear')
            return self.crop_to_output(image_resized)
        else:
            raise NotImplementedError
def register_Ns3IntToType__5_methods(root_module, cls):
    cls.add_constructor([])
    cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')])
    return
class LoopEntryTransform(Transform, abc.ABC):
    def __init__(self, loop_axis=None, entries=()) -> None:
        super().__init__()
        self.loop_axis = loop_axis
        self.entries = entries

    @staticmethod
    def loop_entries(sample: dict, fn, entries, loop_axis=None):
        for entry in entries:
            if entry not in sample:
                if raise_error_if_entry_not_extracted:
                    raise ValueError(ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
                continue
            np_entry = check_and_return(sample[entry], np.ndarray)
            if loop_axis is None:
                np_entry = fn(np_entry, entry)
            else:
                # Apply fn slice-by-slice along loop_axis.
                slicing = [slice(None) for _ in range(np_entry.ndim)]
                for i in range(np_entry.shape[loop_axis]):
                    slicing[loop_axis] = i
                    np_entry[tuple(slicing)] = fn(np_entry[tuple(slicing)], entry, i)
            sample[entry] = np_entry
        return sample

    def __call__(self, sample: dict) -> dict:
        return self.loop_entries(sample, self.transform_entry, self.entries, self.loop_axis)

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        pass
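# The per-axis loop above is a generic numpy pattern. Here is a self-contained
# sketch of the same idea with no dependency on the Transform machinery; the
# helper name `apply_along` is invented for this example.
import numpy as np

def apply_along(volume: np.ndarray, fn, axis: int) -> np.ndarray:
    # Build a [:, ..., i, ..., :] index and apply fn to each slice along `axis`.
    out = volume.copy()
    slicing = [slice(None)] * volume.ndim
    for i in range(volume.shape[axis]):
        slicing[axis] = i
        out[tuple(slicing)] = fn(out[tuple(slicing)])
    return out

vol = np.arange(24, dtype=float).reshape(2, 3, 4)
normed = apply_along(vol, lambda s: s - s.mean(), axis=0)  # zero-mean each slice
print(normed[0].mean(), normed[1].mean())  # both ~0.0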
@library_interact(
    title=lambda: text_control('<h2>Derivative grapher</h2>'),
    function=lambda: input_box(default='x^5-3*x^3+1', label='Function:'),
    x_range=lambda: range_slider(-15, 15, 0.1, default=(-2, 2), label='Range (x)'),
    y_range=lambda: range_slider(-15, 15, 0.1, default=(-8, 6), label='Range (y)'),
)
def function_derivative(title, function, x_range, y_range):
    x = SR.var('x')
    f = symbolic_expression(function).function(x)
    df = derivative(f, x)
    ddf = derivative(df, x)
    plots = (
        plot(f(x), x_range, thickness=1.5)
        + plot(df(x), x_range, color='green')
        + plot(ddf(x), x_range, color='red')
    )
    if y_range == (0, 0):
        show(plots, xmin=x_range[0], xmax=x_range[1])
    else:
        show(plots, xmin=x_range[0], xmax=x_range[1], ymin=y_range[0], ymax=y_range[1])
    html('<center>$\\color{Blue}{f(x) = %s}$</center>' % latex(f(x)))
    html("<center>$\\color{Green}{f'(x) = %s}$</center>" % latex(df(x)))
    html("<center>$\\color{Red}{f''(x) = %s}$</center>" % latex(ddf(x)))
def _roundtrip_compare_gpt2_checkpoint(model_id, revision, config: Optional[Gpt2Config] = None):
    import torch

    converter = Gpt2Config.default_hf_checkpoint_converter
    torch_model: HfGpt2LMHeadModel = AutoModelForCausalLM.from_pretrained(model_id, revision=revision)
    torch_model.eval()

    model: Gpt2LMHeadModel = cast(
        Gpt2LMHeadModel,
        converter.load_pretrained(config or Gpt2LMHeadModel, RepoRef(model_id, revision=revision)),
    )
    model = inference_mode(model, True)

    input = hax.random.randint(PRNGKey(0), model.Pos, 0, model.Vocab.size)

    # Reference probabilities from the HF torch model.
    torch_out = torch_model(torch.from_numpy(onp.array(input.array)).to(torch.int32).unsqueeze(0))
    torch_out = torch_out.logits[0].detach().cpu().numpy()
    torch_out = jax.nn.softmax(torch_out, axis=-1)

    attn_mask = hax.nn.attention.causal_mask(model.Pos, model.config.KeyPos)

    def compute(input):
        return hax.nn.softmax(model(input, key=None, attn_mask=attn_mask), axis=model.Vocab)

    compute = jax.jit(compute)
    jax_out = compute(input).array
    assert torch_out.shape == jax_out.shape, f'{torch_out.shape} != {jax_out.shape}'
    assert onp.isclose(torch_out, onp.array(jax_out), rtol=0.01, atol=0.01).all(), f'{torch_out} != {jax_out}'

    # Round-trip: save via the converter, reload in torch, and the outputs must still match.
    with tempfile.TemporaryDirectory() as tmpdir:
        converter.save_pretrained(model, tmpdir)
        torch_model2: HfGpt2LMHeadModel = AutoModelForCausalLM.from_pretrained(tmpdir)
        torch_model2.eval()
        torch_out2 = torch_model2(torch.from_numpy(onp.array(input.array)).to(torch.int32).unsqueeze(0))
        torch_out2 = torch_out2.logits[0].detach().cpu().numpy()
        torch_out2 = jax.nn.softmax(torch_out2, axis=-1)
        assert onp.isclose(torch_out2, onp.array(jax_out), rtol=0.01, atol=0.01).all(), f'{torch_out2} != {jax_out}'
class LabelingFunction:
    def __init__(
        self,
        name: str,
        f: Callable[..., int],
        resources: Optional[Mapping[str, Any]] = None,
        pre: Optional[List[BasePreprocessor]] = None,
    ) -> None:
        self.name = name
        self._f = f
        self._resources = resources or {}
        self._pre = pre or []

    def _preprocess_data_point(self, x: DataPoint) -> DataPoint:
        for preprocessor in self._pre:
            x = preprocessor(x)
            if x is None:
                raise ValueError('Preprocessor should not return None')
        return x

    def __call__(self, x: DataPoint) -> int:
        x = self._preprocess_data_point(x)
        return self._f(x, **self._resources)

    def __repr__(self) -> str:
        preprocessor_str = f', Preprocessors: {self._pre}'
        return f'{type(self).__name__} {self.name}{preprocessor_str}'
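# A small usage sketch of the class above (Snorkel-style weak labeling). The
# keyword rule and label values are invented for illustration.
SPAM, NOT_SPAM = 1, 0

def keyword_check(x, keywords):
    return SPAM if any(k in x.lower() for k in keywords) else NOT_SPAM

lf = LabelingFunction(
    name='keyword_spam',
    f=keyword_check,
    resources={'keywords': ['free money', 'click here']},
)
print(lf('Click HERE to claim your prize'))  # 1
print(lf('Meeting moved to 3pm'))            # 0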
class _DataListMixin:
    def decode_rows(self, stream, conversors):
        return list(super().decode_rows(stream, conversors))
class Distribution(TorchDistribution):
    def sample_and_logprob(self):
        s = self.sample()
        log_p = self.log_prob(s)
        return s, log_p

    def rsample_and_logprob(self):
        s = self.rsample()
        log_p = self.log_prob(s)
        return s, log_p

    def mle_estimate(self):
        return self.mean

    def get_diagnostics(self):
        return {}
def test_wrong_split_strategy() -> None:
    with pytest.raises(ValueError, match='Please provide a valid*'):
        check_split_strategy(strategy='not_valid')
class Follower:
    def __init__(self, uav_type, uav_id, uav_num):
        self.hover = 'HOVER'
        self.uav_type = uav_type
        self.uav_num = uav_num
        self.id = uav_id
        self.f = 30  # control loop frequency (Hz)
        self.pose = PoseStamped()
        self.cmd_vel_enu = Twist()
        self.avoid_vel = Vector3()
        self.formation_pattern = None
        self.Kp = 1.0
        self.Kp_avoid = 2.0
        self.vel_max = 1.0
        self.leader_pose = PoseStamped()
        prefix = f'{self.uav_type}_{self.id}'
        self.pose_sub = rospy.Subscriber(f'{prefix}/mavros/local_position/pose', PoseStamped, self.pose_callback, queue_size=1)
        self.avoid_vel_sub = rospy.Subscriber(f'/xtdrone/{prefix}/avoid_vel', Vector3, self.avoid_vel_callback, queue_size=1)
        self.formation_pattern_sub = rospy.Subscriber('/xtdrone/formation_pattern', Float32MultiArray, self.formation_pattern_callback, queue_size=1)
        self.vel_enu_pub = rospy.Publisher(f'/xtdrone/{prefix}/cmd_vel_enu', Twist, queue_size=1)
        self.info_pub = rospy.Publisher(f'/xtdrone/{prefix}/info', String, queue_size=1)
        self.cmd_pub = rospy.Publisher(f'/xtdrone/{prefix}/cmd', String, queue_size=1)
        self.leader_pose_sub = rospy.Subscriber(f'{self.uav_type}_0/mavros/local_position/pose', PoseStamped, self.leader_pose_callback, queue_size=1)

    def formation_pattern_callback(self, msg):
        self.formation_pattern = numpy.array(msg.data).reshape(3, self.uav_num - 1)

    def pose_callback(self, msg):
        self.pose = msg

    def leader_pose_callback(self, msg):
        self.leader_pose = msg

    def avoid_vel_callback(self, msg):
        self.avoid_vel = msg

    def loop(self):
        rospy.init_node('follower' + str(self.id - 1))
        rate = rospy.Rate(self.f)
        while not rospy.is_shutdown():
            if self.formation_pattern is not None:
                # P-control towards this follower's leader-relative formation slot.
                self.cmd_vel_enu.linear.x = self.Kp * (self.leader_pose.pose.position.x + self.formation_pattern[0, self.id - 1] - self.pose.pose.position.x)
                self.cmd_vel_enu.linear.y = self.Kp * (self.leader_pose.pose.position.y + self.formation_pattern[1, self.id - 1] - self.pose.pose.position.y)
                self.cmd_vel_enu.linear.z = self.Kp * (self.leader_pose.pose.position.z + self.formation_pattern[2, self.id - 1] - self.pose.pose.position.z)
                # Add the collision-avoidance velocity.
                self.cmd_vel_enu.linear.x += self.Kp_avoid * self.avoid_vel.x
                self.cmd_vel_enu.linear.y += self.Kp_avoid * self.avoid_vel.y
                self.cmd_vel_enu.linear.z += self.Kp_avoid * self.avoid_vel.z
                # Clamp the commanded speed.
                cmd_vel_magnitude = (self.cmd_vel_enu.linear.x ** 2 + self.cmd_vel_enu.linear.y ** 2 + self.cmd_vel_enu.linear.z ** 2) ** 0.5
                if cmd_vel_magnitude > 3 ** 0.5 * self.vel_max:
                    self.cmd_vel_enu.linear.x = self.cmd_vel_enu.linear.x / cmd_vel_magnitude * self.vel_max
                    self.cmd_vel_enu.linear.y = self.cmd_vel_enu.linear.y / cmd_vel_magnitude * self.vel_max
                    self.cmd_vel_enu.linear.z = self.cmd_vel_enu.linear.z / cmd_vel_magnitude * self.vel_max
                self.vel_enu_pub.publish(self.cmd_vel_enu)
            try:
                rate.sleep()
            except rospy.ROSInterruptException:
                continue
class RandomRotation(object):
    def __init__(self, degrees, resample=False, expand=False, center=None, fill=0):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError('If degrees is a single number, it must be positive.')
            self.degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError('If degrees is a sequence, it must be of len 2.')
            self.degrees = degrees
        self.resample = resample
        self.expand = expand
        self.center = center
        self.fill = fill

    @staticmethod
    def get_params(degrees):
        # Sample a rotation angle uniformly from the configured range.
        angle = random.uniform(degrees[0], degrees[1])
        return angle

    def __call__(self, img):
        angle = self.get_params(self.degrees)
        return F.rotate(img, angle, self.resample, self.expand, self.center, self.fill)

    def __repr__(self):
        format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
        format_string += ', resample={0}'.format(self.resample)
        format_string += ', expand={0}'.format(self.expand)
        if self.center is not None:
            format_string += ', center={0}'.format(self.center)
        format_string += ')'
        return format_string
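# A usage sketch for the transform above, assuming `F` is the older
# torchvision.transforms.functional API (where `rotate` still takes `resample`).
import torchvision.transforms.functional as F  # assumed source of F.rotate
from PIL import Image

img = Image.new('RGB', (64, 64), color='white')
rot = RandomRotation(degrees=30)  # angles drawn uniformly from (-30, 30)
out = rot(img)
print(rot)  # RandomRotation(degrees=(-30, 30), resample=False, expand=False)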
def overview(target, data):
    target.write(data['name'])
    target.write('\n')
    target.write('\n\n')
    target.write('\n'.join(data['description']))
    target.write('\n\n')
    if 'more_info' in data:
        target.write('\n'.join(data['more_info']))
        target.write('\n\n')
    if 'perf_fields' in data:
        target.write('### PERFORMANCE\n')
        # Markdown table header and separator rows.
        ctr = len(data['perf_fields'])
        for idx in range(0, ctr - 1):
            target.write(data['perf_fields'][idx])
            target.write('|')
        target.write(data['perf_fields'][ctr - 1])
        target.write('\n')
        for idx in range(0, ctr - 1):
            target.write('-----|')
        target.write('-----\n')
        # One table row per performance result.
        count = len(data['performance'])
        for result in range(0, count):
            for i in range(0, ctr - 1):
                target.write(data['performance'][result][i])
                target.write('|')
            target.write(data['performance'][result][ctr - 1])
            target.write('\n')
    if 'key_concepts' in data:
        target.write('***KEY CONCEPTS:*** ')
        elem_count = len(data['key_concepts'])
        for result in data['key_concepts']:
            elem_count -= 1
            target.write(result)
            if elem_count != 0:
                target.write(', ')
        target.write('\n\n')
    if 'keywords' in data:
        target.write('***KEYWORDS:*** ')
        word_count = len(data['keywords'])
        for result in data['keywords']:
            word_count -= 1
            target.write(result)
            if word_count != 0:
                target.write(', ')
        target.write('\n\n')
    return
def _set_jit_function_cache(key, value):
    assert isinstance(value, torch.jit.ScriptFunction)
    _jit_caching_layer[key] = value.qualified_name
def mk_auto_soundness_theorem_block(
    lean_gen: LeanSoundnessGen, ctx: LeanGenContext
) -> Tuple[Optional[LeanGenContext], Optional[LeanGenContext], Optional[LeanBranchCond]]:
    cond: Optional[LeanBranchCond] = None
    ctx_pos: Optional[LeanGenContext] = ctx
    ctx_neg: Optional[LeanGenContext] = None
    # Emit soundness steps until we leave the current context (e.g. at a branch).
    while ctx_pos == ctx:
        instr = ctx.func.lean_desc[ctx.lean_desc_num]
        if isinstance(instr, LeanPreprocessedJumpToLabelInstruction) and instr.condition is None:
            mk_auto_soundness_jmp(ctx)
        elif isinstance(instr, LeanPreprocessedIf):
            cond = mk_auto_soundness_if(ctx, instr)
        elif isinstance(instr, LeanPreprocessedJumpToLabelInstruction) and instr.condition is not None:
            cond = mk_auto_soundness_jnz(ctx)
        elif isinstance(instr, LeanPreprocessedReturn):
            mk_auto_soundness_return(ctx, instr)
        else:
            mk_auto_soundness_step_instr(ctx)
        ctx_pos, ctx_neg = lean_gen.next_ctxs(ctx)
    return ctx_pos, ctx_neg, cond
class SquadFeatures:
    def __init__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        cls_index,
        p_mask,
        example_index,
        unique_id,
        paragraph_len,
        token_is_max_context,
        tokens,
        token_to_orig_map,
        start_position,
        end_position,
        is_impossible,
        qas_id: str = None,
    ):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.cls_index = cls_index
        self.p_mask = p_mask
        self.example_index = example_index
        self.unique_id = unique_id
        self.paragraph_len = paragraph_len
        self.token_is_max_context = token_is_max_context
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible
        self.qas_id = qas_id
class EncoderFeedForward(torch.nn.Module):
    def __init__(self, num_features, dim, num_gc_layers, num_fc_layers, out_features, dropout):
        super(EncoderFeedForward, self).__init__()
        self.encoder = Encoder(num_features, dim, num_gc_layers)
        # The encoder concatenates the outputs of all graph-conv layers.
        input_size_to_feed_forward = dim * num_gc_layers
        self.feed_forward = FeedForwardNetwork(input_size_to_feed_forward, out_features, num_fc_layers, dropout)

    def forward(self, batch, x, edge_index, edge_weight=None):
        x = self.encoder(batch, x, edge_index, edge_weight)
        x = self.feed_forward(x)
        return F.log_softmax(x, dim=-1)
def get_header_dirs():
    return [pkg_resources.resource_filename(__name__, 'lib/include')]
def load_results(datafile):
    with open(datafile, 'rb') as f:
        results = pickle.load(f)
    # Keep only the fields downstream analysis needs.
    return [
        {
            'search_results': x['search_results'],
            'baseline_results': x['baseline_results'],
            'bins': x['bins'],
            'p_norm': x['p_norm'],
            'q_norm': x['q_norm'],
            'epsilon': x['epsilon'],
        }
        for x in results
    ]
class GenericPairLoss(BaseMetricLossFunction):
    def __init__(self, mat_based_loss, **kwargs):
        super().__init__(**kwargs)
        self.loss_method = self.mat_based_loss if mat_based_loss else self.pair_based_loss

    def compute_loss(self, embeddings, labels, indices_tuple):
        indices_tuple = lmu.convert_to_pairs(indices_tuple, labels)
        if all(len(x) <= 1 for x in indices_tuple):
            return self.zero_losses()
        mat = self.distance(embeddings)
        return self.loss_method(mat, labels, indices_tuple)

    def _compute_loss(self):
        raise NotImplementedError

    def mat_based_loss(self, mat, labels, indices_tuple):
        a1, p, a2, n = indices_tuple
        pos_mask, neg_mask = torch.zeros_like(mat), torch.zeros_like(mat)
        pos_mask[a1, p] = 1
        neg_mask[a2, n] = 1
        return self._compute_loss(mat, pos_mask, neg_mask)

    def pair_based_loss(self, mat, labels, indices_tuple):
        a1, p, a2, n = indices_tuple
        pos_pair, neg_pair = [], []
        if len(a1) > 0:
            pos_pair = mat[a1, p]
        if len(a2) > 0:
            neg_pair = mat[a2, n]
        return self._compute_loss(pos_pair, neg_pair, indices_tuple)
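# The mask construction in `mat_based_loss` is just a scatter of index pairs into
# a boolean-like matrix; a self-contained torch sketch of that step (the index
# values here are made up for illustration):
import torch

mat = torch.zeros(4, 4)  # stands in for a 4x4 pairwise distance matrix
a1, p = torch.tensor([0, 1]), torch.tensor([1, 2])  # anchor/positive pairs
a2, n = torch.tensor([0]), torch.tensor([3])        # anchor/negative pairs

pos_mask, neg_mask = torch.zeros_like(mat), torch.zeros_like(mat)
pos_mask[a1, p] = 1  # marks (0, 1) and (1, 2) as positive pairs
neg_mask[a2, n] = 1  # marks (0, 3) as a negative pair
print(pos_mask)
print(neg_mask)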
class MountainCar(Environment):
    def __init__(self):
        self.name = MOUNTAINCAR
        self.min_position = -1.2
        self.max_position = 0.6
        self.max_speed = 0.07
        self.goal_position = 0.5
        self.state = None
        self.observation = None
        self.n_max_steps = 10000
        self.steps_elapsed = 0
        self.low = np.array([self.min_position, -self.max_speed])
        self.high = np.array([self.max_position, self.max_speed])
        self.viewer = None
        self.action_space = spaces.Discrete(3)
        self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)
        self.reset()

    def act(self, action):
        assert self.action_space.contains(action), '%r (%s) invalid' % (action, type(action))
        position, velocity = self.state
        velocity += (action - 1) * 0.001 + math.cos(3 * position) * (-0.0025)
        velocity = np.clip(velocity, -self.max_speed, self.max_speed)
        position += velocity
        position = np.clip(position, self.min_position, self.max_position)
        if position == self.min_position and velocity < 0:
            velocity = 0
        reward = -1.0
        self.state = (position, velocity)
        self.steps_elapsed += 1
        self.observation = Observation(reward=reward, state=np.array(self.state), is_episode_over=self.is_over())
        return self.observe()

    def observe(self):
        return self.observation

    def reset(self):
        self.state = np.array([np.random.uniform(low=-0.6, high=-0.4), 0])
        self.observation = Observation(reward=0.0, state=np.array(self.state), is_episode_over=self.is_over())
        self.steps_elapsed = 0
        return self.observe()

    def is_over(self):
        if self.steps_elapsed >= self.n_max_steps:
            return True
        return bool(self.state[0] >= self.goal_position)

    def _height(self, xs):
        return np.sin(3 * xs) * 0.45 + 0.55

    def display(self, mode='human'):
        screen_width = 600
        screen_height = 400
        world_width = self.max_position - self.min_position
        scale = screen_width / world_width
        carwidth = 40
        carheight = 20
        if self.viewer is None:
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
            xs = np.linspace(self.min_position, self.max_position, 100)
            ys = self._height(xs)
            xys = list(zip((xs - self.min_position) * scale, ys * scale))
            self.track = rendering.make_polyline(xys)
            self.track.set_linewidth(4)
            self.viewer.add_geom(self.track)
            clearance = 10
            l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
            car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            car.add_attr(rendering.Transform(translation=(0, clearance)))
            self.cartrans = rendering.Transform()
            car.add_attr(self.cartrans)
            self.viewer.add_geom(car)
            frontwheel = rendering.make_circle(carheight / 2.5)
            frontwheel.set_color(0.5, 0.5, 0.5)
            frontwheel.add_attr(rendering.Transform(translation=(carwidth / 4, clearance)))
            frontwheel.add_attr(self.cartrans)
            self.viewer.add_geom(frontwheel)
            backwheel = rendering.make_circle(carheight / 2.5)
            backwheel.add_attr(rendering.Transform(translation=(-carwidth / 4, clearance)))
            backwheel.add_attr(self.cartrans)
            backwheel.set_color(0.5, 0.5, 0.5)
            self.viewer.add_geom(backwheel)
            flagx = (self.goal_position - self.min_position) * scale
            flagy1 = self._height(self.goal_position) * scale
            flagy2 = flagy1 + 50
            flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
            self.viewer.add_geom(flagpole)
            flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)])
            flag.set_color(0.8, 0.8, 0)
            self.viewer.add_geom(flag)
        pos = self.state[0]
        self.cartrans.set_translation((pos - self.min_position) * scale, self._height(pos) * scale)
        self.cartrans.set_rotation(math.cos(3 * pos))
        return self.viewer.render(return_rgb_array=(mode == 'rgb_array'))

    def close(self):
        if self.viewer:
            self.viewer.close()

    def all_possible_actions(self):
        return list(range(self.action_space.n))

    def set_seed(self, seed):
        pass

    def are_states_equal(self, state_1, state_2):
        return np.linalg.norm(state_1 - state_2) < 0.2

    def create_copy(self):
        return deepcopy(self.state)

    def load_copy(self, env_copy):
        self.state = env_copy
def get_heideltime_corpus_stats(heideltime_file: str) -> None:
    type_dist = {'DATE': 0, 'SET': 0, 'DURATION': 0, 'TIME': 0}
    all_num_sentences = []
    all_num_annotations = []
    with open(heideltime_file) as f:
        json_lines = f.readlines()
    prev_id = json.loads(json_lines[0].strip('\n '))['id']
    num_sentences = 0
    num_annotations = 0
    for line in json_lines:
        current_sample = json.loads(line.strip('\n '))
        if current_sample['id'] != prev_id:
            # Flush the counts of the previous document before starting a new one.
            all_num_sentences.append(num_sentences)
            all_num_annotations.append(num_annotations)
            num_sentences = 0
            num_annotations = 0
            prev_id = current_sample['id']
        num_sentences += get_number_of_sentences(current_sample['text'])
        annotations = BeautifulSoup(current_sample['tagged_text'], 'lxml').findAll('timex3')
        num_annotations += len(annotations)
        for annotation in annotations:
            type_dist[annotation.attrs['type']] += 1
    all_num_sentences.append(num_sentences)
    all_num_annotations.append(num_annotations)
    print_stats(all_num_annotations, all_num_sentences, type_dist)
def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
    PathManager.mkdirs(os.path.dirname(output_file))
    with file_lock(output_file):
        if PathManager.exists(output_file) and allow_cached:
            logger.warning(
                f"Using previously cached COCO format annotations at '{output_file}'. "
                'You need to clear the cache file if your dataset has been modified.'
            )
        else:
            logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...")
            coco_dict = convert_to_coco_dict(dataset_name)
            logger.info(f"Caching COCO format annotations at '{output_file}' ...")
            with PathManager.open(output_file, 'w') as f:
                json.dump(coco_dict, f)
def param_name_dict():
    """Map each Caffe layer type name (e.g. 'Convolution') to the name of its
    parameter field on LayerParameter (e.g. 'convolution')."""
    layer = caffe_pb2.LayerParameter()
    # Fields like 'convolution_param' pair with message types like 'ConvolutionParameter'.
    param_names = [s for s in dir(layer) if s.endswith('_param')]
    param_type_names = [type(getattr(layer, s)).__name__ for s in param_names]
    param_names = [s[: -len('_param')] for s in param_names]
    param_type_names = [s[: -len('Parameter')] for s in param_type_names]
    return dict(zip(param_type_names, param_names))
def simple_backward_setup(output, seed=None):
    assert isinstance(output, torch.Tensor)
    if seed is not None:  # seed == 0 is a valid seed
        torch.manual_seed(seed)
    grad_output = torch.randn_like(output)
    return output, grad_output
def adjust_learning_rate(optimizer, epoch, config):
    """Linear warmup for `warmup_epochs`, then cosine decay down to `min_lr`."""
    if epoch < config.training.warmup_epochs:
        lr = config.optim.lr * epoch / config.training.warmup_epochs
    else:
        progress = (epoch - config.training.warmup_epochs) / (config.training.n_epochs - config.training.warmup_epochs)
        lr = config.optim.min_lr + (config.optim.lr - config.optim.min_lr) * 0.5 * (1.0 + math.cos(math.pi * progress))
    for param_group in optimizer.param_groups:
        if 'lr_scale' in param_group:
            param_group['lr'] = lr * param_group['lr_scale']
        else:
            param_group['lr'] = lr
    return lr
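# A quick self-contained check of the schedule above, using a stand-in config and
# a dummy object with a `param_groups` attribute (both invented for illustration).
import math
from types import SimpleNamespace

config = SimpleNamespace(
    training=SimpleNamespace(warmup_epochs=5, n_epochs=100),
    optim=SimpleNamespace(lr=1e-3, min_lr=1e-5),
)
dummy_opt = SimpleNamespace(param_groups=[{'lr': 0.0}])

for epoch in [0, 5, 52, 100]:
    print(epoch, adjust_learning_rate(dummy_opt, epoch, config))
# epoch 0   -> 0.0      (start of warmup)
# epoch 5   -> 1e-3     (warmup done, peak lr)
# epoch 52  -> ~5.1e-4  (roughly midway through the cosine)
# epoch 100 -> 1e-5     (fully decayed)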
def main(parsed_args, **unused_kwargs):
    assert parsed_args.path is not None, '--path required for evaluation!'
    if torch.cuda.is_available() and not parsed_args.cpu:
        torch.cuda.set_device(parsed_args.device_id)
    utils.import_user_module(parsed_args)
    logger.info(parsed_args)
    use_cuda = torch.cuda.is_available() and not parsed_args.cpu

    task = tasks.setup_task(parsed_args)

    logger.info('loading model(s) from {}'.format(parsed_args.path))
    models, args = checkpoint_utils.load_model_ensemble(
        parsed_args.path.split(os.pathsep),
        arg_overrides=eval(parsed_args.model_overrides),
        task=task,
        suffix=getattr(parsed_args, 'checkpoint_suffix', ''),
    )

    # Propagate command-line args into the checkpoint args, except task-specific
    # settings that must come from the checkpoint itself.
    for arg in vars(parsed_args).keys():
        if arg not in {
            'self_target', 'future_target', 'past_target',
            'tokens_per_sample', 'output_size_dictionary', 'add_bos_token',
        }:
            setattr(args, arg, getattr(parsed_args, arg))
    args.tokens_per_sample -= args.context_window
    task = tasks.setup_task(args)

    task.load_dataset(args.gen_subset)
    dataset = task.dataset(args.gen_subset)
    if args.context_window > 0:
        dataset = LMContextWindowDataset(
            dataset=dataset,
            tokens_per_sample=args.tokens_per_sample,
            context_window=args.context_window,
            pad_idx=task.source_dictionary.pad(),
        )
    logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))

    for model in models:
        model.make_generation_fast_()
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()

    assert len(models) > 0
    logger.info('num. model params: {}'.format(sum(p.numel() for p in models[0].parameters())))

    itr = task.get_batch_iterator(
        dataset=dataset,
        max_tokens=args.max_tokens or 36000,
        max_sentences=args.max_sentences,
        max_positions=utils.resolve_max_positions(*[model.max_positions() for model in models]),
        ignore_invalid_inputs=True,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
        num_workers=args.num_workers,
    ).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(
        itr,
        log_format=args.log_format,
        log_interval=args.log_interval,
        default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
    )

    gen_timer = StopwatchMeter()
    scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)

    score_sum = 0.0
    count = 0

    if args.remove_bpe is not None:
        if args.remove_bpe == 'sentencepiece':
            raise NotImplementedError
        else:
            bpe_cont = args.remove_bpe.rstrip()
            bpe_toks = {
                i for i in range(len(task.source_dictionary))
                if task.source_dictionary[i].endswith(bpe_cont)
            }
        bpe_len = len(bpe_cont)
    else:
        bpe_toks = None
        bpe_len = 0

    word_stats = dict()
    wps_meter = TimeMeter()

    for sample in progress:
        if 'net_input' not in sample:
            continue
        sample = utils.move_to_cuda(sample) if use_cuda else sample

        gen_timer.start()
        hypos = scorer.generate(models, sample)
        gen_timer.stop(sample['ntokens'])

        for i, hypos_i in enumerate(hypos):
            hypo = hypos_i[0]
            sample_id = sample['id'][i]

            tokens = hypo['tokens']
            tgt_len = tokens.numel()
            pos_scores = hypo['positional_scores'].float()

            if args.add_bos_token:
                assert hypo['tokens'][0].item() == task.target_dictionary.bos()
                tokens = tokens[1:]
                pos_scores = pos_scores[1:]

            skipped_toks = 0
            if bpe_toks is not None:
                # Merge the scores of BPE continuation tokens into the final sub-token.
                for i in range(tgt_len - 1):
                    if tokens[i].item() in bpe_toks:
                        skipped_toks += 1
                        pos_scores[i + 1] += pos_scores[i]
                        pos_scores[i] = 0

            inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))
            if inf_scores.any():
                logger.info(
                    'skipping tokens with inf scores: %s',
                    task.target_dictionary.string(tokens[inf_scores.nonzero()]),
                )
                pos_scores = pos_scores[(~inf_scores).nonzero()]
            score_sum += pos_scores.sum().cpu()
            count += pos_scores.numel() - skipped_toks

            if args.output_word_probs or args.output_word_stats:
                w = ''
                word_prob = []
                is_bpe = False
                for i in range(len(tokens)):
                    w_ind = tokens[i].item()
                    w += task.source_dictionary[w_ind]
                    if bpe_toks is not None and w_ind in bpe_toks:
                        w = w[:-bpe_len]
                        is_bpe = True
                    else:
                        word_prob.append((w, pos_scores[i].item()))
                        next_prob = None
                        ind = i + 1
                        while ind < len(tokens):
                            if pos_scores[ind].item() != 0:
                                next_prob = pos_scores[ind]
                                break
                            ind += 1
                        word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob)
                        is_bpe = False
                        w = ''
                if args.output_word_probs:
                    logger.info(
                        str(int(sample_id)) + ' '
                        + '\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob)
                    )

        wps_meter.update(sample['ntokens'])
        progress.log({'wps': round(wps_meter.avg)})

    avg_nll_loss = -score_sum / count / math.log(2)  # convert from nats to bits
    logger.info(
        'Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(
            gen_timer.n, gen_timer.sum, 1.0 / gen_timer.avg
        )
    )
    logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(avg_nll_loss, 2 ** avg_nll_loss))

    if args.output_word_stats:
        for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
            logger.info(ws)
class CustomDatasetDataLoader(BaseDataLoader):
    def name(self):
        return 'CustomDatasetDataLoader'

    def initialize(self, opt):
        BaseDataLoader.initialize(self, opt)
        self.dataset = CreateDataset(opt)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batchSize,
            shuffle=not opt.serial_batches,
            num_workers=int(opt.nThreads),
        )

    def load_data(self):
        return self

    def __len__(self):
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        for i, data in enumerate(self.dataloader):
            if i >= self.opt.max_dataset_size:
                break
            yield data
def save_sample_to_jsonl_gz(function, out_file):
    writer = codecs.getwriter('utf-8')(out_file)
    writer.write(json.dumps(function))
    writer.write('\n')
class StringListPropertyField(fields.TextAreaField):
    def _value(self):
        if self.raw_data:
            return self.raw_data[0]
        return (self.data and text_type('\n'.join(self.data))) or ''

    def process_formdata(self, valuelist):
        if valuelist:
            try:
                self.data = valuelist[0].splitlines()
            except ValueError:
                raise ValueError(self.gettext('Not a valid list'))
def BCPy(JDUTC, ra=0.0, dec=0.0, epoch=2451545.0, pmra=0.0, pmdec=0.0, px=0.0, rv=0.0,
         zmeas=0.0, loc=None, ephemeris='de430',
         leap_dir=os.path.join(os.path.dirname(__file__), 'data'), leap_update=True,
         predictive=False):
    JDTDB, JDTT, warning, error = utc_tdb.JDUTC_to_JDTDB(JDUTC)

    # Observer position/velocity in GCRS, shifted to the solar-system barycenter.
    r_pint, v_pint = PINT.gcrs_posvel_from_itrf(loc, JDUTC, JDTT)
    r_eci = r_pint[0]
    v_eci = v_pint[0]
    earth_geo = get_body_barycentric_posvel('earth', JDTDB, ephemeris=ephemeris)
    r_geo = np.reshape(earth_geo[0].xyz.value * 1000.0, 3)            # m
    v_geo = np.reshape(earth_geo[1].xyz.value * 1000.0 / 86400.0, 3)  # m/s
    PosVector_EarthSSB = r_eci + r_geo
    VelVector_EarthSSB = (v_eci + v_geo) / (1.0 + np.sum(v_eci * v_geo) / c ** 2)
    BetaEarth = VelVector_EarthSSB / c

    # Unit vector to the target at the catalog epoch, plus a local east/north basis.
    r0hat = np.array([
        math.cos(ra * np.pi / 180.0) * math.cos(dec * np.pi / 180.0),
        math.sin(ra * np.pi / 180.0) * math.cos(dec * np.pi / 180.0),
        math.sin(dec * np.pi / 180.0),
    ])
    up = [0.0, 0.0, 1.0]
    east = np.cross(up, r0hat)
    east = east / math.sqrt(np.sum(east * east))
    north = np.cross(r0hat, east)
    mu = (pmra * east + pmdec * north) / pctoau / 1000

    # Propagate the direction for proper motion and radial velocity since the epoch.
    epoch0 = 2000.0 + (epoch - 2451545.0) / 365.25
    yearnow = 2000.0 + (JDTDB.jd - 2451545.0) / 365.25
    T = yearnow - epoch0
    vpi = rv / 1000.0 * kmstoauyr * (px / 1000.0 / pctoau)
    vel = mu + vpi * r0hat
    r = r0hat + vel * T
    rhat = r / math.sqrt(np.sum(r * r))
    if px > 0:
        # With a parallax, correct for the observer's offset from the barycenter.
        rho = 1000.0 * rhat / px * pctoau - PosVector_EarthSSB / AU
        rhohat = rho / math.sqrt(np.sum(rho * rho))
        r0 = 1000.0 / px * pctoau * AU
        BetaStar = r0 * mu / c / year + rv * r0hat / c
        zlighttravel = rv * r0 * np.sum(mu * mu) * T / (year * c * c)
    else:
        rhohat = rhat
        BetaStar = [0.0, 0.0, 0.0]
        zlighttravel = 0.0

    # Shapiro delay and gravitational redshift contributions from each body.
    Sum_GR = 0.0
    zshapiro = 0.0
    for ss_body in ss_bodies:
        if ss_body == 'Earth':
            PosVector_SSObject = earth_geo[0].xyz.value * 1000.0
        else:
            jplephem = get_body_barycentric(ss_body, JDTDB, ephemeris=ephemeris)
            PosVector_SSObject = np.reshape(jplephem.xyz.value * 1000.0, 3)
        PosVector_EarthSSObject, PosMag_EarthSSObject, PosHat_EarthSSObject = CalculatePositionVector(
            r1=PosVector_EarthSSB, r2=PosVector_SSObject
        )
        a = np.dot(rhohat - np.dot(PosHat_EarthSSObject, rhohat) * PosHat_EarthSSObject, BetaEarth)
        zshapiro += -2.0 * GM[ss_body] * a / (c * c * PosMag_EarthSSObject * (1 + np.dot(PosHat_EarthSSObject, rhohat)))
        if PosMag_EarthSSObject:
            Sum_GR += GM[ss_body] / PosMag_EarthSSObject
    zgravity = 1.0 / (1 + Sum_GR / (c * c)) - 1

    # Combine all redshift terms into the barycentric correction.
    GammaEarth = 1.0 / math.sqrt(1.0 - np.sum(BetaEarth ** 2))
    zb = -1.0 - zshapiro - zlighttravel + (
        GammaEarth * (1 + np.dot(BetaEarth, rhohat)) * (1 + np.dot(r0hat, BetaStar))
    ) / ((1.0 + np.dot(BetaStar, rhohat)) * (1.0 + zgravity))
    if not predictive:
        v_final = c * ((1.0 + zb) * (1.0 + zmeas) - 1.0)
    else:
        v_final = (1 / (1.0 + zb) - 1) * c
    return v_final, warning, error
class AnswerSelector(object):
    def __init__(self, strategy: str):
        if strategy not in STRATEGIES:
            raise Exception(f'Unknown strategy: {strategy}')
        self.strategy = strategy
        self.nlp = spacy.load('en_core_web_sm')

    def _get_np_chunks_answers(self, sentence: Span) -> List[AnswerOffsets]:
        chunks = []
        for chunk in sentence.noun_chunks:
            chunks.append(AnswerOffsets(chunk.start_char, chunk.end_char, sentence.start_char, sentence.end_char, str(chunk)))
        return chunks

    def _get_max_np_answers(self, sentence: Span) -> List[AnswerOffsets]:
        # Walk the dependency tree and take maximal noun phrases of at most 7 tokens.
        root = sentence.root
        nodes = [root]
        nps = []
        while len(nodes) > 0:
            node = nodes.pop()
            recurse = True
            if node.pos_ in ['NOUN', 'PROPN']:
                # Find the token span of the subtree rooted at this noun.
                min_index = node.i
                max_index = node.i
                stack = [node]
                while len(stack) > 0:
                    current = stack.pop()
                    min_index = min(min_index, current.i)
                    max_index = max(max_index, current.i)
                    for child in current.children:
                        stack.append(child)
                sent_start_index = sentence[0].i
                num_tokens = max_index - min_index + 1
                if num_tokens <= 7:
                    recurse = False
                    span = sentence[min_index - sent_start_index:max_index + 1 - sent_start_index]
                    nps.append(AnswerOffsets(span.start_char, span.end_char, sentence.start_char, sentence.end_char, str(span)))
            if recurse:
                for child in node.children:
                    nodes.append(child)
        nps.sort(key=lambda offsets: offsets.start)
        return nps

    def _get_ner_answers(self, sentence: Span) -> List[AnswerOffsets]:
        ners = []
        for entity in sentence.ents:
            if entity.label_ in ['PERSON', 'NORP', 'FAC', 'ORG', 'GPE', 'LOC', 'EVENT', 'WORK_OF_ART']:
                ners.append(AnswerOffsets(entity.start_char, entity.end_char, sentence.start_char, sentence.end_char, str(entity)))
        return ners

    def _get_all_answers(self, sentence: Span) -> List[AnswerOffsets]:
        answers = set()
        answers |= set(self._get_np_chunks_answers(sentence))
        answers |= set(self._get_max_np_answers(sentence))
        answers |= set(self._get_ner_answers(sentence))
        return sorted(answers, key=lambda answer: (answer.start, answer.end))

    def select(self, text: str) -> List[AnswerOffsets]:
        doc = self.nlp(text)
        answers = []
        for sent in doc.sents:
            if self.strategy == NP_CHUNKS_STRATEGY:
                answers.extend(self._get_np_chunks_answers(sent))
            elif self.strategy == MAX_NP_STRATEGY:
                answers.extend(self._get_max_np_answers(sent))
            elif self.strategy == NER_STRATEGY:
                answers.extend(self._get_ner_answers(sent))
            elif self.strategy == ALL_STRATEGY:
                answers.extend(self._get_all_answers(sent))
            else:
                raise Exception(f'Unknown strategy: {self.strategy}')
        return answers

    def select_all(self, text_list: List[str]) -> List[List[AnswerOffsets]]:
        return [self.select(text) for text in text_list]
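# A usage sketch for the selector above. It assumes the module-level strategy
# constants (e.g. `NER_STRATEGY` being a member of `STRATEGIES`) and requires
# the `en_core_web_sm` spaCy model to be installed.
selector = AnswerSelector(strategy=NER_STRATEGY)
answers = selector.select('Marie Curie won the Nobel Prize in Paris.')
for ans in answers:
    print(ans)  # AnswerOffsets entries such as the PERSON span 'Marie Curie'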
@BACKBONES.register_module() class AOTEncoderDecoder(GLEncoderDecoder): def __init__(self, encoder=dict(type='AOTEncoder'), decoder=dict(type='AOTDecoder'), dilation_neck=dict(type='AOTBlockNeck')): super().__init__() self.encoder = build_component(encoder) self.decoder = build_component(decoder) self.dilation_neck = build_component(dilation_neck)  # NOTE: the leading registry decorator was garbled to '_module()' in extraction; '@BACKBONES.register_module()' is a plausible reconstruction, not confirmed
def main(args) -> None: save_dir = f'exp/{args.probe_type}/{args.eval_dataset}/{args.framework}_{args.text_type}_{args.text_rep}/' save_hparams(args, save_dir) embs_dir = f'{args.msu_dir}/{args.eval_dataset}/pretrained/{args.framework}_{args.text_type}_{args.text_rep}' if (args.eval_dataset in ['mtg_top50tags', 'mtg_genre', 'mtg_instrument', 'mtg_moodtheme']): embs_dir = f'{args.msu_dir}/mtg/pretrained/{args.framework}_{args.text_type}_{args.text_rep}' audio_embs = torch.load(os.path.join(embs_dir, 'audio_embs.pt')) tag_embs = torch.load(os.path.join(embs_dir, 'tag_embs.pt')) test_loader = get_dataloader(args=args, audio_embs=audio_embs, split='TEST') t_embs = [tag_embs[tag] for tag in test_loader.dataset.list_of_label] (a_embs, groundtruths) = ([], []) for batch in tqdm(test_loader): a_embs.append(batch['audio']) groundtruths.append(batch['binary']) a_embs = torch.cat(a_embs, dim=0) t_embs = torch.cat(t_embs, dim=0) targets = torch.cat(groundtruths, dim=0) a_embs = nn.functional.normalize(a_embs, dim=1) t_embs = nn.functional.normalize(t_embs, dim=1) logits = (a_embs @ t_embs.T) if (args.eval_dataset in ['fma', 'gtzan', 'emotify']): results = get_evaluation(targets.numpy(), logits.numpy(), test_loader.dataset.list_of_label, 'multiclass') else: results = get_evaluation(targets.numpy(), logits.numpy(), test_loader.dataset.list_of_label, 'multilabel') with open(os.path.join(save_dir, 'results.json'), mode='w') as io: json.dump(results, io, indent=4)
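# Hedged illustration (not from the source repo): with L2-normalised rows, the
# matrix product a_embs @ t_embs.T used above yields pairwise cosine
# similarities between every audio clip and every tag embedding.
import torch
import torch.nn as nn

a_embs = nn.functional.normalize(torch.randn(4, 128), dim=1)   # 4 audio clips
t_embs = nn.functional.normalize(torch.randn(10, 128), dim=1)  # 10 tags
logits = a_embs @ t_embs.T  # shape (4, 10), each entry a cosine similarity
assert logits.shape == (4, 10)
assert float(logits.abs().max()) <= 1.0 + 1e-6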
def test_RegularArray(): v2_array = ak.highlevel.Array(np.array([[0.0, 1.1, 2.2, 3.3], [4.4, 5.5, 6.6, 7.7]])).layout assert (to_list(ak._do.combinations(v2_array, 2, replacement=False)) == [[(0.0, 1.1), (0.0, 2.2), (0.0, 3.3), (1.1, 2.2), (1.1, 3.3), (2.2, 3.3)], [(4.4, 5.5), (4.4, 6.6), (4.4, 7.7), (5.5, 6.6), (5.5, 7.7), (6.6, 7.7)]]) assert (ak._do.combinations(v2_array.to_typetracer(), 2, replacement=False).form == ak._do.combinations(v2_array, 2, replacement=False).form) assert (to_list(ak._do.combinations(v2_array, 2, replacement=False, fields=['x', 'y'])) == [[{'x': 0.0, 'y': 1.1}, {'x': 0.0, 'y': 2.2}, {'x': 0.0, 'y': 3.3}, {'x': 1.1, 'y': 2.2}, {'x': 1.1, 'y': 3.3}, {'x': 2.2, 'y': 3.3}], [{'x': 4.4, 'y': 5.5}, {'x': 4.4, 'y': 6.6}, {'x': 4.4, 'y': 7.7}, {'x': 5.5, 'y': 6.6}, {'x': 5.5, 'y': 7.7}, {'x': 6.6, 'y': 7.7}]]) assert (ak._do.combinations(v2_array.to_typetracer(), 2, replacement=False, fields=['x', 'y']).form == ak._do.combinations(v2_array, 2, replacement=False, fields=['x', 'y']).form) assert (ak._do.combinations(v2_array, 2, replacement=False, parameters={'some': 'param'}).content.parameters['some'] == 'param') assert (ak._do.combinations(v2_array.to_typetracer(), 2, replacement=False, parameters={'some': 'param'}).form == ak._do.combinations(v2_array, 2, replacement=False, parameters={'some': 'param'}).form) assert (to_list(ak._do.combinations(v2_array, 2, replacement=True)) == [[(0.0, 0.0), (0.0, 1.1), (0.0, 2.2), (0.0, 3.3), (1.1, 1.1), (1.1, 2.2), (1.1, 3.3), (2.2, 2.2), (2.2, 3.3), (3.3, 3.3)], [(4.4, 4.4), (4.4, 5.5), (4.4, 6.6), (4.4, 7.7), (5.5, 5.5), (5.5, 6.6), (5.5, 7.7), (6.6, 6.6), (6.6, 7.7), (7.7, 7.7)]]) assert (ak._do.combinations(v2_array.to_typetracer(), 2, replacement=True).form == ak._do.combinations(v2_array, 2, replacement=True).form) assert (to_list(ak._do.combinations(v2_array, 3, replacement=False)) == [[(0.0, 1.1, 2.2), (0.0, 1.1, 3.3), (0.0, 2.2, 3.3), (1.1, 2.2, 3.3)], [(4.4, 5.5, 6.6), (4.4, 5.5, 7.7), (4.4, 6.6, 7.7), (5.5, 6.6, 7.7)]]) assert (ak._do.combinations(v2_array.to_typetracer(), 3, replacement=False).form == ak._do.combinations(v2_array, 3, replacement=False).form) assert (to_list(ak._do.combinations(v2_array, 3, replacement=True)) == [[(0.0, 0.0, 0.0), (0.0, 0.0, 1.1), (0.0, 0.0, 2.2), (0.0, 0.0, 3.3), (0.0, 1.1, 1.1), (0.0, 1.1, 2.2), (0.0, 1.1, 3.3), (0.0, 2.2, 2.2), (0.0, 2.2, 3.3), (0.0, 3.3, 3.3), (1.1, 1.1, 1.1), (1.1, 1.1, 2.2), (1.1, 1.1, 3.3), (1.1, 2.2, 2.2), (1.1, 2.2, 3.3), (1.1, 3.3, 3.3), (2.2, 2.2, 2.2), (2.2, 2.2, 3.3), (2.2, 3.3, 3.3), (3.3, 3.3, 3.3)], [(4.4, 4.4, 4.4), (4.4, 4.4, 5.5), (4.4, 4.4, 6.6), (4.4, 4.4, 7.7), (4.4, 5.5, 5.5), (4.4, 5.5, 6.6), (4.4, 5.5, 7.7), (4.4, 6.6, 6.6), (4.4, 6.6, 7.7), (4.4, 7.7, 7.7), (5.5, 5.5, 5.5), (5.5, 5.5, 6.6), (5.5, 5.5, 7.7), (5.5, 6.6, 6.6), (5.5, 6.6, 7.7), (5.5, 7.7, 7.7), (6.6, 6.6, 6.6), (6.6, 6.6, 7.7), (6.6, 7.7, 7.7), (7.7, 7.7, 7.7)]]) assert (ak._do.combinations(v2_array.to_typetracer(), 3, replacement=True).form == ak._do.combinations(v2_array, 3, replacement=True).form)
def expected_speedup_compared_to_seq(pipe_times, seq_times: ProfileResult): def extract_seq_stuff(seq_times): nocomm_real_b_times = seq_times.nocommb_times_mean nocomm_real_f_times = seq_times.nocommf_times_mean real_b_times = seq_times.b_times_mean real_f_times = seq_times.f_times_mean b_seq_no_recomp_no_comm_times = sum(nocomm_real_b_times.values()) f_seq_no_recomp_no_comm_times = sum(nocomm_real_f_times.values()) b_seq_no_recomp_with_comm_times = sum(real_b_times.values()) f_seq_no_recomp_with_comm_times = sum(real_f_times.values()) seq_times = ((b_seq_no_recomp_no_comm_times, f_seq_no_recomp_no_comm_times), (b_seq_no_recomp_with_comm_times, f_seq_no_recomp_with_comm_times)) return seq_times (fwd_times, bwd_times, fwd_times_wo_comm, bwd_times_wo_comm) = pipe_times ((b_seq_no_recomp_no_comm_times, f_seq_no_recomp_no_comm_times), (b_seq_no_recomp_with_comm_times, f_seq_no_recomp_with_comm_times)) = extract_seq_stuff(seq_times) worst_fwd = max(fwd_times.values()) worst_bwd = max(bwd_times.values()) pipe_fwd_plus_bwd = (worst_fwd + worst_bwd) seq_fwd_plus_bwd = (f_seq_no_recomp_no_comm_times + b_seq_no_recomp_no_comm_times) expected_speedup = (seq_fwd_plus_bwd / pipe_fwd_plus_bwd) return expected_speedup
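# Hedged numeric sketch of the speedup formula above: the pipeline is bounded by
# its slowest stage, so with per-stage forward times {2, 3}, backward times
# {4, 5} and sequential no-comm totals F=10, B=20, the expected speedup is
# (10 + 20) / (3 + 5) = 3.75.
fwd_times = {0: 2.0, 1: 3.0}
bwd_times = {0: 4.0, 1: 5.0}
seq_fwd_plus_bwd = 10.0 + 20.0
pipe_fwd_plus_bwd = max(fwd_times.values()) + max(bwd_times.values())
print(seq_fwd_plus_bwd / pipe_fwd_plus_bwd)  # 3.75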
@pytest.mark.parametrize('exponent_bits', [5, 6, 7, 8]) @test_utils.test(require=ti.extension.quant) def test_shared_exponent_borrow(exponent_bits): qflt1 = ti.types.quant.float(exp=exponent_bits, frac=10, signed=False) qflt2 = ti.types.quant.float(exp=exponent_bits, frac=14, signed=False) a = ti.field(dtype=qflt1) b = ti.field(dtype=qflt2) bitpack = ti.BitpackedFields(max_num_bits=32) bitpack.place(a, b, shared_exponent=True) ti.root.place(bitpack) @ti.kernel def foo(x: ti.f32, y: ti.f32): a[None] = x b[None] = y @ti.kernel def inc(): a[None] += 1 b[None] -= 1 foo(0, 100) for i in range(100): assert (a[None] == i) assert (b[None] == (100 - i)) inc()  # NOTE: the '@pytest.mark', '@test_utils' and '@ti.kernel' decorator prefixes were stripped in extraction and are reconstructed here (assumed)
class PoolFormer(nn.Module): def __init__(self, model_name: str='S24') -> None: super().__init__() assert (model_name in poolformer_settings.keys()), f'PoolFormer model name should be in {list(poolformer_settings.keys())}' (layers, embed_dims, drop_path_rate) = poolformer_settings[model_name] self.channels = embed_dims self.patch_embed = PatchEmbed(7, 4, 2, 3, embed_dims[0]) network = [] for i in range(len(layers)): blocks = [] for j in range(layers[i]): dpr = ((drop_path_rate * (j + sum(layers[:i]))) / (sum(layers) - 1)) blocks.append(PoolFormerBlock(embed_dims[i], 3, dpr)) network.append(nn.Sequential(*blocks)) if (i >= (len(layers) - 1)): break network.append(PatchEmbed(3, 2, 1, embed_dims[i], embed_dims[(i + 1)])) self.network = nn.ModuleList(network) self.out_indices = [0, 2, 4, 6] for (i, index) in enumerate(self.out_indices): self.add_module(f'norm{index}', nn.GroupNorm(1, embed_dims[i])) def forward(self, x: Tensor): x = self.patch_embed(x) outs = [] for (i, blk) in enumerate(self.network): x = blk(x) if (i in self.out_indices): out = getattr(self, f'norm{i}')(x) outs.append(out) return outs
def fused_batch_normalization_backward_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, axes=(1,), decay_rate=0.9, eps=1e-05, batch_stat=True, nonlinearity='relu'): is_add = (True if (len(inputs) == 8) else False) if is_add: g_dx0 = grad_inputs[0] g_db0 = grad_inputs[1] g_dg0 = grad_inputs[2] g_dz0 = grad_inputs[3] dy = inputs[0] x0 = inputs[1] b0 = inputs[2] g0 = inputs[3] rm = inputs[4] rv = inputs[5] y0 = inputs[6] z0 = inputs[7] else: g_dx0 = grad_inputs[0] g_db0 = grad_inputs[1] g_dg0 = grad_inputs[2] dy = inputs[0] x0 = inputs[1] b0 = inputs[2] g0 = inputs[3] rm = inputs[4] rv = inputs[5] y0 = inputs[6] z0 = None g_dz0 = None (g_dy, g_x0, g_b0, g_g0) = double_backward(g_dx0, g_db0, g_dg0, g_dz0, dy, x0, b0, g0, rm, rv, y0, z0, axes, decay_rate, eps, nonlinearity, batch_stat) if is_add: return (g_dy, g_x0, g_b0, g_g0, None, None, None, None) else: return (g_dy, g_x0, g_b0, g_g0, None, None, None)
def save_frames_as_video(frames, video_path, fps=30): (height, width, layers) = frames[0].shape video = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height)) for frame in frames: video.write(cv2.cvtColor((frame * 255).astype(np.uint8), cv2.COLOR_RGB2BGR)) cv2.destroyAllWindows() video.release()
class DetrForSegmentation(): def __init__(self, *args, **kwargs): requires_backends(self, ['timm']) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ['timm'])
def get_backend(): backend = getattr(g, '_backend', None) if (backend is None): g._backend = Backend(app.config['user_params'], app.config['schema'], app.config['scenario_db'], app.config['systems'], app.config['sessions'], app.config['controller_map'], app.config['pairing_probabilities'], app.config['num_chats_per_scenario'], Messages) backend = g._backend return backend
def post_process_generate_ids(tokenizer: PreTrainedTokenizer, ids: torch.Tensor): ids = copy.deepcopy(ids) ids[(ids < 0)] = tokenizer.pad_token_id return ids
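# Hedged sketch of the masked assignment used above: negative sentinel ids
# (e.g. -100 from loss masking) are rewritten in place to the pad id.
import torch
ids = torch.tensor([[-100, 5, 6], [7, 8, -100]])
pad_token_id = 0  # assumed value; normally taken from the tokenizer
ids[ids < 0] = pad_token_id
print(ids)  # tensor([[0, 5, 6], [7, 8, 0]])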
def quadratic_L_function__exact(n, d): if (n <= 0): return (QuadraticBernoulliNumber((1 - n), d) / (n - 1)) elif (n >= 1): if (kronecker_symbol(fundamental_discriminant(d), (- 1)) == 1): delta = 0 else: delta = 1 if (((n - delta) % 2) == 0): from sage.misc.functional import sqrt from sage.symbolic.constants import I, pi from sage.symbolic.ring import SR f = abs(fundamental_discriminant(d)) if (delta == 0): GS = sqrt(f) else: GS = (I * sqrt(f)) ans = SR((ZZ((- 1)) ** (1 + ((n - delta) / 2)))) ans *= (((2 * pi) / f) ** n) ans *= GS ans *= (QQ.one() / (2 * (I ** delta))) ans *= (QuadraticBernoulliNumber(n, d) / factorial(n)) return ans else: if (delta == 0): raise TypeError('n must be a critical value (i.e. even > 0 or odd < 0)') if (delta == 1): raise TypeError('n must be a critical value (i.e. odd > 0 or even <= 0)')
class NodePrivSAGE(SAGE): def __init__(self, num_classes, epsilon: Annotated[(float, ArgInfo(help='DP epsilon parameter', option='-e'))], delta: Annotated[(Union[(Literal['auto'], float)], ArgInfo(help='DP delta parameter (if "auto", sets a proper value based on data size)', option='-d'))]='auto', max_degree: Annotated[(int, ArgInfo(help='max degree to sample per each node'))]=100, max_grad_norm: Annotated[(float, ArgInfo(help='maximum norm of the per-sample gradients'))]=1.0, batch_size: Annotated[(int, ArgInfo(help='batch size'))]=256, **kwargs: Annotated[(dict, ArgInfo(help='extra options passed to base class', bases=[SAGE], exclude=['batch_norm', 'mp_layers', 'val_interval']))]): super().__init__(num_classes=num_classes, batch_size=batch_size, batch_norm=False, mp_layers=1, val_interval=0, **kwargs) self.epsilon = epsilon self.delta = delta self.max_degree = max_degree self.max_grad_norm = max_grad_norm self.num_train_nodes = None self.classifier.normalize = True def calibrate(self): self.noisy_sgd = GNNBasedNoisySGD(noise_scale=0.0, dataset_size=self.num_train_nodes, batch_size=self.batch_size, epochs=self.epochs, max_grad_norm=self.max_grad_norm, max_degree=self.max_degree) self.noisy_aggr_gm = GaussianMechanism(noise_scale=0.0) composed_mechanism = ComposedNoisyMechanism(noise_scale=0.0, mechanism_list=[self.noisy_sgd, self.noisy_aggr_gm], coeff_list=[1, 1]) if hasattr(self, 'noisy_aggr_hook'): self.noisy_aggr_hook.remove() self.noisy_aggr_hook = self.classifier.gnn.convs[0].register_message_and_aggregate_forward_hook((lambda module, inputs, output: (self.noisy_aggr_gm(data=output, sensitivity=np.sqrt(self.max_degree)) if (not module.training) else output))) with console.status('calibrating noise to privacy budget'): if (self.delta == 'auto'): delta = (0.0 if np.isinf(self.epsilon) else (1.0 / (10 ** len(str(self.num_train_nodes))))) console.info(('delta = %.0e' % delta)) self.noise_scale = composed_mechanism.calibrate(eps=self.epsilon, delta=delta) console.info(f'''noise scale: {self.noise_scale:.4f} ''') self._classifier = self.noisy_sgd.prepare_module(self._classifier) def sample_neighbors(self, data: Data) -> Data: data = data.to(self.device, non_blocking=True) with console.status('bounding the number of neighbors per node'): data = BoundDegree(self.max_degree)(data) return data def fit(self, data: Data, prefix: str='') -> Metrics: num_train_nodes = data.train_mask.sum().item() if (num_train_nodes != self.num_train_nodes): self.num_train_nodes = num_train_nodes self.calibrate() data = self.sample_neighbors(data) return super().fit(data, prefix=prefix) def test(self, data: Optional[Data]=None, prefix: str='') -> Metrics: if ((data is not None) and (data != self.data)): data = self.sample_neighbors(data) return super().test(data, prefix=prefix) def predict(self, data: Optional[Data]=None) -> torch.Tensor: if ((data is not None) and (data != self.data)): data = self.sample_neighbors(data) return super().predict(data) def data_loader(self, data: Data, stage: Stage) -> NodeDataLoader: dataloader = super().data_loader(data, stage) if (stage == 'train'): dataloader.hops = 1 dataloader.poisson_sampling = False return dataloader def configure_optimizer(self) -> Optimizer: optimizer = super().configure_optimizer() optimizer = self.noisy_sgd.prepare_optimizer(optimizer) return optimizer
def convert_mat(mat_file, in_dir, out_dir): data = loadmat(osp.join(in_dir, mat_file)) mask = data['GTcls'][0]['Segmentation'][0].astype(np.uint8) seg_filename = osp.join(out_dir, mat_file.replace('.mat', '.png')) Image.fromarray(mask).save(seg_filename, 'PNG')
def iou(det_x, det_y, gt_x, gt_y): if (approx_area_of_intersection(det_x, det_y, gt_x, gt_y) > 1): ymax = (np.maximum(np.max(det_y), np.max(gt_y)) + 1) xmax = (np.maximum(np.max(det_x), np.max(gt_x)) + 1) bin_mask = np.zeros((ymax, xmax)) det_bin_mask = np.zeros_like(bin_mask) gt_bin_mask = np.zeros_like(bin_mask) (rr, cc) = polygon(det_y, det_x) det_bin_mask[(rr, cc)] = 1 (rr, cc) = polygon(gt_y, gt_x) gt_bin_mask[(rr, cc)] = 1 final_bin_mask = (det_bin_mask + gt_bin_mask) inter_map = np.where((final_bin_mask == 2), 1, 0) inter = np.sum(inter_map) union_map = np.where((final_bin_mask > 0), 1, 0) union = np.sum(union_map) return (inter / float((union + 1.0))) else: return 0
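# Hedged sketch mirroring the rasterised IoU logic above (the helper
# approx_area_of_intersection is defined elsewhere, so the mask construction is
# inlined here): two 10x10 squares offset by 5 px overlap in a 5x5 region, so
# the result lands near the exact 25/175 ~= 0.14, up to pixel rasterisation and
# the +1.0 in the denominator.
import numpy as np
from skimage.draw import polygon

det_x, det_y = np.array([0, 10, 10, 0]), np.array([0, 0, 10, 10])
gt_x, gt_y = np.array([5, 15, 15, 5]), np.array([5, 5, 15, 15])
det_m, gt_m = np.zeros((16, 16)), np.zeros((16, 16))
rr, cc = polygon(det_y, det_x)
det_m[rr, cc] = 1
rr, cc = polygon(gt_y, gt_x)
gt_m[rr, cc] = 1
both = det_m + gt_m
print(np.sum(both == 2) / float(np.sum(both > 0) + 1.0))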
def assureSingleInstanceName(name): if (name in name2label): return name if (not name.endswith('group')): return None name = name[:(- len('group'))] if (not (name in name2label)): return None if (not name2label[name].hasInstances): return None return name
def postprocess_atomic_facts(_atomic_facts, para_breaks, nlp): verbs = ['born.', ' appointed.', ' characterized.', ' described.', ' known.', ' member.', ' advocate.', 'served.', 'elected.'] permitted_verbs = ['founding member.'] atomic_facts = [] new_atomic_facts = [] new_para_breaks = [] for (i, (sent, facts)) in enumerate(_atomic_facts): sent = sent.strip() if ((len(sent.split()) == 1) and (i not in para_breaks) and (i > 0)): assert (i not in para_breaks) atomic_facts[(- 1)][0] += (' ' + sent) atomic_facts[(- 1)][1] += facts else: if (i in para_breaks): new_para_breaks.append(len(atomic_facts)) atomic_facts.append([sent, facts]) for (i, (sent, facts)) in enumerate(atomic_facts): entities = detect_entities(sent, nlp) covered_entities = set() new_facts = [] for (i, fact) in enumerate(facts): if (any([fact.endswith(verb) for verb in verbs]) and (not any([fact.endswith(verb) for verb in permitted_verbs]))): if any([(fact[:(- 1)] in other_fact) for (j, other_fact) in enumerate(facts) if (j != i)]): continue sent_entities = detect_entities(fact, nlp) covered_entities |= set([e for e in sent_entities if (e in entities)]) new_entities = (sent_entities - entities) if (len(new_entities) > 0): do_pass = False for new_ent in new_entities: pre_ent = None for ent in entities: if ent.startswith(new_ent): pre_ent = ent break if (pre_ent is None): do_pass = True break fact = fact.replace(new_ent, pre_ent) covered_entities.add(pre_ent) if do_pass: continue if (fact in new_facts): continue new_facts.append(fact) try: assert (entities == covered_entities) except Exception: new_facts = facts new_atomic_facts.append((sent, new_facts)) return (new_atomic_facts, new_para_breaks)
def _preprocess_input(image, footprint=None, out=None, mask=None, out_dtype=None, pixel_size=1): check_nD(image, 2) input_dtype = image.dtype if ((input_dtype in (bool, np.bool_)) or (out_dtype in (bool, np.bool_))): raise ValueError('dtype cannot be bool.') if (input_dtype not in (np.uint8, np.uint16)): message = f'Possible precision loss converting image of type {input_dtype} to uint8 as required by rank filters. Convert manually using skimage.util.img_as_ubyte to silence this warning.' warn(message, stacklevel=5) image = img_as_ubyte(image) if _footprint_is_sequence(footprint): raise ValueError('footprint sequences are not currently supported by rank filters') footprint = np.ascontiguousarray(img_as_ubyte((footprint > 0))) if (footprint.ndim != image.ndim): raise ValueError('Image dimensions and neighborhood dimensions do not match') image = np.ascontiguousarray(image) if (mask is not None): mask = img_as_ubyte(mask) mask = np.ascontiguousarray(mask) if (image is out): raise NotImplementedError('Cannot perform rank operation in place.') if (out is None): if (out_dtype is None): out_dtype = image.dtype out = np.empty((image.shape + (pixel_size,)), dtype=out_dtype) elif (len(out.shape) == 2): out = out.reshape((out.shape + (pixel_size,))) if (image.dtype in (np.uint8, np.int8)): n_bins = 256 else: n_bins = (int(max(3, image.max())) + 1) if (n_bins > (2 ** 10)): warn(f'Bad rank filter performance is expected due to a large number of bins ({n_bins}), equivalent to an approximate bitdepth of {np.log2(n_bins):.1f}.', stacklevel=2) return (image, footprint, out, mask, n_bins)
class ExpediaBooking(VirtualFunctionTool): name = 'ExpediaBooking' summary = 'Book flight or accommodation options using user-provided details and payment information.' parameters: List[ArgParameter] = [{'name': 'option_ids', 'type': 'array', 'description': 'A non-empty array of unique identifiers of the options to book. The options must be of the same type, either all flights or all accommodations.', 'required': True}, {'name': 'payment_method', 'type': 'object', 'description': 'An object containing payment information including card number, expiry date, and CVV.', 'required': True}, {'name': 'travellers', 'type': 'array', 'description': "An array of objects; each object contains name (string), date_of_birth (string, format: 'YYYY-MM-DD'), passport_number (string), passport_expiry_date (string, format: 'YYYY-MM-DD'). Only required for flight booking.", 'required': False}] returns: List[ArgReturn] = [{'name': 'booking_results', 'type': 'object', 'description': "An object indicating the booking results, including success (boolean). If success is true, the object also contains reservation_id (string), reservation_type (string, one of ['flight', 'accommodation']), total_price (number, in USD)."}] exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "One or more of the 'option_ids' does not exist."}, {'name': 'InvalidRequestException', 'description': "The options in 'option_ids' are not of the same type, or 'option_ids' is empty, or the payment information is invalid, or the 'travellers' parameter is not provided for flight booking, or the 'travellers' parameter contains invalid information."}]
def format_rows(data, metas, sharded_meta=False, headers=['shard_name', 'filename', 'id', 'segment']): data_with_metas = {} keys = [] no_meta = 0 for row in data: fname = Path(row['filename']).stem flag = False if sharded_meta: shard_name = row['shard_name'] if (shard_name in metas): if (fname in metas[shard_name]): meta = metas[shard_name][fname] flag = True elif (fname in metas): meta = metas[fname] flag = True if (not flag): meta = {'id': '-1', 'segment': [(- 1.0), (- 1.0)]} no_meta += 1 row = {**row, **meta} data_with_metas[fname] = row keys.append(fname) lines = [] for key in keys: row = data_with_metas[key] line = [row[h] for h in headers] lines.append(line) return lines
def main(): matplotlib.use('Agg') np.random.seed(args['SEED']) torch.manual_seed(args['SEED']) gpuAvailable = torch.cuda.is_available() device = torch.device(('cuda' if gpuAvailable else 'cpu')) kwargs = ({'num_workers': args['NUM_WORKERS'], 'pin_memory': True} if gpuAvailable else {}) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False videoParams = {'videoFPS': args['VIDEO_FPS']} trainData = LRS2Main('train', args['DATA_DIRECTORY'], args['MAIN_REQ_INPUT_LENGTH'], args['CHAR_TO_INDEX'], args['STEP_SIZE'], videoParams) trainLoader = DataLoader(trainData, batch_size=args['BATCH_SIZE'], collate_fn=collate_fn, shuffle=True, **kwargs) valData = LRS2Main('val', args['DATA_DIRECTORY'], args['MAIN_REQ_INPUT_LENGTH'], args['CHAR_TO_INDEX'], args['STEP_SIZE'], videoParams) valLoader = DataLoader(valData, batch_size=args['BATCH_SIZE'], collate_fn=collate_fn, shuffle=True, **kwargs) model = VideoNet(args['TX_NUM_FEATURES'], args['TX_ATTENTION_HEADS'], args['TX_NUM_LAYERS'], args['PE_MAX_LENGTH'], args['TX_FEEDFORWARD_DIM'], args['TX_DROPOUT'], args['NUM_CLASSES']) model.to(device) optimizer = optim.Adam(model.parameters(), lr=args['INIT_LR'], betas=(args['MOMENTUM1'], args['MOMENTUM2'])) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=args['LR_SCHEDULER_FACTOR'], patience=args['LR_SCHEDULER_WAIT'], threshold=args['LR_SCHEDULER_THRESH'], threshold_mode='abs', min_lr=args['FINAL_LR'], verbose=True) loss_function = nn.CTCLoss(blank=0, zero_infinity=False) if os.path.exists((args['CODE_DIRECTORY'] + '/checkpoints')): while True: ch = input("Continue and remove the 'checkpoints' directory? y/n: ") if (ch == 'y'): break elif (ch == 'n'): exit() else: print('Invalid input') shutil.rmtree((args['CODE_DIRECTORY'] + '/checkpoints')) os.mkdir((args['CODE_DIRECTORY'] + '/checkpoints')) os.mkdir((args['CODE_DIRECTORY'] + '/checkpoints/models')) os.mkdir((args['CODE_DIRECTORY'] + '/checkpoints/plots')) if (args['PRETRAINED_MODEL_FILE'] is not None): print(('\n\nPre-trained Model File: %s' % args['PRETRAINED_MODEL_FILE'])) print('\nLoading the pre-trained model .... \n') model.load_state_dict(torch.load((args['CODE_DIRECTORY'] + args['PRETRAINED_MODEL_FILE']), map_location=device)) model.to(device) print('Loading Done.\n') trainingLossCurve = list() validationLossCurve = list() trainingWERCurve = list() validationWERCurve = list() (numTotalParams, numTrainableParams) = num_params(model) print(('\nNumber of total parameters in the model = %d' % numTotalParams)) print(('Number of trainable parameters in the model = %d\n' % numTrainableParams)) print('\nTraining the model .... \n') trainParams = {'spaceIx': args['CHAR_TO_INDEX'][' '], 'eosIx': args['CHAR_TO_INDEX']['<EOS>']} valParams = {'decodeScheme': 'greedy', 'spaceIx': args['CHAR_TO_INDEX'][' '], 'eosIx': args['CHAR_TO_INDEX']['<EOS>']} for step in range(args['NUM_STEPS']): (trainingLoss, trainingCER, trainingWER) = train(model, trainLoader, optimizer, loss_function, device, trainParams) trainingLossCurve.append(trainingLoss) trainingWERCurve.append(trainingWER) (validationLoss, validationCER, validationWER) = evaluate(model, valLoader, loss_function, device, valParams) validationLossCurve.append(validationLoss) validationWERCurve.append(validationWER) print(('Step: %03d || Tr.Loss: %.6f  Val.Loss: %.6f || Tr.CER: %.3f  Val.CER: %.3f || Tr.WER: %.3f  Val.WER: %.3f' % (step, trainingLoss, validationLoss, trainingCER, validationCER, trainingWER, validationWER))) scheduler.step(validationWER) if ((((step % args['SAVE_FREQUENCY']) == 0) or (step == (args['NUM_STEPS'] - 1))) and (step != 0)): savePath = (args['CODE_DIRECTORY'] + '/checkpoints/models/train-step_{:04d}-wer_{:.3f}.pt'.format(step, validationWER)) torch.save(model.state_dict(), savePath) plt.figure() plt.title('Loss Curves') plt.xlabel('Step No.') plt.ylabel('Loss value') plt.plot(list(range(1, (len(trainingLossCurve) + 1))), trainingLossCurve, 'blue', label='Train') plt.plot(list(range(1, (len(validationLossCurve) + 1))), validationLossCurve, 'red', label='Validation') plt.legend() plt.savefig((args['CODE_DIRECTORY'] + '/checkpoints/plots/train-step_{:04d}-loss.png'.format(step))) plt.close() plt.figure() plt.title('WER Curves') plt.xlabel('Step No.') plt.ylabel('WER') plt.plot(list(range(1, (len(trainingWERCurve) + 1))), trainingWERCurve, 'blue', label='Train') plt.plot(list(range(1, (len(validationWERCurve) + 1))), validationWERCurve, 'red', label='Validation') plt.legend() plt.savefig((args['CODE_DIRECTORY'] + '/checkpoints/plots/train-step_{:04d}-wer.png'.format(step))) plt.close() print('\nTraining Done.\n') return
def test_comparison_with_keywords(): p = sqlparse.parse('foo = NULL')[0] assert (len(p.tokens) == 1) assert isinstance(p.tokens[0], sql.Comparison) assert (len(p.tokens[0].tokens) == 5) assert (p.tokens[0].left.value == 'foo') assert (p.tokens[0].right.value == 'NULL') p = sqlparse.parse('foo = null')[0] assert (len(p.tokens) == 1) assert isinstance(p.tokens[0], sql.Comparison)
def evaluate_RWords(continuations, unigramDist): all_results = [] for continuation in tqdm(continuations): mean_log_unigram_prob_gold = 0.0 l_gold = 0 for candidate in continuation: for c in word_tokenize(candidate): l_gold += 1 if (c in unigramDist): mean_log_unigram_prob_gold += unigramDist[c] else: mean_log_unigram_prob_gold += (- 20.0) mean_log_unigram_prob_gold /= l_gold all_results.append(mean_log_unigram_prob_gold) final_result = np.average(all_results) return (final_result, all_results)
class _LazyAutoMapping(OrderedDict): def __init__(self, config_mapping, model_mapping): self._config_mapping = config_mapping self._reverse_config_mapping = {v: k for (k, v) in config_mapping.items()} self._model_mapping = model_mapping self._extra_content = {} self._modules = {} def __len__(self): common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys()) return (len(common_keys) + len(self._extra_content)) def __getitem__(self, key): if (key in self._extra_content): return self._extra_content[key] model_type = self._reverse_config_mapping[key.__name__] if (model_type in self._model_mapping): model_name = self._model_mapping[model_type] return self._load_attr_from_module(model_type, model_name) model_types = [k for (k, v) in self._config_mapping.items() if (v == key.__name__)] for mtype in model_types: if (mtype in self._model_mapping): model_name = self._model_mapping[mtype] return self._load_attr_from_module(mtype, model_name) raise KeyError(key) def _load_attr_from_module(self, model_type, attr): module_name = model_type_to_module_name(model_type) if (module_name not in self._modules): self._modules[module_name] = importlib.import_module(f'.{module_name}', 'transformers.models') return getattribute_from_module(self._modules[module_name], attr) def keys(self): mapping_keys = [self._load_attr_from_module(key, name) for (key, name) in self._config_mapping.items() if (key in self._model_mapping.keys())] return (mapping_keys + list(self._extra_content.keys())) def get(self, key, default): try: return self.__getitem__(key) except KeyError: return default def __bool__(self): return bool(self.keys()) def values(self): mapping_values = [self._load_attr_from_module(key, name) for (key, name) in self._model_mapping.items() if (key in self._config_mapping.keys())] return (mapping_values + list(self._extra_content.values())) def items(self): mapping_items = [(self._load_attr_from_module(key, self._config_mapping[key]), self._load_attr_from_module(key, self._model_mapping[key])) for key in self._model_mapping.keys() if (key in self._config_mapping.keys())] return (mapping_items + list(self._extra_content.items())) def __iter__(self): return iter(self.keys()) def __contains__(self, item): if (item in self._extra_content): return True if ((not hasattr(item, '__name__')) or (item.__name__ not in self._reverse_config_mapping)): return False model_type = self._reverse_config_mapping[item.__name__] return (model_type in self._model_mapping) def register(self, key, value): if (hasattr(key, '__name__') and (key.__name__ in self._reverse_config_mapping)): model_type = self._reverse_config_mapping[key.__name__] if (model_type in self._model_mapping.keys()): raise ValueError(f"'{key}' is already used by a Transformers model.") self._extra_content[key] = value
class GenerationDataset(Dataset): def __init__(self, data: List[dict], config: ModelConfigBase=None, training: bool=True): super().__init__(data, config=config, training=training) if training: self._indexing = [(src_idx, trg_idx) for (src_idx, entry) in enumerate(self.data) for (trg_idx, tokens) in enumerate(entry['full_trg_tokens'])] @property def summary(self): summary = [super().summary] seq_lens = [len(tokens) for entry in self.data for tokens in entry['full_trg_tokens']] (ave_len, max_len) = ((sum(seq_lens) / len(seq_lens)), max(seq_lens)) summary.extend([f'The average `trg_tokens` length is {ave_len:,.1f}', f'The maximum `trg_tokens` length is {max_len:,}']) return '\n'.join(summary) def __len__(self): if self.training: return len(self._indexing) else: return len(self.data) def _get_entry(self, i): if self.training: (src_idx, trg_idx) = self._indexing[i] entry = self.data[src_idx] entry['trg_tokens'] = entry['full_trg_tokens'][trg_idx] return entry else: return self.data[i]  # NOTE: the '@property' on summary is reconstructed; the attribute-style access 'super().summary' implies a stripped decorator
class SawyerDialTurnEnvV2(SawyerXYZEnv): def __init__(self): hand_low = ((- 0.5), 0.4, 0.05) hand_high = (0.5, 1, 0.5) obj_low = ((- 0.1), 0.7, 0.0) obj_high = (0.1, 0.8, 0.0) goal_low = ((- 0.1), 0.73, 0.0299) goal_high = (0.1, 0.83, 0.0301) super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high) self.init_config = {'obj_init_pos': np.array([0, 0.7, 0.0]), 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32)} self.goal = np.array([0.0, 0.73, 0.08]) self.obj_init_pos = self.init_config['obj_init_pos'] self.hand_init_pos = self.init_config['hand_init_pos'] self.max_path_length = 150 self._random_reset_space = Box(np.array(obj_low), np.array(obj_high)) self.goal_space = Box(np.array(goal_low), np.array(goal_high)) self.dial_radius = 0.05 @property def model_name(self): return full_v2_path_for('sawyer_xyz/sawyer_dial.xml') @_assert_task_is_set def step(self, action): ob = super().step(action) (reward, reachDist, pullDist) = self.compute_reward(action, ob) self.curr_path_length += 1 info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew': reward, 'pickRew': None, 'success': float((pullDist <= 0.03))} return (ob, reward, False, info) def _get_pos_objects(self): dial_center = self.get_body_com('dial').copy() dial_angle_rad = self.data.get_joint_qpos('knob_Joint_1') offset = np.array([np.sin(dial_angle_rad), (- np.cos(dial_angle_rad)), 0]) offset *= self.dial_radius return (dial_center + offset) def reset_model(self): self._reset_hand() self._target_pos = self.goal.copy() self.obj_init_pos = self.init_config['obj_init_pos'] if self.random_init: goal_pos = self._get_state_rand_vec() self.obj_init_pos = goal_pos[:3] final_pos = (goal_pos.copy() + np.array([0, 0.03, 0.03])) self._target_pos = final_pos self.sim.model.body_pos[self.model.body_name2id('dial')] = self.obj_init_pos self.maxPullDist = np.abs((self._target_pos[1] - self.obj_init_pos[1])) return self._get_obs() def _reset_hand(self): super()._reset_hand() self.reachCompleted = False def compute_reward(self, actions, obs): del actions objPos = obs[3:6] (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')) fingerCOM = ((rightFinger + leftFinger) / 2) pullGoal = self._target_pos pullDist = np.abs((objPos[1] - pullGoal[1])) reachDist = np.linalg.norm((objPos - fingerCOM)) reachRew = (- reachDist) self.reachCompleted = (reachDist < 0.05) def pullReward(): c1 = 1000 c2 = 0.001 c3 = 0.0001 if self.reachCompleted: pullRew = ((1000 * (self.maxPullDist - pullDist)) + (c1 * (np.exp(((- (pullDist ** 2)) / c2)) + np.exp(((- (pullDist ** 2)) / c3))))) pullRew = max(pullRew, 0) return pullRew else: return 0 pullRew = pullReward() reward = (reachRew + pullRew) return [reward, reachDist, pullDist]
def parse_args(args=None, namespace=None): parser = argparse.ArgumentParser() parser.add_argument('-a', '--root_audio', type=pathlib.Path, help='root for extracted audio files') parser.add_argument('-f', '--root_frame', type=pathlib.Path, help='root for extracted video frames') parser.add_argument('-o', '--out_dir', type=pathlib.Path, help='output directory') parser.add_argument('--fps', default=1, type=int, help='fps of video frames') parser.add_argument('--ratio', default=0.2, type=float, help='percentage of the validation set') parser.add_argument('--seed', default=1234, type=int, help='manual seed') return parser.parse_args(args=args, namespace=namespace)
def parse_args(args=None): parser = argparse.ArgumentParser(description='Training and Testing Knowledge Graph Embedding Models', usage='train.py [<args>] [-h | --help]') parser.add_argument('--cuda', action='store_true', help='use GPU') parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_valid', action='store_true') parser.add_argument('--do_test', action='store_true') parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data') parser.add_argument('--dataset', type=str, default='ogbl-wikikg2', help='dataset name, default to wikikg2') parser.add_argument('--model', default='TransE', type=str) parser.add_argument('-de', '--double_entity_embedding', action='store_true') parser.add_argument('-dr', '--double_relation_embedding', action='store_true') parser.add_argument('-n', '--negative_sample_size', default=128, type=int) parser.add_argument('-d', '--hidden_dim', default=500, type=int) parser.add_argument('-g', '--gamma', default=12.0, type=float) parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true') parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float) parser.add_argument('-b', '--batch_size', default=1024, type=int) parser.add_argument('-r', '--regularization', default=0.0, type=float) parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size') parser.add_argument('--uni_weight', action='store_true', help='Otherwise use subsampling weighting like in word2vec') parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float) parser.add_argument('-cpu', '--cpu_num', default=10, type=int) parser.add_argument('-init', '--init_checkpoint', default=None, type=str) parser.add_argument('-save', '--save_path', default=None, type=str) parser.add_argument('--max_steps', default=100000, type=int) parser.add_argument('--warm_up_steps', default=None, type=int) parser.add_argument('--save_checkpoint_steps', default=10000, type=int) parser.add_argument('--valid_steps', default=10000, type=int) parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps') parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps') parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET') parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET') parser.add_argument('--print_on_screen', action='store_true', help='log on screen or not') parser.add_argument('--ntriples_eval_train', type=int, default=200000, help='number of training triples to evaluate eventually') parser.add_argument('--neg_size_eval_train', type=int, default=500, help='number of negative samples when evaluating training triples') return parser.parse_args(args)
def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): if (not css): css = '\n table.mytable {\n font-family: times;\n font-size:12px;\n color:#000000;\n border-width: 1px;\n border-color: #eeeeee;\n border-collapse: collapse;\n background-color: #ffffff;\n width: 100%;\n max-width:550px;\n table-layout:fixed;\n }\n table.mytable th {\n border-width: 1px;\n padding: 8px;\n border-style: solid;\n border-color: #eeeeee;\n background-color: #e6eed6;\n color:#000000;\n }\n table.mytable td {\n border-width: 1px;\n padding: 8px;\n border-style: solid;\n border-color: #eeeeee;\n }\n #code {\n display:inline;\n font-family: courier;\n color: #3d9400;\n }\n #string {\n display:inline;\n font-weight: bold;\n }\n ' timecode_table = SimpleTable([(['Timecode List:'] + (cut_list if cut_list else [start.get_timecode() for (start, _) in scene_list[1:]]))], css_class=css_class) header_row = ['Scene Number', 'Start Frame', 'Start Timecode', 'Start Time (seconds)', 'End Frame', 'End Timecode', 'End Time (seconds)', 'Length (frames)', 'Length (timecode)', 'Length (seconds)'] for (i, (start, end)) in enumerate(scene_list): duration = (end - start) row = SimpleTableRow([('%d' % (i + 1)), ('%d' % start.get_frames()), start.get_timecode(), ('%.3f' % start.get_seconds()), ('%d' % end.get_frames()), end.get_timecode(), ('%.3f' % end.get_seconds()), ('%d' % duration.get_frames()), duration.get_timecode(), ('%.3f' % duration.get_seconds())]) if image_filenames: for image in image_filenames[i]: row.add_cell(SimpleTableCell(SimpleTableImage(image, width=image_width, height=image_height))) if (i == 0): scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename)
class LimitValuation_generic(DiscretePseudoValuation): def __init__(self, parent, approximation): DiscretePseudoValuation.__init__(self, parent) self._initial_approximation = approximation self._approximation = approximation def reduce(self, f, check=True): f = self.domain().coerce(f) self._improve_approximation_for_reduce(f) F = self._approximation.reduce(f, check=check) return self.residue_ring()(F) def _call_(self, f): self._improve_approximation_for_call(f) return self._approximation(f) @abstract_method def _improve_approximation_for_reduce(self, f): pass @abstract_method def _improve_approximation_for_call(self, f): pass def _repr_(self): from sage.rings.infinity import infinity from .augmented_valuation import AugmentedValuation_base if (self._initial_approximation(self._G) is not infinity): if isinstance(self._initial_approximation, AugmentedValuation_base): return (repr(self._initial_approximation)[:(- 1)] + ', ... ]') return repr(self._initial_approximation)  # NOTE: '@abstract_method' and the 'pass' bodies reconstruct decorators/docstrings stripped in extraction (assumed)
class CrossNERDataset(CQA): is_classification = True def __init__(self, data, *, make_example, **kwargs): subsample = kwargs.pop('subsample') domain = kwargs.pop('domain') examples = [] (example_id, tokens, labels) = (0, [], []) for (i, line) in enumerate(data): line = line.strip() if (line == ''): if len(tokens): examples.append(make_example([example_id, tokens, labels], domain)) (tokens, labels) = ([], []) example_id += 1 else: splits = line.split('\t') tokens.append(splits[0]) labels.append(splits[1]) if ((subsample is not None) and (len(examples) >= subsample)): break super().__init__(examples, **kwargs) @classmethod def return_splits(cls, path='.data', train='train', validation='dev', test='test', **kwargs): crossner_domains = kwargs.pop('crossner_domains') all_train_data = [] all_validation_data = [] all_test_data = [] for domain in crossner_domains: (train_data, validation_data, test_data) = (None, None, None) (train_path, validation_path, test_path) = (None, None, None) if train: train_path = os.path.join(path, domain, 'train.txt') with open(train_path, 'r') as fin: train_data = fin.readlines() if validation: validation_path = os.path.join(path, domain, f'{validation}.txt') with open(validation_path, 'r') as fin: validation_data = fin.readlines() if test: test_path = os.path.join(path, domain, 'test.txt') with open(test_path, 'r') as fin: test_data = fin.readlines() kwargs['domain'] = domain train_data = (None if (train is None) else cls(train_data, **kwargs)) validation_data = (None if (validation is None) else cls(validation_data, **kwargs)) test_data = (None if (test is None) else cls(test_data, **kwargs)) if (not all_train_data): all_train_data = train_data elif train_data: all_train_data.examples = (all_train_data.examples + train_data.examples) if (not all_validation_data): all_validation_data = validation_data elif validation_data: all_validation_data.examples = (all_validation_data.examples + validation_data.examples) if (not all_test_data): all_test_data = test_data elif test_data: all_test_data.examples = (all_test_data.examples + test_data.examples) return (Split(train=all_train_data, eval=all_validation_data, test=all_test_data), Split(train=train_path, eval=validation_path, test=test_path))
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_epochs, device, scheduler, config): model.train() metric_logger = utils.MetricLogger(delimiter=' ') metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.8f}')) metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}')) header = 'Train Epoch: [{}]'.format(epoch) print_freq = 50 for (i, (image0, image1, text, targets)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)): (image0, image1, targets) = (image0.to(device), image1.to(device), targets.to(device)) text_inputs = tokenizer(text, padding='longest', return_tensors='pt').to(device) loss = model(image0, image1, text_inputs, targets=targets, train=True) optimizer.zero_grad() with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() optimizer.step() scheduler.step() metric_logger.update(lr=optimizer.param_groups[0]['lr']) metric_logger.update(loss=loss.item()) metric_logger.synchronize_between_processes() print('Averaged stats:', metric_logger.global_avg()) return {k: '{:.4f}'.format(meter.global_avg) for (k, meter) in metric_logger.meters.items()}
def __add_emit_without_colors(fn): def __emit_without_color(*args): args[0].levelcolor = '' args[0].resetcolor = '' return fn(*args) return __emit_without_color
def dump_tensorboard_summary(graph_executor, logdir): with FileWriter(logdir) as w: pb_graph = visualize(graph_executor) evt = event_pb2.Event(wall_time=time.time(), graph_def=pb_graph.SerializeToString()) w.add_event(evt)
def build_arg(parser): parser.add_argument('--config', default='config/crnn_mrn.py', help='path to config file') parser.add_argument('--valid_datas', default=['../dataset/MLT17_IL/test_2017', '../dataset/MLT19_IL/test_2019'], help='path to testing dataset') parser.add_argument('--select_data', type=str, default=['../dataset/MLT17_IL/train_2017', '../dataset/MLT19_IL/train_2019'], help='select training data.') parser.add_argument('--workers', type=int, default=4, help='number of data loading workers') parser.add_argument('--batch_size', type=int, default=128, help='input batch size') parser.add_argument('--num_iter', type=int, default=20000, help='number of iterations to train for') parser.add_argument('--val_interval', type=int, default=5000, help='interval between each validation') parser.add_argument('--log_multiple_test', action='store_true', help='log_multiple_test') parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5') parser.add_argument('--optimizer', type=str, default='adam', help='optimizer |sgd|adadelta|adam|') parser.add_argument('--lr', type=float, default=0.0005, help='learning rate, default=1.0 for Adadelta, 0.0005 for Adam') parser.add_argument('--sgd_momentum', default=0.9, type=float, help='momentum for SGD') parser.add_argument('--sgd_weight_decay', default=1e-06, type=float, help='weight decay for SGD') parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95') parser.add_argument('--eps', type=float, default=1e-08, help='eps for Adadelta. default=1e-8') parser.add_argument('--schedule', default='super', nargs='*', help='learning rate schedule. default is super for super convergence, 1 for None, [0.6, 0.8] for the same setting as ASTER') parser.add_argument('--lr_drop_rate', type=float, default=0.1, help='lr_drop_rate. default is the same setting as ASTER') parser.add_argument('--model_name', type=str, required=False, help='CRNN|TRBA') parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN') parser.add_argument('--input_channel', type=int, default=3, help='the number of input channels of the feature extractor') parser.add_argument('--output_channel', type=int, default=512, help='the number of output channels of the feature extractor') parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state') parser.add_argument('--batch_ratio', type=str, default='1.0', help='assign ratio for each selected data in the batch') parser.add_argument('--total_data_usage_ratio', type=str, default='1.0', help='total data usage ratio; this ratio is multiplied by the total number of data.') parser.add_argument('--batch_max_length', type=int, default=25, help='maximum label length') parser.add_argument('--imgH', type=int, default=32, help='the height of the input image') parser.add_argument('--imgW', type=int, default=100, help='the width of the input image') parser.add_argument('--NED', action='store_true', help='for normalized edit distance') parser.add_argument('--Aug', type=str, default='None', help='whether to use augmentation |None|Blur|Crop|Rot|') parser.add_argument('--exp_name', help='where to store logs and models') parser.add_argument('--manual_seed', type=int, default=111, help='for random seed setting') return parser
def test_orchid(): tree = ET.ElementTree(ET.fromstring(SMALL_DOC)) documents = parse_xml(tree) check_results(documents, EXPECTED_RESULTS, EXPECTED_TEXT, EXPECTED_LABELS)
class SmoothedValue(object): def __init__(self, window_size=20, fmt=None): if (fmt is None): fmt = '{median:.4f} ({global_avg:.4f})' self.deque = deque(maxlen=window_size) self.total = 0.0 self.count = 0 self.fmt = fmt def update(self, value, n=1): self.deque.append(value) self.count += n self.total += (value * n) def synchronize_between_processes(self): if (not dist_utils.is_dist_avail_and_initialized()): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): return (self.total / self.count) @property def max(self): return max(self.deque) @property def value(self): return self.deque[(- 1)] def __str__(self): return self.fmt.format(median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value)
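# Hedged usage sketch, assuming the SmoothedValue class above is in scope
# (single process, so synchronize_between_processes would be a no-op without
# torch.distributed initialised).
meter = SmoothedValue(window_size=5, fmt='{median:.2f} ({global_avg:.2f})')
for v in [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]:
    meter.update(v)
print(meter)  # '4.00 (3.50)': median over the last 5 values, average over all 6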
class Classifier(nn.Module): def __init__(self, in_channels, num_anchors, num_classes, num_layers, pyramid_levels=5, onnx_export=False): super(Classifier, self).__init__() self.num_anchors = num_anchors self.num_classes = num_classes self.num_layers = num_layers self.conv_list = nn.ModuleList([SeparableConvBlock(in_channels, in_channels, norm=False, activation=False) for i in range(num_layers)]) self.bn_list = nn.ModuleList([nn.ModuleList([nn.BatchNorm2d(in_channels, momentum=0.01, eps=0.001) for i in range(num_layers)]) for j in range(pyramid_levels)]) self.header = SeparableConvBlock(in_channels, (num_anchors * num_classes), norm=False, activation=False) self.swish = (MemoryEfficientSwish() if (not onnx_export) else Swish()) def forward(self, inputs): feats = [] for (feat, bn_list) in zip(inputs, self.bn_list): for (i, bn, conv) in zip(range(self.num_layers), bn_list, self.conv_list): feat = conv(feat) feat = bn(feat) feat = self.swish(feat) feat = self.header(feat) feat = feat.permute(0, 2, 3, 1) feat = feat.contiguous().view(feat.shape[0], feat.shape[1], feat.shape[2], self.num_anchors, self.num_classes) feat = feat.contiguous().view(feat.shape[0], (- 1), self.num_classes) feats.append(feat) feats = torch.cat(feats, dim=1) feats = feats.sigmoid() return feats
def compute_metrics(eval_preds): (logits, labels) = eval_preds predictions = np.argmax(logits, axis=(- 1)) return metric.compute(predictions=predictions, references=labels, average='weighted')
class IMEXRK443(PDEIMEXRK): @classmethod def steps(cls): return 4 def stages(self): a = np.array([[0, 0, 0, 0, 0], [0, (1 / 2), 0, 0, 0], [0, (1 / 6), (1 / 2), 0, 0], [0, ((- 1) / 2), (1 / 2), (1 / 2), 0], [0, (3 / 2), ((- 3) / 2), (1 / 2), (1 / 2)]]) b = np.array([[0, 0, 0, 0, 0], [(1 / 2), 0, 0, 0, 0], [(11 / 18), (1 / 18), 0, 0, 0], [(5 / 6), ((- 5) / 6), (1 / 2), 0, 0], [(1 / 4), (7 / 4), (3 / 4), ((- 7) / 4), 0]]) c = (0, (1 / 2), (2 / 3), (1 / 2), 1) return (a, b, c)
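# Hedged sanity check (not from the source): both tableaus above should satisfy
# the row-sum consistency condition sum_j A[i][j] == c[i]. `stages` never
# touches instance state, so a bare instance suffices here (an assumption,
# since the base-class constructor signature isn't shown).
import numpy as np
a, b, c = IMEXRK443.stages(object.__new__(IMEXRK443))
assert np.allclose(a.sum(axis=1), np.asarray(c))
assert np.allclose(b.sum(axis=1), np.asarray(c))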
def PrintBytearray(host_workspace): uint_str = None prefix = None print('uint32_t host_workspace[] = {') for (idx, byte) in enumerate(host_workspace): if (not (idx % 4)): if (uint_str is not None): print(prefix, uint_str, ',') prefix = ('/* offset: %d B */ 0x' % idx) uint_str = '' uint_str = ('{:02x}'.format(byte) + uint_str) if (uint_str is not None): print(prefix, uint_str, ',') print('};')  # note: the trailing flush of uint_str is needed so the final word is printed; the original as extracted dropped the last group
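# Hedged usage sketch, assuming the PrintBytearray function above is in scope:
# bytes are packed little-endian into 32-bit words, so the eight bytes below
# should print as 0x03020100 followed by 0x07060504.
PrintBytearray(bytes([0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07]))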
def define_node(args, node_index, level, parent_index, tree_struct, identity=False): num_transforms = (0 if (node_index == 0) else count_number_transforms(parent_index, tree_struct)) meta = {'index': node_index, 'parent': parent_index, 'left_child': 0, 'right_child': 0, 'level': level, 'extended': False, 'split': False, 'visited': False, 'is_leaf': True, 'train_accuracy_gain_split': (- np.inf), 'valid_accuracy_gain_split': (- np.inf), 'test_accuracy_gain_split': (- np.inf), 'train_accuracy_gain_ext': (- np.inf), 'valid_accuracy_gain_ext': (- np.inf), 'test_accuracy_gain_ext': (- np.inf), 'num_transforms': num_transforms} if (not tree_struct): meta['in_shape'] = (1, args.input_nc, args.input_width, args.input_height) else: meta['in_shape'] = tree_struct[parent_index]['out_shape'] if ((meta['in_shape'][2] < 3) or (meta['in_shape'][3] < 3)): identity = True if (identity or (args.transformer_ver == 1)): meta['transformed'] = False else: meta['transformed'] = True num_downsample = (0 if (node_index == 0) else count_number_transforms_after_last_downsample(parent_index, tree_struct)) if ((args.downsample_interval == num_downsample) or (node_index == 0)): meta['downsampled'] = True else: meta['downsampled'] = False config_t = {'kernel_size': args.transformer_k, 'ngf': args.transformer_ngf, 'batch_norm': args.batch_norm, 'downsample': meta['downsampled'], 'expansion_rate': args.transformer_expansion_rate, 'reduction_rate': args.transformer_reduction_rate} transformer_ver = args.transformer_ver if identity: transformer = models.Identity(meta['in_shape'][1], meta['in_shape'][2], meta['in_shape'][3], **config_t) else: transformer = define_transformer(transformer_ver, meta['in_shape'][1], meta['in_shape'][2], meta['in_shape'][3], **config_t) meta['identity'] = identity meta['out_shape'] = transformer.outputshape print(' data shape before/after transformer ') print(meta['in_shape'], type(meta['in_shape'])) print(meta['out_shape'], type(meta['out_shape'])) config_s = {'no_classes': args.no_classes, 'dropout_prob': args.solver_dropout_prob, 'batch_norm': args.batch_norm} solver = define_solver(args.solver_ver, meta['out_shape'][1], meta['out_shape'][2], meta['out_shape'][3], **config_s) config_r = {'kernel_size': args.router_k, 'ngf': args.router_ngf, 'soft_decision': True, 'stochastic': False, 'dropout_prob': args.router_dropout_prob, 'batch_norm': args.batch_norm} router = define_router(args.router_ver, meta['out_shape'][1], meta['out_shape'][2], meta['out_shape'][3], **config_r) module = {'transform': transformer, 'classifier': solver, 'router': router} return (meta, module)
def heegner_point_height(self, D, prec=2, check_rank=True): if (not self.satisfies_heegner_hypothesis(D)): raise ArithmeticError(('Discriminant (=%s) must be a fundamental discriminant that satisfies the Heegner hypothesis.' % D)) if (check_rank and (self.rank() >= 2)): return ZZ(0) if ((D == (- 3)) or (D == (- 4))): raise ArithmeticError(('Discriminant (=%s) must not be -3 or -4.' % D)) eps = self.root_number() L1_vanishes = self.lseries().L1_vanishes() IR = RealIntervalField(20) if ((eps == 1) and L1_vanishes): return IR(0) RR = RealField() from math import sqrt alpha = (RR(sqrt(abs(D))) / (2 * self.period_lattice().complex_area())) F = self.quadratic_twist(D) E = self k_E = ((prec * sqrt(E.conductor())) + 20) k_F = ((prec * sqrt(F.conductor())) + 20) MIN_ERR = RR('1e-6') if (eps == 1): (LF1, err_F) = F.lseries().deriv_at1(k_F) (LE1, err_E) = E.lseries().at1(k_E) err_F = max(err_F, MIN_ERR) err_E = max(err_E, MIN_ERR) return ((IR((alpha - MIN_ERR), (alpha + MIN_ERR)) * IR((LE1 - err_E), (LE1 + err_E))) * IR((LF1 - err_F), (LF1 + err_F))) else: (LE1, err_E) = E.lseries().deriv_at1(k_E) (LF1, err_F) = F.lseries().at1(k_F) err_F = max(err_F, MIN_ERR) err_E = max(err_E, MIN_ERR) return ((IR((alpha - MIN_ERR), (alpha + MIN_ERR)) * IR((LE1 - err_E), (LE1 + err_E))) * IR((LF1 - err_F), (LF1 + err_F)))
class SkewPolynomialRing_finite_order(SkewPolynomialRing): def __init__(self, base_ring, morphism, derivation, name, sparse, category=None): if (self.Element is None): import sage.rings.polynomial.skew_polynomial_finite_order self.Element = sage.rings.polynomial.skew_polynomial_finite_order.SkewPolynomial_finite_order_dense if (self._fraction_field_class is None): from sage.rings.polynomial.ore_function_field import OreFunctionField_with_large_center self._fraction_field_class = OreFunctionField_with_large_center SkewPolynomialRing.__init__(self, base_ring, morphism, derivation, name, sparse, category) self._order = morphism.order() (self._constants, self._embed_constants) = morphism.fixed_field() self._center = {} self._center_variable_name = 'z' for i in range(WORKING_CENTER_MAX_TRIES): try: self._working_center = self.center() self._center_variable_name = None break except ValueError: self._center_variable_name = ('z%s_' % i) if (self._center_variable_name is not None): raise NotImplementedError('unable to create the center') def center(self, name=None, names=None, default=False): if ((name is not None) and (names is not None)): raise ValueError('you must specify the name of the variable') if (names is None): if (name is None): name = self._center_variable_name if (name is None): name = 'z' names = (name,) names = normalize_names(1, names) name = names[0] if (name in self._center): center = self._center[name] else: center = PolynomialRing(self._constants, names) embed = SkewPolynomialCenterInjection(center, self, self._embed_constants, self._order) try: assert (not self.has_coerce_map_from(center)) self.register_coercion(embed) center.register_conversion(embed.section()) except AssertionError: raise ValueError('creation of coercion map fails; consider using another variable name') self._center[name] = center if (default or (self._center_variable_name is None)): self._center_variable_name = name return center
def get_bias_by_neighbors(model, v, gender_direction, topn): neighbors = model.similar_by_vector(v, topn=topn) neighbors_words = [n for (n, _) in neighbors] bias = len([n for n in neighbors_words if (model.cosine_similarities(model[n], [gender_direction])[0] > 0)]) bias /= (1.0 * topn) return bias
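# Hedged runnable sketch: `model` above is assumed to be a gensim KeyedVectors
# instance (similar_by_vector and cosine_similarities are its methods); a tiny
# random vocabulary is enough to exercise the call signature.
import numpy as np
from gensim.models import KeyedVectors

kv = KeyedVectors(vector_size=8)
kv.add_vectors([f'w{i}' for i in range(20)], np.random.rand(20, 8).astype(np.float32))
direction = kv['w0'] - kv['w1']  # stand-in for a learned gender direction
print(get_bias_by_neighbors(kv, kv['w2'], direction, topn=5))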
def _mpool(inpOp, kH, kW, dH, dW): global pool_counter global parameters name = ('pool' + str(pool_counter)) pool_counter += 1 if (FLAGS.data_format == 'NCHW'): ksize = [1, 1, kH, kW] strides = [1, 1, dH, dW] else: ksize = [1, kH, kW, 1] strides = [1, dH, dW, 1] return tf.nn.max_pool(inpOp, ksize=ksize, strides=strides, padding='VALID', data_format=FLAGS.data_format, name=name)
class CategoricalMLPModuleEx(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_sizes=(32, 32),
                 hidden_nonlinearity=torch.tanh,
                 hidden_w_init=nn.init.xavier_uniform_,
                 hidden_b_init=nn.init.zeros_,
                 output_nonlinearity=None,
                 output_w_init=nn.init.xavier_uniform_,
                 output_b_init=nn.init.zeros_,
                 layer_normalization=False,
                 categorical_distribution_cls=Categorical,
                 distribution_transformations=None):
        super().__init__()
        self._input_dim = input_dim
        self._output_dim = output_dim
        self._hidden_sizes = hidden_sizes
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._layer_normalization = layer_normalization
        self._categorical_dist_class = categorical_distribution_cls
        self._distribution_transformations = distribution_transformations
        self._logits_module = MLPModule(
            input_dim=self._input_dim,
            output_dim=self._output_dim,
            hidden_sizes=self._hidden_sizes,
            hidden_nonlinearity=self._hidden_nonlinearity,
            hidden_w_init=self._hidden_w_init,
            hidden_b_init=self._hidden_b_init,
            output_nonlinearity=self._output_nonlinearity,
            output_w_init=self._output_w_init,
            output_b_init=self._output_b_init,
            layer_normalization=self._layer_normalization)

    def _maybe_move_distribution_transformations(self):
        device = next(self.parameters()).device
        if self._distribution_transformations is not None:
            self._distribution_transformations = [
                t.maybe_clone_to_device(device)
                for t in self._distribution_transformations]

    def _apply(self, *args, **kwargs):
        # Keep the distribution transformations on the same device as the
        # module parameters whenever the module is moved (e.g. `.to(device)`).
        ret = super()._apply(*args, **kwargs)
        self._maybe_move_distribution_transformations()
        return ret

    def _get_logits(self, *inputs):
        return self._logits_module(*inputs)

    def _build_dist(self, logits):
        # Shared distribution construction: apply any configured
        # transformations, then fold the trailing batch dimension into the
        # event unless the distribution already treats it as part of the event.
        dist = self._categorical_dist_class(logits=logits)
        if self._distribution_transformations is not None:
            dist = TransformedDistributionEx(dist, self._distribution_transformations)
        if not isinstance(dist, (TanhNormal, OneHotCategorical)):
            dist = Independent(dist, 1)
        return dist

    def forward(self, *inputs):
        return self._build_dist(self._get_logits(*inputs))

    def forward_mode(self, *inputs):
        return self._build_dist(self._get_logits(*inputs)).mode

    def forward_with_transform(self, *inputs, transform):
        logits = self._get_logits(*inputs)
        dist = self._build_dist(logits)
        dist_transformed = self._build_dist(transform(logits))
        return (dist, dist_transformed)

    def forward_with_chunks(self, *inputs, merge):
        # Compute logits chunk by chunk, then merge along the batch dimension.
        logits = [self._get_logits(*chunk_inputs) for chunk_inputs in zip(*inputs)]
        logits = merge(logits, batch_dim=0)
        return self._build_dist(logits)
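# Hedged usage sketch for CategoricalMLPModuleEx (not part of the original
# source): assumes MLPModule and the distribution classes referenced above are
# importable, and uses the default `Categorical` distribution class.
import torch

module = CategoricalMLPModuleEx(input_dim=4, output_dim=3, hidden_sizes=(32, 32))
obs = torch.randn(8, 4)            # a batch of 8 four-dimensional observations
dist = module(obs)                 # Categorical over 3 actions, wrapped in Independent
actions = dist.sample()            # shape (8,): one action index per observation
log_prob = dist.log_prob(actions)  # scalar: Independent folds the batch dim into the event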
class ComponentsTest(unittest.TestCase):
    def test_components(self):
        g = Graph(num_nodes=12)
        g.add_arc(1, 2)
        g.add_arc(3, 4)
        g.add_arc(5, 6).add_arc(6, 7).add_arc(7, 5)
        g.add_arc(8, 9).add_arc(8, 10).add_arc(8, 11)
        self.assertEqual(num_components(g), 5)
        comps = components(g)
        self.assertEqual(len(comps), 5)
        union = set()
        for c in comps:
            union |= set(c)
        self.assertEqual(len(union), len(g))
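# Minimal reference sketch (not from the original source) of what `components`
# computes above, assuming arcs are treated as undirected edges for
# connectivity: a union-find over node ids 0..num_nodes-1, where isolated
# nodes (like node 0 in the test) form singleton components.
def components_ref(num_nodes, arcs):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    for u, v in arcs:
        parent[find(u)] = find(v)

    groups = {}
    for node in range(num_nodes):
        groups.setdefault(find(node), []).append(node)
    return list(groups.values())

# The graph in the test yields 5 components:
# {0}, {1, 2}, {3, 4}, {5, 6, 7}, {8, 9, 10, 11}.
arcs = [(1, 2), (3, 4), (5, 6), (6, 7), (7, 5), (8, 9), (8, 10), (8, 11)]
assert len(components_ref(12, arcs)) == 5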
def dataio_prepare(hparams):
    @sb.utils.data_pipeline.takes('path')
    @sb.utils.data_pipeline.provides('sig')
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    @sb.utils.data_pipeline.takes('path')
    @sb.utils.data_pipeline.provides('sig')
    def sp_audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        sig = sig.unsqueeze(0)
        sig = hparams['speed_perturb'](sig)
        sig = sig.squeeze(0)
        return sig

    @sb.utils.data_pipeline.takes('trans')
    @sb.utils.data_pipeline.provides('trans', 'tokens_list', 'tokens_bos', 'tokens_eos')
    def reference_text_pipeline(translation):
        yield translation
        tokens_list = tokenizer.sp.encode_as_ids(translation)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams['bos_index']] + tokens_list)
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams['eos_index']])
        yield tokens_eos

    data_folder = hparams['data_folder']
    tokenizer = SentencePiece(
        model_dir=hparams['save_folder'],
        vocab_size=hparams['vocab_size'],
        annotation_train=hparams['annotation_train'],
        annotation_read='trans',
        annotation_format='json',
        model_type='unigram',
        bos_id=hparams['bos_index'],
        eos_id=hparams['eos_index'])

    datasets = {}
    for dataset in ['train', 'valid']:
        json_path = f'{data_folder}/{dataset}.json'
        # Speed perturbation applies to the training set only, and only when
        # a `speed_perturb` module is configured.
        is_use_sp = dataset == 'train' and 'speed_perturb' in hparams
        audio_pipeline_func = sp_audio_pipeline if is_use_sp else audio_pipeline
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=json_path,
            replacements={'data_root': data_folder},
            dynamic_items=[audio_pipeline_func, reference_text_pipeline],
            output_keys=['id', 'sig', 'duration', 'trans',
                         'tokens_list', 'tokens_bos', 'tokens_eos'])
    for dataset in ['valid', 'test']:
        json_path = hparams[f'annotation_{dataset}']
        datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
            json_path=json_path,
            replacements={'data_root': data_folder},
            dynamic_items=[audio_pipeline, reference_text_pipeline],
            output_keys=['id', 'sig', 'duration', 'trans',
                         'tokens_list', 'tokens_bos', 'tokens_eos'])

    # Sorting: in debug mode the datasets are filtered to a duration window to
    # keep runs short; otherwise they are sorted by duration. Sorted data must
    # not be reshuffled by the dataloader.
    if hparams['sorting'] == 'ascending':
        if hparams['debug']:
            datasets['train'] = datasets['train'].filtered_sorted(
                key_min_value={'duration': hparams['sorting_min_duration']},
                key_max_value={'duration': hparams['sorting_max_duration']},
                sort_key='duration', reverse=True)
            datasets['valid'] = datasets['valid'].filtered_sorted(
                key_min_value={'duration': hparams['sorting_min_duration']},
                key_max_value={'duration': hparams['sorting_max_duration']},
                sort_key='duration', reverse=True)
        else:
            datasets['train'] = datasets['train'].filtered_sorted(sort_key='duration')
            datasets['valid'] = datasets['valid'].filtered_sorted(sort_key='duration')
        hparams['dataloader_options']['shuffle'] = False
    elif hparams['sorting'] == 'descending':
        if hparams['debug']:
            datasets['train'] = datasets['train'].filtered_sorted(
                key_min_value={'duration': hparams['sorting_min_duration']},
                key_max_value={'duration': hparams['sorting_max_duration']},
                sort_key='duration', reverse=True)
            datasets['valid'] = datasets['valid'].filtered_sorted(
                key_min_value={'duration': hparams['sorting_min_duration']},
                key_max_value={'duration': hparams['sorting_max_duration']},
                sort_key='duration', reverse=True)
        else:
            datasets['train'] = datasets['train'].filtered_sorted(sort_key='duration', reverse=True)
            datasets['valid'] = datasets['valid'].filtered_sorted(sort_key='duration', reverse=True)
        hparams['dataloader_options']['shuffle'] = False
    elif hparams['sorting'] == 'random':
        if hparams['debug']:
            datasets['train'] = datasets['train'].filtered_sorted(
                key_min_value={'duration': hparams['sorting_min_duration']},
                key_max_value={'duration': hparams['sorting_max_duration']},
                sort_key='duration')
            datasets['valid'] = datasets['valid'].filtered_sorted(
                key_min_value={'duration': hparams['sorting_min_duration']},
                key_max_value={'duration': hparams['sorting_max_duration']},
                sort_key='duration')
        hparams['dataloader_options']['shuffle'] = True
    else:
        raise NotImplementedError('sorting must be random, ascending or descending')
    return (datasets, tokenizer)
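# Hedged usage sketch for dataio_prepare (not from the original recipe): the
# hparams dict below is hypothetical and only lists the keys the function
# actually reads; in a real SpeechBrain recipe these come from a YAML file via
# load_hyperpyyaml, and the JSON annotation files must already exist on disk.
hparams = {
    'data_folder': 'data',
    'save_folder': 'results/save',
    'vocab_size': 1000,
    'annotation_train': 'data/train.json',
    'annotation_valid': 'data/valid.json',
    'annotation_test': 'data/test.json',
    'bos_index': 1,
    'eos_index': 2,
    'sorting': 'ascending',
    'debug': False,
    'dataloader_options': {'batch_size': 8, 'shuffle': True},
}
datasets, tokenizer = dataio_prepare(hparams)
train_set = datasets['train']  # DynamicItemDataset yielding id/sig/trans/tokens_* items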