code
stringlengths
101
5.91M
def dequantize_model(model):
    """In-place: replace each quantized parameter of `model` with its float
    (dequantized) value and null out the per-parameter quantization buffers.

    Assumes the state dict stores, for every weight named ``n``, companion
    entries ``n + '.quantization.scale'`` and ``n + '.quantization.zero_point'``
    — TODO confirm against the quantization code that created the model.
    """
    model.float()
    params = model.state_dict()
    for (n, p) in params.items():
        # Skip the quantization metadata entries themselves.
        if ('quantization' not in n):
            qp = QTensor(tensor=p, scale=params[(n + '.quantization.scale')][0], zero_point=params[(n + '.quantization.zero_point')][0])
            # copy_ writes the dequantized values back into the live parameter.
            p.copy_(dequantize_tensor(qp))
            # NOTE(review): buffer names containing '.' are rejected by recent
            # torch versions — confirm against the torch version in use.
            model.register_buffer((n + '.quantization.scale'), None)
            model.register_buffer((n + '.quantization.zero_point'), None)
    model.quantized = None
class ConvReLU2d(nnqat.Conv2d, nni._FusedModule):
    """QAT module fusing Conv2d + ReLU, with fake-quantized weights.

    Used during quantization-aware training; converted from a float
    ``nni.ConvReLU2d`` via :meth:`from_float`.
    """
    _FLOAT_MODULE = nni.ConvReLU2d
    _FLOAT_CONV_MODULE = nn.Conv2d
    _FLOAT_BN_MODULE = None
    _FLOAT_RELU_MODULE = nn.ReLU

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', qconfig=None):
        super(ConvReLU2d, self).__init__(in_channels, out_channels, kernel_size,
                                         stride=stride, padding=padding,
                                         dilation=dilation, groups=groups,
                                         bias=bias, padding_mode=padding_mode,
                                         qconfig=qconfig)
        assert qconfig, 'qconfig must be provided for QAT module'
        self.qconfig = qconfig
        self.weight_fake_quant = self.qconfig.weight()

    def forward(self, input):
        # Conv with fake-quantized weight, then fused ReLU.
        return F.relu(self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias))

    # Fix: the method takes `cls` but was missing @classmethod, so calling
    # ConvReLU2d.from_float(mod) passed the module as `cls`.
    @classmethod
    def from_float(cls, mod):
        return super(ConvReLU2d, cls).from_float(mod)
def compute_maxIoU_overlap_alignment_wrapper(opts, rel_lo=0, rel_hi=1, batch_size=8, data_loader_kwargs=None, max_items=None, **stats_kwargs):
    """Collect real/generated layout statistics for max-IoU style metrics.

    Returns a 6-tuple of FeatureStats objects:
    (bbox_real, bbox_fake, bbox_class, mask, overlap, alignment).
    """
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
    if (data_loader_kwargs is None):
        data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)
    # Work on a frozen copy of the generator so its state/grads stay untouched.
    G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
    c_iter = iterate_random_labels(opts=opts, batch_size=batch_size)
    num_items = len(dataset)
    if (max_items is not None):
        num_items = min(num_items, max_items)
    # capture_all=True: keep every sample, not just running moments.
    stats_bbox_real = FeatureStats(max_items=num_items, capture_all=True, **stats_kwargs)
    stats_bbox_fake = FeatureStats(max_items=num_items, capture_all=True, **stats_kwargs)
    stats_bbox_class = FeatureStats(max_items=num_items, capture_all=True, **stats_kwargs)
    stats_mask = FeatureStats(max_items=num_items, capture_all=True, **stats_kwargs)
    stats_overlap = FeatureStats(max_items=num_items, capture_all=True, **stats_kwargs)
    stats_alignment = FeatureStats(max_items=num_items, capture_all=True, **stats_kwargs)
    progress = opts.progress.sub(tag='calculate maximum IoU', num_items=num_items, rel_lo=rel_lo, rel_hi=rel_hi)
    # Shard items across ranks: rank r takes items r, r+num_gpus, ... (mod num_items).
    item_subset = [(((i * opts.num_gpus) + opts.rank) % num_items) for i in range((((num_items - 1) // opts.num_gpus) + 1))]
    for (samples, _labels) in torch.utils.data.DataLoader(dataset=dataset, sampler=item_subset, batch_size=batch_size, **data_loader_kwargs):
        bbox_real = samples['bboxes'].to(opts.device).to(torch.float32)
        bbox_class = samples['labels'].to(opts.device).to(torch.int64)
        # Transpose the list-of-lists so texts are grouped per sample.
        bbox_text = list(map(list, zip(*samples['texts'])))
        bbox_patch = samples['patches'].to(opts.device).to(torch.float32)
        mask = samples['mask'].to(opts.device).to(torch.bool)
        padding_mask = (~ mask)
        background = samples['background'].to(opts.device).to(torch.float32)
        # One latent per (sample, element) — assumes bbox_class is (batch, n_elems);
        # TODO confirm against the dataset.
        gen_z = torch.randn([bbox_class.shape[0], bbox_class.shape[1], G.z_dim], dtype=torch.float32, device=opts.device)
        bbox_fake = G(z=gen_z, bbox_class=bbox_class, bbox_real=bbox_real, bbox_text=bbox_text, bbox_patch=bbox_patch, padding_mask=padding_mask, background=background, c=next(c_iter), **opts.G_kwargs)
        stats_bbox_real.append_torch(bbox_real, num_gpus=opts.num_gpus, rank=opts.rank)
        stats_bbox_fake.append_torch(bbox_fake, num_gpus=opts.num_gpus, rank=opts.rank)
        stats_bbox_class.append_torch(bbox_class, num_gpus=opts.num_gpus, rank=opts.rank)
        stats_mask.append_torch(mask, num_gpus=opts.num_gpus, rank=opts.rank)
        overlap = compute_overlap(bbox_fake, mask).unsqueeze((- 1))
        stats_overlap.append_torch(overlap, num_gpus=opts.num_gpus, rank=opts.rank)
        alignment = compute_alignment(bbox_fake, mask).unsqueeze((- 1))
        stats_alignment.append_torch(alignment, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats_bbox_real.num_items)
    return (stats_bbox_real, stats_bbox_fake, stats_bbox_class, stats_mask, stats_overlap, stats_alignment)
class Argument(object):
    """A positional argument: stores its parsed value into the parser state."""

    def __init__(self, dest, nargs=1, obj=None):
        self.dest = dest
        self.nargs = nargs
        self.obj = obj

    def process(self, value, state):
        """Validate `value` and record it in `state.opts` / `state.order`."""
        if self.nargs > 1:
            # Count missing slots: all-missing collapses to None, a partial
            # fill is a usage error.
            missing = [item for item in value if item is None]
            if len(missing) == len(value):
                value = None
            elif missing:
                raise BadArgumentUsage('argument {} takes {} values'.format(self.dest, self.nargs))
        state.opts[self.dest] = value
        state.order.append(self.obj)
def test_gemm():
    """Check `gemm` against the NumPy reference: C = 1.0*A@B + 1.0*C."""
    A = np.random.rand(M, K).astype(np.float32)
    B = np.random.rand(K, N).astype(np.float32)
    C = np.random.rand(M, N).astype(np.float32)
    origC = C.copy()
    gemm(A, B, C, 1.0, 1.0)
    # Fix: the reference product was missing its operator ("A B");
    # use matrix multiplication.
    realC = (1.0 * (A @ B)) + (1.0 * origC)
    diff = np.linalg.norm(C - realC) / (M * N)
    print('Difference:', diff)
    assert diff < 1e-05
class ResFieldNetBase(ResNetBase):
    """ResNet variant whose input TensorField is lifted by two sinusoidal
    field networks before the standard ResNet stages run."""

    def network_initialization(self, in_channels, out_channels, D):
        field_ch = 32
        field_ch2 = 64
        # Stage 1: sinusoidal embedding -> BN -> ReLU -> linear -> BN -> ReLU,
        # then conversion of the field to a sparse tensor.
        self.field_network = nn.Sequential(ME.MinkowskiSinusoidal(in_channels, field_ch), ME.MinkowskiBatchNorm(field_ch), ME.MinkowskiReLU(inplace=True), ME.MinkowskiLinear(field_ch, field_ch), ME.MinkowskiBatchNorm(field_ch), ME.MinkowskiReLU(inplace=True), ME.MinkowskiToSparseTensor())
        # Stage 2 consumes stage-1 features concatenated with the raw input
        # (hence field_ch + in_channels input channels).
        self.field_network2 = nn.Sequential(ME.MinkowskiSinusoidal((field_ch + in_channels), field_ch2), ME.MinkowskiBatchNorm(field_ch2), ME.MinkowskiReLU(inplace=True), ME.MinkowskiLinear(field_ch2, field_ch2), ME.MinkowskiBatchNorm(field_ch2), ME.MinkowskiReLU(inplace=True), ME.MinkowskiToSparseTensor())
        # Build the regular ResNet on top of the field features.
        ResNetBase.network_initialization(self, field_ch2, out_channels, D)

    def forward(self, x: ME.TensorField):
        otensor = self.field_network(x)
        # cat_slice concatenates the sparse features with the original field.
        otensor2 = self.field_network2(otensor.cat_slice(x))
        return ResNetBase.forward(self, otensor2)
def get_sentence_map(segments, sentence_end):
    """Map every token to its sentence index.

    `sentence_end` holds one truthy/falsy flag per token (across all
    segments); the sentence counter advances after each end-of-sentence token.
    """
    total_tokens = sum(len(segment) for segment in segments)
    assert (len(sentence_end) == total_tokens)
    sentence_idx = 0
    mapping = []
    for is_end in sentence_end:
        mapping.append(sentence_idx)
        sentence_idx += int(is_end)
    return mapping
class Dataset(torch.utils.data.Dataset):
    """Paired-vector dataset: yields (x1[i], x2[i], y[i]) as tensors on `device`.

    Vectors are converted to float64, the target to float32.
    """

    def __init__(self, x1: np.ndarray, x2: np.ndarray, y: np.ndarray, device):
        self.x1 = x1
        self.x2 = x2
        self.y = y
        self.device = device

    def __len__(self):
        return len(self.y)

    def __getitem__(self, index):
        with torch.no_grad():
            left = torch.from_numpy(self.x1[index]).double().to(self.device)
            right = torch.from_numpy(self.x2[index]).double().to(self.device)
            target = torch.tensor(self.y[index]).to(self.device).float()
            return (left, right, target)
def get_divergence(T, K, U_hat, W_hat, **context):
    """Return the physical-space divergence of the velocity field.

    Assumes `cross2` produces the spectral velocity from (K, W_hat)
    (i.e. i K x W_hat) — TODO confirm against the solver's helpers.
    """
    div_u = Array(T)
    U_hat = cross2(U_hat, K, W_hat)
    # Divergence in spectral space is i K . u_hat; transform back with T.
    div_u = T.backward((1j * (((K[0] * U_hat[0]) + (K[1] * U_hat[1])) + (K[2] * U_hat[2]))), div_u)
    return div_u
class Estimator(nn.Module):
    """Two strided conv stages followed by a 1x1-conv residual head with LayerNorm."""

    def __init__(self, n_output, cnn_input=128):
        n_input = cnn_input
        n_units = n_output
        super().__init__()
        self.layer0 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=4, stride=2, padding=1, bias=False), nn.BatchNorm2d(64), nn.ReLU())
        self.layer1 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1, bias=False), nn.BatchNorm2d(128), nn.ReLU())
        # NOTE(review): plain list (not nn.ModuleList); parameters are still
        # registered via the attributes above — confirm this is intentional.
        self.layers = [self.layer0, self.layer1]
        # Nonlinear branch of the residual head (1x1 convs act per-location).
        self.block_nonlinear = nn.Sequential(nn.Conv2d(n_input, n_units, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(n_units), nn.ReLU(), nn.Conv2d(n_units, n_units, kernel_size=1, stride=1, padding=0, bias=True))
        # LayerNorm over channels: permute to NHWC, normalize, permute back.
        self.block_ln = nn.Sequential(Permute(0, 2, 3, 1), nn.LayerNorm(n_units), Permute(0, 3, 1, 2))
        self.linear_shortcut = nn.Conv2d(n_input, n_units, kernel_size=1, stride=1, padding=0, bias=False)
        if (n_units >= n_input):
            # Initialize the shortcut close to an identity map on the first
            # n_input channels (small noise elsewhere, 1.0 on the diagonal).
            eye_mask = np.zeros((n_units, n_input, 1, 1), dtype=np.uint8)
            for i in range(n_input):
                eye_mask[(i, i, 0, 0)] = 1
            self.linear_shortcut.weight.data.uniform_((- 0.01), 0.01)
            self.linear_shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.0)

    def forward(self, x: torch.Tensor, return_full_list=False, clip_grad=False, prop_limit=None):
        """Encode `x`; optionally clamp gradients per stage and stop after
        `prop_limit` stages. (`return_full_list` is currently unused.)"""
        def _clip_grad(v, min, max):
            # Clamp gradients flowing back through v via a backward hook;
            # expand_as creates a view so the hook attaches to a fresh node.
            v_tmp = v.expand_as(v)
            v_tmp.register_hook((lambda g: g.clamp(min, max)))
            return v_tmp
        out = []
        for (i, layer) in enumerate(self.layers):
            if ((prop_limit is not None) and (i >= prop_limit)):
                break
            x = layer(x)
            if clip_grad:
                x = _clip_grad(x, (- clip_grad), clip_grad)
            out.append(x)
        # Only the deepest captured stage feeds the residual head.
        out = out[(- 1)]
        out = self.block_ln((self.block_nonlinear(out) + self.linear_shortcut(out)))
        return out
def forward_param_layer(input, param):
    """Broadcast a 1-D parameter vector across all leading dims of `input`.

    Returns a tensor shaped like `input` but whose last axis holds `param`,
    tiled over every other axis.
    """
    ndim = input.get_shape().ndims
    param = tf.convert_to_tensor(param)
    num_units = int(param.get_shape()[0])
    # Shape (1, ..., 1, num_units): ready for tiling over the batch dims.
    broadcast_shape = ((1,) * (ndim - 1)) + (num_units,)
    reshaped = tf.reshape(param, broadcast_shape)
    # Tile counts: dynamic leading dims of `input`, last axis untouched.
    multiples = tf.concat([tf.shape(input)[:(ndim - 1)], [1]], 0)
    return tf.tile(reshaped, multiples)
class GPT():
    """Few-shot prompt builder and thin wrapper around the OpenAI Completion API."""

    def __init__(self, engine='davinci', temperature=0.5, max_tokens=100,
                 input_prefix='input: ', input_suffix='\n',
                 output_prefix='output: ', output_suffix='\n\n',
                 append_output_prefix_to_query=False):
        self.examples = {}
        self.engine = engine
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.input_prefix = input_prefix
        self.input_suffix = input_suffix
        self.output_prefix = output_prefix
        self.output_suffix = output_suffix
        self.append_output_prefix_to_query = append_output_prefix_to_query
        # Stop sequence: the boundary between a completed output and the next input.
        self.stop = (output_suffix + input_prefix).strip()

    def add_example(self, ex):
        """Register a few-shot Example, keyed by its id."""
        assert isinstance(ex, Example), 'Please create an Example object.'
        self.examples[ex.get_id()] = ex

    def delete_example(self, id):
        """Remove an example if present; silently ignore unknown ids."""
        self.examples.pop(id, None)

    def get_example(self, id):
        return self.examples.get(id, None)

    def get_all_examples(self):
        return {key: ex.as_dict() for (key, ex) in self.examples.items()}

    def get_prime_text(self):
        """Concatenate all formatted examples into the priming prefix."""
        return ''.join(self.format_example(ex) for ex in self.examples.values())

    def get_engine(self):
        return self.engine

    def get_temperature(self):
        return self.temperature

    def get_max_tokens(self):
        return self.max_tokens

    def craft_query(self, prompt):
        """Build the full prompt: primer + wrapped user input."""
        query = self.get_prime_text() + self.input_prefix + prompt + self.input_suffix
        if self.append_output_prefix_to_query:
            query += self.output_prefix
        return query

    def submit_request(self, prompt):
        """Send the crafted prompt to the Completion endpoint."""
        response = openai.Completion.create(engine=self.get_engine(),
                                            prompt=self.craft_query(prompt),
                                            max_tokens=self.get_max_tokens(),
                                            temperature=self.get_temperature(),
                                            top_p=1, n=1, stream=False,
                                            stop=self.stop)
        return response

    def get_top_reply(self, prompt):
        """Return the text of the first completion choice."""
        return self.submit_request(prompt)['choices'][0]['text']

    def format_example(self, ex):
        """Render one example as 'input: ...\\noutput: ...\\n\\n' (by default)."""
        return (self.input_prefix + ex.get_input() + self.input_suffix
                + self.output_prefix + ex.get_output() + self.output_suffix)
def consult_tree(root, dic):
    """Relabel every node reachable from `root` via `dic`, returning the first
    node of the traversal (the root)."""
    nodes = traverse(root)
    for node in nodes:
        node.label = dic[node.label]
    return nodes[0]
def numpy_azimint_naive(data, radius, npt):
    """Naive azimuthal integration: mean of `data` inside each of `npt`
    equal-width radial rings [r_i, r_{i+1}).

    Note the last ring's upper edge is exclusive, so the point(s) at the
    maximum radius fall outside every ring (matches the reference behavior).
    """
    rmax = radius.max()
    res = np.zeros(npt, dtype=np.float64)
    for ring in range(npt):
        lo = (rmax * ring) / npt
        hi = (rmax * (ring + 1)) / npt
        in_ring = (lo <= radius) & (radius < hi)
        res[ring] = data[in_ring].mean()
    return res
def get_log_info(log: SparkDataFrame, user_col='user_idx', item_col='item_idx') -> str:
    """One-line summary of an interaction log: row count plus distinct user
    and item counts."""
    total = log.count()
    users = log.select(user_col).distinct().count()
    items = log.select(item_col).distinct().count()
    parts = [f'total lines: {total}', f'total users: {users}', f'total items: {items}']
    return ', '.join(parts)
class TestGetTableSchema(TestCase):
    """Schema introspection tests, parameterized by the active DB driver."""

    def test_get_table_schema(self):
        conn = testing.get_singleton_db_connection()
        if (conn.driver == 'mysql'):
            schema = get_table_schema(conn, 'iris.train')
            expect = [('sepal_length', 'FLOAT'), ('sepal_width', 'FLOAT'), ('petal_length', 'FLOAT'), ('petal_width', 'FLOAT'), ('class', 'INT')]
            self.assertEqual(expect, schema)
            # Derived column: FLOAT * literal is promoted to DOUBLE on MySQL.
            schema = selected_columns_and_types(conn, 'SELECT sepal_length, petal_width * 2.3 new_petal_width, class FROM iris.train')
            expect = [('sepal_length', 'FLOAT'), ('new_petal_width', 'DOUBLE'), ('class', 'INT')]
            self.assertEqual(expect, schema)
        elif (conn.driver == 'hive'):
            schema = get_table_schema(conn, 'iris.train')
            # Hive path compares as arrays, so a tuple-of-tuples works here.
            expect = (('sepal_length', 'FLOAT'), ('sepal_width', 'FLOAT'), ('petal_length', 'FLOAT'), ('petal_width', 'FLOAT'), ('class', 'INT'))
            self.assertTrue(np.array_equal(expect, schema))
            schema = selected_columns_and_types(conn, 'SELECT sepal_length, petal_width * 2.3 AS new_petal_width, class FROM iris.train')
            expect = [('sepal_length', 'FLOAT'), ('new_petal_width', 'FLOAT'), ('class', 'INT')]
            self.assertTrue(np.array_equal(expect, schema))
        elif (conn.driver == 'maxcompute'):
            # MaxCompute project name comes from the test environment.
            case_db = os.getenv('SQLFLOW_TEST_DB_MAXCOMPUTE_PROJECT')
            table = ('%s.sqlflow_iris_train' % case_db)
            schema = get_table_schema(conn, table)
            expect = [('sepal_length', 'DOUBLE'), ('sepal_width', 'DOUBLE'), ('petal_length', 'DOUBLE'), ('petal_width', 'DOUBLE'), ('class', 'BIGINT')]
            self.assertTrue(np.array_equal(expect, schema))
            schema = selected_columns_and_types(conn, ('SELECT sepal_length, petal_width * 2.3 new_petal_width, class FROM %s' % table))
            expect = [('sepal_length', 'DOUBLE'), ('new_petal_width', 'DOUBLE'), ('class', 'BIGINT')]
            self.assertTrue(np.array_equal(expect, schema))
def ResNet_model(bn=False, num_classes=10, depth=56, nb_filters=16, kernel_size=3, inp_channels=3, k=1, pad_conv1=0, affine=True, inp_noise=0, VIB=False):
    """Factory: build a ResNet with the given architecture hyper-parameters."""
    config = dict(depth=depth, nb_filters=nb_filters, num_classes=num_classes,
                  bn=bn, kernel_size=kernel_size, inp_channels=inp_channels,
                  k=k, pad_conv1=pad_conv1, affine=affine,
                  inp_noise=inp_noise, VIB=VIB)
    return ResNet(**config)
def get_descriptors(model, dataloader, device):
    """Run `model` over every batch under fp16 CUDA autocast and return all
    output descriptors concatenated on the CPU.
    """
    descriptors = []
    with torch.no_grad():
        with torch.autocast(device_type='cuda', dtype=torch.float16):
            # Fix: the progress-bar label had a typo ("descritptors").
            for batch in tqdm(dataloader, 'Calculating descriptors...'):
                (imgs, labels) = batch
                output = model(imgs.to(device)).cpu()
                descriptors.append(output)
    return torch.cat(descriptors)
def find_latest_tag_commit(tags):
    """Return the commit of the newest tag whose name looks like 'v<digits.>'.

    Scans `tags` from the end (newest last). Returns None when no tag matches.
    """
    for tag in reversed(tags):
        match = re.match('v\\s*([\\d.]+)', tag.name)
        # Fix: the log line previously printed for EVERY tag scanned, even
        # non-matching ones; only announce the tag we actually select.
        if match is not None:
            print(f'Latest version tag is: {tag.name}', file=sys.stderr)
            return tag.commit
    return None
class CraigslistValidationPipeline(object):
    """Scrapy pipeline: drop items that parsed to an empty dict."""

    def process_item(self, item, spider):
        # Guard clause: anything other than an empty dict passes through.
        if item != {}:
            return item
        raise DropItem('parse error')
def silent_net():
    """Build a minimal net proto: one DummyData layer with two tops, each fed
    into a Silence layer (consumes a blob without producing output)."""
    n = caffe.NetSpec()
    (n.data, n.data2) = L.DummyData(shape=dict(dim=3), ntop=2)
    # ntop=0: Silence layers take a bottom but expose no top blob.
    n.silence_data = L.Silence(n.data, ntop=0)
    n.silence_data2 = L.Silence(n.data2, ntop=0)
    return n.to_proto()
class SRWLPartBeam(object):
    """SRW particle (electron) beam: average current, particle count,
    first-order statistical moments and 21 second-order central moments."""

    # Electron rest energy in GeV (SRW convention), used to convert beam
    # energy [GeV] into the relativistic gamma factor.
    _ELEC_REST_E_GEV = 0.51099890221e-03

    def __init__(self, _Iavg=0, _nPart=0, _partStatMom1=None, _arStatMom2=None):
        self.Iavg = _Iavg
        self.nPart = _nPart
        self.partStatMom1 = (SRWLParticle() if (_partStatMom1 is None) else _partStatMom1)
        self.arStatMom2 = (array('d', ([0] * 21)) if (_arStatMom2 is None) else _arStatMom2)

    def from_Twiss(self, _Iavg=0, _e=0, _sig_e=0, _emit_x=0, _beta_x=0, _alpha_x=0, _eta_x=0, _eta_x_pr=0, _emit_y=0, _beta_y=0, _alpha_y=0, _eta_y=0, _eta_y_pr=0):
        """Set second-order moments from Twiss parameters (energy in GeV).

        Fix: gamma was computed as ``_e / 0.`` (always ZeroDivisionError);
        divide by the electron rest energy instead.
        """
        self.Iavg = _Iavg
        self.partStatMom1.gamma = (_e / self._ELEC_REST_E_GEV)
        sigeE2 = (_sig_e * _sig_e)
        # <x^2>, <x x'>, <x'^2> including dispersion contributions.
        self.arStatMom2[0] = ((_emit_x * _beta_x) + ((sigeE2 * _eta_x) * _eta_x))
        self.arStatMom2[1] = (((- _emit_x) * _alpha_x) + ((sigeE2 * _eta_x) * _eta_x_pr))
        self.arStatMom2[2] = (((_emit_x * (1 + (_alpha_x * _alpha_x))) / _beta_x) + ((sigeE2 * _eta_x_pr) * _eta_x_pr))
        # Same for the vertical plane.
        self.arStatMom2[3] = ((_emit_y * _beta_y) + ((sigeE2 * _eta_y) * _eta_y))
        self.arStatMom2[4] = (((- _emit_y) * _alpha_y) + ((sigeE2 * _eta_y) * _eta_y_pr))
        self.arStatMom2[5] = (((_emit_y * (1 + (_alpha_y * _alpha_y))) / _beta_y) + ((sigeE2 * _eta_y_pr) * _eta_y_pr))
        self.arStatMom2[10] = sigeE2

    def from_RMS(self, _Iavg=0, _e=0, _sig_e=0, _sig_x=0, _sig_x_pr=0, _m_xx_pr=0, _sig_y=0, _sig_y_pr=0, _m_yy_pr=0):
        """Set second-order moments directly from RMS sizes/divergences.

        Same gamma fix as :meth:`from_Twiss`.
        """
        self.Iavg = _Iavg
        self.partStatMom1.gamma = (_e / self._ELEC_REST_E_GEV)
        sigeE2 = (_sig_e * _sig_e)
        self.arStatMom2[0] = (_sig_x * _sig_x)
        self.arStatMom2[1] = _m_xx_pr
        self.arStatMom2[2] = (_sig_x_pr * _sig_x_pr)
        self.arStatMom2[3] = (_sig_y * _sig_y)
        self.arStatMom2[4] = _m_yy_pr
        self.arStatMom2[5] = (_sig_y_pr * _sig_y_pr)
        self.arStatMom2[10] = sigeE2

    def drift(self, _dist):
        """Propagate first- and second-order moments through a drift of length
        `_dist` (standard transfer-matrix update)."""
        self.partStatMom1.drift(_dist)
        self.arStatMom2[0] += (((self.arStatMom2[1] * _dist) * 2) + ((self.arStatMom2[2] * _dist) * _dist))
        self.arStatMom2[1] += (self.arStatMom2[2] * _dist)
        self.arStatMom2[3] += (((self.arStatMom2[4] * _dist) * 2) + ((self.arStatMom2[5] * _dist) * _dist))
        self.arStatMom2[4] += (self.arStatMom2[5] * _dist)
class DropPath(nn.Module):
    """Stochastic depth: during training, zero a whole sample's residual path
    with probability `p` and rescale survivors by 1/(1-p)."""

    def __init__(self, p: float = None):
        super().__init__()
        # p: drop probability; None is treated as disabled (see forward).
        self.p = p

    def forward(self, x: Tensor) -> Tensor:
        # Fix: with the default p=None the old check (self.p == 0.0) was False,
        # so training crashed on `1 - None`. `not self.p` disables the module
        # for both None and 0.0; inference is always a no-op.
        if (not self.p) or (not self.training):
            return x
        keep_prob = 1 - self.p
        # One Bernoulli draw per sample, broadcast over remaining dims.
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
        random_tensor.floor_()
        return x.div(keep_prob) * random_tensor
class ErrorErasureChannel(Channel):
    """Channel that adds a random number of symbol errors and erasures.

    Output is a pair (received word, erasure vector over GF(2)); erased
    positions are zeroed in the received word and flagged with 1 in the vector.
    """

    def __init__(self, space, number_errors, number_erasures):
        # Normalize scalar counts to (min, max) intervals.
        if isinstance(number_errors, (Integer, int)):
            number_errors = (number_errors, number_errors)
        if (not isinstance(number_errors, (tuple, list))):
            raise ValueError('number_errors must be a tuple, a list, an Integer or a Python int')
        if isinstance(number_erasures, (Integer, int)):
            number_erasures = (number_erasures, number_erasures)
        if (not isinstance(number_erasures, (tuple, list))):
            raise ValueError('number_erasures must be a tuple, a list, an Integer or a Python int')
        # Output pairs: (codeword space, binary erasure-flag space).
        output_space = cartesian_product([space, VectorSpace(GF(2), space.dimension())])
        super().__init__(space, output_space)
        if ((number_errors[1] + number_erasures[1]) > space.dimension()):
            raise ValueError('The total number of errors and erasures cannot exceed the dimension of the input space')
        self._number_errors = number_errors
        self._number_erasures = number_erasures

    def _repr_(self):
        no_err = self.number_errors()
        no_era = self.number_erasures()
        return ('Error-and-erasure channel creating %s errors and %s erasures of input space %s and output space %s' % (format_interval(no_err), format_interval(no_era), self.input_space(), self.output_space()))

    def _latex_(self):
        no_err = self.number_errors()
        no_era = self.number_erasures()
        return ('\\textnormal{Error-and-erasure channel creating %s errors and %s erasures of input space %s and output space %s}' % (format_interval(no_err), format_interval(no_era), self.input_space(), self.output_space()))

    def transmit_unsafe(self, message):
        """Corrupt `message`: draw counts within the configured intervals,
        pick disjoint error/erasure positions, apply both."""
        number_errors = randint(*self.number_errors())
        number_erasures = randint(*self.number_erasures())
        V = self.input_space()
        n = V.dimension()
        zero = V.base_ring().zero()
        # One sample guarantees error and erasure positions are disjoint.
        errors = sample(range(n), (number_errors + number_erasures))
        error_positions = errors[:number_errors]
        erasure_positions = errors[number_errors:]
        error_vector = random_error_vector(n, V.base_ring(), error_positions)
        erasure_vector = random_error_vector(n, GF(2), erasure_positions)
        message = (message + error_vector)
        # Erased symbols are replaced by zero in the received word.
        for i in erasure_positions:
            message[i] = zero
        return (message, erasure_vector)

    def number_errors(self):
        # Interval (min, max) of errors per transmission.
        return self._number_errors

    def number_erasures(self):
        # Interval (min, max) of erasures per transmission.
        return self._number_erasures
class MarianTokenizer():
    """Import-time placeholder for MarianTokenizer used when the
    `sentencepiece` dependency is missing: any use raises a helpful error."""

    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): upstream transformers defines dummy from_pretrained as
        # a classmethod; confirm whether a decorator was lost here.
        requires_sentencepiece(self)
# Fix: the registration decorator was truncated to the bare string
# ('VariableLSTM'); restore the gradient registration for the custom op.
# NOTE(review): assumes the file aliases TensorFlow as `tf` — confirm the
# original decorator namespace (tf.RegisterGradient vs ops.RegisterGradient).
@tf.RegisterGradient('VariableLSTM')
def _variable_lstm_grad(op, act_grad, gate_grad, mem_grad):
    """Gradient function for the custom 'VariableLSTM' op.

    Unpacks the forward op's inputs/outputs and delegates to the compiled
    rnn.variable_lstm_grad kernel.
    """
    initial_state = op.inputs[1]
    initial_memory = op.inputs[2]
    w_m_m = op.inputs[3]
    act = op.outputs[0]
    gate_raw_act = op.outputs[1]
    memory = op.outputs[2]
    return rnn.variable_lstm_grad(initial_state, initial_memory, w_m_m, act,
                                  gate_raw_act, memory, act_grad, gate_grad,
                                  mem_grad)
def trivial_loop(data: dace.float64[(I, J)]):
    """Add row i-1 into row i, column-parallel via a dace map.

    The outer range(1, 2) is a sequential loop over a single iteration (i == 1);
    the inner dace.map parallelizes over columns.
    """
    for i in range(1, 2):
        for j in dace.map[0:J]:
            data[(i, j)] = (data[(i, j)] + data[((i - 1), j)])
def dconv_bn_relu(in_dim, out_dim):
    """2x-upsampling block: transposed 5x5 conv (stride 2) -> BatchNorm -> ReLU."""
    upsample = nn.ConvTranspose2d(in_dim, out_dim, 5, 2,
                                  padding=2, output_padding=1, bias=False)
    return nn.Sequential(upsample, nn.BatchNorm2d(out_dim), nn.ReLU())
class BeamsplitterTest(tf.test.TestCase):
    """Unitarity and determinant checks for the Beamsplitter matrix."""

    def test_(self):
        for hadamard in [True, False]:
            for epsilon in [0, 0.1]:
                bs = Beamsplitter(hadamard=hadamard, epsilon=epsilon)
                # Fix: the operator was missing ("bs.matrix bs.inverse_matrix");
                # M @ M^{-1} must be the identity.
                self.assertAllClose(bs.matrix @ bs.inverse_matrix, IDENTITY)
                # Unitary: inverse equals conjugate transpose.
                self.assertAllClose(bs.matrix.conj().T, bs.inverse_matrix)
                if (epsilon == 0):
                    # Balanced splitter: all magnitudes 1/sqrt(2).
                    self.assertAllClose(np.abs((bs.matrix ** 2)), (0.5 * np.ones_like(bs.matrix)))
                    self.assertAllClose(np.linalg.det(bs.matrix), ((- 1) if hadamard else 1))
def write_results(results):
    """Write the `results` string to a fresh temp file and return the open
    handle (positioned at end; the file persists after closing).

    Fixes two defects: tempfile.mktemp() is race-prone, and the old code
    wrote bytes (`results.encode('utf-8')`) into a text-mode file, which
    raises TypeError.
    """
    tmp_file = tempfile.NamedTemporaryFile(mode='w+', encoding='utf-8', delete=False)
    tmp_file.write(results)
    return tmp_file
def dist_gather_tensor(vecs, world_size, local_rank=0, detach=True):
    """All-gather `vecs` from every rank and concatenate along dim 0.

    With detach=False, the local rank's slot is replaced by the original
    tensor so gradients can flow through this rank's contribution
    (all_gather outputs do not carry grad).
    """
    all_tensors = [torch.empty_like(vecs) for _ in range(world_size)]
    dist.all_gather(all_tensors, vecs)
    if (not detach):
        all_tensors[local_rank] = vecs
    all_tensors = torch.cat(all_tensors, dim=0)
    return all_tensors
def process_events(events: List[Event], sentence_entities: List[List[Entity]], sentences: List[Tuple[(str, int, int)]]) -> List[List[Event]]:
    """Assign events to sentences, keep only arguments whose entity mention is
    in the same sentence, then drop events with overlapping triggers
    (longer triggers win). Per-sentence results are sorted by trigger start.
    """
    sentence_events = [[] for _ in range(len(sentences))]
    for event in events:
        (start, end) = (event.trigger.start, event.trigger.end)
        for (i, (_, s, e)) in enumerate(sentences):
            sent_entities = sentence_entities[i]
            # Trigger fully contained in sentence i's [s, e] span.
            if ((start >= s) and (end <= e)):
                arguments = []
                for argument in event.arguments:
                    mention_id = argument.mention_id
                    # Keep the argument only if its entity lives in this sentence.
                    for entity in sent_entities:
                        if (entity.mention_id == mention_id):
                            arguments.append(argument)
                            break
                event_cleaned = Event(event.event_id, event.mention_id, event.event_type, event.event_subtype, trigger=event.trigger.copy(), arguments=arguments)
                sentence_events[i].append(event_cleaned)
    sentence_events_cleaned = [[] for _ in range(len(sentences))]
    for (i, events) in enumerate(sentence_events):
        if (not events):
            continue
        # Longest trigger first so it claims its span before shorter rivals.
        events.sort(key=(lambda x: (x.trigger.end - x.trigger.start)), reverse=True)
        # chars[j] == 1 marks a character already claimed by a kept trigger.
        chars = ([0] * max([x.trigger.end for x in events]))
        for event in events:
            overlap = False
            for j in range(event.trigger.start, event.trigger.end):
                if (chars[j] == 1):
                    overlap = True
                    break
            if (not overlap):
                chars[event.trigger.start:event.trigger.end] = ([1] * (event.trigger.end - event.trigger.start))
                sentence_events_cleaned[i].append(event)
        sentence_events_cleaned[i].sort(key=(lambda x: x.trigger.start))
    return sentence_events_cleaned
class RASampler(torch.utils.data.Sampler):
    """Repeated-augmentation batch sampler.

    Streams dataset indices (optionally reshuffled each pass), yielding each
    index `repetitions` times in a row, and groups them into batches until
    `len_images` = dataset_len * len_factor indices have been emitted.
    """

    def __init__(self, dataset_len, batch_size, repetitions=1, len_factor=3.0, shuffle=False, drop_last=False):
        self.dataset_len = dataset_len
        self.batch_size = batch_size
        self.repetitions = repetitions
        self.len_images = int(dataset_len * len_factor)
        self.shuffle = shuffle
        self.drop_last = drop_last

    def shuffler(self):
        """Endless index stream; restarts a (possibly reshuffled) pass on exhaustion."""
        if self.shuffle:
            make_pass = lambda: iter(np.random.permutation(self.dataset_len))
        else:
            make_pass = lambda: iter(np.arange(self.dataset_len))
        pass_iter = make_pass()
        while True:
            try:
                idx = next(pass_iter)
            except StopIteration:
                pass_iter = make_pass()
                idx = next(pass_iter)
            for _ in range(self.repetitions):
                yield idx

    def __iter__(self):
        stream = iter(self.shuffler())
        batch = []
        for _ in range(self.len_images):
            batch.append(next(stream))
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        # Trailing partial batch unless drop_last is requested.
        if batch and not self.drop_last:
            yield batch

    def __len__(self):
        if self.drop_last:
            return self.len_images // self.batch_size
        return (self.len_images + self.batch_size - 1) // self.batch_size
def drop_connect(inputs, p, training):
    """Stochastic depth on a batch: during training, zero each sample's whole
    activation with probability `p` and rescale survivors by 1/(1-p)."""
    assert (0 <= p <= 1), 'p must be in range of [0,1]'
    if not training:
        return inputs
    keep_prob = 1 - p
    batch_size = inputs.shape[0]
    # One Bernoulli(keep_prob) draw per sample: floor(keep_prob + U[0,1)).
    noise = torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    binary_mask = torch.floor(keep_prob + noise)
    return (inputs / keep_prob) * binary_mask
class TResNet(nn.Module):
    """TResNet: SpaceToDepth stem + IABN conv stages with anti-aliased
    downsampling, SE in the early stages, and a pooled linear head."""

    def __init__(self, layers, in_chans=3, num_classes=1000, width_factor=1.0, no_aa_jit=False, global_pool='avg', drop_rate=0.0):
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        super(TResNet, self).__init__()
        # Stem: 4x4 space-to-depth rearrangement (hence in_chans * 16 below).
        space_to_depth = SpaceToDepthModule()
        aa_layer = partial(AntiAliasDownsampleLayer, no_jit=no_aa_jit)
        self.inplanes = int((64 * width_factor))
        self.planes = int((64 * width_factor))
        conv1 = conv2d_iabn((in_chans * 16), self.planes, stride=1, kernel_size=3)
        # Stages 1-2 use BasicBlock with SE; 3-4 use Bottleneck (SE off in 4).
        layer1 = self._make_layer(BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer)
        layer2 = self._make_layer(BasicBlock, (self.planes * 2), layers[1], stride=2, use_se=True, aa_layer=aa_layer)
        layer3 = self._make_layer(Bottleneck, (self.planes * 4), layers[2], stride=2, use_se=True, aa_layer=aa_layer)
        layer4 = self._make_layer(Bottleneck, (self.planes * 8), layers[3], stride=2, use_se=False, aa_layer=aa_layer)
        self.body = nn.Sequential(OrderedDict([('SpaceToDepth', space_to_depth), ('conv1', conv1), ('layer1', layer1), ('layer2', layer2), ('layer3', layer3), ('layer4', layer4)]))
        self.num_features = ((self.planes * 8) * Bottleneck.expansion)
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
        self.head = nn.Sequential(OrderedDict([('fc', nn.Linear((self.num_features * self.global_pool.feat_mult()), num_classes))]))
        # Weight init: kaiming for convs, unit/zero for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif (isinstance(m, nn.BatchNorm2d) or isinstance(m, InplaceAbn)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-init the last norm weight of each residual block so blocks
        # start as identity mappings; small normal init for linear layers.
        for m in self.modules():
            if isinstance(m, BasicBlock):
                m.conv2[1].weight = nn.Parameter(torch.zeros_like(m.conv2[1].weight))
            if isinstance(m, Bottleneck):
                m.conv3[1].weight = nn.Parameter(torch.zeros_like(m.conv3[1].weight))
            if isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)

    def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None):
        """Stack `blocks` residual blocks, downsampling via AvgPool + 1x1 IABN
        conv when the stride or channel count changes."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            layers = []
            if (stride == 2):
                # Anti-aliased style shortcut: pool first, then 1x1 conv.
                layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
            layers += [conv2d_iabn(self.inplanes, (planes * block.expansion), kernel_size=1, stride=1, act_layer='identity')]
            downsample = nn.Sequential(*layers)
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, use_se=use_se, aa_layer=aa_layer))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, use_se=use_se, aa_layer=aa_layer))
        return nn.Sequential(*layers)

    def get_classifier(self):
        return self.head.fc

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Swap the classification head (Identity when num_classes is falsy)."""
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
        self.num_classes = num_classes
        self.head = None
        if num_classes:
            num_features = (self.num_features * self.global_pool.feat_mult())
            self.head = nn.Sequential(OrderedDict([('fc', nn.Linear(num_features, num_classes))]))
        else:
            self.head = nn.Sequential(OrderedDict([('fc', nn.Identity())]))

    def forward_features(self, x):
        return self.body(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.global_pool(x)
        if self.drop_rate:
            x = F.dropout(x, p=float(self.drop_rate), training=self.training)
        x = self.head(x)
        return x
def get_f77flags(src):
    """Scan the first 20 lines of a Fortran source for f77flags directives.

    Returns a dict mapping compiler name -> list of flags (shell-style split).
    """
    flags = {}
    f = open_latin1(src, 'r')
    for lineno, line in enumerate(f, start=1):
        # Directives are only honored near the top of the file.
        if lineno > 20:
            break
        m = _f77flags_re.match(line)
        if m:
            fcname = m.group('fcname').strip()
            flags[fcname] = split_quoted(m.group('fflags').strip())
    f.close()
    return flags
def tokenize_sentences(x):
    """Tokenize a sentence and its context, remapping character-level link
    spans and the sentence location to token indices.

    Assumes `tokenizer` is a HuggingFace fast tokenizer available at module
    scope (char_to_token requires a fast tokenizer) — TODO confirm.
    """
    tokenized_s = tokenizer(x['s']['text'], add_special_tokens=False)
    tokenized_s_with_context = tokenizer(x['s_with_context']['text'], add_special_tokens=False)
    # Remap each link's [start, end) char span to token indices; spans whose
    # endpoints fall on untokenized chars are dropped.
    s_links = {}
    for (k, v) in x['s']['links'].items():
        anchors = []
        for (start, end) in v:
            if ((start < end) and (end > 0)):
                start = tokenized_s.char_to_token(start)
                end = tokenized_s.char_to_token((end - 1))
                if ((start is not None) and (end is not None)):
                    anchors.append([start, (end + 1)])
        if anchors:
            s_links[k] = anchors
    # Same remapping against the context-augmented tokenization.
    s_with_context_links = {}
    for (k, v) in x['s_with_context']['links'].items():
        anchors = []
        for (start, end) in v:
            if ((start < end) and (end > 0)):
                start = tokenized_s_with_context.char_to_token(start)
                end = tokenized_s_with_context.char_to_token((end - 1))
                if ((start is not None) and (end is not None)):
                    anchors.append([start, (end + 1)])
        if anchors:
            s_with_context_links[k] = anchors
    # Locate the sentence inside the context: walk inward past characters
    # with no corresponding token (e.g. whitespace).
    s_loc_start = x['s_with_context']['s_loc'][0]
    while (tokenized_s_with_context.char_to_token(s_loc_start) is None):
        s_loc_start += 1
    s_loc_start = tokenized_s_with_context.char_to_token(s_loc_start)
    s_loc_end = (x['s_with_context']['s_loc'][1] - 1)
    while (tokenized_s_with_context.char_to_token(s_loc_end) is None):
        s_loc_end -= 1
    s_loc_end = tokenized_s_with_context.char_to_token(s_loc_end)
    # Degenerate span means the sentence text was not found in the context.
    if (s_loc_start >= s_loc_end):
        error = 'sentence not in context'
    else:
        error = 'none'
    return {'md5': x['md5'], 'title': x['title'], 's': {'ids': tokenized_s['input_ids'], 'links': s_links}, 's_with_context': {'ids': tokenized_s_with_context['input_ids'], 's_loc': [s_loc_start, (s_loc_end + 1)], 'links': s_with_context_links}, 'error': error}
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(1, 1), dilation=1):
    """ResNet conv block: 1x1 -> kxk -> 1x1 bottleneck with a strided 1x1
    projection shortcut (used when the shortcut changes shape)."""
    (filters1, filters2, filters3) = filters
    # BatchNorm axis depends on the backend's data format.
    if (K.image_data_format() == 'channels_last'):
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = ((('res' + str(stage)) + block) + '_branch')
    bn_name_base = ((('bn' + str(stage)) + block) + '_branch')
    # Main branch: strided 1x1 reduce, kxk (optionally dilated), 1x1 expand.
    x = Conv2D(filters1, (1, 1), strides=strides, name=(conv_name_base + '2a'), use_bias=False)(input_tensor)
    x = BN(axis=bn_axis, name=(bn_name_base + '2a'))(x)
    x = Activation('relu')(x)
    x = Conv2D(filters2, kernel_size, padding='same', name=(conv_name_base + '2b'), use_bias=False, dilation_rate=dilation)(x)
    x = BN(axis=bn_axis, name=(bn_name_base + '2b'))(x)
    x = Activation('relu')(x)
    x = Conv2D(filters3, (1, 1), name=(conv_name_base + '2c'), use_bias=False)(x)
    x = BN(axis=bn_axis, name=(bn_name_base + '2c'))(x)
    # Projection shortcut matches the main branch's stride and channels.
    shortcut = Conv2D(filters3, (1, 1), strides=strides, name=(conv_name_base + '1'), use_bias=False)(input_tensor)
    shortcut = BN(axis=bn_axis, name=(bn_name_base + '1'))(shortcut)
    x = layers.add([x, shortcut])
    x = Activation('relu')(x)
    return x
def make_latex_table(args):
    """Aggregate */Success.csv runs under args.results_folder and print LaTeX
    table rows: one row per dataset size, one column per coda/mbpo config.

    Cell values are mean test success (last 20 of 500 evals) with a
    bootstrap std; the best mean per row is bolded.
    """
    csvs = glob.glob(f'{args.results_folder}/**/*Success.csv')
    results = defaultdict((lambda : defaultdict(list)))
    for csv in csvs:
        # Run configuration is encoded in the file path.
        (seed, real, coda, _, dyna, roll, mbpo, c3xm) = parse_title(csv)
        coda_to_real_ratio = (int(coda) // int(real))
        csv = pandas.read_csv(csv)
        dyna = (dyna and int(dyna))
        mbpo = (mbpo and int(mbpo))
        c3xm = (c3xm and int(c3xm))
        if c3xm:
            mbpo = c3xm
        # Build the composite column key, e.g. '3_1_5' or '0_1_5_c3'.
        if dyna:
            coda_to_real_ratio = f'{coda_to_real_ratio}_{roll}'
        if mbpo:
            mbpo_to_real_ratio = (int(mbpo) // int(real))
            if c3xm:
                coda_to_real_ratio = f'{coda_to_real_ratio}_{mbpo_to_real_ratio}_{roll}_c3'
            else:
                coda_to_real_ratio = f'{coda_to_real_ratio}_{mbpo_to_real_ratio}_{roll}'
        # Average success over evals 481-500.
        BASE = 500
        results[real][coda_to_real_ratio].append(csv['Test/Success'][(BASE - 20):BASE].mean())
    for datasize in ['25000', '50000', '75000', '100000', '150000', '250000']:
        # Pad 2-digit sizes so columns align in the rendered table.
        hphantom = ''
        if (len(datasize) == 5):
            hphantom = '\\hphantom{0}'
        s = [f'${hphantom}{(int(datasize) // 1000)}$', '&']
        means = []
        stds = []
        for coda_to_real_ratio in [0, '0_1', '0_1_5', 1, 2, 3, 5, '3_1_5']:
            values = results[datasize][coda_to_real_ratio]
            # Only report configs with a complete set of seeds (5 or 10).
            if (len(values) not in [5, 10]):
                mean = (- 1)
                std = (- 1)
            else:
                values = (np.array(values) * 100)
                mean = np.mean(values).round(1)
                # Bootstrap std of the mean (1000 resamples).
                resampled_means = []
                for i in range(1000):
                    resampled_means.append(np.mean(resample(values)))
                std = np.std(resampled_means).round(1)
                hphantom = ''
            means.append(mean)
            stds.append(std)
        max_mean = max(means)
        for (mean, std) in zip(means, stds):
            # Bold the best mean in the row.
            m = (f'\mybm{{{mean}}}' if (mean == max_mean) else mean)
            s += [f'${m} \pm {hphantom}{std}$ ', '&']
        # Replace the trailing '&' with the LaTeX row terminator.
        s = (s[:(- 1)] + ['\\\\'])
        print(*s)
def save_cog(out_np: np.ndarray, path_tiff_save: str, profile: dict, tags: Optional[dict]=None, dir_tmpfiles: str='.'):
    """Write `out_np` (count, height, width) as a Cloud-Optimized GeoTIFF.

    Uses GDAL's native COG driver when available, otherwise falls back to a
    tiled GTiff with overviews. 'gs://' destinations are written to a local
    temp file first and moved with gsutil. Returns `path_tiff_save`.
    NOTE(review): `profile` is mutated in place (driver/dtype/block sizes).
    """
    # Fill or validate the array-shape-derived profile entries.
    for (idx, c) in enumerate(['count', 'height', 'width']):
        if (c in profile):
            assert (profile[c] == out_np.shape[idx]), f'Unexpected shape: {profile[c]} {out_np.shape}'
        else:
            profile[c] = out_np.shape[idx]
    # Georeferencing is mandatory for a useful COG.
    for field in ['crs', 'transform']:
        assert (field in profile), f'{field} not in profile: {profile}. it will not write cog without geo information'
    profile['BIGTIFF'] = 'IF_SAFER'
    if ('dtype' not in profile):
        profile['dtype'] = str(out_np.dtype)
    with rasterio.Env() as env:
        # GDAL >= 3.1 ships a dedicated COG driver.
        cog_driver = ('COG' in env.drivers())
    if ('RESAMPLING' not in profile):
        profile['RESAMPLING'] = 'CUBICSPLINE'
    if cog_driver:
        if path_tiff_save.startswith('gs://'):
            # Only reserves a unique local file name; the file itself is
            # deleted on context exit and recreated by rasterio below.
            with tempfile.NamedTemporaryFile(dir=dir_tmpfiles, suffix='.tif', delete=True) as fileobj:
                name_save = fileobj.name
        else:
            name_save = path_tiff_save
        profile['driver'] = 'COG'
        with rasterio.open(name_save, 'w', **profile) as rst_out:
            if (tags is not None):
                rst_out.update_tags(**tags)
            rst_out.write(out_np)
        if path_tiff_save.startswith('gs://'):
            # Move to the bucket; remove the local copy if gsutil left it.
            subprocess.run(['gsutil', '-m', 'mv', name_save, path_tiff_save])
            if os.path.exists(name_save):
                os.remove(name_save)
        return path_tiff_save
    print('COG driver not available. Generate COG manually with GTiff driver')
    # Manual fallback: tiled GTiff + overviews, block size capped for GEE.
    for (idx, b) in enumerate(['blockysize', 'blockxsize']):
        if (b in profile):
            assert (profile[b] <= 512), f'{b} is {profile[b]} must be <=512 to be displayed in GEE '
        else:
            profile[b] = min(512, out_np.shape[(idx + 1)])
    if ((out_np.shape[1] >= 512) or (out_np.shape[2] >= 512)):
        profile['tiled'] = True
    profile['driver'] = 'GTiff'
    # Reserve a unique temp name (file recreated by rasterio below).
    with tempfile.NamedTemporaryFile(dir=dir_tmpfiles, suffix='.tif', delete=True) as fileobj:
        named_tempfile = fileobj.name
    with rasterio.open(named_tempfile, 'w', **profile) as rst_out:
        if (tags is not None):
            rst_out.update_tags(**tags)
        rst_out.write(out_np)
        add_overviews(rst_out, tile_size=profile['blockysize'])
        print('Copying temp file')
        # copy_src_overviews=True is what makes the result cloud-optimized.
        rasterio_shutil.copy(rst_out, path_tiff_save, copy_src_overviews=True, tiled=True, blockxsize=profile['blockxsize'], blockysize=profile['blockysize'], driver='GTiff')
    rasterio_shutil.delete(named_tempfile)
    return path_tiff_save
# The '@' of this decorator was lost in the original source ('_call_aside'
# appeared as a bare name before 'def', which is a syntax error). Restored:
# _call_aside runs the function immediately at import time (pkg_resources idiom).
@_call_aside
def _initialize(g=globals()):
    """Create the global ResourceManager and export its public API.

    Binds the manager as `_manager` and re-exports every public attribute
    of the manager into this module's namespace.
    """
    manager = ResourceManager()
    g['_manager'] = manager
    g.update(((name, getattr(manager, name)) for name in dir(manager) if (not name.startswith('_'))))
def __compute_torperf_error_rates(daily_counts):
    """Per-day torperf error rates in percent.

    A day's rate is (timeouts + failures) / requests * 100; days with no
    requests are skipped because the rate would be undefined.
    """
    rates = []
    for day, counts in daily_counts.items():
        total = int(counts['requests'])
        if total <= 0:
            continue
        failed = int(counts['timeouts']) + int(counts['failures'])
        rates.append(100.0 * failed / float(total))
    return rates
def import_module_404ok(*args, **kwargs):
    """Like importlib.import_module, but return None for missing modules.

    ModuleNotFoundError is a subclass of ImportError, so catching
    ImportError alone covers both (the original tuple was redundant).
    """
    try:
        return import_module(*args, **kwargs)
    except ImportError:
        return None
class DownSample(nn.Module):
    """Projects features to a strictly smaller width.

    Forward pass: optional (BatchNorm1d -> Dropout) when `add_IC` is set,
    then Linear followed by ReLU.
    """

    def __init__(self, in_features: int, out_features: int, dropout: float, add_IC: bool):
        super().__init__()
        assert (in_features > out_features)
        self.in_features = in_features
        self.out_features = out_features
        self.add_IC = add_IC
        if self.add_IC:
            # "Independent component" pre-transform: normalize, then drop.
            self.norm_layer = nn.BatchNorm1d(in_features)
            self.dropout = nn.Dropout(p=dropout)
        self.linear = nn.Linear(in_features, out_features)
        self.relu = nn.ReLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = x
        if self.add_IC:
            out = self.norm_layer(out)
            out = self.dropout(out)
        return self.relu(self.linear(out))
def compute_overlap_alignment_laywise_IoU_layerwise_DocSim(opts, max_real, num_gen):
    """Compute overlap, alignment, layout-wise IoU and DocSim metrics.

    Gathers real/fake boxes, labels and padding masks from the wrapper,
    then compares each real layout with its generated counterpart.
    Returns (mean overlap, mean alignment, mean IoU, mean DocSim); ranks
    other than 0 return NaNs so only rank 0 reports.
    """
    (stats_bbox_real, stats_bbox_fake, stats_bbox_class, stats_mask, stats_overlap, stats_alignment) = metric_utils_layout.compute_maxIoU_overlap_alignment_wrapper(opts=opts, rel_lo=0, rel_hi=1, max_items=max_real)
    bbox_real = stats_bbox_real.get_all().astype(np.float32)
    bbox_fake = stats_bbox_fake.get_all().astype(np.float32)
    bbox_class = stats_bbox_class.get_all().astype(np.int64)
    # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin bool is the documented replacement.
    mask = stats_mask.get_all().astype(bool)
    overlap = stats_overlap.get_all().astype(np.float32)
    alignment = stats_alignment.get_all().astype(np.float32)
    if (opts.rank != 0):
        return (float('nan'), float('nan'), float('nan'), float('nan'))
    layouts_real = []
    layouts_fake = []
    layoutwise_iou = []
    layoutwise_docsim = []
    for j in range(bbox_real.shape[0]):
        valid = mask[j]  # keep only real (non-padding) elements of layout j
        b_real = bbox_real[j][valid]
        b_fake = bbox_fake[j][valid]
        labels = bbox_class[j][valid]
        layouts_real.append((b_real, labels))
        layouts_fake.append((b_fake, labels))
        layoutwise_iou.append(compute_iou_for_layout((b_real, labels), (b_fake, labels)))
        layoutwise_docsim.append(compute_docsim_for_layout((b_real, labels), (b_fake, labels)))
    return (float(np.mean(overlap)), float(np.mean(alignment)), float(np.mean(np.array(layoutwise_iou))), float(np.mean(np.array(layoutwise_docsim))))
def get_train_val_indices(train_dataset, val_split=0.2):
    """Stratified train/validation index split over `train_dataset.samples`.

    Bug fix: targets must be a numpy array — `python_list == scalar` is a
    single False, so `np.where(...)` previously returned zero indices for
    every class and the split was empty.

    Returns (train_idxs, val_idxs); each class contributes
    int(val_split * class_size) validation indices, sampled without
    replacement via np.random.
    """
    all_targets = np.array([t for (p, t) in train_dataset.samples])
    train_classes = np.unique(all_targets)
    train_idxs = []
    val_idxs = []
    for cls in train_classes:
        cls_idxs = np.where(all_targets == cls)[0]
        v_ = np.random.choice(cls_idxs, replace=False, size=(int(val_split * len(cls_idxs)),))
        t_ = [x for x in cls_idxs if x not in v_]
        train_idxs.extend(t_)
        val_idxs.extend(v_)
    return (train_idxs, val_idxs)
def get_masked_tokens_from_tagged_text(tagged_text):
    """Strip '__'-delimited mask markers from `tagged_text`.

    Returns (masks, clean_text) where each mask is a (start, end) character
    offset into clean_text covering a span that was wrapped in '__...__'.
    """
    masks = []
    pieces = []
    offset = 0
    for idx, piece in enumerate(tagged_text.split('__')):
        if idx % 2:  # odd-indexed pieces were inside the markers
            masks.append((offset, offset + len(piece)))
        offset += len(piece)
        pieces.append(piece)
    return masks, ''.join(pieces)
class ASR(sb.Brain):
    """SpeechBrain Brain that runs an ensemble of teacher models over the
    data and dumps their posteriors and WER statistics to an HDF5 file,
    presumably for later knowledge distillation (no student is trained here).
    """

    def __init__(self, tea_modules_list=None, hparams=None, run_opts=None):
        super(ASR, self).__init__(modules=None, opt_class=None, hparams=hparams, run_opts=run_opts, checkpointer=None)
        # Move every teacher's modules onto this Brain's device.
        tea_modules_list_ = []
        for tea_modules in tea_modules_list:
            tea_modules_ = torch.nn.ModuleList(tea_modules)
            tea_modules_ = tea_modules_.to(self.device)
            tea_modules_list_.append(tea_modules_)
        self.tea_modules_list = tea_modules_list_

    def compute_forward_tea(self, batch):
        """Run every teacher on `batch`.

        Returns a list (one dict per teacher) with temperature-scaled CTC
        log-posteriors, seq2seq posteriors, and per-utterance WER for both
        the CTC and the seq2seq decoding path.
        """
        batch = batch.to(self.device)
        (wavs, wav_lens) = batch.sig
        (phns_bos, _) = batch.phn_encoded_bos
        (phns, phn_lens) = batch.phn_encoded
        feats = self.hparams.compute_features(wavs)
        feats = self.hparams.normalize(feats, wav_lens)
        apply_softmax = torch.nn.Softmax(dim=(- 1))
        tea_dict_list = []
        for num in range(self.hparams.num_tea):
            tea_dict = {}
            self.tea_modules_list[num].eval()
            with torch.no_grad():
                # NOTE(review): tea_enc_list / tea_ctc_lin_list / tea_emb_list /
                # tea_dec_list / tea_seq_lin_list are names defined elsewhere
                # in this file (module-level globals) — confirm.
                x_tea = tea_enc_list[num](feats)
                ctc_logits_tea = tea_ctc_lin_list[num](x_tea)
                # Temperature-scaled log-posteriors for CTC distillation.
                p_ctc_tea = self.hparams.log_softmax((ctc_logits_tea / self.hparams.temperature))
                e_in_tea = tea_emb_list[num](phns_bos)
                (h_tea, _) = tea_dec_list[num](e_in_tea, x_tea, wav_lens)
                seq_logits_tea = tea_seq_lin_list[num](h_tea)
                p_seq_tea = apply_softmax((seq_logits_tea / self.hparams.temperature))
                # CTC path: greedy decode, then per-utterance WER vs reference.
                sequence_ctc = sb.decoders.ctc_greedy_decode(p_ctc_tea, wav_lens, blank_id=self.hparams.blank_index)
                phns_decode = sb.utils.data_utils.undo_padding(phns, phn_lens)
                phns_decode = self.label_encoder.decode_ndim(phns_decode)
                sequence_decode = self.label_encoder.decode_ndim(sequence_ctc)
                per_stats_ctc = sb.utils.edit_distance.wer_details_for_batch(batch.id, phns_decode, sequence_decode, compute_alignments=False)
                wer_ctc_tea = []
                for item in per_stats_ctc:
                    wer_ctc_tea.append(item['WER'])
                wer_ctc_tea = exclude_wer(wer_ctc_tea)
                wer_ctc_tea = np.expand_dims(wer_ctc_tea, axis=0)
                # Seq2seq path: argmax decode, filter at EOS, per-utterance WER.
                (_, predictions) = p_seq_tea.max(dim=(- 1))
                hyps = sb.decoders.seq2seq.batch_filter_seq2seq_output(predictions, eos_id=self.hparams.eos_index)
                sequence_ce = self.label_encoder.decode_ndim(hyps)
                per_stats_ce = sb.utils.edit_distance.wer_details_for_batch(batch.id, phns_decode, sequence_ce, compute_alignments=False)
                wer_tea = []
                for item in per_stats_ce:
                    wer_tea.append(item['WER'])
                wer_tea = exclude_wer(wer_tea)
                wer_tea = np.expand_dims(wer_tea, axis=0)
            tea_dict['p_ctc_tea'] = p_ctc_tea.cpu().numpy()
            tea_dict['p_seq_tea'] = p_seq_tea.cpu().numpy()
            tea_dict['wer_ctc_tea'] = wer_ctc_tea
            tea_dict['wer_tea'] = wer_tea
            tea_dict_list.append(tea_dict)
        return tea_dict_list

    def def_tea_name(self):
        # HDF5 group names for teachers: 't0', 't1', ...
        tea_name = []
        for tea_num in range(self.hparams.num_tea):
            tea = 't{}'.format(tea_num)
            tea_name.append(tea)
        return tea_name

    def fit_save(self, train_set, valid_set=None, test_set=None):
        """Run all teachers over train/valid/test sets and store outputs.

        HDF5 layout: /<stage>/<batch step>/<teacher name>/{p_ctc_tea,
        p_seq_tea, wer_ctc_tea, wer_tea}. `self.step` is reset after each
        stage so group names restart at 1.
        """
        data_sets = [train_set, valid_set, test_set]
        stage = self.hparams.stage
        tea_name = self.def_tea_name()
        f_name = '/tea_infer_{}batch.hdf5'.format(self.hparams.batch_size)
        f = h5py.File((self.hparams.output_folder + f_name), 'w')
        for num in range(len(stage)):
            g_sets = f.create_group(stage[num])
            with tqdm(data_sets[num], initial=self.step, dynamic_ncols=True) as t:
                for batch in t:
                    self.step += 1
                    g_batch = g_sets.create_group(str(self.step))
                    tea_dict_list = self.compute_forward_tea(batch)
                    for tea_num in range(self.hparams.num_tea):
                        g_tea = g_batch.create_group(tea_name[tea_num])
                        g_tea.create_dataset('p_ctc_tea', data=tea_dict_list[tea_num]['p_ctc_tea'])
                        g_tea.create_dataset('p_seq_tea', data=tea_dict_list[tea_num]['p_seq_tea'])
                        # WER arrays are stored without their leading axis.
                        g_tea.create_dataset('wer_ctc_tea', data=tea_dict_list[tea_num]['wer_ctc_tea'][0])
                        g_tea.create_dataset('wer_tea', data=tea_dict_list[tea_num]['wer_tea'][0])
            self.step = 0
        f.close()
# NOTE(review): this bare call looks like residue of a mangled decorator or
# preceding statement lost in extraction — confirm against upstream awkward.
_level_function()
def metadata(path, storage_options=None, row_groups=None, columns=None, ignore_metadata=False, scan_files=True):
    """Collect parquet schema/row-group metadata for ak.from_parquet.

    Returns (parquet_columns, subform, actual_paths, fs, subrg, col_counts,
    metadata). `subrg` maps each path to the row-group indices selected
    within it (None meaning "all"); `col_counts` gives rows per selected
    row group when available.
    """
    import awkward._connect.pyarrow
    pyarrow_parquet = awkward._connect.pyarrow.import_pyarrow_parquet('ak.from_parquet')
    import fsspec.parquet
    # Validate the row-group selection up front.
    if (row_groups is not None):
        if (not all(((is_integer(x) and (x >= 0)) for x in row_groups))):
            raise ValueError('row_groups must be a set of non-negative integers')
        if (len(set(row_groups)) < len(row_groups)):
            raise ValueError('row group indices must not repeat')
    (fs, _, paths) = fsspec.get_fs_token_paths(path, mode='rb', storage_options=storage_options)
    (all_paths, path_for_schema, can_sub) = _all_and_metadata_paths(path, fs, paths, ignore_metadata, scan_files)
    subrg = ([None] * len(all_paths))
    actual_paths = all_paths
    with fs.open(path_for_schema) as file_for_metadata:
        parquetfile_for_metadata = pyarrow_parquet.ParquetFile(file_for_metadata)
    # Detect whether this file spells list children 'list.item' or
    # 'list.element' (differs between parquet writers).
    list_indicator = 'list.item'
    for column_metadata in parquetfile_for_metadata.schema:
        if ((column_metadata.max_repetition_level > 0) and ('.list.element' in column_metadata.path)):
            list_indicator = 'list.element'
            break
    subform = ak._connect.pyarrow.form_handle_arrow(parquetfile_for_metadata.schema_arrow, pass_empty_field=True)
    if (columns is not None):
        subform = subform.select_columns(columns)
    # An unnamed top-level column needs an explicit empty prefix.
    if (parquetfile_for_metadata.schema_arrow.names == ['']):
        column_prefix = ('',)
    else:
        column_prefix = ()
    metadata = parquetfile_for_metadata.metadata
    if (scan_files and (not path_for_schema.endswith('/_metadata'))):
        # No consolidated _metadata file: scan each data file and merge its
        # row groups into one metadata object.
        if (path_for_schema in all_paths):
            scan_paths = all_paths[1:]
        else:
            scan_paths = all_paths
        for apath in scan_paths:
            with fs.open(apath, 'rb') as f:
                md = pyarrow_parquet.ParquetFile(f).metadata
                md.set_file_path(apath.rsplit('/', 1)[(- 1)])
                metadata.append_row_groups(md)
    if (row_groups is not None):
        if any(((_ >= metadata.num_row_groups) for _ in row_groups)):
            raise ValueError(f'Row group selection out of bounds 0..{(metadata.num_row_groups - 1)}')
        if (not can_sub):
            raise TypeError('Requested selection of row-groups, but not scanning metadata')
        # Map global row-group index <-> owning file path.
        path_rgs = {}
        rgs_path = {}
        subrg = []
        col_counts = []
        for i in range(metadata.num_row_groups):
            fp = metadata.row_group(i).column(0).file_path
            path_rgs.setdefault(fp, []).append(i)
            rgs_path[i] = fp
        # Rebuild the path list restricted to the selected row groups, with
        # per-path local row-group indices in `subrg`.
        actual_paths = []
        for select in row_groups:
            path = rgs_path[select]
            path2 = next((_ for _ in all_paths if _.endswith(path)))
            if (path2 not in actual_paths):
                actual_paths.append(path2)
                subrg.append([path_rgs[path].index(select)])
            else:
                subrg[(- 1)].append(path_rgs[path].index(select))
            col_counts.append(metadata.row_group(select).num_rows)
    elif can_sub:
        col_counts = [metadata.row_group(i).num_rows for i in range(metadata.num_row_groups)]
    else:
        col_counts = None
    parquet_columns = subform.columns(list_indicator=list_indicator, column_prefix=column_prefix)
    return (parquet_columns, subform, actual_paths, fs, subrg, col_counts, metadata)
def to_rgb(img):
    """Normalize an image to an (H, W, >=3) float RGB array in [0, 255].

    NaNs are zeroed, values are min-max scaled to [0, 255], and images with
    fewer than 3 channels have their channels tiled up (a single-channel
    image becomes RGB).

    Fixes over the original: operate on a float *copy* (in-place `/=` on an
    integer array raised TypeError, and the caller's array was mutated) and
    guard the division so constant images come back all-zero instead of NaN.
    """
    img = np.atleast_3d(img).astype(np.float64)
    channels = img.shape[2]
    if channels < 3:
        img = np.tile(img, 3)
    img[np.isnan(img)] = 0
    img -= np.amin(img)
    peak = np.amax(img)
    if peak > 0:  # constant image => leave as zeros, don't divide by zero
        img /= peak
    img *= 255
    return img
def warn_on_static_input_change(input_states):
    """Warn when current inputs differ from traced inputs in ways ONNX cannot express.

    `input_states` is a pair (current_inputs, traced_inputs); a warning is
    emitted when a dict input's key list changed, or a string input's value
    changed, between tracing and the current call.
    """
    current, traced = input_states
    for inp, traced_inp in zip(current, traced):
        if isinstance(inp, dict):
            if list(inp.keys()) != list(traced_inp.keys()):
                warnings.warn('We detected that you are modifying a dictionnary that is an input to your model. Note that dictionaries are allowed as inputs in ONNX but they should be handled with care. Usages of dictionaries is not recommended, and should not be used except for configuration use. Also note that the order and values of the keys must remain the same. ')
        elif isinstance(inp, str):
            if inp != traced_inp:
                warnings.warn('The model seems to have string inputs/outputs. Note that strings will not appear as inputs/outputs of the ONNX graph. ')
class BottleneckBlock(CNNBlockBase):
    """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1) with an optional
    trailing max-pool that carries the spatial stride instead of the convs.
    """

    def __init__(self, in_channels, out_channels, *, bottleneck_channels, stride=1, num_groups=1, norm='BN', stride_in_1x1=False, dilation=1, has_pool=False):
        super().__init__(in_channels, out_channels, stride)
        self.has_pool = has_pool
        # Downsampling is delegated to the pool; all convs run at stride 1.
        self.pool_stride = stride
        stride = 1
        if (in_channels != out_channels):
            # Projection shortcut when channel counts differ.
            self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels))
        else:
            self.shortcut = None
        # Which conv carries the (now unit) stride — 1x1 vs 3x3 — mirrors the
        # two classic ResNet variants.
        (stride_1x1, stride_3x3) = ((stride, 1) if stride_in_1x1 else (1, stride))
        self.conv1 = Conv2d(in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False, norm=get_norm(norm, bottleneck_channels))
        self.conv2 = Conv2d(bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=(1 * dilation), bias=False, groups=num_groups, dilation=dilation, norm=get_norm(norm, bottleneck_channels))
        self.conv3 = Conv2d(bottleneck_channels, out_channels, kernel_size=1, bias=False, norm=get_norm(norm, out_channels))
        # Caffe2-style MSRA initialization for all convs present.
        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if (layer is not None):
                weight_init.c2_msra_fill(layer)
        if self.has_pool:
            if (self.pool_stride == 1):
                # Asymmetric zero-pad keeps spatial size when pooling at stride 1.
                self.pool = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)), nn.MaxPool2d(kernel_size=2, stride=self.pool_stride, padding=0))
            else:
                self.pool = nn.MaxPool2d(kernel_size=2, stride=self.pool_stride, padding=0)

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu_(out)
        out = self.conv2(out)
        out = F.relu_(out)
        out = self.conv3(out)
        if (self.shortcut is not None):
            shortcut = self.shortcut(x)
        else:
            shortcut = x
        out += shortcut
        out = F.relu_(out)
        # Optional downsampling happens after the residual addition.
        if self.has_pool:
            out = self.pool(out)
        return out
def _calculate_bin_centers(boundaries: torch.Tensor) -> torch.Tensor:
    """Midpoints of the bins defined by `boundaries`, plus one extra center
    extrapolated one step past the last boundary (assumes uniform spacing).
    """
    width = boundaries[1] - boundaries[0]
    centers = boundaries + width / 2
    last = (centers[-1] + width).unsqueeze(-1)
    return torch.cat([centers, last], dim=0)
def run_codex_prediction(test_file):
    """Run Codex over every example in `test_file` (jsonl), appending one
    prediction line per example to `<test_file>.codex` (suffix swap).

    Resumable: when the output file already exists, complete lines are kept
    (a partial trailing line from an interrupted run is dropped and the file
    rewritten) and processing restarts after them.

    Fixes over the original: file handles are closed via context managers,
    and an existing-but-empty output file no longer raises IndexError.
    """
    print(f'Running codex on {test_file} ...')
    output_file = test_file.replace('.json', '.json.codex')
    print(f'Output file: {output_file} ...')
    if os.path.exists(output_file):
        with open(output_file, 'r') as prev:
            passed_cases = prev.readlines()
        if passed_cases and not passed_cases[-1].endswith('\n'):
            # Drop the partial trailing line and persist the trimmed file.
            passed_cases = passed_cases[:-1]
            with open(output_file, 'w') as prev:
                prev.writelines(passed_cases)
        start_idx = len(passed_cases)
    else:
        start_idx = 0
    print(f'Start from {start_idx} ...')
    with open(test_file, 'r') as f, open(output_file, 'a') as output_f:
        for idx, line in tqdm(enumerate(f.readlines()[start_idx:])):
            data = json.loads(line)
            model_input = data['input']
            metadata = data['metadata']
            model_output = run_codex_api(model_input)
            output_f.write(json.dumps({'prediction': model_output, 'ground_truth': data['output'].strip(), 'input': model_input, 'metadata': metadata}) + '\n')
            if idx % 100 == 0:
                # Periodic progress peek at the latest prediction.
                print(model_output)
def halluication(directory, lang):
    """Build `{lang}.hall.train` from real training rows plus hallucinated rows.

    Copies every row of `{lang}.train` through unchanged, then appends up to
    MAX_HALL rows from `{lang}.hall`, tagging the third column 'fake;<tag>'.
    Prints a notice and returns early when the .hall file is missing.
    """
    mode = 'train'
    hall_path = f'{directory}/{lang}.hall'
    if not os.path.isfile(hall_path):
        print('missing .hall for', lang)
        return
    with open(f'{directory}/{lang}.hall.{mode}', 'w') as fp:
        # Real data first, unchanged.
        for toks in read_file(f'{directory}/{lang}.{mode}'):
            print(*toks, sep='\t', file=fp)
        # Then hallucinated rows, capped at MAX_HALL and tagged as fake.
        for i, toks in enumerate(read_file(hall_path)):
            if i == MAX_HALL:
                break
            print(toks[0], toks[1], f'fake;{toks[2]}', sep='\t', file=fp)
def train_fast(train_loader, train_table, model, model_bert, opt, bert_config, tokenizer, max_seq_length, num_target_layers, accumulate_gradients=1, check_grad=True, st_pos=0, opt_bert=None, path_db=None, dset_name='train'):
    """One training epoch for the SQLova-style text-to-SQL model (fast path
    using pre-tokenized batches).

    Returns (acc, aux_out) where acc = [ave_loss, acc_sc, acc_sa, acc_wn,
    acc_wc, acc_wo, acc_wvi, acc_wv, acc_lx, acc_x].

    Bug fix: acc_wvi previously divided cnt_wv (where-value string match)
    instead of cnt_wvi (where-value index match), duplicating acc_wv.
    """
    model.train()
    model_bert.train()
    ave_loss = 0
    cnt = 0       # examples seen
    cnt_sc = 0    # correct select-column
    cnt_sa = 0    # correct select-aggregation
    cnt_wn = 0    # correct where-number
    cnt_wc = 0    # correct where-column
    cnt_wo = 0    # correct where-operator
    cnt_wv = 0    # correct where-value (string match)
    cnt_wvi = 0   # correct where-value token index
    cnt_lx = 0    # correct logical form
    cnt_x = 0     # correct execution (never updated in this fast path)
    for iB, t in enumerate(train_loader):
        cnt += len(t)
        if cnt < st_pos:
            # Skip batches until the requested start position is reached.
            continue
        (input_ids, input_mask, segment_ids, tokens, tb, sql_i, hds, i_nlu, i_hds, l_n, l_hpu_batch, l_hs, nlu, nlu_t, nlu_tt, t_to_tt_idx, tt_to_t_idx, g_sc, g_sa, g_wn, g_wc, g_wo, g_wv, g_wvi, g_wvi_corenlp) = list(zip(*t))
        # Flatten per-example header token-length lists into one list.
        l_hpu = [hpu1 for l_hpu1 in l_hpu_batch for hpu1 in l_hpu1]
        all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device)
        all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device)
        all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device)
        (wemb_n, wemb_h) = get_wemb_bert_fast(bert_config, model_bert, i_hds, l_n, l_hpu, l_hs, all_input_ids, all_segment_ids, all_input_mask, num_out_layers_n=num_target_layers, num_out_layers_h=num_target_layers)
        # Teacher forcing: gold clause labels are fed to the decoder.
        (s_sc, s_sa, s_wn, s_wc, s_wo, s_wv) = model(wemb_n, l_n, wemb_h, l_hpu, l_hs, g_sc=g_sc, g_sa=g_sa, g_wn=g_wn, g_wc=g_wc, g_wvi=g_wvi)
        loss = Loss_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi)
        # Gradient accumulation: zero on the first micro-batch, step on the last.
        if (iB % accumulate_gradients) == 0:
            if opt:
                opt.zero_grad()
            if opt_bert:
                opt_bert.zero_grad()
            loss.backward()
            if accumulate_gradients == 1:
                if opt:
                    opt.step()
                if opt_bert:
                    opt_bert.step()
        elif (iB % accumulate_gradients) == (accumulate_gradients - 1):
            loss.backward()
            if opt:
                opt.step()
            if opt_bert:
                opt_bert.step()
        else:
            loss.backward()
        # Decode predictions for accuracy bookkeeping.
        (pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi) = pred_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv)
        (pr_wv_str, pr_wv_str_wp) = convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_tt, tt_to_t_idx, nlu)
        pr_wc_sorted = sort_pr_wc(pr_wc, g_wc)
        pr_sql_i = generate_sql_i(pr_sc, pr_sa, pr_wn, pr_wc_sorted, pr_wo, pr_wv_str, nlu)
        (cnt_sc1_list, cnt_sa1_list, cnt_wn1_list, cnt_wc1_list, cnt_wo1_list, cnt_wvi1_list, cnt_wv1_list) = get_cnt_sw_list(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, sql_i, pr_sql_i, mode='train')
        cnt_lx1_list = get_cnt_lx_list(cnt_sc1_list, cnt_sa1_list, cnt_wn1_list, cnt_wc1_list, cnt_wo1_list, cnt_wv1_list)
        ave_loss += loss.item()
        cnt_sc += sum(cnt_sc1_list)
        cnt_sa += sum(cnt_sa1_list)
        cnt_wn += sum(cnt_wn1_list)
        cnt_wc += sum(cnt_wc1_list)
        cnt_wo += sum(cnt_wo1_list)
        cnt_wvi += sum(cnt_wvi1_list)
        cnt_wv += sum(cnt_wv1_list)
        cnt_lx += sum(cnt_lx1_list)
    ave_loss /= cnt
    acc_sc = cnt_sc / cnt
    acc_sa = cnt_sa / cnt
    acc_wn = cnt_wn / cnt
    acc_wc = cnt_wc / cnt
    acc_wo = cnt_wo / cnt
    # Fixed: was cnt_wv / cnt, which duplicated acc_wv.
    acc_wvi = cnt_wvi / cnt
    acc_wv = cnt_wv / cnt
    acc_lx = cnt_lx / cnt
    acc_x = cnt_x / cnt
    acc = [ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi, acc_wv, acc_lx, acc_x]
    aux_out = 1
    return (acc, aux_out)
def get_scheduler(optimizer, opt):
    """Build a learning-rate scheduler for `optimizer` per `opt.lr_policy`.

    Supported policies: 'lambda' (linear decay after opt.niter), 'step',
    'plateau', 'cosine'.

    Bug fix: an unknown policy previously *returned* a NotImplementedError
    instance (callers would silently get an exception object instead of a
    scheduler); it is now raised.
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            # 1.0 for the first opt.niter epochs, then linear decay to 0
            # over opt.niter_decay epochs.
            return 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def import_sample(path: Union[Path, str]):
    """Load and execute a module from the sibling 'samples' directory.

    Resolves `path` relative to `<this package>/../samples`, raises
    ValueError when the file is missing, and returns the freshly executed
    module object.
    """
    path = Path(__file__).parent.parent / 'samples' / Path(path)
    if not path.exists():
        raise ValueError(f'Sample {path} not found.')
    spec = importlib.util.spec_from_file_location(path.stem, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
class Discriminator(nn.Module):
    """DCGAN-style convolutional discriminator.

    Stacks stride-2 LeakyReLU conv blocks (feature width doubling, capped at
    512) until the feature map reaches 4x4, then a final conv produces a
    single logit map. Checkpoint helpers serialize both the weights and the
    constructor kwargs needed to rebuild the network.
    """

    def __init__(self, img_size: int=64, ndf: int=64, kd: int=4, nc: int=3, batch_norm: bool=True):
        super(Discriminator, self).__init__()
        self.img_size = img_size
        self.ndf = ndf  # base feature width
        self.kd = kd    # conv kernel size
        self.nc = nc    # input channels
        pd = 1  # padding
        sd = 2  # stride: halves the spatial resolution per block
        self.spectral_transform = SpectralLoss(rows=img_size, cols=img_size)
        layers = nn.Sequential()
        layers.add_module('Conv1', nn.Conv2d(nc, ndf, kd, sd, pd, bias=False))
        layers.add_module('ReLU1', nn.LeakyReLU(0.2))
        # Number of extra stride-2 blocks needed to reach a 4x4 map.
        blocks = (int(np.log2(img_size)) - 3)
        for i in range(blocks):
            f_in = (ndf * (2 ** i))
            f_in = min(f_in, 512)   # cap feature width
            f_out = (ndf * (2 ** (i + 1)))
            f_out = min(f_out, 512)
            layers.add_module(f'Conv{(2 + i)}', nn.Conv2d(f_in, f_out, kd, sd, pd, bias=False))
            if batch_norm:
                layers.add_module(f'BatchNorm{(2 + i)}', nn.BatchNorm2d(f_out))
            layers.add_module(f'ReLU{(2 + i)}', nn.LeakyReLU(0.2))
        f_in = min((ndf * (2 ** blocks)), 512)
        # Final conv collapses to one output channel (the logit).
        layers.add_module(f'Conv{(2 + blocks)}', nn.Conv2d(f_in, 1, kd, 1, 0, bias=False))
        self._forward = layers

    def forward(self, x):
        y = self._forward(x)
        return y

    def par_count(self):
        # Total parameter count of the network.
        c = 0
        for p in self.parameters():
            c += np.prod(p.shape)
        return c

    def print_par_count(self):
        # Aligned per-parameter shape/size listing for debugging.
        for (name, p) in self.named_parameters():
            print(f'{name:>40}: {str(p.shape):>40} {np.prod(p.shape):>15,}')

    def load(self, state):
        self.load_state_dict(state)

    def to_checkpoint(self):
        # Serialize weights plus constructor kwargs for from_checkpoint().
        chkpt = {}
        chkpt['state'] = self.state_dict()
        chkpt['pars'] = {'img_size': self.img_size, 'ndf': self.ndf, 'kd': self.kd, 'nc': self.nc}
        return chkpt

    # NOTE(review): no `self` parameter — presumably intended as a
    # @staticmethod. Works when called as Discriminator.from_checkpoint(c)
    # in Python 3, but fails on instance access; confirm against callers.
    def from_checkpoint(chkpt):
        D = Discriminator(**chkpt['pars'])
        D.load(chkpt['state'])
        return D
def create_worker(queue, get_blob_data):
    """Return a worker closure for queue-feeding tests.

    The returned `dummy_worker(worker_id)` feeds a per-worker blob (named
    'blob_<id>') with `get_blob_data(worker_id)` and enqueues it onto
    `queue` via the SafeEnqueueBlobs operator, writing a per-worker status
    blob.
    """
    def dummy_worker(worker_id):
        blob_name = 'blob_' + str(worker_id)
        status_name = 'status_blob_' + str(worker_id)
        workspace.FeedBlob(blob_name, get_blob_data(worker_id))
        enqueue_op = core.CreateOperator('SafeEnqueueBlobs', [queue, blob_name], [blob_name, status_name])
        workspace.RunOperatorOnce(enqueue_op)
    return dummy_worker
class LaserEmbedding(EmbeddingBase):
    """Sentence embeddings computed by the LASER encoder inside a Docker container.

    The image 'laser:latest' is built on first use; each call round-trips
    sentences through a temp directory bind-mounted into the container.
    """

    def __init__(self):
        self.client: DockerClient = docker.from_env()
        self.__init_laser()
        self.size = 1024  # LASER emits 1024-dimensional embeddings

    def dim(self) -> int:
        return self.size

    def batcher(self, params, batch: List[List[str]]) -> np.ndarray:
        # Join tokens back into sentences; empty sentences become ['.'].
        batch = [(' '.join(sent) if (sent != []) else ['.']) for sent in batch]
        return self.run_laser(batch)

    def run_laser(self, texts: List[str]) -> np.ndarray:
        """Embed `texts`; returns an (n_texts, 1024) float32 array."""
        resources: Path = Path('resources')
        # Random per-call scratch dir so concurrent calls cannot collide.
        tmp_path: Path = (resources / ''.join(random.choices((string.ascii_letters + string.digits), k=16)))
        tmp_path.mkdir(exist_ok=False)
        input_path: Path = (tmp_path / 'input.txt')
        input_path.write_text('\n'.join(texts), encoding='utf-8')
        self.__run_container(tmp_path)
        output_path: Path = (tmp_path / 'output.npy')
        # The container writes raw float32 values (no .npy header), hence fromfile.
        res = np.fromfile(str(output_path.absolute()), dtype=np.float32, count=(-1))
        res.resize((res.shape[0] // self.size), self.size)
        shutil.rmtree(tmp_path)
        return res

    def __run_container(self, tmp_path: Path):
        docker_cmd = 'bash /app/LASER/tasks/embed/embed.sh /resources/input.txt /resources/output.npy'
        docker_vol = {str(tmp_path.absolute()): {'bind': '/resources', 'mode': 'rw'}}
        self.client.containers.run('laser:latest', docker_cmd, remove=True, volumes=docker_vol)

    def __init_laser(self):
        """Build the 'laser:latest' image when it is not present locally."""
        try:
            self.client.images.get('laser:latest')
        except ImageNotFound:
            # The build-context URL string was truncated/corrupted in the
            # original source. TODO(review): confirm this is the intended
            # repository (docker build accepts a git URL as build context).
            url = 'https://github.com/facebookresearch/LASER.git'
            dockerfile = 'Dockerfile'
            self.client.images.build(path=url, dockerfile=dockerfile, tag='laser')
def rebuild_sql_val(sql):
    """Recursively rebuild condition values in a parsed SQL dict.

    No-op (returns `sql` unchanged) when `sql` is None or value checking is
    enabled (DISABLE_VALUE falsy). Otherwise rewrites the conditions of the
    from/having/where clauses and recurses into set-operation subqueries.
    """
    if sql is None or not DISABLE_VALUE:
        return sql
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    for clause in ('having', 'where'):
        sql[clause] = rebuild_condition_val(sql[clause])
    for set_op in ('intersect', 'except', 'union'):
        sql[set_op] = rebuild_sql_val(sql[set_op])
    return sql
def _make_dict_of_lists_symmetric(dct: dict):
    """Mutate `dct` (key -> list of values) so every edge is bidirectional.

    If v appears under k, ensure k appears under v. Touched lists are
    de-duplicated via set(), so their element order is not preserved.
    """
    reverse_edges = defaultdict(list)
    for src, dsts in dct.items():
        for dst in dsts:
            reverse_edges[dst].append(src)
    for node, sources in reverse_edges.items():
        try:
            dct[node] += sources
        except KeyError:
            dct[node] = sources
        dct[node] = list(set(dct[node]))
def test_does_rdataframe_see_these_as_boolean():
    """Round-trip a boolean awkward Array through RDataFrame and negate it."""
    source = ak.Array([True, False, True])
    frame = ak.to_rdataframe({'x': source})
    assert frame.GetColumnType('x') == 'bool'
    negated = frame.Define('y', '!x')
    result = ak.from_rdataframe(negated, columns=('y',))
    expected = [not flag for flag in source]
    assert expected == result['y'].to_list()
def softmax(x): e = numpy.exp((x - numpy.max(x))) if (e.ndim == 1): return (e / numpy.sum(e, axis=0)) else: return (e / numpy.array([numpy.sum(e, axis=1)]).T)
class UniformBackgroundField(BaseSrc):
    """Uniform background magnetic field source.

    Defined by amplitude, inclination (clamped to [-90, 90] degrees) and
    declination, all validated on assignment.

    The property decorators below were missing in the original source
    (bare getter/setter pairs sharing one name simply shadow each other,
    so the validate_float calls never ran); '@property' / '@<name>.setter'
    are restored here so __init__'s attribute assignments hit the setters.
    """

    def __init__(self, receiver_list=None, amplitude=50000, inclination=90, declination=0, **kwargs):
        # Assign through the validating setters before the base init.
        self.amplitude = amplitude
        self.inclination = inclination
        self.declination = declination
        super().__init__(receiver_list=receiver_list, **kwargs)

    @property
    def amplitude(self):
        """Field amplitude (validated float)."""
        return self._amplitude

    @amplitude.setter
    def amplitude(self, value):
        self._amplitude = validate_float('amplitude', value)

    @property
    def inclination(self):
        """Field inclination in degrees, restricted to [-90, 90]."""
        return self._inclination

    @inclination.setter
    def inclination(self, value):
        self._inclination = validate_float('inclination', value, min_val=(- 90.0), max_val=90.0)

    @property
    def declination(self):
        """Field declination (validated float)."""
        return self._declination

    @declination.setter
    def declination(self, value):
        self._declination = validate_float('declination', value)

    @property
    def b0(self):
        """Cartesian background field vector from amplitude/inclination/declination."""
        return (self.amplitude * dip_azimuth2cartesian(self.inclination, self.declination).squeeze())
def roi_array_to_dict(a):
    """Convert a structured ROI record array into a list of dicts.

    Each record's startx/starty/endx/endy/groupx/groupy fields become
    'top_left', 'bottom_right' and 'bin' pairs of plain Python ints.
    """
    fields = a[['startx', 'starty', 'endx', 'endy', 'groupx', 'groupy']]
    out = []
    for sx, sy, ex, ey, gx, gy in fields:
        out.append({
            'top_left': [int(sx), int(sy)],
            'bottom_right': [int(ex), int(ey)],
            'bin': [int(gx), int(gy)],
        })
    return out
# The original source shows '_torch _vision' before the class and a bare
# 'def image_processor_dict(self)' — these are the standard transformers-test
# decorators with their '@require_' / '@property' parts lost in extraction.
# Restored: without @property, `**self.image_processor_dict` would try to
# unpack a bound method.
@require_torch
@require_vision
class DeiTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for DeiTImageProcessor: attribute presence, from_dict
    kwarg overrides, and PIL/numpy/torch input handling."""

    image_processing_class = (DeiTImageProcessor if is_vision_available() else None)
    test_cast_dtype = True

    def setUp(self):
        self.image_processor_tester = DeiTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        # Defaults from the tester dict, then explicit kwarg overrides.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 20, 'width': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # PIL inputs: single image gets batch dim 1; a list gets the full batch.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))

    def test_call_numpy(self):
        # numpy inputs: same shape expectations as PIL.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))

    def test_call_pytorch(self):
        # torch tensor inputs: same shape expectations as PIL.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
def MonoidAlgebras(base_ring):
    """The category of monoid algebras over ``base_ring``.

    Shorthand for ``Monoids().Algebras(base_ring)``.
    """
    from sage.categories.monoids import Monoids
    category = Monoids()
    return category.Algebras(base_ring)
def p_assert_statement(s):
    """Parse an 'assert' statement: condition plus optional ', message'.

    The scanner `s` is positioned on the 'assert' keyword on entry; returns
    an AssertStatNode (value is None when no message was given).
    """
    pos = s.position()
    s.next()  # consume 'assert'
    cond = p_test(s)
    value = None
    if s.sy == ',':
        s.next()  # consume ',' introducing the failure message
        value = p_test(s)
    return Nodes.AssertStatNode(pos, cond=cond, value=value)
def generate_files(train_gen, dev_gen, train_preprocess_path, dev_preprocess_path):
    """Write train/dev TFRecord files.

    With an explicit dev generator, each generator is dumped to its own
    file; otherwise every 20th example of train_gen is diverted into the
    dev file (1-based counting, so examples 20, 40, ... go to dev).
    """
    if dev_gen:
        gen_file(train_gen, train_preprocess_path)
        gen_file(dev_gen, dev_preprocess_path)
        return
    train_writer = tf.python_io.TFRecordWriter(train_preprocess_path)
    dev_writer = tf.python_io.TFRecordWriter(dev_preprocess_path)
    line_counter = 1
    for case in train_gen:
        example = generator_utils.to_example(case)
        target = dev_writer if line_counter % 20 == 0 else train_writer
        target.write(example.SerializeToString())
        line_counter += 1
    train_writer.close()
    dev_writer.close()
def generate_backward_function_mapping(function_info):
    """Render backward_functions.py from its template.

    Converts the parsed `function_info` into a flat function list and
    feeds both to the template generator.
    """
    function_list = utils.info_to_list(function_info)
    template_path = join(base, 'python/src/nnabla/backward_functions.py.tmpl')
    utils.generate_from_template(template_path, function_info=function_info, function_list=function_list)
def read_labelmap(labelmap_file):
    """Parse a pbtxt-style labelmap into labels and an id set.

    Iterates the open file (or any iterable of lines); each 'id:' /
    'label_id:' line emits an entry paired with the most recent 'name:'
    line. Returns ([{'id': ..., 'name': ...}, ...], set_of_ids).
    """
    entries = []
    ids_seen = set()
    current_name = ''
    for raw in labelmap_file:
        if raw.startswith(' name:'):
            current_name = raw.split('"')[1]
        elif raw.startswith((' id:', ' label_id:')):
            label_id = int(raw.strip().split(' ')[(- 1)])
            entries.append({'id': label_id, 'name': current_name})
            ids_seen.add(label_id)
    return (entries, ids_seen)
def train_model(model_settings, output_path, tensorboard_logging=False):
    """Train an RL model (PPO/SAC/TD3) described by ``model_settings``.

    Creates ``output_path/model``; if that folder already exists, resumes from
    the latest checkpoint found inside it. Saves periodic checkpoints, the
    final weights, the environment's world, and (for fresh runs only) the
    model settings JSON. Returns the trained model.
    """
    num_of_envs = model_settings['num_of_envs']
    model_path = os.path.join(output_path, 'model')
    # Tensorboard logs are colocated with the model only when requested.
    if tensorboard_logging:
        tb_path = model_path
    else:
        tb_path = None
    try:
        os.makedirs(model_path)
        ckpt_path = None
        ckpt_step = 0
    except FileExistsError:
        # Folder exists -> resume training from the newest checkpoint in it.
        print("Folder '{}' already exists".format(model_path))
        (ckpt_path, ckpt_step) = utils.get_latest_checkpoint_path(model_path)
    set_global_seeds(model_settings['seed'])
    # Each algorithm branch builds (model, env) and the remaining-steps budget
    # (total minus steps already covered by the resumed checkpoint).
    if (model_settings['algorithm'] == 'PPO'):
        (model, env) = get_PPO_model(model_settings, model_path, ckpt_path, ckpt_step, num_of_envs, tb_path)
        num_of_active_envs = num_of_envs
        total_time_steps = (model_settings['total_time_steps'] - ckpt_step)
        validate_every_timesteps = model_settings['validate_every_timesteps']
    elif (model_settings['algorithm'] == 'SAC'):
        (model, env) = get_SAC_model(model_settings, model_path, ckpt_path, ckpt_step, tb_path)
        num_of_active_envs = num_of_envs
        total_time_steps = (model_settings['total_time_steps'] - ckpt_step)
        validate_every_timesteps = model_settings['validate_every_timesteps']
    elif (model_settings['algorithm'] == 'TD3'):
        (model, env) = get_TD3_model(model_settings, model_path, ckpt_path, ckpt_step, tb_path)
        num_of_active_envs = num_of_envs
        total_time_steps = (model_settings['total_time_steps'] - ckpt_step)
        validate_every_timesteps = model_settings['validate_every_timesteps']
    else:
        raise Exception('{} is not supported for training in the baselines'.format(model_settings['algorithm']))
    # Checkpoint frequency is per-env steps, hence the division.
    ckpt_frequency = int((validate_every_timesteps / num_of_active_envs))
    checkpoint_callback = CheckpointCallback(save_freq=ckpt_frequency, save_path=model_path, name_prefix='model')
    if (ckpt_path is None):
        # Only persist settings on a fresh run, never when resuming.
        utils.save_model_settings(os.path.join(model_path, 'model_settings.json'), model_settings)
    model.learn(int(total_time_steps), callback=checkpoint_callback, reset_num_timesteps=(ckpt_path is None))
    model.save(save_path=os.path.join(model_path, 'model_{}_steps'.format(total_time_steps)))
    # SubprocVecEnv proxies method calls to the worker processes.
    if (env.__class__.__name__ == 'SubprocVecEnv'):
        env.env_method('save_world', output_path)
    else:
        env.save_world(output_path)
    env.close()
    return model
def mult_sent_answer_counter():
    """Count and print the number of answers whose span starts and ends in
    different sentences of the module-level ``aug_data`` SQuAD-style dataset.

    Fix: removed the unused local ``text = answer['text']`` lookup.
    """
    count = 0
    for article in aug_data['data']:
        for para in article['paragraphs']:
            for qa in para['qas']:
                for answer in qa['answers']:
                    word_start = answer['answer_word_start']
                    word_stop = answer['answer_word_stop']
                    # Index 0 of start/stop is the sentence index; a mismatch
                    # means the answer crosses a sentence boundary.
                    if (word_start is not None) and (word_start[0] != word_stop[0]):
                        count += 1
    print(count)
.parametrize('seed', [313])  # NOTE(review): leading `@pytest.mark` appears lost in extraction on these five decorator lines
.parametrize('axis', [None, 0, 1, 2, 3, (0, 2), (1, 2, 3)])
.parametrize('keepdims', [False, True])
.parametrize('inshape', [(2, 3, 4, 5), (2, 1, 4, 5)])
.parametrize('op, ctx, func_name', list_ctx_and_func_name(['sum', 'mean', 'max', 'min', 'prod']))
def test_reduction_forward_backward(op, seed, inshape, axis, keepdims, ctx, func_name):
    """Check nnabla reduction ops (forward and backward) against their NumPy
    reference of the same name, over all axis/keepdims combinations."""
    func = getattr(F, op)
    ref_func = getattr(np, op)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    # atol_b loosens the backward-pass tolerance for reductions.
    function_tester(rng, func, ref_func, inputs, func_args=[axis], func_kwargs=dict(keepdims=keepdims), ctx=ctx, func_name=func_name, atol_b=0.006)
def get_system(name, args, schema=None, timed=False, model_path=None):
    """Factory for dialogue systems.

    ``name`` selects 'rulebased', 'neural', or 'cmd'; other values raise
    ``ValueError``. The rulebased/neural systems share a lexicon and realizer
    built from ``args``.
    """
    if name == 'cmd':
        return CmdSystem()
    if name not in ('rulebased', 'neural'):
        raise ValueError('Unknown system %s' % name)
    lexicon = Lexicon(schema, args.learned_lex, stop_words=args.stop_words, lexicon_path=args.lexicon)
    # Fall back to the default realizer when no inverse lexicon file is given.
    if args.inverse_lexicon:
        realizer = InverseLexicon.from_file(args.inverse_lexicon)
    else:
        realizer = DefaultInverseLexicon()
    if name == 'rulebased':
        templates = Templates.from_pickle(args.templates)
        generator = Generator(templates)
        manager = Manager.from_pickle(args.policy)
        return RulebasedSystem(lexicon, generator, manager, timed)
    # name == 'neural'
    assert args.model_path
    return NeuralSystem(schema, lexicon, args.model_path, args.fact_check, args.decoding, realizer=realizer)
def connect(host, port):
    """Open a TLS-wrapped TCP connection to ``(host, port)`` and return the
    connected SSL socket.

    Fix: ``ssl.wrap_socket`` was deprecated in Python 3.7 and removed in 3.12;
    use an ``SSLContext`` instead. The context below reproduces the legacy
    defaults (no hostname check, no certificate verification) to preserve the
    original behavior.
    SECURITY NOTE: with verification disabled this connection is vulnerable to
    man-in-the-middle attacks; enable verification where possible.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    sock = socket.create_connection((host, port))
    ssl_sock = context.wrap_socket(sock)
    return ssl_sock
def accuracy(output, labels):
    """Fraction of rows in ``output`` whose argmax equals ``labels``.

    Returns a scalar tensor (correct count as double divided by batch size).
    """
    predicted = output.max(1)[1].type_as(labels)
    n_correct = predicted.eq(labels).double().sum()
    return n_correct / len(labels)
def run_return_code_old(command):
    """Run ``command`` through the shell; return ``(returncode, stdout)``.

    stdout is not captured (no pipe is attached), so the second element is
    always ``None``; output goes straight to the parent's streams.
    """
    import subprocess
    proc = subprocess.Popen(command, shell=True)
    stdout_data, _stderr_data = proc.communicate(), None
    return (proc.returncode, stdout_data[0])
def parse_request(r):
    """Decode a ``requests``-style response object.

    Returns the parsed JSON body (or a ``{'message': ...}`` fallback built
    from the raw text) for status codes <= 204; otherwise aggregates any
    error messages and raises ``exceptions.RequestError``.
    """
    from . import exceptions
    try:
        payload = r.json()
    except Exception:
        # Body is not JSON: keep it (if any) as a plain message.
        payload = {} if len(r.text) == 0 else {'message': r.text}
    if r.status_code <= 204:
        return payload
    payload['message'] = payload.get('message', '')
    payload['status'] = payload.get('status', '')
    for error in payload.get('errors', []):
        payload['message'] += (' ' + error.get('message', ''))
    raise exceptions.RequestError(status=payload['status'], message=payload['message'])
def common_pre_post_processing(func_raw):
    """Decorator adding optional pre-normalisation of the first argument and
    optional post-processing of the returned score map.

    The wrapper pops the keyword flags ``pre_normalise``, ``post_standardise``,
    ``post_zeroonescaling`` and ``post_edgeprior`` before delegating. When the
    wrapped function returns a tuple of length > 1, only its first element is
    treated as the scores and post-processed; the rest passes through.
    """
    def func(*args, **kwargs):
        pre_normalise = kwargs.pop('pre_normalise', False)
        post_standardise = kwargs.pop('post_standardise', False)
        post_zeroonescaling = kwargs.pop('post_zeroonescaling', False)
        post_edgeprior = kwargs.pop('post_edgeprior', False)
        if pre_normalise:
            args = (standardise(args[0]),) + tuple(args[1:])
        out = func_raw(*args, **kwargs)
        is_multi = (type(out) == tuple) and (len(out) > 1)
        scores = out[0] if is_multi else out
        if post_standardise:
            scores = standardise(scores, axis=None)
        if post_zeroonescaling:
            # Min-max rescale into [0, 1].
            scores = (scores - scores.min()) / (scores.max() - scores.min())
        if post_edgeprior:
            scores /= scores.mean()
        if is_multi:
            rebuilt = list(out)
            rebuilt[0] = scores
            return tuple(rebuilt)
        return scores
    return func
def override_option(ctx, param, value):
    """Click option callback: when the override option is unset (None) or an
    empty iterable, fall back to the value of the base option whose name is
    this option's name minus its first ``_``-separated component.
    """
    unset = value is None or (isinstance(value, Iterable) and len(value) == 0)
    if unset:
        base_name = '_'.join(param.name.split('_')[1:])
        return ctx.params[base_name]
    return value
class SmallUpdateBlock(nn.Module):
    """Small update block: encodes (flow, correlation) into motion features,
    updates the hidden state with a ConvGRU, and predicts a flow delta.

    ``forward`` returns ``(net, None, delta_flow)``; the middle slot carries
    no mask in this small variant.
    """

    def __init__(self, args, hidden_dim=96):
        super(SmallUpdateBlock, self).__init__()
        self.encoder = SmallMotionEncoder(args)
        # GRU input: context features plus encoded motion features.
        self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82 + 64)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=128)

    def forward(self, net, inp, corr, flow):
        motion_features = self.encoder(flow, corr)
        gru_input = torch.cat([inp, motion_features], dim=1)
        net = self.gru(net, gru_input)
        delta_flow = self.flow_head(net)
        return (net, None, delta_flow)
def get_width(tensor_shape):
    """Return the static width (dimension 2) of a rank-4 TensorShape.

    Assumes NHWC layout (dim 2 = width) — consistent with the rank-4 assert,
    but confirm against callers. Returns None when the dimension is unknown.
    """
    tensor_shape.assert_has_rank(rank=4)
    width_dim = tensor_shape[2]
    return width_dim.value
class TestConvLayer(unittest.TestCase):
    """Unit tests for ConvLayer and FCLayer geometry, op counts, filter sizes,
    and repr round-tripping."""

    def test_data_loops(self):
        # Data-dimension -> loop-dimension mapping is a class-level property,
        # identical for Conv and FC layers regardless of instance parameters.
        dls = ConvLayer.data_loops()
        self.assertEqual(dls[de.FIL], DataDimLoops(le.IFM, le.OFM))
        self.assertEqual(dls[de.IFM], DataDimLoops(le.IFM, le.BAT))
        self.assertEqual(dls[de.OFM], DataDimLoops(le.OFM, le.BAT))
        clayer = ConvLayer(3, 64, [28, 14], 3, strd=2)
        flayer = FCLayer(2048, 4096, sfil=2)
        self.assertTupleEqual(FCLayer.data_loops(), dls)
        self.assertTupleEqual(clayer.data_loops(), dls)
        self.assertTupleEqual(flayer.data_loops(), dls)

    def test_input_layer(self):
        # Input fmap size follows (ofm - 1) * stride + filter.
        clayer = ConvLayer(3, 64, [28, 14], 3, strd=2)
        inlayer = clayer.input_layer()
        self.assertIsInstance(inlayer, Layer)
        self.assertEqual(inlayer.nofm, 3, 'ConvLayer: input_layer: nofm')
        self.assertEqual(inlayer.hofm, (((28 - 1) * 2) + 3), 'ConvLayer: input_layer: hofm')
        self.assertEqual(inlayer.wofm, (((14 - 1) * 2) + 3), 'ConvLayer: input_layer: wofm')

    def test_ops(self):
        # Per-neuron ops = filter area * input channels; total scales by ofmap.
        clayer = ConvLayer(3, 64, [28, 14], 3, strd=2)
        self.assertEqual(clayer.ops_per_neuron(), ((3 * 3) * 3), 'ConvLayer: ops_per_neurons')
        self.assertEqual(clayer.total_ops(), (((((3 * 3) * 28) * 14) * 3) * 64), 'ConvLayer: total_ops')

    def test_filter_size(self):
        # filter_size(word_size) covers both square and rectangular filters.
        clayer = ConvLayer(3, 64, [28, 14], 3)
        self.assertEqual(clayer.filter_size(2), ((3 * 3) * 2), 'filter_size')
        self.assertEqual(clayer.total_filter_size(2), ((((3 * 3) * 3) * 64) * 2), 'total_filter_size')
        clayer = ConvLayer(3, 64, [28, 14], [3, 1])
        self.assertEqual(clayer.filter_size(2), ((3 * 1) * 2), 'filter_size')
        self.assertEqual(clayer.total_filter_size(2), ((((3 * 1) * 3) * 64) * 2), 'total_filter_size')

    def test_filter_size_invalid(self):
        # A 3-element filter spec is malformed and must be rejected.
        with self.assertRaisesRegex(ValueError, 'ConvLayer: .*sfil.*'):
            _ = ConvLayer(3, 64, [28, 14], [3, 3, 3])

    def test_fclayer(self):
        flayer = FCLayer(2048, 4096, sfil=2)
        self.assertEqual(flayer.total_ofmap_size(), 4096, 'FCLayer: ofmap_size')
        self.assertEqual(flayer.filter_size(), 4, 'FCLayer: filter_size')
        self.assertEqual(flayer.total_filter_size(), ((2048 * 4096) * 4), 'FCLayer: filter_size')

    def test_repr(self):
        # repr() must name the class and eval() back to an equal instance.
        for l in [ConvLayer(3, 64, [28, 14], [3, 1]), ConvLayer(3, 64, [28, 14], 3, strd=[7, 5]), ConvLayer(3, 64, 28, 3, strd=7), ConvLayer(3, 64, 28, 3)]:
            self.assertIn('ConvLayer', repr(l))
            self.assertEqual(eval(repr(l)), l)
        for l in [FCLayer(2048, 4096), FCLayer(100, 300, 7), FCLayer(100, 300, [7, 3])]:
            self.assertIn('FCLayer', repr(l))
            self.assertEqual(eval(repr(l)), l)
def compatible_systems(split_prime_list, complement_exp_vec_dict):
    """Recursively enumerate all systems of [exponent_vector, complementary_vector]
    pairs — one pair per split prime in ``split_prime_list`` — such that each new
    pair is compatible (via ``compatible_vectors_check`` modulo
    ``gcd(q - 1, qj - 1)``) with every pair already chosen for earlier primes.

    Returns a list of systems; each system is a list of two-element lists.
    """
    S0 = split_prime_list
    system_list = []
    if (len(S0) == 1):
        # Base case: every (vector, complement) pair for the single prime
        # forms a one-pair system on its own.
        q = S0[0]
        for exponent_vector in complement_exp_vec_dict[q]:
            for complementary_vector in complement_exp_vec_dict[q][exponent_vector]:
                pair = [[exponent_vector, complementary_vector]]
                system_list.append(pair)
    elif (len(S0) > 1):
        # Recurse on all primes but the last, then try to extend each old
        # system with every pair belonging to the last prime q.
        S1 = S0[:(- 1)]
        old_systems = compatible_systems(S1, complement_exp_vec_dict)
        q = S0[(- 1)]
        # One gcd per earlier prime: the modulus used for compatibility checks.
        gcds = [gcd((q - 1), (qj - 1)) for qj in S1]
        for exp_vec in complement_exp_vec_dict[q]:
            l = len(exp_vec)
            for comp_vec in complement_exp_vec_dict[q][exp_vec]:
                for old_system in old_systems:
                    # Both the exponent vector and its complement must be
                    # compatible with the corresponding vectors of every
                    # earlier prime in the candidate system.
                    if all(((compatible_vectors_check(exp_vec, exp_vec_qj, g, l) and compatible_vectors_check(comp_vec, comp_vec_qj, g, l)) for (g, (exp_vec_qj, comp_vec_qj)) in zip(gcds, old_system))):
                        new_system = (old_system + [[exp_vec, comp_vec]])
                        system_list.append(new_system)
    return system_list
class Shape(goos.ProblemGraphNode):
    """Base problem-graph node for geometric shapes."""
    node_type = 'goos.shape'

    def translate(self, offset: np.ndarray) -> 'Shape':
        """Return this shape translated by ``offset`` (wrapped in a
        ``TranslateShape`` node; the original shape is not modified)."""
        return TranslateShape(self, offset)
def register_methods(root_module):
    """Register Python bindings for every wrapped ns-3 class in this module.

    Auto-generated pybindgen registration code: one register_* call per C++
    type, ordered roughly base-types-first so dependent registrations resolve.
    Do not edit by hand; regenerate from the bindings scanner instead.
    """
    # Plain value and utility types.
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    # DefaultDeleter<T> template instantiations.
    register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
    register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
    register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
    register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
    register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
    register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
    register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
    register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
    register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Inet6SocketAddress_methods(root_module, root_module['ns3::Inet6SocketAddress'])
    register_Ns3InetSocketAddress_methods(root_module, root_module['ns3::InetSocketAddress'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    # PyViz visualizer types.
    register_Ns3PyViz_methods(root_module, root_module['ns3::PyViz'])
    register_Ns3PyVizLastPacketsSample_methods(root_module, root_module['ns3::PyViz::LastPacketsSample'])
    register_Ns3PyVizNetDeviceStatistics_methods(root_module, root_module['ns3::PyViz::NetDeviceStatistics'])
    register_Ns3PyVizNodeStatistics_methods(root_module, root_module['ns3::PyViz::NodeStatistics'])
    register_Ns3PyVizPacketCaptureOptions_methods(root_module, root_module['ns3::PyViz::PacketCaptureOptions'])
    register_Ns3PyVizPacketDropSample_methods(root_module, root_module['ns3::PyViz::PacketDropSample'])
    register_Ns3PyVizPacketSample_methods(root_module, root_module['ns3::PyViz::PacketSample'])
    register_Ns3PyVizRxPacketSample_methods(root_module, root_module['ns3::PyViz::RxPacketSample'])
    register_Ns3PyVizTransmissionSample_methods(root_module, root_module['ns3::PyViz::TransmissionSample'])
    register_Ns3PyVizTxPacketSample_methods(root_module, root_module['ns3::PyViz::TxPacketSample'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    # SimpleRefCount<T, empty, DefaultDeleter<T>> template instantiations.
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    # Socket, tag and time types.
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketIpTosTag_methods(root_module, root_module['ns3::SocketIpTosTag'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketIpv6HopLimitTag_methods(root_module, root_module['ns3::SocketIpv6HopLimitTag'])
    register_Ns3SocketIpv6TclassTag_methods(root_module, root_module['ns3::SocketIpv6TclassTag'])
    register_Ns3SocketPriorityTag_methods(root_module, root_module['ns3::SocketPriorityTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    # Attribute/callback machinery and protocol objects.
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
    register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
    register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4L3Protocol_methods(root_module, root_module['ns3::Ipv4L3Protocol'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
    register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
    register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    # CallbackImpl<...> template instantiations (unused slots are ns3::empty).
    register_Ns3CallbackImpl__Bool_Ns3Ptr__lt__ns3Socket__gt___Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< bool, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Const_ns3Ipv4Header___amp___Ns3Ptr__lt__const_ns3Packet__gt___Ns3Ipv4L3ProtocolDropReason_Ns3Ptr__lt__ns3Ipv4__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, const ns3::Ipv4Header &, ns3::Ptr<const ns3::Packet>, ns3::Ipv4L3Protocol::DropReason, ns3::Ptr<ns3::Ipv4>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Const_ns3Ipv4Header___amp___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, const ns3::Ipv4Header &, ns3::Ptr<const ns3::Packet>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Ptr__lt__ns3Ipv4__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::Ptr<ns3::Ipv4>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    # Hash implementations.
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def setup(**attr):
    """numpy.distutils drop-in replacement for ``distutils.core.setup``.

    Merges numpy's command classes into ``cmdclass``, expands a supplied
    ``configuration`` callable into concrete setup keywords (re-entering
    setup() once with distutils stopped after command-line parsing so the
    configuration can see the parsed options), normalizes extension library
    descriptions, and finally delegates to ``old_setup`` with
    ``NumpyDistribution`` as the distribution class.
    """
    cmdclass = numpy_cmdclass.copy()
    new_attr = attr.copy()
    if ('cmdclass' in new_attr):
        cmdclass.update(new_attr['cmdclass'])
    new_attr['cmdclass'] = cmdclass
    if ('configuration' in new_attr):
        # A 'configuration' callable was supplied: first re-enter setup()
        # without it, stopping distutils after command-line parsing, to obtain
        # a Distribution whose parsed options we can inspect.
        configuration = new_attr.pop('configuration')
        # Save and restore the module-level distutils state we temporarily
        # override for the nested setup() call.
        old_dist = distutils.core._setup_distribution
        old_stop = distutils.core._setup_stop_after
        distutils.core._setup_distribution = None
        distutils.core._setup_stop_after = 'commandline'
        try:
            dist = setup(**new_attr)
        finally:
            distutils.core._setup_distribution = old_dist
            distutils.core._setup_stop_after = old_stop
        if (dist.help or (not _command_line_ok())):
            # Help requested or unsupported command line: nothing to build.
            return dist
        config = configuration()
        if hasattr(config, 'todict'):
            config = config.todict()
        _dict_append(new_attr, **config)
    # Move library build-info tuples out of Extension.libraries into the
    # top-level 'libraries' list, leaving only plain names on the extension.
    libraries = []
    for ext in new_attr.get('ext_modules', []):
        new_libraries = []
        for item in ext.libraries:
            if is_sequence(item):
                (lib_name, build_info) = item
                _check_append_ext_library(libraries, lib_name, build_info)
                new_libraries.append(lib_name)
            elif is_string(item):
                new_libraries.append(item)
            else:
                raise TypeError(('invalid description of extension module library %r' % (item,)))
        ext.libraries = new_libraries
    if libraries:
        if ('libraries' not in new_attr):
            new_attr['libraries'] = []
        for item in libraries:
            _check_append_library(new_attr['libraries'], item)
    # Building ext modules or libraries implies the 'headers' key must exist.
    if ((('ext_modules' in new_attr) or ('libraries' in new_attr)) and ('headers' not in new_attr)):
        new_attr['headers'] = []
    new_attr['distclass'] = NumpyDistribution
    return old_setup(**new_attr)
class InterfaceFeature(Feature):
    """Feature testing that a named interface object can be imported from a
    Python module and is functional (responds to a trivial evaluation)."""

    def __classcall__(cls, name, module, description=None):
        # Normalize a bare module-name string into a PythonModule feature so
        # equal constructor arguments yield the same cached instance.
        if isinstance(module, str):
            module = PythonModule(module)
        return Feature.__classcall__(cls, name, module, description)

    def __init__(self, name, module, description):
        super().__init__(name, description=description)
        # Feature for the module expected to expose the interface attribute.
        self.module = module

    def _is_present(self):
        """Check the module is importable, fetch the interface attribute, then
        probe it with a trivial '2+3' evaluation; return a FeatureTestResult."""
        result = self.module.is_present()
        if (not result):
            return result
        m = importlib.import_module(self.module.name)
        try:
            interface = getattr(m, self.name)
        except Exception as exception:
            return FeatureTestResult(self, False, reason=f'Interface {self.name} cannot be imported: {exception}')
        try:
            interface('2+3')
            return FeatureTestResult(self, True)
        except Exception as exception:
            return FeatureTestResult(self, False, reason=f'Interface {interface} is not functional: {exception}')
def convert_DateProperty(model, prop, kwargs):
    """WTForms converter for an ndb-style DateProperty.

    Auto-populated timestamp properties are not user-editable, so they map to
    no form field (None). Otherwise returns a DateField with an ISO date
    format default.
    """
    if prop.auto_now or prop.auto_now_add:
        return None
    kwargs.setdefault('format', '%Y-%m-%d')
    return f.DateField(**kwargs)
def embed_images_in_inception(imgs, inception_path, layer_name, batch_size=32):
    """Run ``imgs`` through a frozen Inception graph in batches and return the
    activations of ``layer_name`` concatenated along axis 0."""
    input_tensor = tf.placeholder(tf.float32, [None, None, None, 3])
    if (not os.path.exists(inception_path)):
        raise ValueError(('Inception network file not found: ' + inception_path))
    graph = tf.contrib.gan.eval.get_graph_def_from_disk(inception_path)
    feature_tensor = get_inception_features(input_tensor, graph, layer_name)
    batch_outputs = []
    with tf.Session() as sess:
        for start in range(0, len(imgs), batch_size):
            batch = imgs[start:(start + batch_size)]
            batch_outputs.append(sess.run(feature_tensor, feed_dict={input_tensor: batch}))
    return np.concatenate(batch_outputs, axis=0)
_config  # NOTE(review): likely a mangled decorator (e.g. `@ex.config`) lost in extraction
def task_finetune_lsmdcchoice():
    """Experiment config: fine-tune on the LSMDC multiple-choice task.

    All locals below are configuration values picked up by the experiment
    framework; the function body is never executed as ordinary code.
    """
    exp_name = 'finetune_lsmdc_choice'
    video_datasets = ['lsmdc_choice']
    image_datasets = []
    loss_names = _loss_names({'multiple_choice': 1})
    batch_size = 256
    max_epoch = 20
    max_steps = None
    warmup_steps = 0.1  # presumably a fraction of total steps for LR warmup — confirm against trainer
    draw_false_text = 5  # presumably the number of negative text candidates drawn — confirm
    learning_rate = 1e-05
    val_check_interval = 0.5
    lr_mult = 10
def bench_training(model, batch_size, seq_length, n_samples=110):
    """Time ``n_samples`` NLL evaluations of ``model`` on synthetic data.

    Uses CUDA events, so the model must live on a CUDA device. Returns a
    numpy array of per-iteration elapsed times in milliseconds.
    """
    start_evt = torch.cuda.Event(enable_timing=True)
    end_evt = torch.cuda.Event(enable_timing=True)
    device = next(model.parameters()).data.device
    # Monotone synthetic sequences on the model's device, fully unmasked.
    data = torch.rand(batch_size, seq_length, 1, device=device).cumsum(-1)
    mask = torch.ones_like(data)
    elapsed = []
    for _ in range(n_samples):
        start_evt.record()
        nll = -model.log_prob(data, mask).mean() / seq_length
        end_evt.record()
        torch.cuda.synchronize()
        elapsed.append(start_evt.elapsed_time(end_evt))
    # Free the synthetic tensors before returning.
    del data
    del mask
    return np.array(elapsed)
_utils.test(arch=supported_archs_cgraph)  # NOTE(review): likely a mangled `@test_utils.test(...)` decorator lost in extraction
def test_ndarray_dtype_mismatch_runtime():
    """Running a compiled graph with an ndarray whose dtype (i32) differs from
    the declared symbolic arg dtype (f32) must raise a RuntimeError."""
    n = 4

    # NOTE(review): a kernel decorator (e.g. `@ti.kernel`) on this inner
    # function also appears lost in extraction — confirm against upstream.
    def test(pos: ti.types.ndarray(ndim=1)):
        for i in range(n):
            pos[i] = 2.5
    # Graph arg declared as f32 ndarray.
    sym_pos = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'pos', ti.f32, ndim=1)
    g_init = ti.graph.GraphBuilder()
    g_init.dispatch(test, sym_pos)
    g = g_init.compile()
    # Deliberate mismatch: i32 ndarray fed where f32 was declared.
    a = ti.ndarray(ti.i32, shape=(n,))
    with pytest.raises(RuntimeError, match='but got an ndarray with dtype='):
        g.run({'pos': a})
def tf_efficientnet_el(pretrained=False, **kwargs):
    """EfficientNet-Edge Large, TensorFlow-port variant (SAME padding and the
    TF default BatchNorm epsilon)."""
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    return _gen_efficientnet_edge('tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
_module()  # NOTE(review): likely a mangled registry decorator (e.g. `@DETECTORS.register_module()`) lost in extraction
class YOLOF(SingleStageDetector):
    """Implementation of `You Only Look One-level Feature`.

    (Original docstring truncated in extraction; presumably referenced the
    YOLOF paper, https://arxiv.org/abs/2103.09460 — confirm against upstream.)
    """

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None):
        super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained)
def test_mixed_threads_processes(x):
    """FFT with worker threads must give identical results whether computed in
    the parent or in worker processes, and must still work afterwards."""
    expect = fft.fft(x, workers=2)
    with multiprocessing.Pool(2) as pool:
        outputs = pool.map(_mt_fft, [x] * 4)
    for out in outputs:
        assert_allclose(out, expect)
    # Threaded FFT in the parent still works after the pool is torn down.
    fft.fft(x, workers=2)
def map_sent_entities(document, entities, verbose=True):
    """Map entity mentions to the sentences of ``document`` that contain them.

    Returns ``(idx, errors)`` where ``idx`` maps sentence position -> list of
    entities in that sentence, and ``errors`` counts mentions that cross a
    sentence boundary (these are skipped, with a warning when ``verbose``).

    Fixes: ``position == None`` comparisons replaced with ``is None``; removed
    the unused local ``values`` tuple.
    """
    errors = 0
    spans = []
    # Absolute character offset of the start of each sentence.
    char_index = [s.abs_char_offsets[0] for s in document.sentences]
    for t in entities:
        position = None
        for i in range(len(char_index) - 1):
            if (t.abs_char_start >= char_index[i]) and (t.abs_char_end <= char_index[i + 1]):
                position = i
                break
        # The last sentence has no upper bound in char_index.
        if (position is None) and (t.abs_char_start >= char_index[-1]):
            position = len(char_index) - 1
        if position is None:
            # Mention spans a sentence boundary; count it and skip.
            if verbose:
                msg = f'{[t.text]} {t.span} {t.doc_name}'
                logger.warning(f'Cross-sentence mention {msg}')
            errors += 1
            continue
        try:
            shift = document.sentences[position].abs_char_offsets[0]
            span = document.sentences[position].text[(t.abs_char_start - shift):(t.abs_char_end - shift)]
            spans.append((position, t, span))
        except Exception as e:
            logger.error(f'{e}')
    idx = collections.defaultdict(list)
    for (i, entity, _) in spans:
        idx[i].append(entity)
    return (idx, errors)