code
stringlengths
101
5.91M
class Counter(object):
    """Process-safe integer counter backed by a shared ctypes value."""

    def __init__(self, initval=0):
        # Shared C int ('i') plus a lock so increments serialize across processes.
        self.val = multiprocessing.RawValue('i', initval)
        self.lock = multiprocessing.Lock()

    def increment(self):
        """Atomically add one and return the new count."""
        with self.lock:
            new_value = self.val.value + 1
            self.val.value = new_value
        return new_value

    def value(self):
        """Current count (unlocked read)."""
        return self.val.value
class CondConvResidual(InvertedResidual):
    """Inverted residual block with CondConv (conditionally parameterized)
    convolutions: a routing layer turns the pooled input into per-example
    expert-mixing weights that are passed to each conv built by the base."""

    def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, num_experts=0, drop_connect_rate=0.0):
        self.num_experts = num_experts
        # Forwarded so the InvertedResidual base builds its convs with this many experts.
        conv_kwargs = dict(num_experts=self.num_experts)
        super(CondConvResidual, self).__init__(in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, pad_type=pad_type, act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, pw_kernel_size=pw_kernel_size, se_ratio=se_ratio, se_kwargs=se_kwargs, norm_layer=norm_layer, norm_kwargs=norm_kwargs, conv_kwargs=conv_kwargs, drop_connect_rate=drop_connect_rate)
        # Routing: pooled features -> one sigmoid weight per expert.
        self.routing_fn = nn.Linear(in_chs, self.num_experts)

    def forward(self, x):
        residual = x
        # One routing decision per example, shared by all three convs below.
        pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1)
        routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))
        x = self.conv_pw(x, routing_weights)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.conv_dw(x, routing_weights)
        x = self.bn2(x)
        x = self.act2(x)
        x = self.se(x)
        x = self.conv_pwl(x, routing_weights)
        x = self.bn3(x)
        if self.has_residual:
            if (self.drop_connect_rate > 0.0):
                # Stochastic depth on the residual branch (active only in training).
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        return x
def test_getitem_at():
    """Typetracer arrays must mirror the concrete array's shape tail and form
    through item access."""
    data = np.arange(30).reshape(2, 3, 5) * 0.1
    concrete = ak.contents.NumpyArray(data)
    abstract = concrete.to_typetracer()

    assert concrete.shape == (2, 3, 5)
    # The typetracer's leading dimension is unknown; compare the known tail.
    assert abstract.shape[1:] == (3, 5)
    assert abstract[0].shape[1:] == (5,)
    assert abstract[0][0].shape[1:] == ()

    assert abstract.form == concrete.form
    assert abstract.form.type == concrete.form.type
    assert abstract[0].form == concrete[0].form
    assert abstract[0].form.type == concrete[0].form.type
from dataclasses import dataclass, asdict  # ensure dataclass machinery is in scope


@dataclass
class StructureModuleConfig:
    """Hyperparameters for an AlphaFold-style structure module.

    BUG FIX: the original class declared annotated fields with defaults and
    called ``asdict(self)``, but lacked the ``@dataclass`` decorator —
    ``asdict`` raises TypeError on non-dataclass instances and no ``__init__``
    was generated.
    """

    sequence_dim: int = 384        # single-representation width
    pairwise_dim: int = 128        # pair-representation width
    ipa_dim: int = 16              # invariant point attention hidden size
    resnet_dim: int = 128          # angle-resnet hidden size
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10   # scaling applied to predicted translations
    epsilon: float = 1e-08
    inf: float = 100000.0

    def to_dict(self):
        """Return the configuration as a plain dict."""
        return asdict(self)
def mesh_hook(mesh, mode):
    """I/O hook: on 'read', fill *mesh* with a unit square split into two
    triangles; writing is a no-op."""
    if mode != 'read':
        # 'write' (and any other mode) does nothing.
        return
    nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
    nod_ids = [0, 0, 1, 1]
    conns = [[[0, 1, 2], [0, 2, 3]]]
    mat_ids = [[0, 1]]
    descs = ['2_3']  # '2_3' = 2D triangle elements
    mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
@dace.library.expansion  # NOTE(review): restored from garbled '.expansion' residue (was a syntax error) — confirm against upstream dace.
class ExpandReducePure(pm.ExpandTransformation):
    """Expands a library Reduce node into a pure SDFG: an optional identity
    init state, then nested maps feeding an identity tasklet whose output
    memlet carries the write-conflict resolution (node.wcr)."""

    environments = []

    @staticmethod  # NOTE(review): no 'self' in the signature; dace expansions are staticmethods — confirm.
    def expansion(node: 'Reduce', state: SDFGState, sdfg: SDFG):
        node.validate(sdfg, state)
        inedge: graph.MultiConnectorEdge = state.in_edges(node)[0]
        outedge: graph.MultiConnectorEdge = state.out_edges(node)[0]
        # Squeeze away size-1 dimensions; isqdim/osqdim list the surviving dims.
        insubset = dcpy(inedge.data.subset)
        isqdim = insubset.squeeze()
        outsubset = dcpy(outedge.data.subset)
        osqdim = outsubset.squeeze()
        input_dims = len(insubset)
        output_dims = len(outsubset)
        input_data = sdfg.arrays[inedge.data.data]
        output_data = sdfg.arrays[outedge.data.data]
        if len(osqdim) == 0:
            # Fully-reduced output: keep one dummy dimension.
            osqdim = [0]
        # Default to reducing every input dimension, then drop squeezed axes.
        axes = node.axes if node.axes is not None else [i for i in range(len(inedge.data.subset))]
        axes = [axis for axis in axes if axis in isqdim]
        nsdfg = SDFG('reduce')
        nsdfg.add_array('_in', insubset.size(), input_data.dtype,
                        strides=[s for (i, s) in enumerate(input_data.strides) if i in isqdim],
                        storage=input_data.storage)
        nsdfg.add_array('_out', outsubset.size(), output_data.dtype,
                        strides=[s for (i, s) in enumerate(output_data.strides) if i in osqdim],
                        storage=output_data.storage)
        inedge._dst_conn = '_in'
        outedge._src_conn = '_out'
        node.add_in_connector('_in')
        node.add_out_connector('_out')
        if len(axes) == 0:
            # Nothing left to reduce: the expansion is a plain copy.
            nstate = nsdfg.add_state()
            r = nstate.add_read('_in')
            w = nstate.add_write('_out')
            nstate.add_edge(r, None, w, None,
                            dace.Memlet(data='_in',
                                        subset=dace.subsets.Range.from_array(nsdfg.arrays['_in']),
                                        other_subset=dace.subsets.Range.from_array(nsdfg.arrays['_out'])))
            return nsdfg
        if node.identity is not None:
            # Separate init state writes the identity into every output element.
            init_state = nsdfg.add_state()
            nstate = nsdfg.add_state()
            nsdfg.add_edge(init_state, nstate, dace.InterstateEdge())
            init_state.add_mapped_tasklet(
                'reduce_init',
                {('_o%d' % i): ('0:%s' % symstr(d)) for (i, d) in enumerate(outedge.data.subset.size())},
                {},
                ('__out = %s' % node.identity),
                {'__out': dace.Memlet.simple('_out', ','.join([('_o%d' % i) for i in range(output_dims)]))},
                external_edges=True)
        else:
            nstate = nsdfg.add_state()
        if len(axes) != input_dims:
            # Partial reduction: outer map over the kept (output) dimensions;
            # input indices mix inner '_i*' (reduced) and outer '_o*' (kept).
            (ictr, octr) = (0, 0)
            input_subset = []
            for i in isqdim:
                if i in axes:
                    input_subset.append('_i%d' % ictr)
                    ictr += 1
                else:
                    input_subset.append('_o%d' % octr)
                    octr += 1
            (ome, omx) = nstate.add_map('reduce_output',
                                        {('_o%d' % i): ('0:%s' % symstr(sz)) for (i, sz) in enumerate(outsubset.size())})
            outm = dace.Memlet.simple('_out', ','.join([('_o%d' % i) for i in range(output_dims)]), wcr_str=node.wcr)
            inmm = dace.Memlet.simple('_in', ','.join(input_subset))
        else:
            # Full reduction: scalar output, no outer map.
            (ome, omx) = (None, None)
            outm = dace.Memlet.simple('_out', '0', wcr_str=node.wcr)
            inmm = dace.Memlet.simple('_in', ','.join([('_i%d' % i) for i in range(len(axes))]))
        # Inner map over the reduced axes feeds an identity tasklet; the WCR on
        # the output memlet performs the actual reduction.
        (ime, imx) = nstate.add_map('reduce_values',
                                    {('_i%d' % i): ('0:%s' % symstr(insubset.size()[isqdim.index(axis)])) for (i, axis) in enumerate(sorted(axes))})
        t = nstate.add_tasklet('identity', {'__inp'}, {'__out'}, '__out = __inp')
        r = nstate.add_read('_in')
        w = nstate.add_read('_out')
        if ome:
            nstate.add_memlet_path(r, ome, ime, t, dst_conn='__inp', memlet=inmm)
            nstate.add_memlet_path(t, imx, omx, w, src_conn='__out', memlet=outm)
        else:
            nstate.add_memlet_path(r, ime, t, dst_conn='__inp', memlet=inmm)
            nstate.add_memlet_path(t, imx, w, src_conn='__out', memlet=outm)
        from dace.transformation import dataflow
        nsdfg.apply_transformations_repeated(dataflow.MapCollapse)
        return nsdfg
def calc_validation_error(di_v, xv, tv, err, val_iter):
    """Run *val_iter* validation batches and return the mean error value.

    di_v yields (image, label) batches; xv/tv are the graph's input/label
    variables and err is the error output variable.
    """
    total = 0.0
    for _ in range(val_iter):
        xv.d, tv.d = di_v.next()
        xv.d = xv.d / 255  # rescale pixels (presumably 0..255 input — confirm)
        err.forward(clear_buffer=True)
        total += err.d
    return total / val_iter
def test_boolean():
    """ArrayBuilder.boolean appends must be visible via snapshot, iteration
    and slicing."""
    builder = ak.highlevel.ArrayBuilder()
    for flag in (True, True, False, True):
        builder.boolean(flag)
    expected = [True, True, False, True]
    assert to_list(builder.snapshot()) == expected
    assert to_list(builder) == expected
    # Slicing the snapshot drops the first and last entries.
    assert to_list(builder.snapshot()[1:-1]) == expected[1:-1]
@set_module('numpy')  # NOTE(review): restored from garbled '_module(...)' residue — numpy decorates this with set_module; confirm.
def setbufsize(size):
    """Set the size of the buffer used in ufuncs.

    Parameters
    ----------
    size : int
        Buffer size in elements; must be in [5, 10e6] and a multiple of 16.

    Returns
    -------
    int
        The previous buffer size.
    """
    # BUG FIX: the source compared 'size > .0', rejecting every positive size;
    # numpy's actual upper bound is 10e6.
    if size > 10e6:
        raise ValueError(('Buffer size, %s, is too big.' % size))
    if size < 5:
        raise ValueError(('Buffer size, %s, is too small.' % size))
    if (size % 16) != 0:
        raise ValueError(('Buffer size, %s, is not a multiple of 16.' % size))
    # geterrobj()[0] holds the buffer size; write it back to take effect.
    pyvals = umath.geterrobj()
    old = getbufsize()
    pyvals[0] = size
    umath.seterrobj(pyvals)
    return old
def parallel(p_iter='fork', ncpus=None, **kwds):
    """Decorator/factory for Parallel.

    Used bare (``@parallel``) the first positional argument is the decorated
    function itself; otherwise the arguments configure the Parallel instance.
    """
    if isinstance(p_iter, types.FunctionType):
        # Bare-decorator call: p_iter is really the function to wrap.
        func = p_iter
        return Parallel()(func)
    return Parallel(p_iter, ncpus, **kwds)
def _is_mutation_type(data) -> bool:
    """Return True when *data* can be interpreted as a QuiverMutationType."""
    try:
        QuiverMutationType(data)
    except Exception:
        # Construction failed for any reason: not valid mutation-type data.
        return False
    return True
def annotate(sent):
    """Tokenize *sent* with a lazily-created CoreNLP client; return the words."""
    global client
    if client is None:
        # First call: start the client with sentence-splitting + tokenization only.
        client = CoreNLPClient(default_annotators=['ssplit', 'tokenize'])
    words = []
    for sentence in client.annotate(sent).sentences:
        words.extend(tok.word for tok in sentence)
    return words
def iter_dataset_targets(dataset):
    """Yield (segment_name, int32 target array) for every sequence in *dataset*."""
    dataset.init_seq_order(epoch=1)
    seq_idx = 0
    while dataset.is_less_than_num_seqs(seq_idx):
        # Sequences must be loaded one at a time before access.
        dataset.load_seqs(seq_idx, seq_idx + 1)
        name = dataset.get_tag(seq_idx)
        targets = dataset.get_targets('classes', seq_idx)
        assert targets.ndim == 1
        yield name, targets.astype('int32')
        seq_idx += 1
def get_unique_devices_(module):
    """Return the set of distinct devices used by *module*'s parameters and buffers."""
    devices = {p.device for p in module.parameters()}
    devices |= {b.device for b in module.buffers()}
    return devices
class MovesGener(object):
    """Generate all legal Dou Dizhu moves available from a hand of cards.

    Cards are integer ranks; self.cards_dict counts copies of each rank.
    Basic families (singles/pairs/triples/bombs) are precomputed in __init__
    and reused by the combo generators.
    """

    def __init__(self, cards_list):
        self.cards_list = cards_list
        self.cards_dict = collections.defaultdict(int)
        for i in self.cards_list:
            self.cards_dict[i] += 1
        self.single_card_moves = []
        self.gen_type_1_single()
        self.pair_moves = []
        self.gen_type_2_pair()
        self.triple_cards_moves = []
        self.gen_type_3_triple()
        self.bomb_moves = []
        self.gen_type_4_bomb()
        self.final_bomb_moves = []
        self.gen_type_5_king_bomb()

    def _gen_serial_moves(self, cards, min_serial, repeat=1, repeat_num=0):
        """Return all runs of consecutive ranks of length >= min_serial.

        repeat: copies of each rank (1=chain, 2=pair chain, 3=triple chain).
        repeat_num: fixed run length, or 0 to enumerate every valid length.
        """
        if (repeat_num < min_serial):
            # A requested length below the minimum means "enumerate all lengths".
            repeat_num = 0
        single_cards = sorted(list(set(cards)))
        seq_records = list()
        moves = list()
        start = i = 0
        longest = 1
        # Scan for maximal runs of consecutive ranks: records (start index, length).
        while (i < len(single_cards)):
            if (((i + 1) < len(single_cards)) and ((single_cards[(i + 1)] - single_cards[i]) == 1)):
                longest += 1
                i += 1
            else:
                seq_records.append((start, longest))
                i += 1
                start = i
                longest = 1
        for seq in seq_records:
            if (seq[1] < min_serial):
                continue
            (start, longest) = (seq[0], seq[1])
            longest_list = single_cards[start:(start + longest)]
            if (repeat_num == 0):
                # Slide every window length from min_serial up to the run length.
                steps = min_serial
                while (steps <= longest):
                    index = 0
                    while ((steps + index) <= longest):
                        target_moves = sorted((longest_list[index:(index + steps)] * repeat))
                        moves.append(target_moves)
                        index += 1
                    steps += 1
            else:
                # Fixed window length only.
                if (longest < repeat_num):
                    continue
                index = 0
                while ((index + repeat_num) <= longest):
                    target_moves = sorted((longest_list[index:(index + repeat_num)] * repeat))
                    moves.append(target_moves)
                    index += 1
        return moves

    def gen_type_1_single(self):
        # One move per distinct rank.
        self.single_card_moves = []
        for i in set(self.cards_list):
            self.single_card_moves.append([i])
        return self.single_card_moves

    def gen_type_2_pair(self):
        self.pair_moves = []
        for (k, v) in self.cards_dict.items():
            if (v >= 2):
                self.pair_moves.append([k, k])
        return self.pair_moves

    def gen_type_3_triple(self):
        self.triple_cards_moves = []
        for (k, v) in self.cards_dict.items():
            if (v >= 3):
                self.triple_cards_moves.append([k, k, k])
        return self.triple_cards_moves

    def gen_type_4_bomb(self):
        # Four of a kind.
        self.bomb_moves = []
        for (k, v) in self.cards_dict.items():
            if (v == 4):
                self.bomb_moves.append([k, k, k, k])
        return self.bomb_moves

    def gen_type_5_king_bomb(self):
        # 20/30 are presumably the black/red joker encodings — confirm.
        self.final_bomb_moves = []
        if ((20 in self.cards_list) and (30 in self.cards_list)):
            self.final_bomb_moves.append([20, 30])
        return self.final_bomb_moves

    def gen_type_6_3_1(self):
        # Triple plus a single kicker of a different rank.
        result = []
        for t in self.single_card_moves:
            for i in self.triple_cards_moves:
                if (t[0] != i[0]):
                    result.append((t + i))
        return result

    def gen_type_7_3_2(self):
        # Triple plus a pair kicker of a different rank.
        result = list()
        for t in self.pair_moves:
            for i in self.triple_cards_moves:
                if (t[0] != i[0]):
                    result.append((t + i))
        return result

    def gen_type_8_serial_single(self, repeat_num=0):
        return self._gen_serial_moves(self.cards_list, MIN_SINGLE_CARDS, repeat=1, repeat_num=repeat_num)

    def gen_type_9_serial_pair(self, repeat_num=0):
        single_pairs = list()
        for (k, v) in self.cards_dict.items():
            if (v >= 2):
                single_pairs.append(k)
        return self._gen_serial_moves(single_pairs, MIN_PAIRS, repeat=2, repeat_num=repeat_num)

    def gen_type_10_serial_triple(self, repeat_num=0):
        single_triples = list()
        for (k, v) in self.cards_dict.items():
            if (v >= 3):
                single_triples.append(k)
        return self._gen_serial_moves(single_triples, MIN_TRIPLES, repeat=3, repeat_num=repeat_num)

    def gen_type_11_serial_3_1(self, repeat_num=0):
        # Airplane with single kickers: one extra single per triple in the chain.
        serial_3_moves = self.gen_type_10_serial_triple(repeat_num=repeat_num)
        serial_3_1_moves = list()
        for s3 in serial_3_moves:
            s3_set = set(s3)
            new_cards = [i for i in self.cards_list if (i not in s3_set)]
            subcards = select(new_cards, len(s3_set))
            for i in subcards:
                serial_3_1_moves.append((s3 + i))
        # groupby dedupes adjacent identical moves.
        return list((k for (k, _) in itertools.groupby(serial_3_1_moves)))

    def gen_type_12_serial_3_2(self, repeat_num=0):
        # Airplane with pair kickers.
        serial_3_moves = self.gen_type_10_serial_triple(repeat_num=repeat_num)
        serial_3_2_moves = list()
        pair_set = sorted([k for (k, v) in self.cards_dict.items() if (v >= 2)])
        for s3 in serial_3_moves:
            s3_set = set(s3)
            pair_candidates = [i for i in pair_set if (i not in s3_set)]
            subcards = select(pair_candidates, len(s3_set))
            for i in subcards:
                serial_3_2_moves.append(sorted((s3 + (i * 2))))
        return serial_3_2_moves

    def gen_type_13_4_2(self):
        # Bomb with two single kickers.
        four_cards = list()
        for (k, v) in self.cards_dict.items():
            if (v == 4):
                four_cards.append(k)
        result = list()
        for fc in four_cards:
            cards_list = [k for k in self.cards_list if (k != fc)]
            subcards = select(cards_list, 2)
            for i in subcards:
                result.append((([fc] * 4) + i))
        return list((k for (k, _) in itertools.groupby(result)))

    def gen_type_14_4_22(self):
        # Bomb with two pair kickers.
        four_cards = list()
        for (k, v) in self.cards_dict.items():
            if (v == 4):
                four_cards.append(k)
        result = list()
        for fc in four_cards:
            cards_list = [k for (k, v) in self.cards_dict.items() if ((k != fc) and (v >= 2))]
            subcards = select(cards_list, 2)
            for i in subcards:
                result.append((([fc] * 4) + [i[0], i[0], i[1], i[1]]))
        return result

    def gen_moves(self):
        """Concatenate every move family into one list."""
        moves = []
        moves.extend(self.gen_type_1_single())
        moves.extend(self.gen_type_2_pair())
        moves.extend(self.gen_type_3_triple())
        moves.extend(self.gen_type_4_bomb())
        moves.extend(self.gen_type_5_king_bomb())
        moves.extend(self.gen_type_6_3_1())
        moves.extend(self.gen_type_7_3_2())
        moves.extend(self.gen_type_8_serial_single())
        moves.extend(self.gen_type_9_serial_pair())
        moves.extend(self.gen_type_10_serial_triple())
        moves.extend(self.gen_type_11_serial_3_1())
        moves.extend(self.gen_type_12_serial_3_2())
        moves.extend(self.gen_type_13_4_2())
        moves.extend(self.gen_type_14_4_22())
        return moves
def p2g():
    """MPM particle-to-grid scatter (Taichi kernel body).

    Uses quadratic B-spline weights to scatter particle momentum (including
    an affine term and the gradient-based force) and mass onto the grid.
    NOTE(review): the source shows no decorator — presumably '@ti.kernel'
    was lost in extraction; confirm.
    """
    for p in x:
        base = ti.cast(x[p] * inv_dx - 0.5, ti.i32)
        fx = x[p] * inv_dx - ti.cast(base, float)
        # Quadratic B-spline interpolation weights for the 3x3 neighborhood.
        w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2]
        affine = p_mass * C[p]
        for i in ti.static(range(3)):
            for j in ti.static(range(3)):
                I = ti.Vector([i, j])
                dpos = (float(I) - fx) * dx
                weight = w[i].x * w[j].y
                # BUG FIX: the source read '(affine dpos)' — the matrix-vector
                # product operator '@' was lost.
                grid_v[base + I] += weight * (p_mass * v[p] - dt * x.grad[p] + affine @ dpos)
                grid_m[base + I] += weight * p_mass
@compare_numpy_output(check_dtype=True)  # NOTE(review): restored from garbled '_numpy_output(...)' residue — confirm decorator name against upstream.
def test_ufunc_radians_c(A: dace.complex64[10]):
    """np.radians on complex64 input must match NumPy output and dtype."""
    return np.radians(A)
def fix_wav_headers(data):
    """Patch the RIFF size and final 'data' chunk size fields in place.

    *data* is a mutable byte buffer. No-op unless the last chunk is 'data';
    files over 4 GiB are rejected since the size fields are 32-bit.
    """
    headers = extract_wav_headers(data)
    if not headers or headers[-1].id != b'data':
        return
    if len(data) > 2 ** 32:
        raise CouldntDecodeError('Unable to process >4GB files')
    # RIFF chunk size = total length minus the 8-byte RIFF header itself.
    data[4:8] = struct.pack('<I', len(data) - 8)
    data_pos = headers[-1].position
    # data chunk size = bytes following its own 8-byte header.
    data[data_pos + 4:data_pos + 8] = struct.pack('<I', len(data) - data_pos - 8)
class StackedCell(nn.Module):
    """Stack of RNN cells applied at one time step, with optional inter-layer
    dropout, residual connections and weight normalization.

    forward(inputs, hidden) -> (output, next_hidden), where hidden indexes the
    layer dimension first (tuples, e.g. LSTM (h, c), are handled recursively).
    """

    def __init__(self, input_size, hidden_size, num_layers=1, dropout=0,
                 bias=True, rnn_cell=nn.LSTMCell, residual=False, weight_norm=False):
        super(StackedCell, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.residual = residual
        self.layers = nn.ModuleList()
        for _ in range(num_layers):
            rnn = rnn_cell(input_size, hidden_size, bias=bias)
            if weight_norm:
                # BUG FIX: the original called wn(rnn_cell), wrapping the cell
                # *class* instead of the freshly constructed instance.
                rnn = wn(rnn)
            self.layers.append(rnn)
            input_size = hidden_size  # deeper layers consume the previous layer's output

    def forward(self, inputs, hidden):
        def select_layer(h_state, i):
            # hidden may be a tuple (e.g. LSTM (h, c)); pick layer i from each part.
            if isinstance(h_state, tuple):
                return tuple([select_layer(s, i) for s in h_state])
            else:
                return h_state[i]

        next_hidden = []
        for i, layer in enumerate(self.layers):
            next_hidden_i = layer(inputs, select_layer(hidden, i))
            output = next_hidden_i[0] if isinstance(next_hidden_i, tuple) else next_hidden_i
            if i + 1 < self.num_layers:
                # Dropout between layers only, never after the last one.
                output = self.dropout(output)
            if self.residual and inputs.size(-1) == output.size(-1):
                inputs = output + inputs
            else:
                inputs = output
            next_hidden.append(next_hidden_i)
        if isinstance(hidden, tuple):
            # Re-stack per-layer states back into layer-major tensors.
            next_hidden = tuple([torch.stack(h) for h in zip(*next_hidden)])
        else:
            next_hidden = torch.stack(next_hidden)
        return (inputs, next_hidden)
def minimize_partition(split, args):
    """Convert one CoNLL split into a jsonlines file of minimized documents."""
    input_path = path.join(args.input_dir, '{}.conll'.format(split))
    output_path = path.join(args.output_dir, '{}.{}.jsonlines'.format(split, args.seg_len))
    print('Minimizing {}'.format(input_path))
    # Group lines per document: a begin-document marker opens a new bucket,
    # '#end document' lines are dropped, everything else joins the current doc.
    documents = []
    with open(input_path, 'r') as input_file:
        for line in input_file.readlines():
            begin_match = re.match(conll.BEGIN_DOCUMENT_REGEX, line)
            if begin_match:
                doc_key = conll.get_doc_key(begin_match.group(1), begin_match.group(2))
                documents.append((doc_key, []))
            elif line.startswith('#end document'):
                continue
            else:
                documents[-1][1].append(line)
    count = 0
    with open(output_path, 'w') as output_file:
        for document_lines in documents:
            document = get_document(document_lines, args)
            output_file.write(json.dumps(document))
            output_file.write('\n')
            count += 1
    print('Wrote {} documents to {}'.format(count, output_path))
def test_parse_batch(hparams, dummy_data):
    """parse_batch must keep padded shapes consistent with the length tensors."""
    model = OverFlow(hparams)
    inputs, targets = model.parse_batch(dummy_data)
    text_padded, input_lengths, mel_padded, max_len, mel_lengths = inputs
    mel_padded, _ = targets
    # Text padding spans exactly max_len; mel padding spans the longest mel.
    assert text_padded.shape[1] == max_len
    assert mel_padded.shape[2] == torch.max(mel_lengths).item()
('data.build_command.shutil.copy')
('data.build_command.Shell.exec')
# NOTE(review): the two bare strings above look like garbled mock decorators
# (presumably @patch('data.build_command.shutil.copy') and
# @patch('data.build_command.Shell.exec'), matching the shell_mock/copy_mock
# parameters below) — confirm against upstream.
class TestAntCommand():
    """Tests AntCommand's dependency extraction from `ant -debug -verbose` output."""

    def setup(self):
        self.logger = logging.getLogger('test')

    def test_compile_with_ant(self, shell_mock, copy_mock):
        # One javac invocation: classpath entries become the dependencies.
        shell_mock.return_value = "[javac] Compilation arguments:\n[javac] '-d'\n[javac] '/project/build'\n[javac] '-classpath'\n[javac] '/project/build/classes:/path/dependency1.jar:/path/dependency2.jar'"
        actual_dependencies = AntCommand('ant', []).execute('-project_dir-', self.logger)
        assert_equals(shell_mock.mock_calls[0][1], ('ant -debug -verbose',))
        assert_in('/path/dependency1.jar', actual_dependencies)
        assert_in('/path/dependency2.jar', actual_dependencies)

    def test_compile_with_ant_multi_build(self, shell_mock, copy_mock):
        # Two javac invocations in one build: both classpaths are collected.
        shell_mock.return_value = "[javac] Compilation arguments:\n [javac] '-classpath'\n [javac] '/project/build:/path/dependency1.jar'\n --- some intermediate output ---\n [javac] '-classpath'\n [javac] '/path/dependency2.jar'"
        actual_dependencies = AntCommand('ant', []).execute('-project_dir-', self.logger)
        assert_equals(shell_mock.mock_calls[0][1], ('ant -debug -verbose',))
        assert_in('/path/dependency1.jar', actual_dependencies)
        assert_in('/path/dependency2.jar', actual_dependencies)

    def test_error_outputs_error_stream(self, shell_mock, copy_mock):
        # A failing shell command must surface its stderr in the exception output.
        error = '-error-'
        shell_mock.side_effect = CommandFailedError('ant', '', error)
        expected_error_output = ('\n' + error)
        try:
            AntCommand('ant', []).execute('-project_dir-', self.logger)
        except CommandFailedError as e:
            assert_equals(expected_error_output, e.output)
def main():
    """Entry point: seed RNGs, then launch one worker per GPU (distributed
    mode) or a single worker."""
    args = parser.parse_args()
    if args.seed is not None:
        # Repeatable runs; cudnn.deterministic may slow training down.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # world_size becomes the total process count across all nodes.
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main_worker(args.gpu, ngpus_per_node, args)
class FPElement(IndexedFreeModuleElement):
    """Element of a finitely presented module over a graded algebra."""

    def lift_to_free(self):
        """Return a preimage of this element in the free cover module."""
        C = self.parent()._j.codomain()
        return C(self.dense_coefficient_list())

    @cached_method  # NOTE(review): restored from garbled '_method' residue — Sage caches degree(); confirm.
    def degree(self):
        """Degree of this element.

        Raises ValueError for the zero element, whose degree is undefined.
        """
        if self.is_zero():
            raise ValueError('the zero element does not have a well-defined degree')
        return self.lift_to_free().degree()

    def dense_coefficient_list(self, order=None):
        """Coefficients in the given index order (default: the parent's)."""
        if order is None:
            order = self.parent()._indices
        return [self[i] for i in order]

    def _lmul_(self, a):
        """Left scalar multiplication, delegated to the free cover."""
        return self.parent()((a * self.lift_to_free()))

    def vector_presentation(self):
        """Coordinate vector in the parent's graded quotient, or None for zero."""
        try:
            degree = self.lift_to_free().degree()
        except ValueError:
            # Zero element: no well-defined degree, hence no presentation.
            return None
        F_n = self.parent().vector_presentation(degree)
        return F_n.quotient_map()(self.lift_to_free().vector_presentation())

    def __bool__(self):
        pres = self.vector_presentation()
        if pres is None:
            return False
        return bool(pres)

    def __eq__(self, other):
        try:
            return (self - other).is_zero()
        except TypeError:
            # Elements of incomparable parents are never equal.
            return False

    def normalize(self):
        """Return the canonical representative of this element's coset."""
        if self.is_zero():
            return self.parent().zero()
        v = self.vector_presentation()
        return self.parent().element_from_coordinates(v, self.degree())
class PipXmlrpcTransport(xmlrpc_client.Transport):
    """XML-RPC transport that routes requests through pip's requests session."""

    def __init__(self, index_url, session, use_datetime=False):
        xmlrpc_client.Transport.__init__(self, use_datetime)
        # Only the scheme of the index URL matters; host/path come per request.
        self._scheme = urllib_parse.urlparse(index_url).scheme
        self._session = session

    def request(self, host, handler, request_body, verbose=False):
        url = urllib_parse.urlunparse((self._scheme, host, handler, None, None, None))
        try:
            headers = {'Content-Type': 'text/xml'}
            response = self._session.post(url, data=request_body, headers=headers, stream=True)
            response.raise_for_status()
            self.verbose = verbose
            return self.parse_response(response.raw)
        except requests.HTTPError as exc:
            # Log the failing URL before propagating so callers see context.
            logger.critical('HTTP error %s while getting %s', exc.response.status_code, url)
            raise
class HDFSPreprocessor(OpenSetPreprocessor):
    """Preprocessor for HDFS logs: assigns serial ids to block-id groups and
    derives anomaly labels from a ground-truth CSV."""

    def __init__(self, config: PreprocessorConfig, label_file: str):
        super().__init__(config)
        self.id_separator = ' '
        # CSV with columns 'BlockId' and 'Label' ('Anomaly'/'Normal').
        self.label_file = label_file

    def _get_labels(self, logrecord: LogRecordObject):
        """Return 1 for records whose block ids intersect the anomaly set, else 0."""
        blk_df = pd.read_csv(self.label_file, header=0)
        anomaly_blk = set(blk_df[(blk_df['Label'] == 'Anomaly')]['BlockId'])
        # Map each serial span id back to its set of predefined block ids.
        block_ids = logrecord.span_id[constants.SPAN_ID].apply((lambda x: set(self.serial_id_to_predefined_id_map[x].split(self.id_separator))))
        labels = block_ids.apply((lambda x: int((len(x.intersection(anomaly_blk)) > 0))))
        return labels

    def _get_ids(self, logrecord: LogRecordObject):
        """Assign each distinct block-id group a serial integer id.

        Side effect: stores both forward and reverse maps on self for use by
        _get_labels.
        """
        predefined_ids = logrecord.body[' BLOCK ']
        # Canonicalize each group of block ids into one separator-joined string.
        predefined_ids = predefined_ids.apply((lambda x: self.id_separator.join(set(x))))
        # NOTE(review): duplicate keys keep only their last enumerate index — confirm intended.
        self.predefined_to_serial_id_map = {k: i for (i, k) in enumerate(list(predefined_ids))}
        self.serial_id_to_predefined_id_map = {v: k for (k, v) in self.predefined_to_serial_id_map.items()}
        predefined_ids = predefined_ids.apply((lambda x: self.predefined_to_serial_id_map[x])).astype(int)
        return predefined_ids
class DynamicMemory(tf.contrib.rnn.RNNCell):
    """Dynamic-memory RNN cell: one hidden block per key, updated through a
    gate that mixes content similarity and key addressing."""

    def __init__(self, memory_slots, memory_size, keys, activation=prelu, initializer=tf.random_normal_initializer(stddev=0.1)):
        (self.m, self.mem_sz, self.keys) = (memory_slots, memory_size, keys)
        (self.activation, self.init) = (activation, initializer)
        # Shared square transforms: U for hidden state, V for keys, W for input.
        self.U = tf.get_variable('U', [self.mem_sz, self.mem_sz], initializer=self.init)
        self.V = tf.get_variable('V', [self.mem_sz, self.mem_sz], initializer=self.init)
        self.W = tf.get_variable('W', [self.mem_sz, self.mem_sz], initializer=self.init)

    def state_size(self):
        # NOTE(review): RNNCell usually declares state_size/output_size as
        # @property — confirm this plain-method form matches the call sites.
        return [self.mem_sz for _ in range(self.m)]

    def output_size(self):
        return [self.mem_sz for _ in range(self.m)]

    def zero_state(self, batch_size, dtype):
        # Each memory block starts as its (batch-tiled) key embedding.
        return [tf.tile(tf.expand_dims(key, 0), [batch_size, 1]) for key in self.keys]

    def __call__(self, inputs, state, scope=None):
        new_states = []
        for (block_id, h) in enumerate(state):
            # Gate = sigmoid(input·content + input·key) per example.
            content_g = tf.reduce_sum(tf.multiply(inputs, h), axis=[1])
            address_g = tf.reduce_sum(tf.multiply(inputs, tf.expand_dims(self.keys[block_id], 0)), axis=[1])
            g = sigmoid((content_g + address_g))
            # Candidate update combines hidden, key and input transforms.
            h_component = tf.matmul(h, self.U)
            w_component = tf.matmul(tf.expand_dims(self.keys[block_id], 0), self.V)
            s_component = tf.matmul(inputs, self.W)
            candidate = self.activation(((h_component + w_component) + s_component))
            # Gated write, then L2 normalization of the block.
            new_h = (h + tf.multiply(tf.expand_dims(g, (- 1)), candidate))
            new_h_norm = tf.nn.l2_normalize(new_h, (- 1))
            new_states.append(new_h_norm)
        # Output equals the new state (one tensor per memory block).
        return (new_states, new_states)
def register_Ns3EpcX2RlcSetupRequestHeader_methods(root_module, cls):
    """Register pybindgen bindings for ns3::EpcX2RlcSetupRequestHeader.

    Generated-style binding code: declares constructors plus every getter,
    setter and Header-interface override on *cls*.
    """
    # Constructors: copy constructor and default constructor.
    cls.add_constructor([param('ns3::EpcX2RlcSetupRequestHeader const &', 'arg0')])
    cls.add_constructor([])
    # ns3::Header virtual interface.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    # Const accessors for the header fields.
    cls.add_method('GetDrbid', 'uint8_t', [], is_const=True)
    cls.add_method('GetGtpTeid', 'uint32_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetLcInfo', 'ns3::LteEnbCmacSapProvider::LcInfo', [], is_const=True)
    cls.add_method('GetLengthOfIes', 'uint32_t', [], is_const=True)
    cls.add_method('GetLogicalChannelConfig', 'ns3::LteRrcSap::LogicalChannelConfig', [])
    cls.add_method('GetLteRnti', 'uint16_t', [], is_const=True)
    cls.add_method('GetMmWaveRnti', 'uint16_t', [], is_const=True)
    cls.add_method('GetNumberOfIes', 'uint32_t', [], is_const=True)
    cls.add_method('GetRlcConfig', 'ns3::LteRrcSap::RlcConfig', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSourceCellId', 'uint16_t', [], is_const=True)
    cls.add_method('GetTargetCellId', 'uint16_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    # Mutators for the header fields.
    cls.add_method('SetDrbid', 'void', [param('uint8_t', 'drbid')])
    cls.add_method('SetGtpTeid', 'void', [param('uint32_t', 'gtpTeid')])
    cls.add_method('SetLcInfo', 'void', [param('ns3::LteEnbCmacSapProvider::LcInfo', 'lcInfo')])
    cls.add_method('SetLogicalChannelConfig', 'void', [param('ns3::LteRrcSap::LogicalChannelConfig', 'conf')])
    cls.add_method('SetLteRnti', 'void', [param('uint16_t', 'rnti')])
    cls.add_method('SetMmWaveRnti', 'void', [param('uint16_t', 'rnti')])
    cls.add_method('SetRlcConfig', 'void', [param('ns3::LteRrcSap::RlcConfig', 'rlcConfig')])
    cls.add_method('SetSourceCellId', 'void', [param('uint16_t', 'sourceCellId')])
    cls.add_method('SetTargetCellId', 'void', [param('uint16_t', 'targetCellId')])
    return
class VGG(torch_vgg.VGG):
    """torchvision VGG variant exposing intermediate post-ReLU activations
    (r12/r22/r34/r44/r54) and allowing pooling stages to be disabled."""

    def __init__(self, lastBlock, num_classes, *args, **kwargs):
        super(VGG, self).__init__(*args, **kwargs)
        self.lastBlock = lastBlock
        self.avgpool2D = nn.AvgPool2d(kernel_size=2, stride=2)

    def modify(self, pool2None=[]):
        """Disable selected pooling stages ('4'/'5') and drop the classifier head.

        NOTE(review): mutable default argument; safe here only because the
        list is never mutated.
        """
        remove_layers = []
        filter_layers = (lambda x: [l for l in x if (getattr(self, l) is not None)])
        # features[27]/features[36] — presumably the stage-4/5 pool layers in
        # this torchvision feature layout; confirm against the config used.
        if ('4' in pool2None):
            self.features[27] = None
        if ('5' in pool2None):
            self.features[36] = None
        remove_layers += ['avgpool', 'classifier']
        for layer in filter_layers(remove_layers):
            setattr(self, layer, None)

    def forward(self, x):
        """Run the conv stages, recording post-ReLU activations.

        Pooling layers set to None by modify() act as identity.
        Returns a dict {'r12', 'r22', 'r34', 'r44', 'r54'} -> tensor.
        """
        outputs = {}
        r11 = self.features[1](self.features[0](x))
        r12 = self.features[3](self.features[2](r11))
        outputs['r12'] = r12
        p1 = (r12 if (self.features[4] is None) else self.features[4](r12))
        r21 = self.features[6](self.features[5](p1))
        r22 = self.features[8](self.features[7](r21))
        outputs['r22'] = r22
        p2 = (r22 if (self.features[9] is None) else self.features[9](r22))
        r31 = self.features[11](self.features[10](p2))
        r32 = self.features[13](self.features[12](r31))
        r33 = self.features[15](self.features[14](r32))
        r34 = self.features[17](self.features[16](r33))
        outputs['r34'] = r34
        p3 = (r34 if (self.features[18] is None) else self.features[18](r34))
        r41 = self.features[20](self.features[19](p3))
        r42 = self.features[22](self.features[21](r41))
        r43 = self.features[24](self.features[23](r42))
        r44 = self.features[26](self.features[25](r43))
        outputs['r44'] = r44
        p4 = (r44 if (self.features[27] is None) else self.features[27](r44))
        r51 = self.features[29](self.features[28](p4))
        r52 = self.features[31](self.features[30](r51))
        r53 = self.features[33](self.features[32](r52))
        r54 = self.features[35](self.features[34](r53))
        outputs['r54'] = r54
        p5 = (r54 if (self.features[36] is None) else self.features[36](r54))
        return outputs
def nearsub(train_features, train_labels, test_features, test_labels, n_comp=10):
    """Nearest-subspace classification.

    Fits an n_comp-dimensional SVD subspace per class and assigns each test
    sample to the class whose subspace leaves the smallest projection
    residual. Returns the resulting accuracy (also printed).
    """
    scores_svd = []
    classes = np.unique(test_labels)
    features_sort, _ = utils.sort_dataset(train_features, train_labels, classes=classes, stack=False)
    fd = features_sort[0].shape[1]
    if n_comp >= fd:
        # TruncatedSVD requires n_components < n_features.
        n_comp = fd - 1
    for j in np.arange(len(classes)):
        svd = TruncatedSVD(n_components=n_comp).fit(features_sort[j])
        svd_subspace = svd.components_.T
        # Residual of projecting test features onto the class-j subspace.
        # BUG FIX: both matrix products lost their '@' operators in the source
        # ('svd_subspace svd_subspace.T' and '... test_features.T').
        svd_j = (np.eye(fd) - svd_subspace @ svd_subspace.T) @ test_features.T
        score_svd_j = np.linalg.norm(svd_j, ord=2, axis=0)
        scores_svd.append(score_svd_j)
    test_predict_svd = np.argmin(scores_svd, axis=0)
    acc_svd = compute_accuracy(classes[test_predict_svd], test_labels)
    print('SVD: {}'.format(acc_svd))
    return acc_svd
def process(filename, dataset):
    """Load one song and count notes per configured track name.

    Returns an int array aligned with CONFIG[dataset]['programs'] keys.
    """
    music = muspy.load(filename)
    names = list(CONFIG[dataset]['programs'].keys())
    counts = np.zeros(len(names), int)
    for track in music.tracks:
        # Drum tracks and empty tracks are ignored.
        if track.is_drum or not track.notes:
            continue
        # NOTE(review): assignment (not +=) keeps only the last track per name — confirm intended.
        counts[names.index(track.name)] = len(track.notes)
    return counts
def set_lr(optimizer, lr):
    """Set every param group's learning rate to *lr*; returns the optimizer
    so calls can be chained."""
    for group in optimizer.param_groups:
        group['lr'] = lr
    return optimizer
class Curve(Element):
    """Curve element: a prioritized sequence of Vertex children."""

    def __init__(self, point, Pr):
        # Pr: priority value, stored as a string attribute on the element.
        Element.__init__(self, 'Curve', Priority=str(Pr))
        # NOTE(review): iterates the module-level LP, not the 'point' parameter,
        # which is unused — looks like a bug or LP is deliberate global state;
        # confirm against call sites.
        for P in LP:
            V = Vertex(x=P[0], y=P[1], z=P[2])
            self.append(V)
def BIBD_136_6_1():
    """Return the blocks of a (136, 6, 1)-BIBD.

    Base blocks over Z_45 x Z_3 (plus a point at infinity) are closed under
    two generators — multiply-by-16 mod 45 with a Z_3 shift, and a +1 shift
    mod 45 — via RecursivelyEnumeratedSet.
    """
    from sage.sets.recursively_enumerated_set import RecursivelyEnumeratedSet
    from .incidence_structures import IncidenceStructure
    inf = (None, None)  # the point at infinity
    # Base blocks of the design.
    bibd = [((0, 0), (3, 0), (15, 0), (35, 0), (6, 2), (10, 2)),
            ((0, 0), (22, 0), (11, 1), (30, 1), (1, 2), (18, 2)),
            ((0, 0), (5, 0), (18, 1), (41, 1), (13, 2), (42, 2)),
            ((0, 0), (11, 0), (17, 0), (4, 2), (5, 2), (28, 2)),
            ((0, 0), (1, 0), (0, 1), (16, 1), (0, 2), (31, 2)),
            (inf, (0, 0), (9, 0), (18, 0), (27, 0), (36, 0))]
    # Successor function: both group actions fix the point at infinity.
    gens = (lambda B: [frozenset((((((x * 16) % 45), ((y + 1) % 3)) if ((x, y) != inf) else inf) for (x, y) in B)),
                       frozenset((((((x + 1) % 45), y) if ((x, y) != inf) else inf) for (x, y) in B))])
    bibd = RecursivelyEnumeratedSet([frozenset(e) for e in bibd], successors=gens)
    return IncidenceStructure(bibd)._blocks
def compute_builder_metrics(metric_list, train, test, recs, unseen_flag: bool, item_count: Optional[int]=None):
    """Evaluate *recs* against *test* using TorchMetricsBuilder.

    When unseen_flag is set, items already seen in *train* are removed from
    the ground truth before scoring.
    """
    builder = TorchMetricsBuilder(metric_list, top_k=TOP_K, item_count=item_count)
    builder.reset()
    predictions = _convert_recs_to_tensor(recs)
    ground_truth = _convert_data_to_tensor(test, -1)
    train_tensor = _convert_data_to_tensor(train, -2)
    if unseen_flag:
        ground_truth = _compute_unseen_ground_truth(ground_truth, train_tensor)
    builder.add_prediction(predictions=predictions, ground_truth=ground_truth, train=train_tensor)
    return builder.get_metrics()
class TFBlenderbotForConditionalGeneration(metaclass=DummyObject):
    """Import-time placeholder: raises an informative error on construction
    unless the TensorFlow backend is available."""

    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Raises if the 'tf' backend is missing.
        requires_backends(self, ['tf'])
class CsObject():
    """Abstract base for Cityscapes-style annotation objects."""
    __metaclass__ = ABCMeta  # Python-2 style ABC marker; a no-op under Python 3

    def __init__(self, objType):
        self.objectType = objType
        self.label = ''
        # Soft-delete flag and review metadata.
        self.deleted = 0
        self.verified = 0
        self.date = ''
        self.user = ''
        self.draw = True

    def __str__(self):
        pass

    def fromJsonText(self, jsonText, objId=(-1)):
        pass

    def toJsonText(self):
        pass

    def updateDate(self):
        """Stamp self.date with the current time, trying several US locales.

        BUG FIX: the source flattened this into a single try with duplicate
        `except locale.Error` clauses, so every fallback after the first was
        unreachable and a second failure escaped. Restored as a nested
        fallback chain so each candidate locale is actually tried.
        """
        try:
            locale.setlocale(locale.LC_ALL, 'en_US.utf8')
        except locale.Error:
            try:
                locale.setlocale(locale.LC_ALL, 'en_US')
            except locale.Error:
                try:
                    locale.setlocale(locale.LC_ALL, 'us_us.utf8')
                except locale.Error:
                    try:
                        locale.setlocale(locale.LC_ALL, 'us_us')
                    except Exception:
                        # No suitable locale: strftime still works with the default.
                        pass
        self.date = datetime.datetime.now().strftime('%d-%b-%Y %H:%M:%S')

    def delete(self):
        """Mark the object deleted and stop drawing it."""
        self.deleted = 1
        self.draw = False
def p_call_build_packed_args(pos, positional_args, keyword_args):
    """Build (arg_tuple, keyword_dict) expression nodes for a call with unpacking.

    positional_args mixes plain-argument lists and starred expressions;
    keyword_args mixes (name, value) pairs, dict literals and **-expressions.
    """
    keyword_dict = None
    # Plain runs become TupleNodes, starred args become AsTupleNodes, then
    # everything is folded into one tuple with '+'.
    subtuples = [(ExprNodes.TupleNode(pos, args=arg) if isinstance(arg, list) else ExprNodes.AsTupleNode(pos, arg=arg)) for arg in positional_args]
    arg_tuple = reduce(partial(ExprNodes.binop_node, pos, '+'), subtuples)
    if keyword_args:
        kwargs = []
        dict_items = []
        for item in keyword_args:
            if isinstance(item, tuple):
                # Explicit name=value keyword.
                (key, value) = item
                dict_items.append(ExprNodes.DictItemNode(pos=key.pos, key=key, value=value))
            elif item.is_dict_literal:
                # **{...} literal: merge its items into the current run.
                dict_items.extend(item.key_value_pairs)
            else:
                # Arbitrary **expr: flush the pending literal run first so the
                # original evaluation order is preserved.
                if dict_items:
                    kwargs.append(ExprNodes.DictNode(dict_items[0].pos, key_value_pairs=dict_items, reject_duplicates=True))
                    dict_items = []
                kwargs.append(item)
        if dict_items:
            kwargs.append(ExprNodes.DictNode(dict_items[0].pos, key_value_pairs=dict_items, reject_duplicates=True))
        if kwargs:
            if ((len(kwargs) == 1) and kwargs[0].is_dict_literal):
                # Single literal dict: use it directly.
                keyword_dict = kwargs[0]
            else:
                # Multiple sources: merged at runtime with duplicate checking.
                keyword_dict = ExprNodes.MergedDictNode(pos, keyword_args=kwargs)
    return (arg_tuple, keyword_dict)
class GTestListTestsOutputUnitTest(gtest_test_utils.TestCase):
    """Checks --gtest_list_tests output written via --gtest_output (XML/JSON)."""

    def testXml(self):
        self._TestOutput('xml', EXPECTED_XML)

    def testJSON(self):
        self._TestOutput('json', EXPECTED_JSON)

    def _GetOutput(self, out_format):
        """Run the listing binary with --gtest_output=<fmt>:<file> and return
        the produced file's contents."""
        file_path = os.path.join(gtest_test_utils.GetTempDir(), ('test_out.' + out_format))
        gtest_prog_path = gtest_test_utils.GetTestExecutablePath('gtest_list_output_unittest_')
        command = [gtest_prog_path, ('%s=%s:%s' % (GTEST_OUTPUT_FLAG, out_format, file_path)), '--gtest_list_tests']
        environ_copy = os.environ.copy()
        p = gtest_test_utils.Subprocess(command, env=environ_copy, working_dir=gtest_test_utils.GetTempDir())
        self.assertTrue(p.exited)
        self.assertEqual(0, p.exit_code)
        self.assertTrue(os.path.isfile(file_path))
        with open(file_path) as f:
            result = f.read()
        return result

    def _TestOutput(self, test_format, expected_output):
        """Match actual output line-by-line against expected regex lines."""
        actual = self._GetOutput(test_format)
        actual_lines = actual.splitlines()
        expected_lines = expected_output.splitlines()
        line_count = 0
        for actual_line in actual_lines:
            expected_line = expected_lines[line_count]
            # Each expected line is a regex (varying paths/timestamps).
            expected_line_re = re.compile(expected_line.strip())
            self.assertTrue(expected_line_re.match(actual_line.strip()), ('actual output of "%s",\nwhich does not match expected regex of "%s"\non line %d' % (actual, expected_output, line_count)))
            line_count = (line_count + 1)
def test_series_expansion():
    """Spot-check specific Taylor-series coefficients against known values."""
    x = Symbol('x')
    expansion = series(sin(1 + x), x, n=10)
    assert expansion.coeff(x, 7) == -cos(1) / 5040
    x = Symbol('x')
    expansion = series(1 / (1 - x), x, n=10)
    assert expansion.coeff(x, 9) == 1
    expansion = series(sin(x) * cos(x), x, n=10)
    assert expansion.coeff(x, 8) == 0
    assert expansion.coeff(x, 9) == Integer(2) / Integer(2835)
    expansion = series(E ** x, x, n=10)
    assert expansion.coeff(x, 9) == Integer(1) / Integer(362880)
    # two equivalent forms of the same expansion must agree
    lhs = series(1 / sqrt(4 - x), x, n=50)
    rhs = series((4 - x) ** (Integer(-1) / Integer(2)), x, n=50)
    assert lhs.coeff(x, 49) == rhs.coeff(x, 49)
_model  # NOTE(review): mangled decorator residue — presumably "@register_model"; confirm against the original source
def ecaresnet50d_pruned(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Construct a pruned ECA-ResNet-50-D.

    Args:
        pretrained: load pretrained weights for this variant.
        num_classes: classifier output size.
        in_chans: number of input image channels.
    """
    variant = 'ecaresnet50d_pruned'
    default_cfg = default_cfgs[variant]
    model = ResNet(Bottleneck, [3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, num_classes=num_classes, in_chans=in_chans, block_args=dict(attn_layer='eca'), **kwargs)
    model.default_cfg = default_cfg
    # apply the pruning spec stored under the variant name
    model = adapt_model_from_file(model, variant)
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def categorical_hinge(y_true, y_pred):
    """Categorical hinge loss: max(0, 1 + max(negative) - positive)."""
    positive = K.sum(y_true * y_pred, axis=-1)
    negative = K.max((1.0 - y_true) * y_pred, axis=-1)
    return K.maximum(0.0, negative - positive + 1.0)
def set_expert_model_parallel_attributes(tensor: torch.Tensor, is_parallel: bool):
    """Tag *tensor* with its expert-model-parallel flag.

    The tensor must not already carry the attribute (asserted).
    """
    assert not hasattr(tensor, 'expert_model_parallel')
    tensor.expert_model_parallel = is_parallel
class ConstantInitializer(BaseInitializer):
    """Initializer that fills arrays of any shape with one constant value."""

    def __init__(self, value=0):
        # the constant every generated array is filled with
        self.value = value

    def __call__(self, shape):
        constant_array = np.ones(shape) * self.value
        return random_float_type(constant_array)
class NativeScalerWithGradNormCount():
    """Wraps torch.cuda.amp.GradScaler and reports the gradient norm on each
    optimizer update (None when gradients are only accumulated)."""
    state_dict_key = 'amp_scaler'

    def __init__(self):
        self._scaler = torch.cuda.amp.GradScaler()

    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True, layer_names=None):
        # scaled backward pass
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if not update_grad:
            # gradient accumulation step: no unscale/step, no norm to report
            return None
        # gradients must be unscaled before measuring or clipping their norm
        self._scaler.unscale_(optimizer)
        if clip_grad is not None:
            assert parameters is not None
            norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
        else:
            norm = get_grad_norm_(parameters, layer_names=layer_names)
        self._scaler.step(optimizer)
        self._scaler.update()
        return norm

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
def main():
    """Parse CLI flags, select build configuration(s), and build each one."""
    selected_configs = set()
    # NOTE: appending extra args mutates the module-level default list,
    # matching the original behavior
    extra_make_args = DEFAULT_MAKE_PARAMETERS
    for arg in sys.argv[1:]:
        if arg in ('--help', '-h'):
            print_usage()
            sys.exit(0)
        elif arg == '--debug':
            selected_configs.add(DEBUG_CONFIG_NAME)
        elif arg == '--all':
            selected_configs |= set(CONFIGS.keys())
        elif arg in CONFIGS:
            selected_configs.add(arg)
        else:
            # anything unrecognized is forwarded to make
            extra_make_args.append(arg)
    if not selected_configs:
        selected_configs.add(DEFAULT_CONFIG_NAME)
    for name in selected_configs:
        build(name, CONFIGS[name], extra_make_args)
class IteratorAlgorithm(enum.Enum):
    """Convolution iterator algorithm variants (mirrors CUTLASS's IteratorAlgorithm enum)."""
    Analytic = enum_auto()
    Optimized = enum_auto()
    FixedChannels = enum_auto()
    FewChannels = enum_auto()
    FixedStrideDilation = enum_auto()
def post_process():
    """Spawn one worker process per sample group, then wait for all of them."""
    all_samples = load_data(RAW_FILE)
    workers = []
    for key, samples in all_samples.items():
        worker = mp.Process(target=single_post_process, args=(key, samples))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
def get_callback_version():
    """Return the registered callback's version, or None when no callback exists."""
    callback = _get_callback('get_callback_version')
    return callback.get_callback_version() if callback else None
def test_predict_all_same(create_X_y, create_pool_all_agree):
    """With a pool whose members all predict the same class, the Oracle's
    predictions should equal the labels with every 1 mapped to 0."""
    (X, y) = create_X_y
    # BUG FIX: the original did `expected = y`, so the in-place edit below
    # also rewrote `y` before it was passed to fit()/predict(); copy instead.
    expected = y.copy()
    oracle_test = Oracle(create_pool_all_agree)
    oracle_test.fit(X, y)
    expected[expected == 1] = 0
    predicted_labels = oracle_test.predict(X, y)
    assert np.equal(predicted_labels, expected).all()
class HingeEmbeddingCriterion(Criterion):
    """Legacy-nn hinge embedding loss: for y == 1 the loss is the input
    itself; for y == -1 it is max(0, margin - input)."""

    def __init__(self, margin=1, sizeAverage=True):
        super(HingeEmbeddingCriterion, self).__init__()
        self.margin = margin
        self.sizeAverage = sizeAverage
        # scratch tensor, lazily allocated to match the input's type
        self.buffer = None

    def updateOutput(self, input, y):
        if (self.buffer is None):
            self.buffer = input.new()
        # term 1: sum of inputs where y == 1 (entries with y == -1 zeroed out)
        self.buffer.resize_as_(input).copy_(input)
        self.buffer[torch.eq(y, (- 1.0))] = 0
        self.output = self.buffer.sum().item()
        # term 2: sum of clamp(margin - input, min=0) where y == -1
        self.buffer.fill_(self.margin).add_((- 1), input)
        self.buffer.clamp_(min=0)
        self.buffer[torch.eq(y, 1.0)] = 0
        self.output = (self.output + self.buffer.sum().item())
        if self.sizeAverage:
            self.output = (self.output / input.nelement())
        return self.output

    def updateGradInput(self, input, y):
        # gradient equals y, except zero where y == -1 and input > margin
        # (the clamp made that branch flat)
        self.gradInput.resize_as_(input).copy_(y)
        self.gradInput[torch.mul(torch.eq(y, (- 1)), torch.gt(input, self.margin))] = 0
        if self.sizeAverage:
            self.gradInput.mul_((1.0 / input.nelement()))
        return self.gradInput
(aspect_ratio='automatic', align='mid', weights=None, range=None, bins=10, edgecolor='black')  # NOTE(review): mangled decorator residue — presumably "@options(...)" supplying these defaults; confirm
def histogram(datalist, **options):
    """Return a Graphics object containing a histogram of *datalist*.

    Keyword options are forwarded to the Histogram primitive; show-related
    options are extracted and stashed on the Graphics object.
    """
    g = Graphics()
    g._set_extra_kwds(Graphics._extract_kwds_for_show(options))
    g.add_primitive(Histogram(datalist, options=options))
    return g
def load_one_hot_char_embeddings(char_vocab_path):
    """Load a character vocabulary and build one-hot embeddings for it.

    Args:
        char_vocab_path: path to a text file with one character per line.

    Returns:
        (char_embeds, char_to_ix): an identity matrix whose row i is the
        one-hot embedding of the character with index i, and the char ->
        index mapping. A space and an '<UNK>' entry are appended after the
        file's characters.
    """
    # BUG FIX: the original iterated over open(...) without ever closing the
    # file handle; use a context manager instead.
    with open(char_vocab_path, 'r') as vocab_file:
        vocab = [line.strip() for line in vocab_file]
    char_to_ix = {}
    for char in vocab:
        char_to_ix[char] = len(char_to_ix)
    char_to_ix[' '] = len(char_to_ix)
    char_to_ix['<UNK>'] = len(char_to_ix)
    char_embeds = np.eye(len(char_to_ix))
    return (char_embeds, char_to_ix)
def test_ast_vectorizer():
    """Interactive check that SQLASTIndexer.query() rewrites a SQL AST in place.

    NOTE(review): this is a manual debugging script rather than an automated
    test — it reads the input path from sys.argv and drops into pdb after
    every example.
    """
    in_json = sys.argv[1]
    with open(in_json) as f:
        content = json.load(f)
    ast_indexer = SQLASTIndexer()
    random.shuffle(content)
    for example in content:
        ast = example['sql']
        # keep an untouched copy so before/after can be compared side by side
        ast2 = copy.deepcopy(ast)
        print(json.dumps(ast, indent=4))
        print()
        # query() mutates ast2 in place
        ast_indexer.query(ast2)
        print(json.dumps(ast2, indent=4))
        import pdb
        pdb.set_trace()
def join_options(options):
    """Join option strings for help output, shortest prefixes first.

    Returns (joined_string, any_prefix_is_slash) so the caller can choose an
    appropriate separator for Windows-style '/' options.
    """
    decorated = []
    saw_slash_prefix = False
    for opt in options:
        prefix = split_opt(opt)[0]
        if prefix == '/':
            saw_slash_prefix = True
        decorated.append((len(prefix), opt))
    # stable sort by prefix length keeps short forms (-f) before long (--foo)
    decorated.sort(key=lambda pair: pair[0])
    joined = ', '.join(opt for (_, opt) in decorated)
    return (joined, saw_slash_prefix)
class TestVocabulary(AllenNlpTestCase):
    """Unit tests for Vocabulary construction, namespaces and (de)serialization."""

    def setUp(self):
        # fixture counts: 'a' x4, 'b' x2, 'c' x3
        token_indexer = SingleIdTokenIndexer('tokens')
        text_field = TextField([Token(t) for t in ['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c', 'c']], {'tokens': token_indexer})
        self.instance = Instance({'text': text_field})
        self.dataset = Dataset([self.instance])
        super(TestVocabulary, self).setUp()

    def test_from_dataset_respects_min_count(self):
        vocab = Vocabulary.from_dataset(self.dataset, min_count=4)
        words = vocab.get_index_to_token_vocabulary().values()
        assert ('a' in words)
        assert ('b' not in words)
        assert ('c' not in words)
        vocab = Vocabulary.from_dataset(self.dataset, min_count=1)
        words = vocab.get_index_to_token_vocabulary().values()
        assert ('a' in words)
        assert ('b' in words)
        assert ('c' in words)

    def test_from_dataset_respects_exclusive_embedding_file(self):
        # only words present in the pretrained file may enter the vocabulary
        embeddings_filename = (self.TEST_DIR + 'embeddings.gz')
        with gzip.open(embeddings_filename, 'wb') as embeddings_file:
            embeddings_file.write('a 1.0 2.3 -1.0\n'.encode('utf-8'))
            embeddings_file.write('b 0.1 0.4 -4.0\n'.encode('utf-8'))
        vocab = Vocabulary.from_dataset(self.dataset, min_count=4, pretrained_files={'tokens': embeddings_filename}, only_include_pretrained_words=True)
        words = vocab.get_index_to_token_vocabulary().values()
        assert ('a' in words)
        assert ('b' not in words)
        assert ('c' not in words)
        vocab = Vocabulary.from_dataset(self.dataset, min_count=(- 1), pretrained_files={'tokens': embeddings_filename}, only_include_pretrained_words=True)
        words = vocab.get_index_to_token_vocabulary().values()
        assert ('a' in words)
        assert ('b' in words)
        assert ('c' not in words)

    def test_from_dataset_respects_inclusive_embedding_file(self):
        # pretrained words are added on top of the dataset's own counts
        embeddings_filename = (self.TEST_DIR + 'embeddings.gz')
        with gzip.open(embeddings_filename, 'wb') as embeddings_file:
            embeddings_file.write('a 1.0 2.3 -1.0\n'.encode('utf-8'))
            embeddings_file.write('b 0.1 0.4 -4.0\n'.encode('utf-8'))
        vocab = Vocabulary.from_dataset(self.dataset, min_count=4, pretrained_files={'tokens': embeddings_filename}, only_include_pretrained_words=False)
        words = vocab.get_index_to_token_vocabulary().values()
        assert ('a' in words)
        assert ('b' in words)
        assert ('c' not in words)
        vocab = Vocabulary.from_dataset(self.dataset, min_count=(- 1), pretrained_files={'tokens': embeddings_filename}, only_include_pretrained_words=False)
        words = vocab.get_index_to_token_vocabulary().values()
        assert ('a' in words)
        assert ('b' in words)
        assert ('c' in words)

    def test_add_word_to_index_gives_consistent_results(self):
        vocab = Vocabulary()
        initial_vocab_size = vocab.get_vocab_size()
        word_index = vocab.add_token_to_namespace('word')
        assert ('word' in vocab.get_index_to_token_vocabulary().values())
        assert (vocab.get_token_index('word') == word_index)
        assert (vocab.get_token_from_index(word_index) == 'word')
        assert (vocab.get_vocab_size() == (initial_vocab_size + 1))
        # adding the same token again must be a no-op
        vocab.add_token_to_namespace('word')
        assert ('word' in vocab.get_index_to_token_vocabulary().values())
        assert (vocab.get_token_index('word') == word_index)
        assert (vocab.get_token_from_index(word_index) == 'word')
        assert (vocab.get_vocab_size() == (initial_vocab_size + 1))

    def test_namespaces(self):
        vocab = Vocabulary()
        initial_vocab_size = vocab.get_vocab_size()
        word_index = vocab.add_token_to_namespace('word', namespace='1')
        assert ('word' in vocab.get_index_to_token_vocabulary(namespace='1').values())
        assert (vocab.get_token_index('word', namespace='1') == word_index)
        assert (vocab.get_token_from_index(word_index, namespace='1') == 'word')
        assert (vocab.get_vocab_size(namespace='1') == (initial_vocab_size + 1))
        # namespaces are independent: the same token gets its own index in '2'
        word2_index = vocab.add_token_to_namespace('word2', namespace='2')
        word_index = vocab.add_token_to_namespace('word', namespace='2')
        assert ('word' in vocab.get_index_to_token_vocabulary(namespace='2').values())
        assert ('word2' in vocab.get_index_to_token_vocabulary(namespace='2').values())
        assert (vocab.get_token_index('word', namespace='2') == word_index)
        assert (vocab.get_token_index('word2', namespace='2') == word2_index)
        assert (vocab.get_token_from_index(word_index, namespace='2') == 'word')
        assert (vocab.get_token_from_index(word2_index, namespace='2') == 'word2')
        assert (vocab.get_vocab_size(namespace='2') == (initial_vocab_size + 2))

    def test_namespace_dependent_default_dict(self):
        # 'bar' exactly and anything matching '*baz' use the second default
        default_dict = _NamespaceDependentDefaultDict(['bar', '*baz'], (lambda : 7), (lambda : 3))
        assert (default_dict['foo'] == 7)
        assert (default_dict['baz'] == 3)
        assert (default_dict['bar'] == 3)
        assert (default_dict['foobaz'] == 3)

    def test_unknown_token(self):
        vocab = Vocabulary()
        oov_token = vocab._oov_token
        oov_index = vocab.get_token_index(oov_token)
        assert (oov_index == 1)
        # any unseen word maps to the OOV index
        assert (vocab.get_token_index('unseen word') == oov_index)

    def test_set_from_file_reads_padded_files(self):
        vocab_filename = (self.TEST_DIR + 'vocab_file')
        with codecs.open(vocab_filename, 'w', 'utf-8') as vocab_file:
            vocab_file.write('<S>\n')
            vocab_file.write('</S>\n')
            vocab_file.write('<UNK>\n')
            vocab_file.write('a\n')
            vocab_file.write('tricky\x0bchar\n')
            vocab_file.write('word\n')
            vocab_file.write('another\n')
        vocab = Vocabulary()
        vocab.set_from_file(vocab_filename, is_padded=True, oov_token='<UNK>')
        # the file's '<UNK>' is replaced by the default OOV token at index 3;
        # index 0 is reserved for padding
        assert (vocab._oov_token == DEFAULT_OOV_TOKEN)
        assert (vocab.get_token_index('random string') == 3)
        assert (vocab.get_token_index('<S>') == 1)
        assert (vocab.get_token_index('</S>') == 2)
        assert (vocab.get_token_index(DEFAULT_OOV_TOKEN) == 3)
        assert (vocab.get_token_index('a') == 4)
        assert (vocab.get_token_index('tricky\x0bchar') == 5)
        assert (vocab.get_token_index('word') == 6)
        assert (vocab.get_token_index('another') == 7)
        assert (vocab.get_token_from_index(0) == vocab._padding_token)
        assert (vocab.get_token_from_index(1) == '<S>')
        assert (vocab.get_token_from_index(2) == '</S>')
        assert (vocab.get_token_from_index(3) == DEFAULT_OOV_TOKEN)
        assert (vocab.get_token_from_index(4) == 'a')
        assert (vocab.get_token_from_index(5) == 'tricky\x0bchar')
        assert (vocab.get_token_from_index(6) == 'word')
        assert (vocab.get_token_from_index(7) == 'another')

    def test_set_from_file_reads_non_padded_files(self):
        # non-padded namespaces (e.g. tag sets) start at index 0, no specials
        vocab_filename = (self.TEST_DIR + 'vocab_file')
        with codecs.open(vocab_filename, 'w', 'utf-8') as vocab_file:
            vocab_file.write('B-PERS\n')
            vocab_file.write('I-PERS\n')
            vocab_file.write('O\n')
            vocab_file.write('B-ORG\n')
            vocab_file.write('I-ORG\n')
        vocab = Vocabulary()
        vocab.set_from_file(vocab_filename, is_padded=False, namespace='tags')
        assert (vocab.get_token_index('B-PERS', namespace='tags') == 0)
        assert (vocab.get_token_index('I-PERS', namespace='tags') == 1)
        assert (vocab.get_token_index('O', namespace='tags') == 2)
        assert (vocab.get_token_index('B-ORG', namespace='tags') == 3)
        assert (vocab.get_token_index('I-ORG', namespace='tags') == 4)
        assert (vocab.get_token_from_index(0, namespace='tags') == 'B-PERS')
        assert (vocab.get_token_from_index(1, namespace='tags') == 'I-PERS')
        assert (vocab.get_token_from_index(2, namespace='tags') == 'O')
        assert (vocab.get_token_from_index(3, namespace='tags') == 'B-ORG')
        assert (vocab.get_token_from_index(4, namespace='tags') == 'I-ORG')

    def test_saving_and_loading(self):
        vocab_dir = os.path.join(self.TEST_DIR, 'vocab_save')
        vocab = Vocabulary(non_padded_namespaces=['a', 'c'])
        vocab.add_token_to_namespace('a0', namespace='a')
        vocab.add_token_to_namespace('a1', namespace='a')
        vocab.add_token_to_namespace('a2', namespace='a')
        vocab.add_token_to_namespace('b2', namespace='b')
        vocab.add_token_to_namespace('b3', namespace='b')
        vocab.save_to_files(vocab_dir)
        vocab2 = Vocabulary.from_files(vocab_dir)
        assert (vocab2._non_padded_namespaces == ['a', 'c'])
        # namespace 'a' is non-padded: indices start at 0, no special tokens
        assert (vocab2.get_vocab_size(namespace='a') == 3)
        assert (vocab2.get_token_from_index(0, namespace='a') == 'a0')
        assert (vocab2.get_token_from_index(1, namespace='a') == 'a1')
        assert (vocab2.get_token_from_index(2, namespace='a') == 'a2')
        assert (vocab2.get_token_index('a0', namespace='a') == 0)
        assert (vocab2.get_token_index('a1', namespace='a') == 1)
        assert (vocab2.get_token_index('a2', namespace='a') == 2)
        # namespace 'b' is padded: padding and OOV occupy indices 0 and 1
        assert (vocab2.get_vocab_size(namespace='b') == 4)
        assert (vocab2.get_token_from_index(0, namespace='b') == vocab._padding_token)
        assert (vocab2.get_token_from_index(1, namespace='b') == vocab._oov_token)
        assert (vocab2.get_token_from_index(2, namespace='b') == 'b2')
        assert (vocab2.get_token_from_index(3, namespace='b') == 'b3')
        assert (vocab2.get_token_index(vocab._padding_token, namespace='b') == 0)
        assert (vocab2.get_token_index(vocab._oov_token, namespace='b') == 1)
        assert (vocab2.get_token_index('b2', namespace='b') == 2)
        assert (vocab2.get_token_index('b3', namespace='b') == 3)
        assert (vocab.get_index_to_token_vocabulary('a') == vocab2.get_index_to_token_vocabulary('a'))
        assert (vocab.get_index_to_token_vocabulary('b') == vocab2.get_index_to_token_vocabulary('b'))

    def test_saving_and_loading_works_with_byte_encoding(self):
        # byte-encoded characters must round-trip through save/load unchanged
        tokenizer = CharacterTokenizer(byte_encoding='utf-8')
        token_indexer = TokenCharactersIndexer(character_tokenizer=tokenizer)
        tokens = [Token(t) for t in ['yvind', 'fur', '']]
        text_field = TextField(tokens, {'characters': token_indexer})
        dataset = Dataset([Instance({'sentence': text_field})])
        vocab = Vocabulary.from_dataset(dataset)
        text_field.index(vocab)
        indexed_tokens = deepcopy(text_field._indexed_tokens)
        vocab_dir = os.path.join(self.TEST_DIR, 'vocab_save')
        vocab.save_to_files(vocab_dir)
        vocab2 = Vocabulary.from_files(vocab_dir)
        text_field2 = TextField(tokens, {'characters': token_indexer})
        text_field2.index(vocab2)
        indexed_tokens2 = deepcopy(text_field2._indexed_tokens)
        assert (indexed_tokens == indexed_tokens2)

    def test_from_params(self):
        vocab_dir = os.path.join(self.TEST_DIR, 'vocab_save')
        vocab = Vocabulary(non_padded_namespaces=['a', 'c'])
        vocab.add_token_to_namespace('a0', namespace='a')
        vocab.add_token_to_namespace('a1', namespace='a')
        vocab.add_token_to_namespace('a2', namespace='a')
        vocab.add_token_to_namespace('b2', namespace='b')
        vocab.add_token_to_namespace('b3', namespace='b')
        vocab.save_to_files(vocab_dir)
        params = Params({'directory_path': vocab_dir})
        vocab2 = Vocabulary.from_params(params)
        assert (vocab.get_index_to_token_vocabulary('a') == vocab2.get_index_to_token_vocabulary('a'))
        assert (vocab.get_index_to_token_vocabulary('b') == vocab2.get_index_to_token_vocabulary('b'))
        # building from a dataset instead of a directory
        vocab2 = Vocabulary.from_params(Params({}), self.dataset)
        assert (vocab2.get_index_to_token_vocabulary('tokens') == {0: '', 1: '', 2: 'a', 3: 'c', 4: 'b'})
        # neither a directory nor a dataset, or both with extra keys, must fail
        with pytest.raises(ConfigurationError):
            _ = Vocabulary.from_params(Params({}))
        with pytest.raises(ConfigurationError):
            _ = Vocabulary.from_params(Params({'directory_path': vocab_dir, 'min_count': 2}))
def aggregate_run_overlap(run):
    """Collapse a nested run {doc: {para: {para_rel: score}}} into per-doc
    counts keyed by the first '_'-separated segment of each para_rel key.

    Also normalizes every innermost score to 1, mutating *run* in place
    (matching the original behavior).
    """
    # first pass: flatten all scores to 1 (in place)
    for paras in run.values():
        for rels in paras.values():
            for rel_key in rels:
                rels[rel_key] = 1
    aggregated = {}
    # second pass: count occurrences of each key prefix per document
    for doc, paras in run.items():
        for rels in paras.values():
            for rel_key in rels:
                prefix = '_'.join(rel_key.split('_')[:1])
                doc_counts = aggregated.setdefault(doc, {})
                doc_counts[prefix] = doc_counts.get(prefix, 0) + 1
    return aggregated
class TestFetchSamples(unittest.TestCase):
    """Exercises fetch_samples() row-limit behavior against a live test database."""

    ((testing.get_driver() in ['mysql', 'hive']), 'skip non mysql/hive tests')  # NOTE(review): mangled decorator residue — presumably "@unittest.skipUnless(...)"; confirm
    def test_fetch_sample(self):
        conn = testing.get_singleton_db_connection()
        select = 'SELECT * FROM iris.train'
        name_and_type = db.selected_columns_and_types(conn, select)
        expect_field_names = [item[0] for item in name_and_type]
        expect_field_types = [item[1] for item in name_and_type]
        column_num = len(name_and_type)
        # n=0 means "no samples": no generator is returned at all
        gen = fetch_samples(conn, select, n=0)
        self.assertTrue((gen is None))
        # n=-1 means "all rows"
        gen = fetch_samples(conn, select, n=(- 1))
        row_num = length(gen())
        self.assertTrue(np.array_equal(gen.field_names, expect_field_names))
        self.assertTrue(np.array_equal(gen.field_types, expect_field_types))
        self.assertGreater(row_num, 25)
        # an explicit n caps the sample size exactly
        gen = fetch_samples(conn, select, n=25)
        n = 0
        self.assertTrue(np.array_equal(gen.field_names, expect_field_names))
        self.assertTrue(np.array_equal(gen.field_types, expect_field_types))
        for rows in gen():
            self.assertEqual(len(rows), column_num)
            n += 1
        self.assertEqual(n, 25)
        gen = fetch_samples(conn, select, n=10)
        self.assertTrue(np.array_equal(gen.field_names, expect_field_names))
        self.assertTrue(np.array_equal(gen.field_types, expect_field_types))
        self.assertEqual(length(gen()), 10)
        # a SQL LIMIT smaller than n wins
        gen = fetch_samples(conn, ('%s LIMIT 1' % select), n=1000)
        self.assertTrue(np.array_equal(gen.field_names, expect_field_names))
        self.assertTrue(np.array_equal(gen.field_types, expect_field_types))
        self.assertEqual(length(gen()), 1)
        # asking for more rows than exist returns every row
        gen = fetch_samples(conn, select, n=(row_num * 2))
        self.assertTrue(np.array_equal(gen.field_names, expect_field_names))
        self.assertTrue(np.array_equal(gen.field_types, expect_field_types))
        self.assertEqual(length(gen()), row_num)
.parametrize('ctx, func_name', ctxs)  # NOTE(review): mangled decorator residue — presumably "@pytest.mark.parametrize(...)"; confirm
.parametrize('seed', [313])  # NOTE(review): likewise "@pytest.mark.parametrize"
def test_round_double_backward(seed, ctx, func_name):
    """Double-backward (accumulated-gradient) check for F.round."""
    from nbla_test_utils import cap_ignore_region, backward_function_tester
    rng = np.random.RandomState(seed)
    # scale by 2 to spread inputs away from the rounding boundaries
    inputs = [(rng.randn(2, 3, 4).astype(np.float32) * 2)]
    backward_function_tester(rng, F.round, inputs, atol_accum=0.01, backward=[True], ctx=ctx)
def eval_data_dir(data_dir, save_dir: str, model_name: str, bs: int=8, max_source_length: int=1024, type_path='val', n_obs=None, fp16=False, task='summarization', local_rank=None, num_return_sequences=1, dataset_kwargs: Dict=None, prefix='', **generate_kwargs) -> Dict:
    """Run distributed generation over one shard of a Seq2SeqDataset.

    Each rank writes its predictions to ``rank_<r>_output.json`` under
    *save_dir* and returns (results, num_replicas).

    NOTE(review): the annotated return type ``Dict`` does not match the tuple
    actually returned — confirm the intended contract.
    """
    model_name = str(model_name)
    assert (local_rank is not None)
    torch.distributed.init_process_group(backend='nccl', rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f'rank_{local_rank}_output.json')
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # apply task-specific generation defaults stored on the model config
    use_task_specific_params(model, task)
    num_beams = generate_kwargs.pop('num_beams', model.config.num_beams)
    # beam search must produce at least num_return_sequences candidates
    if (num_return_sequences > num_beams):
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f'Inferred tokenizer type: {tokenizer.__class__}')
    if (max_source_length is None):
        max_source_length = tokenizer.model_max_length
    if (prefix is None):
        prefix = (prefix or getattr(model.config, 'prefix', '') or '')
    ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs)
    # sortish sampler groups similar lengths per shard to cut padding waste
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(input_ids=batch['input_ids'].to(model.device), attention_mask=batch['attention_mask'].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs)
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch['ids']
        if (num_return_sequences > 1):
            # regroup the flat decode output into per-example candidate lists
            preds = chunks(preds, num_return_sequences)
        for (i, pred) in enumerate(preds):
            results.append({'pred': pred, 'id': ids[i].item()})
    save_json(results, save_path)
    return (results, sampler.num_replicas)
def build_dataloader(dataset, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=None):
    """Build a (possibly prefetching) DataLoader from an options dict.

    Args:
        dataset: the Dataset to wrap.
        dataset_opt: dict with 'phase' ('train'/'val'/'test'); for training it
            must provide 'batch_size_per_gpu' and 'num_worker_per_gpu', and may
            provide 'pin_memory', 'persistent_workers', 'prefetch_mode' and
            'num_prefetch_queue'.
        num_gpu: GPU count; in non-distributed training the batch size and
            worker count are multiplied by it (0 is treated as 1 for CPU).
        dist: whether distributed training is enabled.
        sampler: optional sampler; when given, shuffling is delegated to it.
        seed: optional base seed for per-worker RNG initialization.

    Raises:
        ValueError: if the phase is not 'train', 'val' or 'test'.
    """
    phase = dataset_opt['phase']
    (rank, _) = get_dist_info()
    if (phase == 'train'):
        if dist:
            # distributed: each process already gets only its own shard
            batch_size = dataset_opt['batch_size_per_gpu']
            num_workers = dataset_opt['num_worker_per_gpu']
        else:
            # non-distributed DataParallel-style: scale by GPU count
            multiplier = (1 if (num_gpu == 0) else num_gpu)
            batch_size = (dataset_opt['batch_size_per_gpu'] * multiplier)
            num_workers = (dataset_opt['num_worker_per_gpu'] * multiplier)
        dataloader_args = dict(dataset=dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, sampler=sampler, drop_last=True)
        if (sampler is None):
            dataloader_args['shuffle'] = True
        dataloader_args['worker_init_fn'] = (partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if (seed is not None) else None)
    elif (phase in ['val', 'test']):
        # evaluation: single sample per batch, no workers
        dataloader_args = dict(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
    else:
        raise ValueError(f"Wrong dataset phase: {phase}. Supported ones are 'train', 'val' and 'test'.")
    dataloader_args['pin_memory'] = dataset_opt.get('pin_memory', False)
    dataloader_args['persistent_workers'] = dataset_opt.get('persistent_workers', False)
    prefetch_mode = dataset_opt.get('prefetch_mode')
    if (prefetch_mode == 'cpu'):
        # CPU-side prefetching wrapper with a bounded queue
        num_prefetch_queue = dataset_opt.get('num_prefetch_queue', 1)
        logger = get_root_logger()
        logger.info(f'Use {prefetch_mode} prefetch dataloader: num_prefetch_queue = {num_prefetch_queue}')
        return PrefetchDataLoader(num_prefetch_queue=num_prefetch_queue, **dataloader_args)
    else:
        return torch.utils.data.DataLoader(**dataloader_args)
def pause(fuzzer, jobs=1, input_dir=None, empty_seed=False):
    """Ask the fuzzer driver to pause the given fuzzer."""
    logger.debug(f'pause: {fuzzer}')
    driver_args = gen_fuzzer_driver_args(fuzzer=fuzzer, jobs=jobs, input_dir=input_dir, empty_seed=empty_seed)
    driver_args['command'] = 'pause'
    fuzzer_driver.main(**driver_args)
def get_npy_pkg_dir():
    """Return the directory holding numpy's npy-pkg-config files."""
    import numpy
    numpy_root = os.path.dirname(numpy.__file__)
    return os.path.join(numpy_root, 'core', 'lib', 'npy-pkg-config')
def test_clean_output_format(df_addresses: pd.DataFrame) -> None:
    """clean_address honors custom output_format templates, literals included."""
    cleaned_a = clean_address(df_addresses, 'messy_address', output_format='(zipcode) street_name ~~state_full~~')
    expected_a = df_addresses.copy()
    expected_a['messy_address_clean'] = ['Pine', '(57033) Main', np.nan, 'Main ~~New York~~', '(90015) Figueroa ~~California~~', 'Figueroa', np.nan, np.nan, np.nan, np.nan]
    cleaned_b = clean_address(df_addresses, 'messy_address', output_format='house_number street_prefix_full street_name street_suffix_full (building)')
    expected_b = df_addresses.copy()
    expected_b['messy_address_clean'] = ['123 Pine Avenue', '1234 West Main Heights', np.nan, '789 North Main Street (Robie House)', '1111 South Figueroa Street', '1111 South Figueroa Street (Staples Center)', np.nan, np.nan, np.nan, np.nan]
    assert expected_a.equals(cleaned_a)
    assert expected_b.equals(cleaned_b)
_kl(Geometric, Geometric)  # NOTE(review): mangled decorator residue — presumably "@register_kl(Geometric, Geometric)"; confirm
def _kl_geometric_geometric(p, q):
    # KL(p || q) for two Geometric distributions:
    # -H(p) - log1p(-q.probs) / p.probs - q.logits
    return (((- p.entropy()) - (torch.log1p((- q.probs)) / p.probs)) - q.logits)
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Sanity-check a ranking run against the reference query set.

    Returns (allowed, message): *allowed* is False when any query ranks the
    same passage more than once; *message* describes the last problem found
    (empty string when everything is fine).
    """
    message = ''
    allowed = True
    candidate_set = set(qids_to_ranked_candidate_passages)
    ref_set = set(qids_to_relevant_passageids)
    if candidate_set != ref_set:
        # a candidate superset of the reference is tolerated silently
        if candidate_set >= ref_set:
            pass
        elif candidate_set < ref_set:
            message = 'Not all queries seem to be ranked. Are you scoring the right set?'
        else:
            message = 'The submitted queries do not fully match the queries in the evaluation set. Are you scoring the right set?'
    for qid, ranked in qids_to_ranked_candidate_passages.items():
        freq = Counter(ranked)
        duplicate_pids = {pid for pid, count in freq.items() if count > 1}
        # pid 0 is excluded from the duplicate check — presumably a padding
        # placeholder that may legitimately repeat
        if duplicate_pids - {0}:
            message = 'Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}'.format(qid=qid, pid=list(duplicate_pids)[0])
            allowed = False
    return (allowed, message)
class LlamaLora(CausalLoraModel):
    """Causal LoRA wrapper for LLaMA, wired to the LlamaLoraEngine config."""
    # registry key used to look this model class up by name
    config_name: str = 'llama_lora'

    def __init__(self, weights_path: Optional[str]=None):
        super().__init__(LlamaLoraEngine.config_name, weights_path)
def patcher(module, test_dir):
    """Return a mock.patch replacing *module*'s `open` with a MockOpen rooted at *test_dir*."""
    mock_open = MockOpen(test_dir)
    return mock.patch(module + '.open', mock_open)
def test(args, model, test_samplers, rank=0, mode='Test', queue=None):
    """Evaluate *model* on each test sampler and print per-metric averages."""
    assert (args.num_proc <= 1), 'MXNet KGE does not support multi-process now'
    logs = []
    # pick the device for this worker; -1 means CPU
    if (len(args.gpu) > 0):
        gpu_id = (args.gpu[(rank % len(args.gpu))] if (args.mix_cpu_gpu and (args.num_proc > 1)) else args.gpu[0])
    else:
        gpu_id = (- 1)
    if args.strict_rel_part:
        # in strict relation-partition mode, relation embeddings must be
        # loaded onto the device before evaluation
        model.load_relation(mx.gpu(gpu_id))
    for sampler in test_samplers:
        count = 0  # NOTE(review): unused counter — looks like leftover code
        for (pos_g, neg_g) in sampler:
            # forward_test appends per-sample metric dicts to `logs`
            model.forward_test(pos_g, neg_g, logs, gpu_id)
    metrics = {}
    if (len(logs) > 0):
        # average every metric over all evaluated samples
        for metric in logs[0].keys():
            metrics[metric] = (sum([log[metric] for log in logs]) / len(logs))
    for (k, v) in metrics.items():
        print('{} average {}: {}'.format(mode, k, v))
    # reset samplers so they can be reused for a later evaluation pass
    for i in range(len(test_samplers)):
        test_samplers[i] = test_samplers[i].reset()
def quantize_model(model):
    """Quantize every parameter of *model* in place to byte storage.

    Per-tensor scale and zero-point are stored as extra buffers named
    '<param>.quantization.scale' / '<param>.quantization.zero_point' so the
    model can later be dequantized. Sets ``model.quantized = True``.
    """
    qparams = {}
    for (n, p) in model.state_dict().items():
        qp = quantize_tensor(p)
        qparams[(n + '.quantization.scale')] = torch.FloatTensor([qp.scale])
        qparams[(n + '.quantization.zero_point')] = torch.ByteTensor([qp.zero_point])
        # overwrite the parameter with its quantized values in place
        p.copy_(qp.tensor)
    # convert all tensors to byte storage after the quantized values are copied
    model.type('torch.ByteTensor')
    # register scale/zero-point buffers after the type conversion so they
    # keep their own dtypes
    for (n, p) in qparams.items():
        model.register_buffer(n, p)
    model.quantized = True
def parse_quad(text):
    """Extract every question string from a SQuAD-style JSON document.

    Args:
        text: JSON string with the nested data/paragraphs/qas layout.

    Returns:
        A flat list of question strings in document order.
    """
    doc = json.loads(text)
    return [qa['question']
            for block in doc['data']
            for paragraph in block['paragraphs']
            for qa in paragraph['qas']]
def generate_cpu_workload(workload_cpu_stress, num_cpu_cores):
    """Launch a background stress-ng CPU workload and return its shell PID (as a string).

    NOTE(review): the arguments are interpolated into a shell command without
    quoting — only call this with trusted numeric values.
    """
    # backgrounds stress-ng and captures the PID echoed by the shell
    workload_pid = os.popen('stress-ng -c {} -l {} > /dev/null 2>&1 & echo $!'.format(num_cpu_cores, workload_cpu_stress)).read().strip('\n')
    print('PID of workload is {} and number of cores used is {}'.format(workload_pid, num_cpu_cores))
    return workload_pid
def add_norm_args(parser):
    """Register normalization/dropout CLI options on *parser*.

    Returns the 'Normalization' argument group.

    BUG FIX: the original created the 'Normalization' group but then added the
    encoder/decoder options via ``parser.add_argument``, so they appeared
    outside the group in --help. All options are now registered on the group;
    parsing behavior is unchanged.
    """
    group = parser.add_argument_group('Normalization')
    group.add_argument('--encoder-norm-self', default='layer', choices=['layer', 'batch', 'power'], help='normalization scheme for encoder')
    group.add_argument('--encoder-norm-ff', default='layer', choices=['none', 'layer', 'group', 'batch', 'power'], help='normalization scheme for encoder')
    group.add_argument('--encoder-spec-norm', default=False, action='store_true')
    group.add_argument('--decoder-norm-self', default='layer', choices=['layer', 'group', 'batch', 'power'], help='normalization scheme for decoder')
    group.add_argument('--decoder-norm-ff', default='layer', choices=['none', 'layer', 'group', 'batch', 'power'])
    group.add_argument('--dropout-type', default='none', choices=['none', 'bernoulli', 'gamma', 'gumbel', 'beta', 'laplace', 'chi', 'normal'], help='droptypes for all the layers')
    # NOTE(review): '--dropout-gama' (sic) kept as-is; renaming would break
    # existing scripts that reference args.dropout_gama.
    group.add_argument('--dropout-gama', default=0.5, type=float, metavar='N', help='dropout gama for some noisy types')
    return group
(frozen=True)  # NOTE(review): mangled decorator residue — presumably "@dataclass(frozen=True)"; confirm
class MetricNameMatcher():
    """Pattern for selecting MetricNames by name/split/sub_split/perturbation."""
    name: str
    split: str
    sub_split: Optional[str] = None
    perturbation_name: Optional[str] = None

    def matches(self, metric_name: MetricName) -> bool:
        """True iff *metric_name* satisfies every populated field of this matcher."""
        if (self.name != metric_name.name):
            return False
        if (self.split != metric_name.split):
            return False
        # sub_split is only checked when the matcher specifies one
        if ((self.sub_split is not None) and (self.sub_split != metric_name.sub_split)):
            return False
        metric_perturbation_name = (metric_name.perturbation and metric_name.perturbation.name)
        if (self.perturbation_name != metric_perturbation_name):
            return False
        # only worst-case perturbation aggregates are matched
        if (metric_name.perturbation and (metric_name.perturbation.computed_on != PERTURBATION_WORST)):
            return False
        return True

    def substitute(self, environment: Dict[(str, str)]) -> 'MetricNameMatcher':
        """Render mako templates in name/split/perturbation_name against *environment*.

        NOTE(review): sub_split is not propagated into the returned matcher —
        confirm that dropping it is intentional.
        """
        return MetricNameMatcher(name=mako.template.Template(self.name).render(**environment), split=mako.template.Template(self.split).render(**environment), perturbation_name=(mako.template.Template(self.perturbation_name).render(**environment) if (self.perturbation_name is not None) else None))
def CalculateGearyAuto(ProteinSequence, AAProperty, AAPropertyName):
    """Compute Geary autocorrelation descriptors for each amino-acid property.

    Returns a dict keyed by property name, one entry per property in
    *AAProperty* / *AAPropertyName* (matched positionally).
    """
    result = {}
    for prop_index, prop in enumerate(AAProperty):
        prop_name = AAPropertyName[prop_index]
        result[prop_name] = CalculateEachGearyAuto(ProteinSequence, prop, prop_name)
    return result
def test_generator():
    """Run trainer() in generator mode and verify the output directory appears."""
    trainer(input_hdf5='../sampleData&Model/100samples.hdf5', input_csv='../sampleData&Model/100samples.csv', output_name='test_trainer', cnn_blocks=2, lstm_blocks=1, padding='same', activation='relu', drop_rate=0.2, label_type='gaussian', add_event_r=0.6, add_gap_r=0.2, shift_event_r=0.9, add_noise_r=0.5, mode='generator', train_valid_test_split=[0.6, 0.2, 0.2], batch_size=20, epochs=10, patience=2, gpuid=None, gpu_limit=None)
    output_dirs = [entry for entry in os.listdir('.') if entry.split('_')[-1] == 'outputs']
    assert 'test_trainer_outputs' in output_dirs
class VicunaScenario(Scenario):
    """Scenario wrapping the Vicuna evaluation questions."""
    name = 'vicuna'
    description = 'Vicuna eval questions'
    tags = ['instructions']

    def __init__(self, category: str):
        super().__init__()
        # question category to keep; 'all' disables filtering
        self.category: str = category

    def get_instances(self, output_path: str) -> List[Instance]:
        """Download the question file (if needed) and build test Instances."""
        def matches_target_category(raw: dict) -> bool:
            return ((self.category == 'all') or (raw['category'] == self.category))
        # NOTE(review): the source URL literal below is corrupted/truncated in
        # this copy of the file — restore it from the original repository.
        source_url = ' data_path: str = os.path.join(output_path, 'vicuna_questions.jsonl')
        ensure_file_downloaded(source_url=source_url, target_path=data_path)
        instances: List[Instance] = []
        for line in open(data_path):
            raw = json.loads(line)
            if matches_target_category(raw):
                instance = Instance(input=Input(text=raw['text']), split=TEST_SPLIT, references=[])
                instances.append(instance)
        return instances
def decompose_graph(graph):
    """Split a graph object into (x, edge_index, edge_attr, global_attr).

    Each slot is None when the corresponding attribute is absent from
    graph.keys.
    """
    present = graph.keys
    node_features = graph.x if ('x' in present) else None
    edge_index = graph.edge_index if ('edge_index' in present) else None
    edge_attr = graph.edge_attr if ('edge_attr' in present) else None
    global_attr = graph.global_attr if ('global_attr' in present) else None
    return (node_features, edge_index, edge_attr, global_attr)
def register_Ns3MmWaveLteEnbRrcProtocolReal_methods(root_module, cls):
    """Register the ns3::MmWaveLteEnbRrcProtocolReal C++ API on the pybindgen class.

    (Auto-generated pybindgen binding registration — keep in sync with the
    corresponding ns-3 header rather than hand-editing.)
    """
    cls.add_constructor([param('ns3::MmWaveLteEnbRrcProtocolReal const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('GetLteEnbRrcSapUser', 'ns3::LteEnbRrcSapUser *', [])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetUeRrcSapProvider', 'ns3::LteUeRrcSapProvider *', [param('uint16_t', 'rnti')])
    cls.add_method('SetCellId', 'void', [param('uint16_t', 'cellId')])
    cls.add_method('SetLteEnbRrcSapProvider', 'void', [param('ns3::LteEnbRrcSapProvider *', 'p')])
    cls.add_method('SetUeRrcSapProvider', 'void', [param('uint16_t', 'rnti'), param('ns3::LteUeRrcSapProvider *', 'p')])
    return
def test_rpad_and_clip_indexed_array():
    """pad_none(..., clip=True) on an IndexedArray wrapping a ListOffsetArray.

    ``indexedarray`` reverses ``listoffsetarray`` via an index, which makes it
    element-wise equal to ``backward``; padding is checked along both axes for
    every clip length, together with typetracer form agreement.
    """
    content = ak.contents.numpyarray.NumpyArray(
        np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    )
    offsets = ak.index.Index64(np.array([0, 3, 3, 5, 6, 10]))
    listoffsetarray = ak.contents.listoffsetarray.ListOffsetArray(offsets, content)

    content = ak.contents.numpyarray.NumpyArray(
        np.array([6.6, 7.7, 8.8, 9.9, 5.5, 3.3, 4.4, 0.0, 1.1, 2.2])
    )
    offsets = ak.index.Index64(np.array([0, 4, 5, 7, 7, 10]))
    backward = ak.contents.listoffsetarray.ListOffsetArray(offsets, content)

    index = ak.index.Index64(np.array([4, 3, 2, 1, 0], dtype=np.int64))
    indexedarray = ak.contents.IndexedArray(index, listoffsetarray)

    base = [[6.6, 7.7, 8.8, 9.9], [5.5], [3.3, 4.4], [], [0.0, 1.1, 2.2]]
    assert to_list(indexedarray) == base
    assert to_list(ak._do.pad_none(backward, 4, 1, clip=True)) == to_list(
        ak._do.pad_none(indexedarray, 4, 1, clip=True)
    )

    # Axis 0: clipping to `length` keeps the first `length` sublists,
    # appending None entries once the five real sublists are exhausted.
    for length in range(1, 9):
        expected = (base + [None] * 8)[:length]
        padded = ak._do.pad_none(indexedarray, length, 0, clip=True)
        assert to_list(padded) == expected
        assert (
            ak._do.pad_none(indexedarray.to_typetracer(), length, 0, clip=True).form
            == padded.form
        )

    # Axis 1: every sublist is padded with None up to `length`, then clipped.
    for length in range(1, 6):
        expected = [(row + [None] * length)[:length] for row in base]
        padded = ak._do.pad_none(indexedarray, length, 1, clip=True)
        assert to_list(padded) == expected
        assert (
            ak._do.pad_none(indexedarray.to_typetracer(), length, 1, clip=True).form
            == padded.form
        )
def main(args):
    """Convert captions to audio.

    A single (text, save_path) pair converts just that text; otherwise the
    whole dataset named by ``args.dataset`` is converted, one file at a time
    if ``args.one_by_one`` is set.
    """
    # Single-text mode takes precedence over dataset mode.
    if args.text is not None and args.save_path is not None:
        convert_a_text(args.text, args.save_path, args.APP_ID, args.API_KEY, args.SECRET_KEY)
        return

    pair_getters = {
        'birds': get_bird_text_audio_path_pair,
        'flowers': get_flower_text_audio_path_pair,
        'places': get_place_text_audio_path_pair,
    }
    getter = pair_getters.get(args.dataset)
    if getter is None:
        raise NotImplementedError
    pairs_all = getter(args)

    if args.one_by_one:
        caption_to_audio_one_by_one(pairs_all, args.APP_ID, args.API_KEY, args.SECRET_KEY)
    else:
        caption_to_audio(pairs_all, args.APP_ID, args.API_KEY, args.SECRET_KEY)
def remove_gutenberg_header_footer(lines):
    """Strip Project Gutenberg boilerplate from a list of text lines.

    A marker line contains '***' plus (case-insensitively) 'GUTENBERG' and
    either 'START' or 'END'. Returns the slice between the first START
    marker (exclusive) and the first END marker (exclusive); if only one
    marker kind is present, trims that side only; with no markers the input
    is returned unchanged.
    """

    def _is_marker(line, word):
        upper = line.upper()
        return '***' in line and word in upper and 'GUTENBERG' in upper

    start = next((i for i, line in enumerate(lines) if _is_marker(line, 'START')), None)
    end = next((i for i, line in enumerate(lines) if _is_marker(line, 'END')), None)

    if start is not None and end is not None:
        return lines[start + 1:end]
    if start is not None:
        return lines[start + 1:]
    if end is not None:
        return lines[:end]
    return lines
def main():
    """Build the MNIST classification graphs and save them to an NNP file.

    Creates a training network (softmax cross-entropy loss) and a runtime
    inference network sharing the same parameters, then writes both, with
    their initialized parameters, to '<net>_initialized.nnp'.

    Requires the nnabla-examples repository; its location is taken from the
    NNABLA_EXAMPLES_ROOT environment variable (defaulting to a path relative
    to this file).
    """
    HERE = os.path.dirname(__file__)
    nnabla_examples_root = os.environ.get(
        'NNABLA_EXAMPLES_ROOT', os.path.join(HERE, '../../../../nnabla-examples'))
    mnist_examples_root = os.path.realpath(
        os.path.join(nnabla_examples_root, 'image-classification/mnist-collection'))
    sys.path.append(mnist_examples_root)
    # NOTE(review): the URL literal was stripped from this view of the file;
    # this is the canonical repository location — confirm against the original.
    nnabla_examples_git_url = 'https://github.com/sony/nnabla-examples'
    try:
        from args import get_args
    except ImportError:
        print('An envvar `NNABLA_EXAMPLES_ROOT` which locates the local path to [nnabla-examples]({}) repository must be set correctly.'.format(
            nnabla_examples_git_url), file=sys.stderr)
        raise
    from mnist_data import data_iterator_mnist
    from classification import mnist_lenet_prediction, mnist_resnet_prediction

    args = get_args(description=__doc__)
    # Select architecture: LeNet by default, ResNet when requested.
    mnist_cnn_prediction = mnist_lenet_prediction
    if args.net == 'resnet':
        mnist_cnn_prediction = mnist_resnet_prediction

    # Training graph: loss over a batch of 1x28x28 images and integer labels.
    x = nn.Variable([args.batch_size, 1, 28, 28])
    h = mnist_cnn_prediction(x, test=False, aug=False)
    t = nn.Variable([args.batch_size, 1])
    loss = F.mean(F.softmax_cross_entropy(h, t))
    # Inference graph (test mode) shares parameters with the training graph.
    y = mnist_cnn_prediction(x, test=True, aug=False)

    nnp_file = '{}_initialized.nnp'.format(args.net)
    runtime_contents = {
        'networks': [
            {'name': 'training', 'batch_size': args.batch_size,
             'outputs': {'loss': loss}, 'names': {'x': x, 't': t}},
            {'name': 'runtime', 'batch_size': args.batch_size,
             'outputs': {'y': y}, 'names': {'x': x}},
        ],
        'executors': [
            {'name': 'runtime', 'network': 'runtime', 'data': ['x'], 'output': ['y']},
        ],
    }
    nn.utils.save.save(nnp_file, runtime_contents)
def execute(affs: Chunk, fragments: np.ndarray=None):
    """Agglomerate an affinity-map chunk into a segmentation chunk.

    Parameters
    ----------
    affs : Chunk
        Affinity map with the channel axis first.
    fragments : np.ndarray, optional
        Pre-computed supervoxel fragments forwarded to ``agglomerate``.

    Returns a one-element list holding the segmentation ``Chunk`` with the
    input chunk's properties copied over.

    NOTE(review): ``flip_channel``, ``threshold``, ``aff_threshold_low``,
    ``aff_threshold_high`` and ``scoring_function`` are free variables —
    presumably captured from an enclosing operator/closure; confirm in the
    full file.
    """
    properties = affs.properties
    if flip_channel:
        # Reverse the channel order expected by the agglomeration backend.
        affs = np.flip(affs, axis=0)
    # The agglomeration backend requires C-contiguous float32 input.
    affs = np.ascontiguousarray(affs, dtype=np.float32)
    seg_generator = agglomerate(affs, [threshold], fragments=fragments, aff_threshold_low=aff_threshold_low, aff_threshold_high=aff_threshold_high, scoring_function=scoring_function, force_rebuild=False)
    # Only one threshold is requested, so take the single yielded segmentation.
    seg = next(seg_generator)
    seg = Chunk(seg)
    seg.set_properties(properties)
    return [seg]
def process_data():
    """Build DataLoaders for the Roll2Midi dataset.

    Returns ``(train_loader, test_loader)``, both with batch size 16;
    only the training loader shuffles.
    """
    loaders = []
    for is_train in (True, False):
        dataset = Roll2MidiDataset(train=is_train)
        loaders.append(utils.DataLoader(dataset, batch_size=16, shuffle=is_train))
    return tuple(loaders)
# NOTE(review): the decorator marker was garbled to a bare "_model" in this
# view of the file; timm registers model constructors with @register_model —
# confirm against the original source.
@register_model
def ecaresnext26tn_32x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs an ECA-ResNeXt-26-TN (32x4d) model.

    ResNeXt bottleneck blocks (cardinality 32, base width 4) with ECA
    attention, a deep tiered-narrow stem and average-pool downsampling.

    Args:
        pretrained: load pretrained weights for the default config.
        num_classes: number of classifier outputs.
        in_chans: number of input image channels.
        **kwargs: forwarded to the ResNet constructor.
    """
    default_cfg = default_cfgs['ecaresnext26tn_32x4d']
    block_args = dict(attn_layer='eca')
    model = ResNet(
        Bottleneck, [2, 2, 2, 2], cardinality=32, base_width=4,
        stem_width=32, stem_type='deep_tiered_narrow', avg_down=True,
        num_classes=num_classes, in_chans=in_chans, block_args=block_args,
        **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def Run(command):
    """Run *command* as a subprocess; return 1 if it was terminated by a
    signal, 0 otherwise."""
    proc = gtest_test_utils.Subprocess(command, env=environ)
    return 1 if proc.terminated_by_signal else 0
def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Mac48AddressValue on the
    pybindgen class wrapper ``cls`` (auto-generated binding code)."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    # Construct from a Mac48Address value.
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return
def _platform_tags():
    """Return the platform tags for the current OS: macOS, Linux, or a
    generic fallback for everything else."""
    system = platform.system()
    if system == 'Darwin':
        return mac_platforms()
    if system == 'Linux':
        return _linux_platforms()
    return _generic_platforms()
def img_random_brightness(image):
    """Randomly scale the image's pixel intensities by a factor drawn from
    [0.2, 1.2] (imgaug Multiply augmenter); returns the augmented image."""
    augmenter = iaa.Multiply((0.2, 1.2))
    return augmenter.augment_image(image)
def slack_augment(candidate_data=None, num_mixup=None, hyper_alpha=8, score_limit_upper=500, score_limit_low=0):
    """Generate up to ``num_mixup`` extra sequence-labeling samples by mixing
    valid sub-sequences of pairs of candidate examples ("slack mixup").

    For every pair (i, j) with usable sub-sequences, a Beta(alpha, alpha)
    mixing coefficient is drawn, two mixed sequences are produced, and each
    is kept only if it passes the quality-score filter (disabled when
    ``score_limit_upper < 0``). Returns ``(mixup_data, mixup_label,
    new_sample_count)``.

    NOTE(review): ``window_size``, ``valid_tag_bar``, ``find_sub_seq``,
    ``slack_mixup`` and ``high_quality`` are free names from the enclosing
    module; ``GUID_COUNT`` is mutated globally to keep guids unique.
    """
    global GUID_COUNT
    time_start = time.time()
    new_sample_count = 0
    stop_flag = 0  # set when num_mixup is reached, to break the outer loop
    mixup_data = []
    mixup_label = []
    for i in range(len(candidate_data) - 1):
        (sub_sequence_i, subseq_i_index) = find_sub_seq(candidate_data[i].label, window_size, valid_tag_bar)
        if len(sub_sequence_i) > 0:
            for j in range((i + 1), len(candidate_data)):
                (sub_sequence_j, subseq_j_index) = find_sub_seq(candidate_data[j].label, window_size, valid_tag_bar)
                if len(sub_sequence_j) > 0:
                    # Mixing coefficient for this pair.
                    hyper_lambda = np.random.beta(hyper_alpha, hyper_alpha)
                    (newseq1, newseq2, newseq1_tag, newseq2_tag) = slack_mixup(candidate_data, i, j, subseq_i_index, subseq_j_index, hyper_lambda)
                    if score_limit_upper < 0:
                        # Negative upper limit disables the quality filter.
                        high_quality_1 = True
                        high_quality_2 = True
                    else:
                        (high_quality_1, score_1) = high_quality(newseq1, score_limit_upper, score_limit_low)
                        (high_quality_2, score_2) = high_quality(newseq2, score_limit_upper, score_limit_low)
                    # NOTE(review): newseq1 is kept when EITHER sequence passed
                    # the filter — looks asymmetric vs. the newseq2 branch
                    # below; confirm this is intentional.
                    if high_quality_1 or high_quality_2:
                        GUID_COUNT += 1
                        mixup_data.append(InputExample(guid=GUID_COUNT, text_a=newseq1, text_b=None, label=candidate_data[i].label))
                        mixup_label.append(newseq1_tag)
                        new_sample_count += 1
                        if new_sample_count >= num_mixup:
                            stop_flag = 1
                            break
                    if high_quality_2:
                        GUID_COUNT += 1
                        mixup_data.append(InputExample(guid=GUID_COUNT, text_a=newseq2, text_b=None, label=candidate_data[j].label))
                        mixup_label.append(newseq2_tag)
                        new_sample_count += 1
                        if new_sample_count >= num_mixup:
                            stop_flag = 1
                            break
        if stop_flag:
            break
    time_end = time.time()
    print('{} extra samples are generated, the time cost is {} s'.format(new_sample_count, (time_end - time_start)))
    return (mixup_data, mixup_label, new_sample_count)
def registry_addon(fn):
    """Decorator factory that post-processes objects built by the reader
    registry.

    ``registry_addon(fn)`` yields a factory; calling it with a module name
    (prefixed internally with 'nn.') re-registers that module's build
    function so that ``fn(obj, *args, **kwargs)`` runs on every object the
    original builder produces.

    Bug fix: the original never returned ``wrapper_factory``, so any name
    decorated with ``@registry_addon`` was bound to None.
    """
    def wrapper_factory(module_name, *args, **kwargs):
        module_name = 'nn.' + module_name
        # Capture the currently registered builder before replacing it.
        build_fn = reader_registry[module_name]

        def wrapper(reader, version):
            obj = build_fn(reader, version)
            # Apply the add-on hook to the freshly built object.
            fn(obj, *args, **kwargs)
            return obj

        reader_registry[module_name] = wrapper

    return wrapper_factory
class PseudoSelection(sel.SelectionFunction):
    """Selection function that ignores fitness entirely and picks an
    individual uniformly at random."""

    def get_index(self, population: list[T]) -> int:
        # Uniform draw over [0, len(population)).
        upper_bound = len(population)
        return randomness.next_int(0, upper_bound)
class CSDSBmat(SpectralMatrix):
    """Spectral derivative matrix coupling a Shen-Dirichlet (SD) test basis
    with a Shen-biharmonic (SB) trial basis.

    The matrix is banded with non-zero diagonals at offsets -3, -1 and +1
    (see ``assemble``); ``matvec`` exploits that band structure.
    """

    def __init__(self, test, trial, scale=1, measure=1, assemble=None, kind=None, fixed_resolution=None):
        SpectralMatrix.__init__(self, test, trial, scale=scale, measure=measure, assemble=assemble, kind=kind, fixed_resolution=fixed_resolution)
        # Register the extra matvec implementations this class provides.
        self._matvec_methods += ['cython', 'self']

    def assemble(self, method):
        """Return the banded entries as {diagonal offset: values}."""
        (test, trial) = (self.testfunction, self.trialfunction)
        # This matrix is only defined for SD test x SB trial bases.
        assert isinstance(test[0], SD)
        assert isinstance(trial[0], SB)
        N = test[0].N
        k = np.arange(N, dtype=float)
        # Closed-form diagonal entries in terms of the mode number k.
        d = {(-3): ((((k[3:(-2)] - 2) * (k[3:(-2)] + 1)) / k[3:(-2)]) * np.pi), (-1): ((((-2) * ((k[1:(-3)] + 1) ** 2)) / (k[1:(-3)] + 2)) * np.pi), 1: ((k[:(-5)] + 1) * np.pi)}
        return d

    def matvec(self, v, c, format=None, axis=0):
        """Banded matrix-vector product c = A @ v along ``axis``.

        ``format`` selects the implementation: 'cython' (default), 'self'
        (pure numpy using the three stored diagonals), or anything else is
        delegated to the base class.
        """
        format = ('cython' if (format is None) else format)
        (N, M) = self.shape
        c.fill(0)
        if (format == 'self'):
            if (axis > 0):
                # Work along the first axis, then move it back afterwards.
                c = np.moveaxis(c, axis, 0)
                v = np.moveaxis(v, axis, 0)
            # Broadcast each 1-D diagonal over the trailing axes of v.
            s = ((slice(None),) + ((np.newaxis,) * (v.ndim - 1)))
            # Accumulate the three band contributions (offsets -3, -1, +1).
            c[3:N] = (self[(-3)][s] * v[:(M - 1)])
            c[1:(N - 1)] += (self[(-1)][s] * v[:M])
            c[:(N - 3)] += (self[1][s] * v[1:M])
            if (axis > 0):
                c = np.moveaxis(c, 0, axis)
                v = np.moveaxis(v, 0, axis)
            self.scale_array(c, self.scale)
        elif (format == 'cython'):
            cython.Matvec.CDB_matvec(v, c, axis, self[(-3)], self[(-1)], self[1])
            self.scale_array(c, self.scale)
        else:
            # Unknown formats fall back to the generic implementation; our
            # own method names are mapped to None so the base class chooses.
            format = (None if (format in self._matvec_methods) else format)
            c = super(CSDSBmat, self).matvec(v, c, format=format, axis=axis)
        return c
# NOTE(review): this marker appears garbled in this view — it is presumably a
# registration decorator such as @registry.register_trainer('GEM'); confirm
# against the original file.
_trainer('GEM')
class GEMTrainer(TrainerCallbackHookMixin, GEMTrainerTrainingLoopMixin, TrainerDeviceMixin, TrainerEvaluationLoopMixin, TrainerProfilingMixin, BaseTrainer):
    """Trainer implementing GEM (Gradient Episodic Memory) continual
    learning: replays samples from previous tasks and stores per-task
    gradients on the model to constrain updates."""

    def __init__(self, config: DictConfig):
        super().__init__(config)

    def load(self):
        """Full setup: base load, fp16, model, then GEM-specific state."""
        super().load()
        self.load_fp16_scaler()
        self.on_init_start()
        self.get_model_master()
        self.parallelize_model()
        self.on_init_end()
        self._init_CL_setting()
        self._init_supp_dataloader()
        self._init_model_field()

    def _init_CL_setting(self):
        """Read the continual-learning config and derive task metadata."""
        self.cl_config = self.config.training.CL
        # task_abbv is the first character of the current task name and is
        # used to locate the task inside task_order.
        self.task_info = edict(cl_setting=self.cl_config.cl_setting, task_order=self.cl_config.task_order, task_name=self.cl_config.cur_task, task_abbv=self.cl_config.cur_task[0], task_index=self.cl_config.task_order.index(self.cl_config.cur_task[0]))

    def _init_model_field(self):
        """Attach GEM bookkeeping (per-parameter sizes, per-task gradient
        buffer) to the master model, creating them only once."""
        self.model_master.task_id = self.task_info.task_index
        if (not hasattr(self.model_master, 'grad_dims')):
            self.model_master.grad_dims = []
            for param in self.model_master.parameters():
                self.model_master.grad_dims.append(param.data.numel())
        if (not hasattr(self.model_master, 'grads')):
            # One flattened-gradient column per task in the task order.
            self.model_master.grads = torch.zeros(sum(self.model_master.grad_dims), len(self.task_info.task_order))
            self.model_master.grads = self.model_master.grads.to(self.device)

    def get_model_master(self):
        # The "master" model is the unwrapped model (before parallelization).
        self.model_master = self.model

    def configure_callbacks(self):
        """Build the standard callbacks plus any configured extras.
        Order matters: the LR scheduler must precede checkpointing."""
        self.checkpoint_callback = CheckpointCallback(self.config, self)
        self.early_stop_callback = EarlyStoppingCallback(self.config, self)
        self.logistics_callback = LogisticsCallback(self.config, self)
        self.lr_scheduler_callback = LRSchedulerCallback(self.config, self)
        self.callbacks = []
        self.callbacks.append(self.lr_scheduler_callback)
        self.callbacks.append(self.checkpoint_callback)
        self.callbacks.append(self.logistics_callback)
        for callback in self.config.training.get('callbacks', []):
            callback_type = callback.type
            callback_param = callback.params
            callback_cls = registry.get_callback_class(callback_type)
            self.callbacks.append(callback_cls(self.config, self, **callback_param))

    def load_datasets(self):
        logger.info('Loading datasets')
        self.dataset_loader = MultiDataModule(self.config)
        self.train_loader = self.dataset_loader.train_dataloader()
        self.val_loader = self.dataset_loader.val_dataloader()
        self.test_loader = self.dataset_loader.test_dataloader()

    def load_model(self):
        """Instantiate the configured model and move it to the device."""
        logger.info('Loading model')
        if (self.config.model in self.config.model_config):
            attributes = self.config.model_config[self.config.model]
        else:
            warnings.warn((f"Model {self.config.model}'s config not present. " + 'Continuing with empty config'))
            attributes = OmegaConf.create()
        # A string config value is an alias to another model's config.
        if isinstance(attributes, str):
            attributes = self.config.model_config[attributes]
        with omegaconf.open_dict(attributes):
            attributes.model = self.config.model
        self.model = build_model(attributes)
        self.model = self.model.to(self.device)

    def load_optimizer(self):
        logger.info('Loading optimizer')
        self.optimizer = build_optimizer(self.model, self.config)

    def load_metrics(self) -> None:
        logger.info('Loading metrics')
        metrics = self.config.evaluation.get('metrics', [])
        self.metrics = Metrics(metrics)
        self.metrics_params = self.metrics.required_params

    def load_fp16_scaler(self):
        """Set up the AMP gradient scaler; prefer FairScale's sharded
        scaler when using an OSS optimizer under distributed training."""
        if self.training_config.fp16:
            assert (version.parse(torch.__version__) >= version.parse('1.6')), f'Using fp16 requires torch version >- 1.6, found: {torch.__version__}'
            assert (self.device != torch.device('cpu')), 'fp16 cannot be used on cpu'
        set_torch_grad_scaler = True
        if (self.training_config.fp16 and self.distributed):
            try:
                from fairscale.optim.grad_scaler import ShardedGradScaler
                from fairscale.optim.oss import OSS
                if isinstance(self.optimizer, OSS):
                    self.scaler = ShardedGradScaler()
                    set_torch_grad_scaler = False
                    logger.info('Using FairScale ShardedGradScaler')
            except ImportError:
                logger.info('Using Pytorch AMP GradScaler')
        if set_torch_grad_scaler:
            self.scaler = torch.cuda.amp.GradScaler(enabled=self.training_config.fp16)

    def train(self):
        """Run the training loop (if requested), then inference/finalize."""
        logger.info('===== Model =====')
        logger.info(self.model)
        print_model_parameters(self.model)
        if ('train' in self.run_type):
            self.on_train_start()
            self.training_loop()
            self.on_train_end()
        self.inference()
        self.finalize()

    def inference(self):
        """Evaluate (or predict) on the val/test splits implied by run_type."""
        dataset_type = []
        if ('val' in self.run_type):
            dataset_type.append('val')
        if any(((rt in self.run_type) for rt in ['inference', 'test', 'predict'])):
            dataset_type.append('test')
        for dataset in dataset_type:
            if self.config.evaluation.predict:
                self.on_prediction_start()
                self.prediction_loop(dataset)
                self.on_prediction_end()
            else:
                self.on_test_start()
                logger.info(f'Starting inference on {dataset} set')
                (report, meter) = self.evaluation_loop(dataset, use_tqdm=True)
                self.on_test_end(report=report, meter=meter)

    def finalize(self):
        self.dataset_loader.teardown()
        self.teardown()

    def _init_supp_dataloader(self):
        """Build one replay ("support") dataloader per previous task.

        For each earlier task, its annotations are repeatedly subsampled so
        that the replay budget (replay_rate x mocked current-task size,
        split across tasks) is respected, then cloned into a dataloader.
        """
        assert self.cl_config.use_cl, 'GEM must enable cl setting'
        assert self.cl_config.use_replay, 'GEM must enable replaying samples'
        assert (self.cl_config.replay_method == 'gem'), 'Must use gem for replay method'
        replay_rate = self.cl_config.replay_rate
        self.supp_annos = []
        for i in range(self.task_info.task_index):
            prev_task = ABBR2TASK[self.task_info.cl_setting][self.task_info.task_order[i]]
            anno_prev = load_npy(TASK_DICT[self.task_info.cl_setting][prev_task]['train'])
            for j in range(i, self.task_info.task_index):
                # Shrink the sample progressively as later tasks arrive.
                n_split = (j + 1)
                mocking_task_name = ABBR2TASK[self.task_info.cl_setting][self.task_info.task_order[(j + 1)]]
                mocking_dta_size = FCL_DATA_ATTR[self.task_info.cl_setting][mocking_task_name]['train']['data_size']
                mocking_n_sample = np.ceil(((replay_rate * mocking_dta_size) / n_split)).astype(np.int32)
                if (mocking_n_sample < len(anno_prev)):
                    anno_prev = np_set_seed_and_select(arr=anno_prev, N_select=mocking_n_sample)
            self.supp_annos.append(np.array(anno_prev))
        self.supp_dls = []
        for supp_anno in self.supp_annos:
            # Clone the training dataloader and swap in the replay annotations.
            dl = deepcopy(self.train_loader)
            dl.get_datasets()[0].annotation_db = supp_anno
            dl.set_lengths()
            dl.set_samplers()
            self.supp_dls.append(dl)
def test_single_char_arguments():
    """Single-character conversions for char, char16_t, char32_t and wchar_t.

    NOTE(review): the original non-ASCII character literals were lost in
    extraction (they appeared as empty/ASCII stand-ins, which would make the
    assertions fail); they are reconstructed here with chr() from the code
    points the assertions already check — confirm against the upstream
    pybind11 test suite.
    """

    def toobig_message(r):
        return 'Character code point not in range({0:#x})'.format(r)

    toolong_message = 'Expected a character, but multi-character string found'

    # 8-bit char: only code points below 256 are accepted.
    assert m.ord_char(u'a') == 97
    assert m.ord_char_lv(u'b') == 98
    assert m.ord_char(chr(233)) == 233  # 'é'
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char(chr(256)) == 256  # 'Ā' is out of char range
    assert str(excinfo.value) == toobig_message(256)
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char(u'ab')
    assert str(excinfo.value) == toolong_message

    # char16_t: Basic Multilingual Plane only (< 0x10000).
    assert m.ord_char16(u'a') == 97
    assert m.ord_char16(chr(233)) == 233
    assert m.ord_char16_lv(chr(234)) == 234
    assert m.ord_char16(chr(256)) == 256
    assert m.ord_char16(chr(8253)) == 8253  # '‽'
    assert m.ord_char16(chr(9829)) == 9829  # '♥'
    assert m.ord_char16_lv(chr(9825)) == 9825  # '♡'
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char16(chr(127874)) == 127874  # '🎂' needs a surrogate pair
    assert str(excinfo.value) == toobig_message(65536)
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char16(u'aa')
    assert str(excinfo.value) == toolong_message

    # char32_t: any Unicode code point.
    assert m.ord_char32(u'a') == 97
    assert m.ord_char32(chr(233)) == 233
    assert m.ord_char32(chr(256)) == 256
    assert m.ord_char32(chr(8253)) == 8253
    assert m.ord_char32(chr(9829)) == 9829
    assert m.ord_char32(chr(127874)) == 127874
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char32(u'aa')
    assert str(excinfo.value) == toolong_message

    # wchar_t: width is platform dependent (2 or 4 bytes).
    assert m.ord_wchar(u'a') == 97
    assert m.ord_wchar(chr(233)) == 233
    assert m.ord_wchar(chr(256)) == 256
    assert m.ord_wchar(chr(8253)) == 8253
    assert m.ord_wchar(chr(9829)) == 9829
    if (m.wchar_size == 2):
        with pytest.raises(ValueError) as excinfo:
            assert m.ord_wchar(chr(127874)) == 127874
        assert str(excinfo.value) == toobig_message(65536)
    else:
        assert m.ord_wchar(chr(127874)) == 127874
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_wchar(u'aa')
    assert str(excinfo.value) == toolong_message
class MedianImputer():
    """Impute null/NaN entries of a numeric dask Series with its median.

    ``fit`` computes and stores the median, ``transform`` maps ``fillna``
    over the column, and ``fit_transform`` chains the two.
    """

    def __init__(self, null_values: Optional[List[Any]]) -> None:
        # Strings listed here are treated as nulls; may be None (no sentinels).
        self.null_values = null_values
        # Placeholder until fit() computes the real median.
        self.median = 0

    def fit(self, col_df: 'dd.Series') -> Any:
        """Compute and store the column median; returns self for chaining."""
        self.median = col_df.values.median()
        return self

    def transform(self, col_df: 'dd.Series') -> 'dd.Series':
        """Replace null entries element-wise with the stored median."""
        result = col_df.map(self.fillna)
        return result

    def fit_transform(self, col_df: 'dd.Series') -> 'dd.Series':
        """Fit on the column, then transform it."""
        return self.fit(col_df).transform(col_df)

    def fillna(self, val: Union[(int, float)]) -> Union[(int, float)]:
        """Return the median for null sentinels and NaN, otherwise ``val``.

        Fix: guard against ``null_values is None`` (which the constructor's
        annotation allows); the original raised TypeError on
        ``val in None`` for any string input in that case.
        """
        if (isinstance(val, str) and (self.null_values is not None) and (val in self.null_values)):
            return self.median
        # NOTE(review): a string that is not a listed sentinel still goes
        # through float(val) and may raise ValueError — confirm callers only
        # pass numeric-like values here.
        if math.isnan(float(val)):
            return self.median
        return val
def execution_user_input():
    """Prompt the user for a command and execute it without a shell.

    SECURITY fix: the original passed the raw input string with
    ``shell=True``, which allows arbitrary shell injection (e.g.
    ``"ls; rm -rf ~"``). The input is now tokenized with shlex and run
    with ``shell=False``; shell syntax (pipes, redirection, globbing) is
    consequently no longer interpreted.
    """
    import shlex  # local import keeps this fix self-contained

    user_input = input('Enter a command: ')
    subprocess.call(shlex.split(user_input), shell=False)
    return
class AlgoTrainer(BaseAlgo):
    """Offline RL trainer implementing CQL-style conservative Q-learning:
    a SAC actor/critic pair plus a conservative penalty (logsumexp over
    random/policy actions) on both critics, with optional automatic entropy
    tuning and a Lagrange multiplier for the penalty budget."""

    def __init__(self, algo_init, args):
        super(AlgoTrainer, self).__init__(args)
        self.args = args
        # Networks and their optimizers come pre-built in algo_init.
        self.actor = algo_init['actor']['net']
        self.actor_opt = algo_init['actor']['opt']
        self.critic1 = algo_init['critic1']['net']
        self.critic1_opt = algo_init['critic1']['opt']
        self.critic2 = algo_init['critic2']['net']
        self.critic2_opt = algo_init['critic2']['opt']
        # Target critics start as frozen copies; updated softly in _train.
        self.critic1_target = copy.deepcopy(self.critic1)
        self.critic2_target = copy.deepcopy(self.critic2)
        if args['use_automatic_entropy_tuning']:
            # Learnable temperature (alpha) for the entropy term.
            self.log_alpha = algo_init['log_alpha']['net']
            self.alpha_opt = algo_init['log_alpha']['opt']
            self.target_entropy = algo_init['log_alpha']['target_entropy']
        if (self.args['lagrange_thresh'] >= 0):
            # Lagrange multiplier for the conservative-penalty budget.
            self.log_alpha_prime = algo_init['log_alpha_prime']['net']
            self.alpha_prime_opt = algo_init['log_alpha_prime']['opt']
        self.critic_criterion = nn.MSELoss()
        self._n_train_steps_total = 0
        self._current_epoch = 0

    def _get_tensor_values(self, obs, actions, network):
        """Evaluate ``network`` on each obs paired with ``num_repeat``
        candidate actions; returns shape (batch, num_repeat, 1)."""
        action_shape = actions.shape[0]
        obs_shape = obs.shape[0]
        num_repeat = int((action_shape / obs_shape))
        # Tile observations so each one lines up with its action candidates.
        obs_temp = obs.unsqueeze(1).repeat(1, num_repeat, 1).view((obs.shape[0] * num_repeat), obs.shape[1])
        preds = network(obs_temp, actions)
        preds = preds.view(obs.shape[0], num_repeat, 1)
        return preds

    def _get_policy_actions(self, obs, num_actions, network=None):
        """Sample ``num_actions`` actions per observation from ``network``
        (the reparameterized policy); also returns their log-probs for
        continuous action spaces."""
        obs_temp = obs.unsqueeze(1).repeat(1, num_actions, 1).view((obs.shape[0] * num_actions), obs.shape[1])
        (new_obs_actions, new_obs_log_pi) = network(obs_temp, reparameterize=True, return_log_prob=True)
        if (not self.args['discrete']):
            return (new_obs_actions, new_obs_log_pi.view(obs.shape[0], num_actions, 1))
        else:
            return new_obs_actions

    def forward(self, obs, reparameterize=True, return_log_prob=True):
        """Sample an action (and optionally its log-prob) from the
        tanh-squashed policy for ``obs``."""
        log_prob = None
        tanh_normal = self.actor(obs, reparameterize=reparameterize)
        if return_log_prob:
            if (reparameterize is True):
                # rsample keeps the pathwise gradient through the sample.
                (action, pre_tanh_value) = tanh_normal.rsample(return_pretanh_value=True)
            else:
                (action, pre_tanh_value) = tanh_normal.sample(return_pretanh_value=True)
            # The pre-tanh value is needed for a numerically stable log-prob.
            log_prob = tanh_normal.log_prob(action, pre_tanh_value=pre_tanh_value)
            log_prob = log_prob.sum(dim=1, keepdim=True)
        elif (reparameterize is True):
            action = tanh_normal.rsample()
        else:
            action = tanh_normal.sample()
        return (action, log_prob)

    def _train(self, batch):
        """One CQL gradient step on a sampled batch: alpha update, actor
        update, target computation, conservative critic update, and soft
        target sync. The statement/backward ordering is load-bearing."""
        self._current_epoch += 1
        batch = to_torch(batch, torch.float, device=self.args['device'])
        rewards = batch.rew
        terminals = batch.done
        obs = batch.obs
        actions = batch.act
        next_obs = batch.obs_next
        (new_obs_actions, log_pi) = self.forward(obs)
        # --- entropy temperature (alpha) update ---
        if self.args['use_automatic_entropy_tuning']:
            alpha_loss = (- (self.log_alpha * (log_pi + self.target_entropy).detach()).mean())
            self.alpha_opt.zero_grad()
            alpha_loss.backward()
            self.alpha_opt.step()
            alpha = self.log_alpha.exp()
        else:
            alpha_loss = 0
            alpha = 1
        # --- actor update (behavior cloning warm-up, then SAC objective) ---
        if (self._current_epoch < self.args['policy_bc_steps']):
            policy_log_prob = self.actor.log_prob(obs, actions)
            policy_loss = ((alpha * log_pi) - policy_log_prob).mean()
        else:
            q_new_actions = torch.min(self.critic1(obs, new_obs_actions), self.critic2(obs, new_obs_actions))
            policy_loss = ((alpha * log_pi) - q_new_actions).mean()
        self.actor_opt.zero_grad()
        policy_loss.backward()
        self.actor_opt.step()
        # --- Bellman targets for the critics ---
        q1_pred = self.critic1(obs, actions)
        q2_pred = self.critic2(obs, actions)
        (new_next_actions, new_log_pi) = self.forward(next_obs, reparameterize=True, return_log_prob=True)
        (new_curr_actions, new_curr_log_pi) = self.forward(obs, reparameterize=True, return_log_prob=True)
        if (self.args['type_q_backup'] == 'max'):
            target_q_values = torch.max(self.critic1_target(next_obs, new_next_actions), self.critic2_target(next_obs, new_next_actions))
            target_q_values = (target_q_values - (alpha * new_log_pi))
        elif (self.args['type_q_backup'] == 'min'):
            target_q_values = torch.min(self.critic1_target(next_obs, new_next_actions), self.critic2_target(next_obs, new_next_actions))
            target_q_values = (target_q_values - (alpha * new_log_pi))
        elif (self.args['type_q_backup'] == 'medium'):
            # Convex mix of min and max backups, weighted by q_backup_lmbda.
            target_q1_next = self.critic1_target(next_obs, new_next_actions)
            target_q2_next = self.critic2_target(next_obs, new_next_actions)
            target_q_values = ((self.args['q_backup_lmbda'] * torch.min(target_q1_next, target_q2_next)) + ((1 - self.args['q_backup_lmbda']) * torch.max(target_q1_next, target_q2_next)))
            target_q_values = (target_q_values - (alpha * new_log_pi))
        else:
            # Default: max over 10 sampled policy actions, then min over critics.
            (next_actions_temp, _) = self._get_policy_actions(next_obs, num_actions=10, network=self.forward)
            target_qf1_values = self._get_tensor_values(next_obs, next_actions_temp, network=self.critic1).max(1)[0].view((-1), 1)
            target_qf2_values = self._get_tensor_values(next_obs, next_actions_temp, network=self.critic2).max(1)[0].view((-1), 1)
            target_q_values = torch.min(target_qf1_values, target_qf2_values)
        q_target = ((self.args['reward_scale'] * rewards) + (((1.0 - terminals) * self.args['discount']) * target_q_values.detach()))
        qf1_loss = self.critic_criterion(q1_pred, q_target)
        qf2_loss = self.critic_criterion(q2_pred, q_target)
        # --- conservative (CQL) penalty: Qs at random / policy actions ---
        random_actions_tensor = torch.FloatTensor((q2_pred.shape[0] * self.args['num_random']), actions.shape[(-1)]).uniform_((-1), 1).to(self.args['device'])
        (curr_actions_tensor, curr_log_pis) = self._get_policy_actions(obs, num_actions=self.args['num_random'], network=self.forward)
        (new_curr_actions_tensor, new_log_pis) = self._get_policy_actions(next_obs, num_actions=self.args['num_random'], network=self.forward)
        q1_rand = self._get_tensor_values(obs, random_actions_tensor, network=self.critic1)
        q2_rand = self._get_tensor_values(obs, random_actions_tensor, network=self.critic2)
        q1_curr_actions = self._get_tensor_values(obs, curr_actions_tensor, network=self.critic1)
        q2_curr_actions = self._get_tensor_values(obs, curr_actions_tensor, network=self.critic2)
        q1_next_actions = self._get_tensor_values(obs, new_curr_actions_tensor, network=self.critic1)
        q2_next_actions = self._get_tensor_values(obs, new_curr_actions_tensor, network=self.critic2)
        cat_q1 = torch.cat([q1_rand, q1_pred.unsqueeze(1), q1_next_actions, q1_curr_actions], 1)
        cat_q2 = torch.cat([q2_rand, q2_pred.unsqueeze(1), q2_next_actions, q2_curr_actions], 1)
        if (self.args['min_q_version'] == 3):
            # Importance-sampled variant: subtract the log densities.
            random_density = np.log((0.5 ** curr_actions_tensor.shape[(-1)]))
            cat_q1 = torch.cat([(q1_rand - random_density), (q1_next_actions - new_log_pis.detach()), (q1_curr_actions - curr_log_pis.detach())], 1)
            cat_q2 = torch.cat([(q2_rand - random_density), (q2_next_actions - new_log_pis.detach()), (q2_curr_actions - curr_log_pis.detach())], 1)
        min_qf1_loss = ((torch.logsumexp((cat_q1 / self.args['temp']), dim=1).mean() * self.args['min_q_weight']) * self.args['temp'])
        min_qf2_loss = ((torch.logsumexp((cat_q2 / self.args['temp']), dim=1).mean() * self.args['min_q_weight']) * self.args['temp'])
        # Penalty is logsumexp(Q) minus Q on dataset actions.
        min_qf1_loss = (min_qf1_loss - (q1_pred.mean() * self.args['min_q_weight']))
        min_qf2_loss = (min_qf2_loss - (q2_pred.mean() * self.args['min_q_weight']))
        if (self.args['lagrange_thresh'] >= 0):
            # Dual (Lagrange) update on the penalty multiplier alpha_prime.
            alpha_prime = torch.clamp(self.log_alpha_prime.exp(), min=0.0, max=1000000.0)
            min_qf1_loss = (alpha_prime * (min_qf1_loss - self.args['lagrange_thresh']))
            min_qf2_loss = (alpha_prime * (min_qf2_loss - self.args['lagrange_thresh']))
            self.alpha_prime_opt.zero_grad()
            alpha_prime_loss = (((- min_qf1_loss) - min_qf2_loss) * 0.5)
            # retain_graph: the same penalty terms feed the critic losses below.
            alpha_prime_loss.backward(retain_graph=True)
            self.alpha_prime_opt.step()
        # 'explore' weights the Bellman loss against the conservative penalty.
        qf1_loss = ((self.args['explore'] * qf1_loss) + ((2 - self.args['explore']) * min_qf1_loss))
        qf2_loss = ((self.args['explore'] * qf2_loss) + ((2 - self.args['explore']) * min_qf2_loss))
        self.critic1_opt.zero_grad()
        # retain_graph: shared tensors are reused by the qf2 backward pass.
        qf1_loss.backward(retain_graph=True)
        self.critic1_opt.step()
        self.critic2_opt.zero_grad()
        qf2_loss.backward()
        self.critic2_opt.step()
        # Polyak-average the target critics toward the online critics.
        self._sync_weight(self.critic1_target, self.critic1, self.args['soft_target_tau'])
        self._sync_weight(self.critic2_target, self.critic2, self.args['soft_target_tau'])
        self._n_train_steps_total += 1

    def get_model(self):
        return self.actor

    def get_policy(self):
        return self.actor

    def train(self, train_buffer, val_buffer, callback_fn):
        """Outer training loop: steps_per_epoch gradient steps per epoch,
        then evaluate via callback_fn and log. Returns the trained policy."""
        for epoch in range(1, (self.args['max_epoch'] + 1)):
            for step in range(1, (self.args['steps_per_epoch'] + 1)):
                train_data = train_buffer.sample(self.args['batch_size'])
                self._train(train_data)
            res = callback_fn(self.get_policy())
            self.log_res(epoch, res)
        return self.get_policy()
def radial_distortion(camera, x):
    """Apply a two-coefficient radial distortion to normalized points.

    For each point p (last axis = coordinates), scales it by
    ``1 + r^2 * (k1 + k2 * r^2)`` where ``r^2`` is the squared radius and
    ``k1``/``k2`` are read from ``camera``.
    """
    radius_sq = np.square(x).sum(axis=-1, keepdims=True)
    scale = 1.0 + radius_sq * (camera.k1 + camera.k2 * radius_sq)
    return x * scale