code
stringlengths
101
5.91M
def is_unique(layout, axis: (Integral | None) = None) -> bool:
    """Return True when the layout's values are unique along *axis* (all axes when None)."""
    # The internal API expects the axis negated; None passes through untouched.
    negaxis = None if axis is None else -axis
    nplike = layout._backend.index_nplike
    starts = ak.index.Index64.zeros(1, nplike=nplike)
    parents = ak.index.Index64.zeros(layout.length, nplike=nplike)
    return layout._is_unique(negaxis, starts, parents, 1)
def idctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, orthogonalize=None):
    """Inverse multidimensional DCT: a thin dispatch wrapper over the pocketfft backend."""
    return _execute(
        _pocketfft.idctn, x, type, s, axes, norm, overwrite_x, workers, orthogonalize
    )
def to_pair(value, name):
    """Normalize *value* to a pair: 2-element iterables pass through, scalars are duplicated.

    Raises ValueError when an iterable does not have exactly two elements.
    """
    if not isinstance(value, Iterable):
        # Scalar: duplicate it into a 2-tuple.
        return tuple(repeat(value, 2))
    if (len(value) != 2):
        raise ValueError('Expected `{}` to have exactly 2 elements, got: ({})'.format(name, value))
    return value
class SimplifiedBasicBlock(BaseModule):
    """Basic residual block without the final post-add ReLU.

    Mirrors a standard BasicBlock but skips the last activation after the
    residual addition; norm layers are optional (disabled when ``norm_cfg``
    is None, in which case the convs carry a bias instead).

    Fix: ``norm1``/``norm2`` are restored as properties. As plain methods
    (decorator lost), ``forward``'s ``self.norm1(out)`` would raise
    TypeError, since the method accepts only ``self``.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
                 style='pytorch', with_cp=False, conv_cfg=None,
                 norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_fg=None):
        super(SimplifiedBasicBlock, self).__init__(init_fg)
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'
        assert not with_cp, 'Not implemented yet.'
        self.with_norm = norm_cfg is not None
        # Convs carry a bias only when there is no norm layer to absorb it.
        with_bias = norm_cfg is None
        self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, 3, stride=stride,
                                      padding=dilation, dilation=dilation, bias=with_bias)
        if self.with_norm:
            self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
            self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg, planes, planes, 3, padding=1,
                                      bias=with_bias)
        if self.with_norm:
            self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
            self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self):
        """The first norm module, or None when norms are disabled."""
        return getattr(self, self.norm1_name) if self.with_norm else None

    @property
    def norm2(self):
        """The second norm module, or None when norms are disabled."""
        return getattr(self, self.norm2_name) if self.with_norm else None

    def forward(self, x):
        """conv-norm-relu-conv-norm, then add the (optionally downsampled) identity."""
        identity = x
        out = self.conv1(x)
        if self.with_norm:
            out = self.norm1(out)
        out = self.relu(out)
        out = self.conv2(out)
        if self.with_norm:
            out = self.norm2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        return out
class add_attn(nn.Module):
    """Additive (gated) attention: reweights feature map x with a gating signal g."""

    def __init__(self, x_channels, g_channels=256):
        super(add_attn, self).__init__()
        self.W = nn.Sequential(
            nn.Conv2d(x_channels, x_channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(x_channels),
        )
        # theta halves x's spatial size; phi projects g into x's channel space.
        self.theta = nn.Conv2d(x_channels, x_channels, kernel_size=2, stride=2,
                               padding=0, bias=False)
        self.phi = nn.Conv2d(g_channels, x_channels, kernel_size=1, stride=1,
                             padding=0, bias=True)
        self.psi = nn.Conv2d(x_channels, out_channels=1, kernel_size=1, stride=1,
                             padding=0, bias=True)

    def forward(self, x, g):
        """Return x scaled by a sigmoid attention map derived from (x, g)."""
        x_size = x.size()
        assert x_size[0] == g.size(0)
        theta_x = self.theta(x)
        # Bring the gating signal to theta_x's spatial resolution before adding.
        phi_g = F.interpolate(self.phi(g), size=theta_x.size()[2:],
                              mode='bilinear', align_corners=False)
        attn = torch.sigmoid(self.psi(F.relu(theta_x + phi_g, inplace=True)))
        # Upsample the attention map back to x's resolution and apply it.
        attn = F.interpolate(attn, size=x_size[2:], mode='bilinear',
                             align_corners=False)
        return self.W(attn.expand_as(x) * x)
def swsl_resnext101_32x16d(pretrained=True, **kwargs):
    """ResNeXt-101 32x16d with semi-weakly supervised ImageNet pretraining.

    NOTE(review): a registration decorator (likely ``@register_model``)
    appears truncated in the original source — confirm against upstream.
    """
    args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32,
                base_width=16, **kwargs)
    return _create_resnet('swsl_resnext101_32x16d', pretrained, **args)
def gray2rgb(img):
    """Convert a grayscale image (HxW or HxWx1) into a 3-channel RGB image."""
    if img.ndim == 2:
        # cvtColor expects an explicit channel axis.
        img = img[..., None]
    return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
def __plot_scores_kde(scores_pos, scores_neg, fig_name):
    """Plot KDEs of positive (blue) and negative (red) scores and save both to one PDF."""
    plt.rcParams['figure.figsize'] = [7.0, 3.5]
    plt.rcParams['figure.autolayout'] = True
    fig_pos = plt.figure()
    sns.kdeplot(scores_pos, bw=0.5, color='blue')
    fig_neg = plt.figure()
    sns.kdeplot(scores_neg, bw=0.5, color='red')
    # Dump every currently-open figure into a single multi-page PDF.
    pdf = PdfPages(fig_name)
    for num in plt.get_fignums():
        plt.figure(num).savefig(pdf, format='pdf')
    pdf.close()
    fig_pos.clear()
    fig_neg.clear()
    plt.close(fig_pos)
    plt.close(fig_neg)
    plt.cla()
def se_resnext101_32x4d(num_classes=1000):
    """Build SE-ResNeXt-101 (32x4d) and load its ImageNet-pretrained weights."""
    model = SENet(SEResNeXtBottleneck, [3, 4, 23, 3], groups=32, reduction=16,
                  dropout_p=None, inplanes=64, input_3x3=False,
                  downsample_kernel_size=1, downsample_padding=0,
                  num_classes=num_classes)
    settings = pretrained_settings['se_resnext101_32x4d']['imagenet']
    initialize_pretrained_model(model, num_classes, settings)
    return model
class PyList(gdb.Command):
    """gdb command ``py-list``: print Python source around the selected bytecode frame.

    Accepts no argument (centre on the current line), one start line, or
    ``start, end``.
    """

    def __init__(self):
        gdb.Command.__init__(self, 'py-list', gdb.COMMAND_FILES, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        import re
        start = end = None
        # "N" -> list ten lines starting at N; "N, M" -> explicit range.
        m = re.match('\\s*(\\d+)\\s*', args)
        if m:
            start = int(m.group(0))
            end = start + 10
        m = re.match('\\s*(\\d+)\\s*,\\s*(\\d+)\\s*', args)
        if m:
            start, end = map(int, m.groups())
        frame = Frame.get_selected_bytecode_frame()
        if not frame:
            print('Unable to locate gdb frame for python bytecode interpreter')
            return
        pyop = frame.get_pyop()
        if (not pyop) or pyop.is_optimized_out():
            print('Unable to read information on python frame')
            return
        filename = pyop.filename()
        lineno = pyop.current_line_num()
        if start is None:
            # Default window: five lines either side of the current line.
            start = lineno - 5
            end = lineno + 5
        if start < 1:
            start = 1
        try:
            f = open(os_fsencode(filename), 'r')
        except IOError as err:
            sys.stdout.write('Unable to open %s: %s\n' % (filename, err))
            return
        with f:
            for offset, text in enumerate(f.readlines()[start - 1:end]):
                label = str(offset + start)
                if offset + start == lineno:
                    # Mark the line currently being executed.
                    label = '>' + label
                sys.stdout.write('%4s %s' % (label, text))
def extract_specific_category_data(category, images, labels, N=None):
    """Split (images, labels) into samples of *category* (at most N) and the rest.

    Returns ((extracted_images, extracted_labels),
             (remaining_images, remaining_labels)).
    """
    idx = np.where(labels == category)[0]
    if N is not None:
        # Cap how many samples of the category are pulled out.
        idx = idx[:N]
    picked = (images[idx], labels[idx])
    remainder = (np.delete(images, idx, 0), np.delete(labels, idx))
    return (picked, remainder)
def read_continuous_bipartite_matrix(fh):
    """Parse a whitespace-delimited real-valued matrix from file handle *fh*.

    Returns (m, G) where m is the column count (None for empty input) and
    G[row][col] holds floats.  Returns None when a row's column count differs
    from the first row's (an error is written to stderr).
    """
    m = None
    n = 0
    G = dict()
    for line in fh:
        G[n] = dict()
        sline = line.split()
        if m is None:  # first row fixes the expected column count
            m = len(sline)
        elif len(sline) != m:
            sys.stderr.write('Error: expecting %d columns but got %d on row %d\n'
                             % (m, len(sline), n + 1))
            return None
        # Fixed: Python-2 `xrange` is a NameError on Python 3.
        for j in range(m):
            G[n][j] = float(sline[j])
        n += 1
    return (m, G)
class KMeans(object):
    """faiss-based KMeans on GPU: trains centroids and assigns vectors to clusters."""

    def __init__(self, num_cluster, seed, hidden_size, gpu_id=0, device='cpu'):
        self.seed = seed
        self.num_cluster = num_cluster
        self.max_points_per_centroid = 4096
        self.min_points_per_centroid = 0
        # Fixed: the gpu_id argument was previously ignored (hard-coded to 0),
        # so the index always lived on GPU 0 regardless of the caller's choice.
        self.gpu_id = gpu_id
        self.device = device
        self.first_batch = True
        self.hidden_size = hidden_size
        self.clus, self.index = self.__init_cluster(self.hidden_size)
        self.centroids = []

    def __init_cluster(self, hidden_size, verbose=False, niter=20, nredo=5,
                       max_points_per_centroid=4096, min_points_per_centroid=0):
        """Create the faiss clustering object and a flat-L2 GPU index."""
        print(' cluster train iterations:', niter)
        clus = faiss.Clustering(hidden_size, self.num_cluster)
        clus.verbose = verbose
        clus.niter = niter
        clus.nredo = nredo
        clus.seed = self.seed
        clus.max_points_per_centroid = max_points_per_centroid
        clus.min_points_per_centroid = min_points_per_centroid
        res = faiss.StandardGpuResources()
        res.noTempMemory()
        cfg = faiss.GpuIndexFlatConfig()
        cfg.useFloat16 = False
        cfg.device = self.gpu_id
        index = faiss.GpuIndexFlatL2(res, hidden_size, cfg)
        return (clus, index)

    def train(self, x):
        """Train centroids on x (skipped unless there are more points than clusters)."""
        if x.shape[0] > self.num_cluster:
            self.clus.train(x, self.index)
        centroids = faiss.vector_to_array(self.clus.centroids).reshape(
            self.num_cluster, self.hidden_size)
        centroids = torch.Tensor(centroids).to(self.device)
        # L2-normalize so nearest-centroid lookups behave like cosine similarity.
        self.centroids = nn.functional.normalize(centroids, p=2, dim=1)

    def query(self, x):
        """Return (cluster_id, centroid) for each row of x via 1-NN index search."""
        D, I = self.index.search(x, 1)
        seq2cluster = torch.LongTensor([int(n[0]) for n in I]).to(self.device)
        return (seq2cluster, self.centroids[seq2cluster])
class BaseClass(TemporaryShowyourworkRepository):
    """Showyourwork integration test: cached intermediate data survives a clean."""

    local_build_only = True

    def customize(self):
        """Enable cache optimization and install the Snakefile plus the A/B/C scripts."""
        with edit_yaml(self.cwd / 'showyourwork.yml') as config:
            config['optimize_caching'] = True
            config['dependencies'] = {'src/tex/ms.tex': ['src/data/C.dat']}
        with open(self.cwd / 'Snakefile', 'w') as f:
            f.write(SNAKEFILE)
        scripts = self.cwd / 'src' / 'scripts'
        for fname, contents in (('A.py', A), ('B.py', B), ('C.py', C)):
            with open(scripts / fname, 'w') as f:
                f.write(contents)

    def build_local(self):
        """Build, clean (stashing the local cache when remote caching is off), rebuild."""
        print(f'[{self.repo}] Building the article locally (1/2)...')
        get_stdout('CI=false showyourwork build', shell=True, cwd=self.cwd)
        if not self.cache:
            # Preserve the on-disk cache across the forced clean.
            get_stdout('mv .showyourwork/cache .', shell=True, cwd=self.cwd)
        get_stdout('CI=false showyourwork clean --force', shell=True, cwd=self.cwd)
        if not self.cache:
            get_stdout('mkdir -p .showyourwork && mv cache .showyourwork/',
                       shell=True, cwd=self.cwd)
        print(f'[{self.repo}] Building the article locally (2/2)...')
        get_stdout('CI=false showyourwork build', shell=True, cwd=self.cwd)

    def check_build(self):
        """C.dat must exist (restored from cache); A.dat must not have been regenerated."""
        data_dir = (self.cwd / 'src') / 'data'
        assert (data_dir / 'C.dat').exists()
        assert not (data_dir / 'A.dat').exists()
class ExpandReduceCUDADevice(pm.ExpandTransformation):
    """Expand a Reduce library node into a CUB device-wide (or segmented) reduction.

    NOTE(review): a decorator (likely ``@dace.library.expansion``) appears
    truncated in the original source — confirm against upstream dace.
    """

    environments = [CUDA]
    # Reduction types served by dedicated CUB kernels instead of a custom functor.
    _SPECIAL_RTYPES = {dtypes.ReductionType.Min_Location: 'ArgMin', dtypes.ReductionType.Max_Location: 'ArgMax'}

    def expansion(node: 'Reduce', state: SDFGState, sdfg: SDFG):
        """Generate CUB reduction code; falls back to pure expansions when unsupported.

        Emits CUDA global/init/exit code plus a host-side tasklet that invokes
        the generated ``__dace_reduce_*`` wrapper, then rewires the node's
        connectors to that tasklet.
        """
        from dace.codegen.prettycode import CodeIOStream
        from dace.codegen.targets.cpp import unparse_cr_split, cpp_array_expr
        node.validate(sdfg, state)
        input_edge: graph.MultiConnectorEdge = state.in_edges(node)[0]
        output_edge: graph.MultiConnectorEdge = state.out_edges(node)[0]
        insubset = dcpy(input_edge.data.subset)
        isqdim = insubset.squeeze()
        input_dims = len(input_edge.data.subset)
        output_dims = len(output_edge.data.subset)
        input_data = sdfg.arrays[input_edge.data.data]
        output_data = sdfg.arrays[output_edge.data.data]
        # Axes being reduced; all dimensions when node.axes is None.
        axes = (node.axes if (node.axes is not None) else [i for i in range(input_dims)])
        # Only axes surviving squeezing matter; with none left, the pure expansion suffices.
        sqaxes = [axis for axis in axes if (axis in isqdim)]
        if (not sqaxes):
            return ExpandReducePure.expansion(node, state, sdfg)
        cuda_globalcode = CodeIOStream()
        cuda_initcode = CodeIOStream()
        cuda_exitcode = CodeIOStream()
        host_globalcode = CodeIOStream()
        host_localcode = CodeIOStream()
        output_memlet = output_edge.data
        redtype = detect_reduction_type(node.wcr)
        node_id = state.node_id(node)
        state_id = sdfg.node_id(state)
        # Unique identifier embedded into all generated symbol names.
        idstr = '{sdfg}_{state}_{node}'.format(sdfg=sdfg.name, state=state_id, node=node_id)
        if node.out_connectors:
            # NOTE(review): next() on a dict view raises TypeError; this likely
            # needs next(iter(node.out_connectors.values())) — confirm upstream.
            dtype = next(node.out_connectors.values())
        else:
            dtype = sdfg.arrays[output_memlet.data].dtype
        output_type = dtype.ctype
        if (node.identity is None):
            raise ValueError('For device reduce nodes, initial value must be specified')
        if (redtype == dtypes.ReductionType.Custom):
            # Custom WCR: emit a functor struct and pass an instance to CUB.
            (body, [arg1, arg2]) = unparse_cr_split(sdfg, node.wcr)
            cuda_globalcode.write('\n struct __reduce_{id} {{\n template <typename T>\n DACE_HDFI T operator()(const T &{arg1}, const T &{arg2}) const {{\n {contents}\n }}\n }};'.format(id=idstr, arg1=arg1, arg2=arg2, contents=body), sdfg, state_id, node_id)
            reduce_op = (((', __reduce_' + idstr) + '(), ') + symstr(node.identity))
        elif (redtype in ExpandReduceCUDADevice._SPECIAL_RTYPES):
            # ArgMin/ArgMax kernels take no extra operator argument.
            reduce_op = ''
        else:
            credtype = ('dace::ReductionType::' + str(redtype)[(str(redtype).find('.') + 1):])
            reduce_op = (((', dace::_wcr_fixed<%s, %s>()' % (credtype, output_type)) + ', ') + symstr(node.identity))
        input_memlet = input_edge.data
        reduce_shape = input_memlet.subset.bounding_box_size()
        num_items = ' * '.join((symstr(s) for s in reduce_shape))
        # Over-approximate the subset when it references symbols not free in the SDFG,
        # so the generated sizes are expressible at init time.
        overapprox_memlet = dcpy(input_memlet)
        if any(((str(s) not in sdfg.free_symbols.union(sdfg.constants.keys())) for s in overapprox_memlet.subset.free_symbols)):
            propagation.propagate_states(sdfg)
            for (p, r) in state.ranges.items():
                overapprox_memlet = propagation.propagate_subset([overapprox_memlet], input_data, [p], r)
        overapprox_shape = overapprox_memlet.subset.bounding_box_size()
        overapprox_items = ' * '.join((symstr(s) for s in overapprox_shape))
        input_dims = input_memlet.subset.dims()
        output_dims = output_memlet.subset.data_dims()
        # CUB only supports reducing all axes or a contiguous suffix of axes.
        reduce_all_axes = ((node.axes is None) or (len(node.axes) == input_dims))
        if reduce_all_axes:
            reduce_last_axes = False
        else:
            reduce_last_axes = (sorted(node.axes) == list(range((input_dims - len(node.axes)), input_dims)))
        if ((not reduce_all_axes) and (not reduce_last_axes)):
            warnings.warn('Multiple axis reductions not supported with this expansion. Falling back to the pure expansion.')
            return ExpandReducePureSequentialDim.expansion(node, state, sdfg)
        if (input_data.storage not in [dtypes.StorageType.GPU_Global, dtypes.StorageType.CPU_Pinned]):
            warnings.warn('Input of GPU reduction must either reside in global GPU memory or pinned CPU memory')
            return ExpandReducePure.expansion(node, state, sdfg)
        if (output_data.storage not in [dtypes.StorageType.GPU_Global, dtypes.StorageType.CPU_Pinned]):
            warnings.warn('Output of GPU reduction must either reside in global GPU memory or pinned CPU memory')
            return ExpandReducePure.expansion(node, state, sdfg)
        kname = (ExpandReduceCUDADevice._SPECIAL_RTYPES[redtype] if (redtype in ExpandReduceCUDADevice._SPECIAL_RTYPES) else 'Reduce')
        # Persistent CUB temporary-storage pointer + size, one pair per node.
        cuda_globalcode.write('\n void *__cub_storage_{sdfg}_{state}_{node} = NULL;\n size_t __cub_ssize_{sdfg}_{state}_{node} = 0;\n '.format(sdfg=sdfg.name, state=state_id, node=node_id), sdfg, state_id, node)
        if reduce_all_axes:
            reduce_type = 'DeviceReduce'
            reduce_range = overapprox_items
            reduce_range_def = 'size_t num_items'
            reduce_range_use = 'num_items'
            reduce_range_call = num_items
        elif reduce_last_axes:
            # Segmented reduction: leading axes become segments, trailing axes
            # form the per-segment reduction extent.
            num_reduce_axes = len(node.axes)
            not_reduce_axes = reduce_shape[:(- num_reduce_axes)]
            reduce_axes = reduce_shape[(- num_reduce_axes):]
            overapprox_not_reduce_axes = overapprox_shape[:(- num_reduce_axes)]
            overapprox_reduce_axes = overapprox_shape[(- num_reduce_axes):]
            num_segments = ' * '.join([symstr(s) for s in not_reduce_axes])
            segment_size = ' * '.join([symstr(s) for s in reduce_axes])
            overapprox_num_segments = ' * '.join([symstr(s) for s in overapprox_not_reduce_axes])
            overapprox_segment_size = ' * '.join([symstr(s) for s in overapprox_reduce_axes])
            reduce_type = 'DeviceSegmentedReduce'
            iterator = 'dace::stridedIterator({size})'.format(size=overapprox_segment_size)
            reduce_range = '{num}, {it}, {it} + 1'.format(num=overapprox_num_segments, it=iterator)
            reduce_range_def = 'size_t num_segments, size_t segment_size'
            iterator_use = 'dace::stridedIterator(segment_size)'
            reduce_range_use = 'num_segments, {it}, {it} + 1'.format(it=iterator_use)
            reduce_range_call = ('%s, %s' % (num_segments, segment_size))
        # Init code: dry-run CUB with null pointers to size, then allocate, storage.
        cuda_initcode.write('\n cub::{reduce_type}::{kname}(nullptr, __cub_ssize_{sdfg}_{state}_{node},\n ({intype}*)nullptr, ({outtype}*)nullptr, {reduce_range}{redop});\n cudaMalloc(&__cub_storage_{sdfg}_{state}_{node}, __cub_ssize_{sdfg}_{state}_{node});\n'.format(sdfg=sdfg.name, state=state_id, node=node_id, reduce_type=reduce_type, reduce_range=reduce_range, redop=reduce_op, intype=input_data.dtype.ctype, outtype=output_data.dtype.ctype, kname=kname), sdfg, state_id, node)
        cuda_exitcode.write('cudaFree(__cub_storage_{sdfg}_{state}_{node});'.format(sdfg=sdfg.name, state=state_id, node=node_id), sdfg, state_id, node)
        # CUDA-side exported wrapper that launches the CUB reduction on a stream.
        cuda_globalcode.write('\nDACE_EXPORTED void __dace_reduce_{id}({intype} *input, {outtype} *output, {reduce_range_def}, cudaStream_t stream);\nvoid __dace_reduce_{id}({intype} *input, {outtype} *output, {reduce_range_def}, cudaStream_t stream)\n{{\ncub::{reduce_type}::{kname}(__cub_storage_{id}, __cub_ssize_{id},\n input, output, {reduce_range_use}{redop}, stream);\n}}\n '.format(id=idstr, intype=input_data.dtype.ctype, outtype=output_data.dtype.ctype, reduce_type=reduce_type, reduce_range_def=reduce_range_def, reduce_range_use=reduce_range_use, kname=kname, redop=reduce_op))
        # Host-side declaration of the same wrapper for the tasklet to call.
        host_globalcode.write('\nDACE_EXPORTED void __dace_reduce_{id}({intype} *input, {outtype} *output, {reduce_range_def}, cudaStream_t stream);\n '.format(id=idstr, reduce_range_def=reduce_range_def, intype=input_data.dtype.ctype, outtype=output_data.dtype.ctype), sdfg, state_id, node)
        host_localcode.write('__dace_reduce_{id}(_in, _out, {reduce_range_call}, __dace_current_stream);'.format(id=idstr, reduce_range_call=reduce_range_call))
        # The expansion result: a CPP tasklet calling the generated wrapper.
        tnode = dace.nodes.Tasklet('reduce', {'_in': dace.pointer(input_data.dtype)}, {'_out': dace.pointer(output_data.dtype)}, host_localcode.getvalue(), language=dace.Language.CPP)
        sdfg.append_global_code(host_globalcode.getvalue())
        sdfg.append_global_code(cuda_globalcode.getvalue(), 'cuda')
        sdfg.append_init_code(cuda_initcode.getvalue(), 'cuda')
        sdfg.append_exit_code(cuda_exitcode.getvalue(), 'cuda')
        # Rewire the original edges/connectors onto the new tasklet's ports.
        input_edge._dst_conn = '_in'
        output_edge._src_conn = '_out'
        node.add_in_connector('_in')
        node.add_out_connector('_out')
        return tnode
def get_xy_fd():
    """Build a tiny synthetic DIN-style dataset.

    Returns (x, y, feature_columns, behavior_feature_list) for three users.
    """
    feature_columns = [
        SparseFeat('user', 3, embedding_dim=8),
        SparseFeat('gender', 2, embedding_dim=8),
        SparseFeat('item', (3 + 1), embedding_dim=8),
        SparseFeat('item_gender', (2 + 1), embedding_dim=8),
        DenseFeat('score', 1),
    ]
    # The two history features share one sequence-length field.
    feature_columns += [
        VarLenSparseFeat(SparseFeat('hist_item', (3 + 1), embedding_dim=8), 4,
                         length_name='seq_length'),
        VarLenSparseFeat(SparseFeat('hist_item_gender', (2 + 1), embedding_dim=8), 4,
                         length_name='seq_length'),
    ]
    behavior_feature_list = ['item', 'item_gender']
    feature_dict = {
        'user': np.array([0, 1, 2]),
        'gender': np.array([0, 1, 0]),
        'item': np.array([1, 2, 3]),
        'item_gender': np.array([1, 2, 1]),
        'hist_item': np.array([[1, 2, 3, 0], [1, 2, 3, 0], [1, 2, 0, 0]]),
        'hist_item_gender': np.array([[1, 1, 2, 0], [2, 1, 1, 0], [2, 1, 0, 0]]),
        'score': np.array([0.1, 0.2, 0.3]),
        'seq_length': np.array([3, 3, 2]),
    }
    x = {name: feature_dict[name] for name in get_feature_names(feature_columns)}
    y = np.array([1, 0, 1])
    return (x, y, feature_columns, behavior_feature_list)
def _icmp(term, smt):
    """Encode an integer-comparison instruction as a bit-vector SMT term.

    NOTE(review): a dispatch decorator (registering this handler for
    ``(IcmpInst, BaseSMTEncoder)``) appears truncated in the original source.
    """
    lhs = smt.eval(term.x)
    rhs = smt.eval(term.y)
    # Look up the predicate's SMT operator, then widen the boolean result.
    result = smt._icmp_ops[term.pred](lhs, rhs)
    return bool_to_BitVec(result)
def execute_predicted_sparql(sparql):
    """Resolve property/entity names in a predicted SPARQL query, then execute it.

    Property names (wdt:/p:/ps:/pq:) are mapped to PIDs and entity names (wd:)
    to QIDs, consulting the Mongo caches and falling back to the Wikidata
    SPARQL endpoint / wbsearchentities API.  Returns (results, rewritten_sparql);
    results is [] when a name cannot be resolved.

    NOTE(review): the endpoint URL literals and one str.replace() call were
    scrubbed/corrupted in the original source; the values below are
    reconstructed from the standard Wikidata endpoints — confirm upstream.
    """
    sparql = sparql.replace('wdt:instance_of/wdt:subclass_of', 'wdt:P31/wdt:P279')
    # Reconstructed: standard Wikidata SPARQL endpoint (original literal scrubbed).
    sparql_endpoint = 'https://query.wikidata.org/sparql'
    property_pattern = '(wdt:|p:|ps:|pq:)([a-zA-Z_\\(\\)(\\/_)]+)(?![1-9])'
    extracted_property_names = [x[1] for x in re.findall(property_pattern, sparql)]
    pid_replacements = {}
    for replaced_property_name in extracted_property_names:
        if not name_to_pid_mapping.find_one({'name': replaced_property_name}):
            i = replaced_property_name.replace('_', ' ').lower()
            pid_query = ('\n SELECT ?property ?propertyLabel WHERE {\n ?property rdf:type wikibase:Property .\n ?property rdfs:label "%s" .\n SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }\n }' % i)
            time.sleep(1)  # be polite to the public endpoint
            response = requests.get(sparql_endpoint, params={'format': 'json', 'query': pid_query})
            response.raise_for_status()
            data = response.json()
            if ('results' in data and 'bindings' in data['results']
                    and len(data['results']['bindings']) > 0):
                property_id = data['results']['bindings'][0]['property']['value']
                # Reconstructed: strip the entity-URI prefix, leaving the bare PID
                # (the original replace() call was corrupted in the dump).
                property_id = property_id.replace('http://www.wikidata.org/entity/', '')
                print('inserting {} for {}'.format(replaced_property_name, property_id))
                name_to_pid_mapping.insert_one({'name': replaced_property_name, 'pid': property_id})
            else:
                # Fall back to alias search through the MediaWiki API.
                api_url = 'https://www.wikidata.org/w/api.php'
                params = {'action': 'wbsearchentities', 'search': i, 'language': 'en', 'limit': 20, 'format': 'json', 'type': 'property'}
                encoded_url = api_url + '?' + urlencode(params)
                time.sleep(1)
                response = requests.get(encoded_url)
                data = response.json()
                if 'search' in data and len(data['search']) > 0:
                    property_id = data['search'][0]['id']
                    print('inserting {} for {} by querying aliases for property'.format(replaced_property_name, property_id))
                    name_to_pid_mapping.insert_one({'name': replaced_property_name, 'pid': property_id})
                else:
                    print('CANNOT FIND PROPERTY: {} for SPARQL {}'.format(replaced_property_name, sparql))
                    return ([], sparql)
        pid = name_to_pid_mapping.find_one({'name': replaced_property_name})['pid']
        pid_replacements[replaced_property_name] = pid

    def sub_fcn(match):
        # Keep the prefix (wdt:/p:/...), swap the name for its PID.
        return match.group(1) + pid_replacements[match.group(2)]

    sparql = re.sub(property_pattern, sub_fcn, sparql)
    entity_pattern = '(wd:)([a-zA-PR-Z_0-9-]+)'
    extracted_entity_names = [x[1] for x in re.findall(entity_pattern, sparql)]
    qid_replacements = {}
    for extracted_entity_name in extracted_entity_names:
        found = False
        for i in qid_name_mapping.find():
            if i['name'] == extracted_entity_name and 'qid' in i:
                found = True
                qid_replacements[extracted_entity_name] = i['qid']
            elif (i['name'].lower().replace(' ', '_').replace('/', '_').replace('-', '_')
                  == extracted_entity_name and 'qid' in i):
                found = True
                qid_replacements[extracted_entity_name] = i['qid']
        if not found:
            # Unknown entity: try resolving it as a location name.
            try_location = location_search(extracted_entity_name.replace('_', ' '))
            if try_location is not None:
                try_location = 'wd:' + try_location
                print('inserting {} for {}'.format(try_location, extracted_entity_name))
                qid_name_mapping.insert_one({'name': extracted_entity_name, 'qid': try_location})
                qid_replacements[extracted_entity_name] = try_location
            else:
                print('CANNOT FIND ENTITY: {} for SPARQL {}'.format(extracted_entity_name, sparql))
                return ([], sparql)

    def sub_entity_fcn(match):
        return qid_replacements[match.group(2)]

    sparql = re.sub(entity_pattern, sub_entity_fcn, sparql)
    prediction_results = execute_sparql(sparql)
    return (prediction_results, sparql)
def test(model):
    """Evaluate *model* on the DAVIS 2016/2017 benchmark splits, writing to outputs/."""
    datasets = {
        'DAVIS16_val': TestDAVIS('../DB/DAVIS', '2016', 'val'),
        'DAVIS17_val': TestDAVIS('../DB/DAVIS', '2017', 'val'),
        'DAVIS17_test-dev': TestDAVIS('../DB/DAVIS', '2017', 'test-dev'),
    }
    for name, dataset in datasets.items():
        evaluation.Evaluator(dataset).evaluate(model, os.path.join('outputs', name))
def test(task, task_dir, evaluator, run_name, num_test_samples, out_dir):
    """Run *evaluator* over the test split of *task*, logging one result per example.

    Reads ``{task_dir}/test.jsonl`` (optionally truncated to
    ``num_test_samples``) and appends JSON results to
    ``{out_dir}/{run_name}.jsonl``; task-specific branches handle environment
    setup and frame/GIF rendering.
    """

    def write_log(f, res_dict):
        # Fall back to logging just the crash flag when the full result dict
        # is not JSON-serializable.  NOTE(review): bare except also hides
        # unrelated errors — consider narrowing to TypeError upstream.
        try:
            json.dump(res_dict, f)
        except:
            json.dump(res_dict['crashed'], f)
        f.write('\n')
        f.flush()

    os.makedirs(out_dir, exist_ok=True)
    test_examples_file = f'{task_dir}/test.jsonl'
    log_file = f'{out_dir}/{run_name}.jsonl'
    with open(test_examples_file) as f, open(log_file, 'w') as f_log:
        lines = f.readlines()
        if (num_test_samples > 0):
            lines = lines[:num_test_samples]
        if (task == 'code_as_policies_tabletop'):
            # Heavy simulation deps imported lazily, only for this task.
            from moviepy.editor import ImageSequenceClip
            from evaluator.code_as_policies_env.tester import TESTERS
            from PIL import Image

            def make_image_name():
                # Encodes tester/config ids and outcome flags into the filename.
                factors = [f'{tester_id}', f'{configs_id}', f"crash_{results['crashed']}", f"success_{results['success']}", instruction.replace(' ', '_')]
                return '__'.join(factors)

            render_dir = f'{out_dir}/{run_name}'
            os.makedirs(render_dir, exist_ok=True)
            cur_tester_id = ''
            for line in lines:
                jsonobj = json.loads(line)
                tester_id = jsonobj['tester_id']
                configs = jsonobj['configs']
                configs_id = jsonobj['configs_id']
                # Build a fresh tester when the tester id changes.
                # NOTE(review): cur_tester_id is never updated afterwards, so
                # this condition is always true and a new tester is built every
                # iteration — confirm whether `cur_tester_id = tester_id` was
                # dropped upstream.
                if (cur_tester_id != tester_id):
                    tester = TESTERS[tester_id]()
                print(tester_id, configs_id)
                instruction = tester.reset_env_from_configs(configs)
                # The configs' instruction overrides the one reset() returned.
                instruction = configs['instruction']
                evaluator.setup(tester.env)
                results = evaluator(instruction, tester)
                write_log(f_log, results)
                image_name = make_image_name()
                if tester.env.cache_video:
                    # Save the rollout as a GIF when frames were cached.
                    rendered_clip = ImageSequenceClip(tester.env.cache_video, fps=35)
                    rendered_clip.write_gif(f'{render_dir}/{image_name}.gif')
                else:
                    # Otherwise keep just the initial camera frame.
                    camera_img = tester.env.get_camera_image()
                    img = Image.fromarray(camera_img, 'RGB')
                    img.save(f'{render_dir}/{image_name}_initial_state.png')
        elif (task == 'web_shop'):
            for line in lines:
                data = json.loads(line)
                results = evaluator(data['id'])
                write_log(f_log, results)
        else:
            # Generic prompt/completion tasks, with per-task extra arguments.
            for line in lines:
                data = json.loads(line)
                query = data['prompt'].strip()
                labels = data['completion']
                if (task == 'google_sheets'):
                    results = evaluator(query, labels, data['question_sheet_name'], data['check_format'])
                elif (task == 'the_cat_api'):
                    results = evaluator(query, labels, data['compare_string_only'])
                else:
                    results = evaluator(query, labels)
                write_log(f_log, results)
        # Aggregate metrics are appended as the final log line.
        final_results = evaluator.aggregate_results()
        print(final_results)
        write_log(f_log, final_results)
def full_test(model, loader):
    """Return the model's accuracy over every batch in *loader*.

    NOTE(review): a decorator (likely ``@torch.no_grad()``) appears truncated
    in the original source — confirm upstream.
    """
    model.eval()
    hits = 0
    seen = 0
    for batch in loader:
        batch = batch.to(device)
        preds, _ = model(batch.x, batch.adj_t)
        # Count label hits for this batch.
        hits += int((preds.argmax(dim=-1) == batch.y).sum())
        seen += preds.size(0)
    return hits / seen
def write_version_py(source_root, filename='scipy/version.py'):
    """Generate scipy/version.py with version/git metadata for the build.

    Fills a template with module-level VERSION and ISRELEASED plus the
    git-derived values returned by get_version_info().
    """
    cnt = "# THIS FILE IS GENERATED DURING THE SCIPY BUILD\n# See tools/version_utils.py for details\n\nshort_version = '%(version)s'\nversion = '%(version)s'\nfull_version = '%(full_version)s'\ngit_revision = '%(git_revision)s'\ncommit_count = '%(commit_count)s'\nrelease = %(isrelease)s\n\nif not release:\n version = full_version\n"
    (FULLVERSION, GIT_REVISION, COMMIT_COUNT) = get_version_info(source_root)
    # Context manager replaces the old try/finally around an explicit close().
    with open(filename, 'w') as a:
        a.write(cnt % {'version': VERSION, 'full_version': FULLVERSION,
                       'git_revision': GIT_REVISION, 'commit_count': COMMIT_COUNT,
                       'isrelease': str(ISRELEASED)})
def weights_init_embedding(m, init_cfg):
    """Initialize embedding / adaptive-softmax submodules of *m* per init_cfg."""
    cls = m.__class__.__name__
    if 'AdaptiveEmbedding' in cls:
        # Projection matrices get a plain normal init with proj_init_std.
        if hasattr(m, 'emb_projs'):
            for proj in m.emb_projs:
                if proj is not None:
                    nn.init.normal_(proj, 0.0, init_cfg.proj_init_std)
    elif 'Embedding' in cls:
        if hasattr(m, 'weight'):
            init_weight(m.weight, init_cfg)
    elif 'ProjectedAdaptiveLogSoftmax' in cls:
        if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
            init_weight(m.cluster_weight, init_cfg)
        if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
            init_bias(m.cluster_bias, init_cfg)
        if hasattr(m, 'out_projs'):
            for proj in m.out_projs:
                if proj is not None:
                    nn.init.normal_(proj, 0.0, init_cfg.proj_init_std)
        if hasattr(m, 'out_layers_weights'):
            for weight in m.out_layers_weights:
                if weight is not None:
                    init_weight(weight, init_cfg)
def _split_tensor_list_constants(g, block):
    """Recursively replace constant tensor-list nodes with prim::ListConstruct."""
    for node in block.nodes():
        # Recurse into sub-blocks (e.g. If/Loop bodies) first.
        for subblock in node.blocks():
            _split_tensor_list_constants(g, subblock)
        if _is_constant_tensor_list(node):
            consts = []
            # One constant node per element, inserted ahead of the list node.
            for val in node.output().toIValue():
                const = g.insertConstant(val)
                const.node().moveBefore(node)
                consts.append(const)
            lc = (g.create('prim::ListConstruct', consts)
                   .insertBefore(node)
                   .output()
                   .setType(ListType.ofTensors()))
            node.output().replaceAllUsesWith(lc)
def for_search(state, outcome):
    """Translate a (state, outcome) pair into a search-result dict.

    Returns {'error': True} for 'error', a loss/var dict for 'ok', and
    (implicitly) None for any other state.
    """
    if state == 'error':
        return {'error': True}
    if state == 'ok':
        return {'loss': outcome['loss'], 'var': outcome.get('var', None)}
def create_lr_schedule(base_lr, decay_type, total_steps, decay_rate=0.1, decay_steps=0, warmup_steps=0, power=1.0, min_lr=1e-05):
    """Return a step -> learning-rate schedule.

    Supported decay types: 'cosine', 'step', 'poly*', 'exp*', and
    ''/'const*'; anything else raises ValueError.  Linear warmup is applied
    when warmup_steps > 0, and the rate never drops below min_lr (before
    warmup scaling).
    """

    def step_fn(step):
        # Steps elapsed since warmup ended, and the fraction of training done.
        post_warmup = jnp.maximum(0.0, step - warmup_steps)
        progress = jnp.clip(post_warmup / float(total_steps - warmup_steps), 0.0, 1.0)
        if decay_type == 'cosine':
            lr = min_lr + base_lr * 0.5 * (1.0 + jnp.cos(jnp.pi * progress))
        elif decay_type == 'step':
            assert decay_steps > 0
            lr = base_lr * (decay_rate ** (post_warmup // decay_steps))
        elif decay_type.startswith('poly'):
            lr = min_lr + (base_lr - min_lr) * ((1.0 - progress) ** power)
        elif decay_type.startswith('exp'):
            assert decay_steps > 0
            lr = base_lr * (decay_rate ** (post_warmup / decay_steps))
        elif (not decay_type) or decay_type.startswith('const'):
            lr = base_lr
        else:
            raise ValueError(f'Unknown lr type {decay_type}')
        lr = jnp.maximum(min_lr, lr)
        if warmup_steps:
            # Linear warmup scales the rate up from 0 over warmup_steps.
            lr = lr * jnp.minimum(1.0, step / warmup_steps)
        return jnp.asarray(lr, dtype=jnp.float32)

    return step_fn
class TestVisualization(unittest.TestCase):
    """Smoke tests for SVG graph rendering: each call must produce an '<svg...' string.

    The assertions only check that the returned markup starts with the 'svg'
    tag (``image[1:4]``), i.e. that rendering succeeded, not pixel content.
    """

    def test_undirected(self):
        """Exercise svg_graph on an undirected graph with every option combination."""
        graph = karate_club(True)
        adjacency = graph.adjacency
        position = graph.position
        labels = graph.labels
        image = svg_graph(adjacency, position, labels=labels)
        self.assertEqual(image[1:4], 'svg')
        # Labels as a plain list rather than an array.
        image = svg_graph(adjacency, position, labels=list(labels))
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency, position, display_edges=False)
        self.assertEqual(image[1:4], 'svg')
        # Various height/width auto-sizing combinations.
        image = svg_graph(adjacency, position, height=None)
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency, position, height=300, width=None)
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency, position, height=None, width=200)
        self.assertEqual(image[1:4], 'svg')
        n = adjacency.shape[0]
        edge_labels = [(0, 1, 0), (1, 1, 1), (3, 10, 2)]
        # Kitchen-sink call: exercise nearly every keyword at once.
        image = svg_graph(adjacency, position=None, names=np.arange(n), labels=np.arange(n), scores=np.arange(n), seeds=[0, 1], width=200, height=200, margin=10, margin_text=5, scale=3, node_order=np.flip(np.arange(n)), node_size=5, node_size_min=2, node_size_max=6, display_node_weight=True, node_weights=np.arange(n), node_width=2, node_width_max=5, node_color='red', edge_width=2, edge_width_min=2, edge_width_max=4, edge_color='blue', edge_labels=edge_labels, display_edge_weight=True, font_size=14)
        self.assertEqual(image[1:4], 'svg')
        # Partial labels/scores given as dicts.
        image = svg_graph(adjacency, position=None, labels={0: 0})
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency, position=None, scores={0: 0})
        self.assertEqual(image[1:4], 'svg')
        # Position-only rendering (no adjacency).
        image = svg_graph(adjacency=None, position=position)
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency=None, position=position, edge_labels=edge_labels)
        self.assertEqual(image[1:4], 'svg')
        # Custom label colors, as a dict and as a list.
        image = svg_graph(adjacency, position, labels, label_colors={0: 'red', 1: 'blue'})
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency, position, labels, label_colors=['red', 'blue'])
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency, position, labels, node_weights=np.arange(adjacency.shape[0]))
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency, position, scores=list(np.arange(n)))
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency, position, seeds={0: 1, 2: 1})
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency, position, labels=np.arange(n), name_position='left')
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency, position, scale=2, labels=np.arange(n), name_position='left')
        self.assertEqual(image[1:4], 'svg')
        # Wrong-length labels/scores must raise.
        with self.assertRaises(ValueError):
            svg_graph(adjacency, position, labels=[0, 1])
        with self.assertRaises(ValueError):
            svg_graph(adjacency, position, scores=[0, 1])
        svg_graph(adjacency, position, scale=2, labels=np.arange(n), name_position='left')

    def test_directed(self):
        """Exercise svg_graph on a directed graph."""
        graph = painters(True)
        adjacency = graph.adjacency
        position = graph.position
        names = graph.names
        image = svg_graph(adjacency, position, names=names)
        self.assertEqual(image[1:4], 'svg')
        image = svg_graph(adjacency, position, display_edges=False)
        self.assertEqual(image[1:4], 'svg')
        n = adjacency.shape[0]
        # Kitchen-sink call for the directed case.
        image = svg_graph(adjacency, position=None, names=np.arange(n), labels=np.arange(n), scores=np.arange(n), name_position='below', seeds=[0, 1], width=200, height=200, margin=10, margin_text=5, scale=3, node_order=np.flip(np.arange(n)), node_size=5, node_size_min=2, node_size_max=6, display_node_weight=True, node_weights=np.arange(n), node_width=2, node_width_max=5, node_color='red', edge_width=2, edge_width_min=2, edge_width_max=4, edge_color='blue', display_edge_weight=True, font_size=14)
        self.assertEqual(image[1:4], 'svg')

    def test_bipartite(self):
        """Exercise svg_bigraph on a bipartite graph."""
        graph = movie_actor(True)
        biadjacency = graph.biadjacency
        names_row = graph.names_row
        names_col = graph.names_col
        image = svg_bigraph(biadjacency, names_row, names_col)
        self.assertEqual(image[1:4], 'svg')
        image = svg_bigraph(biadjacency, display_edges=False)
        self.assertEqual(image[1:4], 'svg')
        image = svg_bigraph(biadjacency, reorder=False)
        self.assertEqual(image[1:4], 'svg')
        (n_row, n_col) = biadjacency.shape
        position_row = np.random.random((n_row, 2))
        position_col = np.random.random((n_col, 2))
        edge_labels = [(0, 1, 0), (1, 1, 1), (3, 10, 2)]
        # Kitchen-sink call for the bipartite case.
        image = svg_bigraph(biadjacency=biadjacency, names_row=np.arange(n_row), names_col=np.arange(n_col), labels_row=np.arange(n_row), labels_col=np.arange(n_col), scores_row=np.arange(n_row), scores_col=np.arange(n_col), seeds_row=[0, 1], seeds_col=[1, 2], position_row=position_row, position_col=position_col, color_row='red', color_col='white', width=200, height=200, margin=10, margin_text=5, scale=3, node_size=5, node_size_min=1, node_size_max=30, node_weights_row=np.arange(n_row), node_weights_col=np.arange(n_col), display_node_weight=True, node_width=2, node_width_max=5, edge_labels=edge_labels, edge_width=2, edge_width_min=0.3, edge_width_max=4, edge_color='red', display_edge_weight=True, font_size=14)
        self.assertEqual(image[1:4], 'svg')

    def test_disconnect(self):
        """Disconnected graphs must still render."""
        adjacency = test_disconnected_graph()
        position = np.random.random((adjacency.shape[0], 2))
        image = svg_graph(adjacency, position)
        self.assertEqual(image[1:4], 'svg')
        biadjacency = test_bigraph_disconnect()
        image = svg_bigraph(biadjacency)
        self.assertEqual(image[1:4], 'svg')

    def test_probs(self):
        """Membership-probability coloring, dense and sparse."""
        adjacency = bow_tie()
        probs = np.array([[0.5, 0.5], [0, 0], [1, 0], [0, 1], [0, 1]])
        image = svg_graph(adjacency, probs=probs)
        self.assertEqual(image[1:4], 'svg')
        probs = sparse.csr_matrix(probs)
        image = svg_graph(adjacency, probs=probs)
        self.assertEqual(image[1:4], 'svg')
        biadjacency = star_wars()
        probs_row = sparse.csr_matrix([[0.5, 0.5], [0, 0], [1, 0], [0, 1]])
        probs_col = sparse.csr_matrix([[0.5, 0.5], [0, 0], [1, 0]])
        image = svg_bigraph(biadjacency, probs_row=probs_row, probs_col=probs_col)
        self.assertEqual(image[1:4], 'svg')

    def test_labels(self):
        """Names containing markup/special characters must be escaped, not break SVG."""
        adjacency = bow_tie()
        names = ['aa', 'bb', '<>', 'a&b', '']
        image = svg_graph(adjacency, names=names)
        self.assertEqual(image[1:4], 'svg')

    def test_text(self):
        """svg_text returns a '<text...' element."""
        image = svg_text(np.array([0, 0]), 'foo', 0.1, 16, 'above')
        self.assertEqual(image[1:5], 'text')

    def test_rescale(self):
        """rescale returns a 3-tuple (positions, width, height)."""
        output = rescale(np.array([[0, 0]]), width=4, height=6, margin=2, node_size=10, node_size_max=20, display_node_weight=True, names=np.array(['foo']), name_position='left')
        self.assertEqual(len(output), 3)

    def test_write(self):
        """filename= writes an .svg file whose content starts with the svg tag."""
        filename = (tempfile.gettempdir() + '/image')
        graph = karate_club(True)
        adjacency = graph.adjacency
        position = graph.position
        _ = svg_graph(adjacency, position, filename=filename)
        with open((filename + '.svg'), 'r') as f:
            row = f.readline()
        self.assertEqual(row[1:4], 'svg')
        _ = svg_bigraph(adjacency, position, filename=filename)
        with open((filename + '.svg'), 'r') as f:
            row = f.readline()
        self.assertEqual(row[1:4], 'svg')
def build_vocab(examples):
    """Build a word -> index vocabulary from the examples.

    Indices are assigned in first-seen order, starting at 0.  Each
    example's ``word_list_a`` is always added; ``word_list_b`` is added
    only when the example has a truthy ``text_b``.
    """
    vocab = {}

    def _absorb(words):
        # setdefault assigns the next free index only on first sight.
        for token in words:
            vocab.setdefault(token, len(vocab))

    for example in examples:
        _absorb(example.word_list_a)
        if example.text_b:
            _absorb(example.word_list_b)
    return vocab
def dataclass_to_box(dataclass, trace, name_suffix=None, skip_args=None):
    """Wrap every field of *dataclass* in a traced box.

    Each field becomes the root of a fresh tracer node named
    ``<field><suffix>``.  Fields listed in *skip_args* are passed through
    untouched; fields exposing ``get_boxable``/``set_boxable`` have their
    inner payload boxed in place.  Returns the rebuilt dataclass and a
    mapping from tracer node to box.
    """
    values = dataclasses.astuple(dataclass)
    field_names = [f.name for f in dataclasses.fields(dataclass)]
    tag = f'_{name_suffix}' if name_suffix else ''
    replacements = {}
    node_map = {}
    for field_name, field_value in zip(field_names, values):
        if skip_args and field_name in skip_args:
            # Explicitly skipped: keep the raw value, no tracing.
            replacements[field_name] = field_value
            continue
        if hasattr(field_value, 'get_boxable'):
            # Container object: box its inner payload and store the box back
            # inside it, then keep the container as the replacement.
            payload = check_and_cast_args(field_value.get_boxable())
            node = TracerNode.new_root(payload, field_name + tag)
            box = new_box(payload, trace, node)
            field_value.set_boxable(box)
            replacements[field_name] = field_value
        else:
            payload = check_and_cast_args(field_value)
            node = TracerNode.new_root(payload, field_name + tag)
            box = new_box(payload, trace, node)
            replacements[field_name] = box
        node_map[node] = box
    return dataclasses.replace(dataclass, **replacements), node_map
def set_model(opt):
    """Create the encoder, linear classifier and loss, restoring the
    encoder weights from ``opt.ckpt``.

    On a multi-GPU machine the encoder is wrapped in ``DataParallel`` so
    the checkpoint's ``module.`` key prefixes line up; on a single GPU the
    prefixes are stripped from the state dict instead.
    """
    model = FairSupConResNet(name=opt.model)
    classifier = LinearClassifier(name=opt.model, num_classes=opt.ta_cls)
    criterion = torch.nn.CrossEntropyLoss()

    checkpoint = torch.load(opt.ckpt, map_location='cpu')
    state_dict = checkpoint['model']

    if torch.cuda.is_available():
        if torch.cuda.device_count() > 1:
            model.encoder = torch.nn.DataParallel(model.encoder)
        else:
            # Checkpoint was saved from a DataParallel model; drop the prefix.
            state_dict = {key.replace('module.', ''): weight
                          for key, weight in state_dict.items()}
        model = model.cuda()
        classifier = classifier.cuda()
        criterion = criterion.cuda()
        cudnn.benchmark = True

    model.load_state_dict(state_dict)
    return model, classifier, criterion
class TestPointEnv:
    """Unit tests for PointEnv: pickling, action immutability, spaces,
    reset semantics, task sampling and termination."""

    def test_pickleable(self):
        # A round-tripped env must still be steppable.
        env = PointEnv()
        round_trip = pickle.loads(pickle.dumps(env))
        assert round_trip
        step_env(round_trip)
        env.close()
        round_trip.close()

    def test_does_not_modify_action(self):
        env = PointEnv()
        a = env.action_space.sample()
        a_copy = a.copy()
        env.reset()
        env.step(a)
        # FIX: the original asserted ``a.all() == a_copy.all()``, which
        # compares two scalar truth values and passes for almost any pair
        # of arrays; compare element-wise instead.
        assert (a == a_copy).all()
        env.close()

    def test_observation_space(self):
        env = PointEnv()
        obs_space = env.observation_space
        a = env.action_space.sample()
        (obs, _, _, _) = env.step(a)
        assert obs_space.contains(obs)

    def test_reset(self):
        env = PointEnv()
        assert (env._point == np.array([0, 0])).all()
        a = env.action_space.sample()
        _ = env.step(a)
        env.reset()
        # reset() must move the point back to the origin.
        assert (env._point == np.array([0, 0])).all()

    def test_task(self):
        env = PointEnv()
        tasks = env.sample_tasks(5)
        assert len(tasks) == 5
        for task in tasks:
            env.set_task(task)
            assert (env._goal == task['goal']).all()

    def test_done(self):
        env = PointEnv()
        # Stepping straight onto the goal must eventually report done.
        for _ in range(1000):
            (_, _, done, _) = env.step(env._goal)
            if done:
                break
        else:
            assert False, 'Should report done'
def _yes_schema(source, line_delimited, schema, nan_string, posinf_string, neginf_string, complex_record_fields, buffersize, initial, resize, highlevel, behavior, attrs):
    """Parse JSON from *source* guided by a JSONSchema (the schema-driven
    fast path of ``ak.from_json``).

    The schema root must be of type ``'array'`` or ``'object'``.  The
    schema is compiled into an instruction program executed by the C++
    extension, which fills *container* with buffers that are then
    reassembled into an Awkward layout.
    """
    # Accept the schema as JSON text as well as an already-parsed dict.
    if isinstance(schema, (bytes, str)):
        schema = json.loads(schema)
    if (not isinstance(schema, dict)):
        raise TypeError(f'unrecognized JSONSchema: expected dict, got {schema!r}')
    container = {}
    instructions = []
    if (schema.get('type') == 'array'):
        if ('items' not in schema):
            raise TypeError('JSONSchema type is not concrete: array without items')
        # Top-level array: every parsed JSON value becomes one list element.
        instructions.append(['TopLevelArray'])
        form = _build_assembly(schema['items'], container, instructions)
        is_record = False
    elif (schema.get('type') == 'object'):
        form = _build_assembly(schema, container, instructions)
        is_record = True
    else:
        raise TypeError("only 'array' and 'object' types supported at the JSONSchema root")
    # Non-line-delimited input holds exactly one JSON document.
    read_one = (not line_delimited)
    with _get_reader(source) as obj:
        try:
            length = _ext.fromjsonobj_schema(obj, container, read_one, buffersize, nan_string, posinf_string, neginf_string, json.dumps(instructions), initial, resize)
        except Exception as err:
            # Surface C++ parse errors as ValueError without chaining.
            raise ValueError(str(err)) from None
    layout = ak.operations.from_buffers(form, length, container, byteorder=ak._util.native_byteorder, highlevel=False)
    # Optionally reinterpret {real, imag}-style records as complex numbers.
    layout = _record_to_complex(layout, complex_record_fields)
    if (is_record and read_one):
        # A single top-level object was parsed as a length-1 array; unwrap it.
        layout = layout[0]
    return wrap_layout(layout, highlevel=highlevel, attrs=attrs, behavior=behavior)
def exec_bfs(G, workers, calcUntilLayer):
    """Compute degree lists for all vertices of ``G`` in parallel via BFS.

    The vertex set is split into ``workers`` chunks, each handled by
    ``getDegreeListsVertices`` in its own process; the partial results are
    merged as they complete and persisted to disk as ``'degreeList'``.
    """
    started = time()
    degree_lists = {}
    chunks = partition(G.keys(), workers)

    with ProcessPoolExecutor(max_workers=workers) as executor:
        pending = {}
        for part_number, chunk in enumerate(chunks, start=1):
            future = executor.submit(getDegreeListsVertices, G, chunk, calcUntilLayer)
            pending[future] = part_number
        # Merge partial degree lists as soon as each worker finishes.
        for future in as_completed(pending):
            degree_lists.update(future.result())

    logging.info('Saving degreeList on disk...')
    saveVariableOnDisk(degree_lists, 'degreeList')
    elapsed = time() - started
    logging.info('Execution time - BFS: {}m'.format(elapsed / 60))
    return
def generate(url):
    """Render the ``webencodings.labels`` module from the WHATWG encodings
    JSON at *url*.

    Produces the module header followed by a ``LABELS`` dict literal that
    maps every encoding label to its canonical name, with the values
    aligned into a column.
    """
    header = '"""\n\n webencodings.labels\n \n\n Map encoding labels to their name.\n\n :copyright: Copyright 2012 by Simon Sapin\n :license: BSD, see LICENSE for details.\n\n"""\n\n# XXX Do not edit!\n# This file is automatically generated by mklabels.py\n\nLABELS = {\n'
    labels = []
    for category in json.loads(urlopen(url).read().decode('ascii')):
        for encoding in category['encodings']:
            for label in encoding['labels']:
                # repr(...).lstrip('u') normalizes Python-2 unicode reprs.
                labels.append((repr(assert_lower(label)).lstrip('u'),
                               repr(encoding['name']).lstrip('u')))
    max_len = max(len(label) for label, name in labels)
    entries = [' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name)
               for label, name in labels]
    return header + ''.join(entries) + '}'
# NOTE(review): the bare call below is almost certainly a decorator whose
# leading '@' (and part of its name) was lost in formatting -- compare
# numpy's ``@array_function_dispatch(...)``.  Confirm against the original
# source before relying on this file.
_function_dispatch(_rec_append_fields_dispatcher)
def rec_append_fields(base, names, data, dtypes=None):
    """Append fields to *base* and return the result as a ``recarray``
    (``asrecarray=True``) with no mask (``usemask=False``).

    Thin convenience wrapper around ``append_fields``.
    """
    return append_fields(base, names, data=data, dtypes=dtypes, asrecarray=True, usemask=False)
class HigherThresholdNearestNeighborBuffer(object):
    """FIFO buffer of 'exempted' indices with a per-item repeat counter.

    ``choose`` scans a sequence of nearest-neighbour indices and returns
    the position of the first index that is brand new; an index already
    seen is skipped, and once it has been repeated more than ``tolerance``
    times it is exempted (pushed into a bounded FIFO queue).
    """

    def __init__(self, buffer_size, tolerance=2):
        self.buffer_size = buffer_size
        self.tolerance = tolerance
        self.exempted_queue = []
        self.seen_queue = {}

    def reset(self):
        """Clear both the exempted queue and the seen counters."""
        self.exempted_queue = []
        self.seen_queue = {}

    def put(self, item):
        """Append *item*, evicting the oldest entry past ``buffer_size``."""
        self.exempted_queue.append(item)
        while len(self.exempted_queue) > self.buffer_size:
            self.exempted_queue.pop(0)

    def get(self):
        """Pop and return the oldest exempted item."""
        return self.exempted_queue.pop(0)

    def choose(self, nn_idxs):
        """Return the position in *nn_idxs* of the first usable index,
        falling back to the last position when none qualifies."""
        for position, candidate in enumerate(nn_idxs):
            item = candidate.item()
            if item in self.exempted_queue:
                continue
            count = self.seen_queue.get(item)
            if count is None:
                # Brand-new index: remember it and return its position.
                self.seen_queue[item] = 1
                return position
            self.seen_queue[item] = count + 1
            if self.seen_queue[item] > self.tolerance:
                # Repeated too often: exempt it and keep scanning.
                self.seen_queue[item] = 0
                self.put(item)
        return len(nn_idxs) - 1
def test_hdf5maker():
    """Smoke-test the preprocessor: it must create a ``*_hdfs`` output
    directory next to the mseed downloads."""
    preprocessor(mseed_dir='downloads_mseeds', stations_json='station_list.json', overlap=0.3, n_processor=2)
    hdf_dirs = [entry for entry in os.listdir('.') if entry.split('_')[-1] == 'hdfs']
    assert hdf_dirs[0] == 'downloads_mseeds_processed_hdfs'
class DummyModule(torch.nn.Module):
    """Minimal two-layer MLP (32 -> 2 -> 2) used as a stand-in model."""

    def __init__(self) -> None:
        super().__init__()
        # Projection from the 32-dim input down to 2 features.
        self.layer = torch.nn.Linear(in_features=32, out_features=2)
        # Second 2 -> 2 projection applied on top.
        self.another_layer = torch.nn.Linear(in_features=2, out_features=2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply both linear layers in sequence."""
        return self.another_layer(self.layer(x))
class PlyProperty(object):
    """Description of one scalar property of a PLY element.

    Holds the property's name and value dtype (validated through the
    module-level ``_data_types``/``_lookup_type`` tables) and knows how to
    parse a header line and read/write itself in ASCII and binary form.
    """

    def __init__(self, name, val_dtype):
        self._name = str(name)
        self._check_name()
        self.val_dtype = val_dtype

    def _get_val_dtype(self):
        return self._val_dtype

    def _set_val_dtype(self, val_dtype):
        # Normalize any accepted spelling of the type to its canonical dtype.
        self._val_dtype = _data_types[_lookup_type(val_dtype)]

    val_dtype = property(_get_val_dtype, _set_val_dtype)

    @property
    def name(self):
        # FIX: restored the @property decorator (evidently lost in
        # formatting) -- __str__/__repr__ below use ``self.name`` as a
        # value, which would otherwise format the bound method.
        return self._name

    def _check_name(self):
        """Reject names containing whitespace (they would corrupt headers)."""
        if any((c.isspace() for c in self._name)):
            msg = ('Error: property name %r contains spaces' % self._name)
            raise RuntimeError(msg)

    @staticmethod
    def _parse_one(line):
        """Parse one tokenized ``property ...`` header line into a
        PlyProperty or PlyListProperty.

        FIX: restored @staticmethod (evidently lost in formatting) -- the
        first argument is the token list, not ``self``.
        """
        assert (line[0] == 'property')
        if (line[1] == 'list'):
            if (len(line) > 5):
                raise PlyParseError("too many fields after 'property list'")
            if (len(line) < 5):
                raise PlyParseError("too few fields after 'property list'")
            return PlyListProperty(line[4], line[2], line[3])
        else:
            if (len(line) > 3):
                raise PlyParseError("too many fields after 'property'")
            if (len(line) < 3):
                raise PlyParseError("too few fields after 'property'")
            return PlyProperty(line[2], line[1])

    def dtype(self, byte_order='='):
        """Return the numpy dtype string with the given byte-order prefix."""
        return (byte_order + self.val_dtype)

    def _from_fields(self, fields):
        """Consume and coerce one ASCII field from the *fields* iterator."""
        return _np.dtype(self.dtype()).type(next(fields))

    def _to_fields(self, data):
        """Yield *data* coerced to this property's dtype (ASCII output)."""
        (yield _np.dtype(self.dtype()).type(data))

    def _read_bin(self, stream, byte_order):
        """Read one binary value; StopIteration signals a truncated stream."""
        try:
            return _np.fromfile(stream, self.dtype(byte_order), 1)[0]
        except IndexError:
            raise StopIteration

    def _write_bin(self, data, stream, byte_order):
        """Write one binary value in the requested byte order."""
        _np.dtype(self.dtype(byte_order)).type(data).tofile(stream)

    def __str__(self):
        val_str = _data_type_reverse[self.val_dtype]
        return ('property %s %s' % (val_str, self.name))

    def __repr__(self):
        return ('PlyProperty(%r, %r)' % (self.name, _lookup_type(self.val_dtype)))
def open_spinner(message):
    """Yield a progress spinner for *message* and finish it appropriately.

    Interactive stdout (with logging at INFO or lower) gets the animated
    spinner; otherwise the non-interactive fallback is used.  The spinner
    finishes as 'done' on success, 'canceled' on Ctrl-C, or 'error' on any
    other exception (both of which are re-raised).
    """
    interactive = sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO
    spinner = InteractiveSpinner(message) if interactive else NonInteractiveSpinner(message)
    try:
        # Hide the cursor while the spinner animates.
        with hidden_cursor(sys.stdout):
            yield spinner
    except KeyboardInterrupt:
        spinner.finish('canceled')
        raise
    except Exception:
        spinner.finish('error')
        raise
    else:
        spinner.finish('done')
def lr_func_step(cur_iter):
    """Stepwise LR schedule: BASE_LR decayed by GAMMA once per STEP_SIZE
    iterations (integer division)."""
    num_decays = cur_iter // cfg.SOLVER.STEP_SIZE
    return cfg.SOLVER.BASE_LR * cfg.SOLVER.GAMMA ** num_decays
def _read_item(item, scaler=None, flip_indices=False):
    """Turn one feature-dict example into a ``(features, label)`` pair.

    Pops ``'class_label'`` off *item*, stacks the remaining values into a
    float32 feature vector, optionally swapping features 24/25 and
    normalizing with the *scaler* statistics.
    """
    label = item['class_label']
    label = tf.cast(label, tf.int64)
    # Remove the label so only feature columns remain in the dict.
    del item['class_label']
    features = list(item.values())
    if flip_indices:
        # Swap the m_wbb / m_wwbb columns.  Assumes the dict preserves
        # column order so indices 24/25 are those features -- TODO confirm
        # against the dataset schema.
        m_wbb = features[24]
        m_wwbb = features[25]
        features[24] = m_wwbb
        features[25] = m_wbb
    features = tf.stack(features, axis=0)
    features = tf.cast(features, tf.float32)
    if (scaler is not None):
        features = (features - scaler.mean_)
        # NOTE(review): this divides by the *variance* (scaler.var_), not
        # the standard deviation (scaler.scale_); standardization normally
        # uses scale_ -- confirm this is intentional before changing it.
        features = (features / scaler.var_)
    return (features, label)
# NOTE(review): the bare `_grad()` call below is almost certainly a decorator
# whose '@' (and a leading name fragment, presumably ``@torch.no_grad()``)
# was lost in formatting -- confirm against the original source.
_grad()
def make_convolutional_sample(batch, model, mode='vanilla', custom_steps=None, eta=1.0, swap_mode=False, masked=False, invert_mask=True, quantize_x0=False, custom_schedule=None, decode_interval=1000, resize_enabled=False, custom_shape=None, temperature=1.0, noise_dropout=0.0, corrector=None, corrector_kwargs=None, x_T=None, save_intermediate_vid=False, make_progrow=True, ddim_use_x0_pred=False):
    """Draw one DDIM sample batch from a latent-diffusion *model*.

    Returns a log dict containing the input, its first-stage
    reconstruction, the conditioning, the decoded sample and the sampling
    wall-clock time.  NOTE(review): several keyword arguments (mode,
    swap_mode, masked, invert_mask, custom_schedule, decode_interval,
    resize_enabled, make_progrow) are accepted but unused in this body --
    presumably kept for interface compatibility; confirm before removing.
    """
    log = dict()
    # Encode the batch: latent z, conditioning c, input x, first-stage
    # reconstruction xrec, and the raw conditioning xc.
    (z, c, x, xrec, xc) = model.get_input(batch, model.first_stage_key, return_first_stage_outputs=True, force_c_encode=(not (hasattr(model, 'split_input_params') and (model.cond_stage_key == 'coordinates_bbox'))), return_original_cond=True)
    # Log every intermediate step only when an intermediate video is wanted.
    log_every_t = (1 if save_intermediate_vid else None)
    if (custom_shape is not None):
        # Ignore the encoded z; sample from noise of the requested shape.
        z = torch.randn(custom_shape)
        print(f'Generating {custom_shape[0]} samples of shape {custom_shape[1:]}')
    z0 = None
    log['input'] = x
    log['reconstruction'] = xrec
    if ismap(xc):
        # Map-style conditioning: convert to RGB for logging.
        log['original_conditioning'] = model.to_rgb(xc)
        if hasattr(model, 'cond_stage_key'):
            log[model.cond_stage_key] = model.to_rgb(xc)
    else:
        log['original_conditioning'] = (xc if (xc is not None) else torch.zeros_like(x))
        if model.cond_stage_model:
            log[model.cond_stage_key] = (xc if (xc is not None) else torch.zeros_like(x))
            if (model.cond_stage_key == 'class_label'):
                log[model.cond_stage_key] = xc[model.cond_stage_key]
    # Sample using the EMA weights.
    with model.ema_scope('Plotting'):
        t0 = time.time()
        img_cb = None
        (sample, intermediates) = convsample_ddim(model, c, steps=custom_steps, shape=z.shape, eta=eta, quantize_x0=quantize_x0, img_callback=img_cb, mask=None, x0=z0, temperature=temperature, noise_dropout=noise_dropout, score_corrector=corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t)
        t1 = time.time()
    if ddim_use_x0_pred:
        # Use the final x0 prediction instead of the last denoised sample.
        sample = intermediates['pred_x0'][(- 1)]
    x_sample = model.decode_first_stage(sample)
    try:
        # Additionally decode without quantization when the first stage
        # supports it, and log the difference image.
        x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
        log['sample_noquant'] = x_sample_noquant
        log['sample_diff'] = torch.abs((x_sample_noquant - x_sample))
    except:
        # NOTE(review): bare except silently swallows all errors (including
        # KeyboardInterrupt); kept as-is, but consider narrowing.
        pass
    log['sample'] = x_sample
    log['time'] = (t1 - t0)
    return log
# NOTE(review): the bare call below is likely a decorator whose '@' was lost
# in formatting (an NEP-18 ``implements('nanprod')`` registration); confirm
# against the original source.
_connect.numpy.implements('nanprod')
def _nep_18_impl_nanprod(a, axis=None, dtype=UNSUPPORTED, out=UNSUPPORTED, keepdims=False, initial=UNSUPPORTED, where=UNSUPPORTED):
    """NEP-18 dispatch shim for ``numpy.nanprod``.

    Only ``a``, ``axis`` and ``keepdims`` are forwarded; the remaining
    numpy parameters are declared UNSUPPORTED by this backend.
    """
    return nanprod(a, axis=axis, keepdims=keepdims)
def register_functions(root_module):
    """Register all free functions of the ns-3 core module with pybindgen.

    Generated binding code: maps C++ free functions (Time constructors,
    hashing, logging control, attribute checkers, ...) onto the Python
    module, then delegates to the per-namespace registration helpers.
    """
    module = root_module
    # Math / geometry helpers.
    module.add_function('Abs', 'ns3::Time', [param('ns3::Time const &', 'time')])
    module.add_function('Abs', 'ns3::int64x64_t', [param('ns3::int64x64_t const &', 'value')])
    module.add_function('BreakpointFallback', 'void', [])
    module.add_function('CalculateDistance', 'double', [param('ns3::Vector2D const &', 'a'), param('ns3::Vector2D const &', 'b')])
    module.add_function('CalculateDistance', 'double', [param('ns3::Vector3D const &', 'a'), param('ns3::Vector3D const &', 'b')])
    module.add_function('Create', 'ns3::Ptr< ns3::ObjectPtrContainerValue >', [], template_parameters=[u'ns3::ObjectPtrContainerValue'])
    module.add_function('Create', 'ns3::Ptr< ns3::PointerValue >', [], template_parameters=[u'ns3::PointerValue'])
    # Time construction from the various units (int64x64 and plain overloads).
    module.add_function('Days', 'ns3::Time', [param('ns3::int64x64_t', 'value')])
    module.add_function('Days', 'ns3::Time', [param('double', 'value')])
    module.add_function('FemtoSeconds', 'ns3::Time', [param('ns3::int64x64_t', 'value')])
    module.add_function('FemtoSeconds', 'ns3::Time', [param('uint64_t', 'value')])
    module.add_function('GetLogComponent', 'ns3::LogComponent &', [param('std::string const', 'name')])
    module.add_function('Hash32', 'uint32_t', [param('std::string const', 's')])
    module.add_function('Hash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    module.add_function('Hash64', 'uint64_t', [param('std::string const', 's')])
    module.add_function('Hash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    module.add_function('Hours', 'ns3::Time', [param('ns3::int64x64_t', 'value')])
    module.add_function('Hours', 'ns3::Time', [param('double', 'value')])
    # Logging control.
    module.add_function('LogComponentDisable', 'void', [param('char const *', 'name'), param('ns3::LogLevel', 'level')])
    module.add_function('LogComponentDisableAll', 'void', [param('ns3::LogLevel', 'level')])
    module.add_function('LogComponentEnable', 'void', [param('char const *', 'name'), param('ns3::LogLevel', 'level')])
    module.add_function('LogComponentEnableAll', 'void', [param('ns3::LogLevel', 'level')])
    module.add_function('LogComponentPrintList', 'void', [])
    module.add_function('LogGetNodePrinter', 'ns3::LogNodePrinter', [])
    module.add_function('LogGetTimePrinter', 'ns3::LogTimePrinter', [])
    module.add_function('LogSetNodePrinter', 'void', [param('ns3::LogNodePrinter', 'np')])
    module.add_function('LogSetTimePrinter', 'void', [param('ns3::LogTimePrinter', 'lp')])
    # Attribute checker/accessor factories.
    module.add_function('MakeBooleanChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
    module.add_function('MakeCallbackChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
    module.add_function('MakeEmptyAttributeAccessor', 'ns3::Ptr< ns3::AttributeAccessor const >', [])
    module.add_function('MakeEmptyAttributeChecker', 'ns3::Ptr< ns3::AttributeChecker >', [])
    module.add_function('MakeEmptyTraceSourceAccessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [])
    # MakeEnumChecker supports up to 22 (value, name) pairs via defaults.
    module.add_function('MakeEnumChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [param('int', 'v1'), param('std::string', 'n1'), param('int', 'v2', default_value='0'), param('std::string', 'n2', default_value='""'), param('int', 'v3', default_value='0'), param('std::string', 'n3', default_value='""'), param('int', 'v4', default_value='0'), param('std::string', 'n4', default_value='""'), param('int', 'v5', default_value='0'), param('std::string', 'n5', default_value='""'), param('int', 'v6', default_value='0'), param('std::string', 'n6', default_value='""'), param('int', 'v7', default_value='0'), param('std::string', 'n7', default_value='""'), param('int', 'v8', default_value='0'), param('std::string', 'n8', default_value='""'), param('int', 'v9', default_value='0'), param('std::string', 'n9', default_value='""'), param('int', 'v10', default_value='0'), param('std::string', 'n10', default_value='""'), param('int', 'v11', default_value='0'), param('std::string', 'n11', default_value='""'), param('int', 'v12', default_value='0'), param('std::string', 'n12', default_value='""'), param('int', 'v13', default_value='0'), param('std::string', 'n13', default_value='""'), param('int', 'v14', default_value='0'), param('std::string', 'n14', default_value='""'), param('int', 'v15', default_value='0'), param('std::string', 'n15', default_value='""'), param('int', 'v16', default_value='0'), param('std::string', 'n16', default_value='""'), param('int', 'v17', default_value='0'), param('std::string', 'n17', default_value='""'), param('int', 'v18', default_value='0'), param('std::string', 'n18', default_value='""'), param('int', 'v19', default_value='0'), param('std::string', 'n19', default_value='""'), param('int', 'v20', default_value='0'), param('std::string', 'n20', default_value='""'), param('int', 'v21', default_value='0'), param('std::string', 'n21', default_value='""'), param('int', 'v22', default_value='0'), param('std::string', 'n22', default_value='""')])
    module.add_function('MakeEvent', 'ns3::EventImpl *', [param('void ( * ) ( )', 'f')])
    module.add_function('MakeObjectFactoryChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
    module.add_function('MakeStringChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
    module.add_function('MakeTimeChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
    module.add_function('MakeTimeChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [param('ns3::Time const', 'min')])
    module.add_function('MakeTimeChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [param('ns3::Time const', 'min'), param('ns3::Time const', 'max')])
    module.add_function('MakeTypeIdChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
    module.add_function('MakeVector2DChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
    module.add_function('MakeVector3DChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
    module.add_function('MakeVectorChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
    module.add_function('Max', 'ns3::Time', [param('ns3::Time const &', 'ta'), param('ns3::Time const &', 'tb')])
    module.add_function('Max', 'ns3::int64x64_t', [param('ns3::int64x64_t const &', 'a'), param('ns3::int64x64_t const &', 'b')])
    module.add_function('MicroSeconds', 'ns3::Time', [param('ns3::int64x64_t', 'value')])
    module.add_function('MicroSeconds', 'ns3::Time', [param('uint64_t', 'value')])
    module.add_function('MilliSeconds', 'ns3::Time', [param('ns3::int64x64_t', 'value')])
    module.add_function('MilliSeconds', 'ns3::Time', [param('uint64_t', 'value')])
    module.add_function('Min', 'ns3::Time', [param('ns3::Time const &', 'ta'), param('ns3::Time const &', 'tb')])
    module.add_function('Min', 'ns3::int64x64_t', [param('ns3::int64x64_t const &', 'a'), param('ns3::int64x64_t const &', 'b')])
    module.add_function('Minutes', 'ns3::Time', [param('ns3::int64x64_t', 'value')])
    module.add_function('Minutes', 'ns3::Time', [param('double', 'value')])
    module.add_function('NanoSeconds', 'ns3::Time', [param('ns3::int64x64_t', 'value')])
    module.add_function('NanoSeconds', 'ns3::Time', [param('uint64_t', 'value')])
    module.add_function('Now', 'ns3::Time', [])
    module.add_function('PicoSeconds', 'ns3::Time', [param('ns3::int64x64_t', 'value')])
    module.add_function('PicoSeconds', 'ns3::Time', [param('uint64_t', 'value')])
    module.add_function('Seconds', 'ns3::Time', [param('ns3::int64x64_t', 'value')])
    module.add_function('Seconds', 'ns3::Time', [param('double', 'value')])
    module.add_function('TestDoubleIsEqual', 'bool', [param('double const', 'a'), param('double const', 'b'), param('double const', 'epsilon', default_value='std::numeric_limits<double>::epsilon()')])
    module.add_function('TimeStep', 'ns3::Time', [param('uint64_t', 'ts')])
    # TypeNameGet template instantiations for the fundamental types.
    module.add_function('TypeNameGet', 'std::string', [], template_parameters=[u'signed char'])
    module.add_function('TypeNameGet', 'std::string', [], template_parameters=[u'short'])
    module.add_function('TypeNameGet', 'std::string', [], template_parameters=[u'int'])
    module.add_function('TypeNameGet', 'std::string', [], template_parameters=[u'long'])
    module.add_function('TypeNameGet', 'std::string', [], template_parameters=[u'unsigned char'])
    module.add_function('TypeNameGet', 'std::string', [], template_parameters=[u'unsigned short'])
    module.add_function('TypeNameGet', 'std::string', [], template_parameters=[u'unsigned int'])
    module.add_function('TypeNameGet', 'std::string', [], template_parameters=[u'unsigned long long'])
    module.add_function('TypeNameGet', 'std::string', [], template_parameters=[u'float'])
    module.add_function('TypeNameGet', 'std::string', [], template_parameters=[u'double'])
    module.add_function('Years', 'ns3::Time', [param('ns3::int64x64_t', 'value')])
    module.add_function('Years', 'ns3::Time', [param('double', 'value')])
    # Delegate to the nested-namespace registration helpers.
    register_functions_ns3_CommandLineHelper(module.get_submodule('CommandLineHelper'), root_module)
    register_functions_ns3_Config(module.get_submodule('Config'), root_module)
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
    register_functions_ns3_SystemPath(module.get_submodule('SystemPath'), root_module)
    register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module)
    register_functions_ns3_internal(module.get_submodule('internal'), root_module)
    register_functions_ns3_tests(module.get_submodule('tests'), root_module)
    return
def format_prompt_with_data_frame(df: pd.DataFrame, prompt_dict: dict, df_postprocessor: Optional[Callable]=None, return_dict=False):
    """Format one prompt per DataFrame row.

    Optionally postprocesses *df* first, converts it to a list of row
    dicts, and formats each row with ``format_prompt``.  Returns either a
    dict (``return_dict=True``) or a ``(prompts, list_dict_data,
    metadata)`` tuple.
    """
    if df_postprocessor is not None:
        df = df_postprocessor(df)
    records = df.to_dict(orient='records')
    prompts = []
    for record in records:
        prompts.append(format_prompt(record, prompt_dict))
    metadata = {'prompt_dict': prompt_dict}
    if return_dict:
        return dict(prompts=prompts, list_dict_data=records, metadata=metadata)
    return prompts, records, metadata
def __dtype_from_pep3118(stream, is_subdtype):
    """Parse a PEP 3118 buffer-format string from *stream* into a numpy
    dtype.

    Returns ``(dtype, common_alignment)``.  Recurses into ``T{...}``
    sub-structures; native ('' / '^') byte order triggers C-style padding
    and alignment tracking, standard modes do not.
    """
    field_spec = dict(names=[], formats=[], offsets=[], itemsize=0)
    offset = 0
    common_alignment = 1
    is_padding = False
    while stream:
        value = None
        # End of a T{...} sub-structure.
        if stream.consume('}'):
            break
        # Optional shape prefix, e.g. "(2,3)".
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))
        # Optional byte-order modifier ('!' is an alias for '>').
        if (stream.next in ('', '=', '<', '>', '^', '!')):
            byteorder = stream.advance(1)
            if (byteorder == '!'):
                byteorder = '>'
            stream.byteorder = byteorder
        # Native modes use the native type tables; others the standard ones.
        if (stream.byteorder in ('', '^')):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars
        # Optional repeat count / string size.
        itemsize_str = stream.consume_until((lambda c: (not c.isdigit())))
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1
        is_padding = False
        if stream.consume('T{'):
            # Nested structured type.
            (value, align) = __dtype_from_pep3118(stream, is_subdtype=True)
        elif (stream.next in type_map_chars):
            # 'Z' prefixes complex types and consumes two characters.
            if (stream.next == 'Z'):
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)
            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if (dtypechar in 'USV'):
                # String types fold the repeat count into the dtype itself.
                dtypechar += ('%d' % itemsize)
                itemsize = 1
            numpy_byteorder = {'': '=', '^': '='}.get(stream.byteorder, stream.byteorder)
            value = dtype((numpy_byteorder + dtypechar))
            align = value.alignment
        elif (stream.next in _pep3118_unsupported_map):
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError('Unrepresentable PEP 3118 data type {!r} ({})'.format(stream.next, desc))
        else:
            raise ValueError(('Unknown PEP 3118 data type specifier %r' % stream.s))
        extra_offset = 0
        if (stream.byteorder == ''):
            # Native alignment: pad before the field and, if needed, after it.
            start_padding = ((- offset) % align)
            intra_padding = ((- value.itemsize) % align)
            offset += start_padding
            if (intra_padding != 0):
                if ((itemsize > 1) or ((shape is not None) and (_prod(shape) > 1))):
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    extra_offset += intra_padding
            common_alignment = _lcm(align, common_alignment)
        # Apply the repeat count and the explicit shape, in that order.
        if (itemsize != 1):
            value = dtype((value, (itemsize,)))
        if (shape is not None):
            value = dtype((value, shape))
        # Optional ":name:" suffix.
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None
        # Anonymous padding fields are dropped; everything else is recorded.
        if (not (is_padding and (name is None))):
            if ((name is not None) and (name in field_spec['names'])):
                raise RuntimeError(("Duplicate field name '%s' in PEP3118 format" % name))
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)
        offset += value.itemsize
        offset += extra_offset
    field_spec['itemsize'] = offset
    # Pad the overall struct out to the common alignment in native mode.
    if (stream.byteorder == ''):
        field_spec['itemsize'] += ((- offset) % common_alignment)
    # A single anonymous, unpadded top-level field collapses to its dtype.
    if ((field_spec['names'] == [None]) and (field_spec['offsets'][0] == 0) and (field_spec['itemsize'] == field_spec['formats'][0].itemsize) and (not is_subdtype)):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)
    return (ret, common_alignment)
def get_cifar_anomaly_dataset(trn_img, trn_lbl, tst_img, tst_lbl, abn_cls_idx=0, manualseed=(- 1)):
    """Re-split CIFAR into a normal-vs-anomaly dataset.

    Samples of class ``abn_cls_idx`` become anomalies (label 1); all other
    samples are normal (label 0).  The returned train split contains only
    normal samples; the test split contains the remaining normal samples
    followed by every abnormal one.  When ``manualseed != -1`` the pooled
    normal samples are reshuffled (seeding numpy's global RNG) into a
    fresh 80/20 train/test split.
    """
    trn_lbl = np.array(trn_lbl)
    tst_lbl = np.array(tst_lbl)

    # Partition both original splits into normal / abnormal subsets.
    nrm_trn_mask = trn_lbl != abn_cls_idx
    nrm_trn_img = trn_img[nrm_trn_mask]
    abn_trn_img = trn_img[~nrm_trn_mask]
    nrm_trn_lbl = np.zeros_like(trn_lbl[nrm_trn_mask])   # normal -> 0
    abn_trn_lbl = np.ones_like(trn_lbl[~nrm_trn_mask])   # abnormal -> 1

    nrm_tst_mask = tst_lbl != abn_cls_idx
    nrm_tst_img = tst_img[nrm_tst_mask]
    abn_tst_img = tst_img[~nrm_tst_mask]
    nrm_tst_lbl = np.zeros_like(tst_lbl[nrm_tst_mask])
    abn_tst_lbl = np.ones_like(tst_lbl[~nrm_tst_mask])

    if manualseed != -1:
        # Pool all normal samples and re-split 80/20 with a fixed seed.
        nrm_img = np.concatenate((nrm_trn_img, nrm_tst_img), axis=0)
        nrm_lbl = np.concatenate((nrm_trn_lbl, nrm_tst_lbl), axis=0)
        idx = np.arange(len(nrm_lbl))
        np.random.seed(manualseed)
        np.random.shuffle(idx)
        split = int(len(idx) * 0.8)
        nrm_trn_img = nrm_img[idx[:split]]
        nrm_trn_lbl = nrm_lbl[idx[:split]]
        nrm_tst_img = nrm_img[idx[split:]]
        nrm_tst_lbl = nrm_lbl[idx[split:]]

    new_trn_img = np.copy(nrm_trn_img)
    new_trn_lbl = np.copy(nrm_trn_lbl)
    new_tst_img = np.concatenate((nrm_tst_img, abn_trn_img, abn_tst_img), axis=0)
    new_tst_lbl = np.concatenate((nrm_tst_lbl, abn_trn_lbl, abn_tst_lbl), axis=0)
    return (new_trn_img, new_trn_lbl, new_tst_img, new_tst_lbl)
def read_hf_webdataset(url: str, multimodal_cfg: Dict[(str, Any)], tokenizer, is_train: bool, rsample_frac=None):
    """Build a streaming IterableDataset over webdataset ``.tar`` shards.

    Training streams loop over the shard list indefinitely; under
    multi-process (torchrun-style) execution the stream is sharded across
    ranks.  NOTE(review): ``rsample_frac`` is accepted but never used --
    confirm whether subsampling was meant to happen here.
    """
    shards = expand_url_to_file_list(url)
    if is_train:
        # Repeat shards so the training stream never runs dry.
        shards = repeat_shards(shards)
    assert shards[0].endswith('.tar')
    dataset = IterableDataset.from_generator(
        gen_from_webdataset_shards,
        gen_kwargs={'shards': shards, 'multimodal_cfg': multimodal_cfg, 'tokenizer': tokenizer, 'is_train': is_train},
    )
    world_size = os.environ.get('WORLD_SIZE')
    if world_size and int(world_size) > 1:
        # Distributed run: give each rank its own slice of the stream.
        dataset = split_dataset_by_node(dataset, rank=int(os.environ['RANK']), world_size=int(os.environ['WORLD_SIZE']))
    return dataset
def sentence_similarity(question):
    """Answer *question* from a stored Q&A dictionary.

    Embeds the question with sent2vec, ranks all stored questions by
    cosine similarity, concatenates the answers of the top three and
    returns a ~50-word extractive summary (newlines flattened).
    """
    model = sent2vec.Sent2vecModel()
    model.load_model('torontobooks_unigrams.bin')
    question_vec = [e[0] for e in model.embed_sentence(question).reshape((- 1), 1)]
    # FIX: np.load of a pickled dict requires allow_pickle=True on modern
    # NumPy.  Only load trusted files -- pickle can execute arbitrary code.
    data = np.load('data.npy', allow_pickle=True).item()
    similar_dic = {}
    # FIX: dict.iteritems() is Python 2 only; use items().
    for stored_question in data:
        stored_vec = [e[0] for e in model.embed_sentence(stored_question).reshape((- 1), 1)]
        similar_dic[stored_question] = (1 - spatial.distance.cosine(question_vec, stored_vec))
    sorted_list = sorted(similar_dic.items(), key=operator.itemgetter(1), reverse=True)
    # Keep the answers of the top-3 most similar questions, in rank order.
    top_answers = [data[q] for (q, _score) in sorted_list[:3]]
    text = ''
    for answer in top_answers:
        text += answer
        text += ' '
    sumresp = summarize(text, word_count=50)
    return sumresp.replace('\n', ' ')
def get_argparser():
    """Build the CLI argument parser for segmentation training/evaluation
    (dataset, model, optimization, checkpointing and visdom options)."""
    parser = argparse.ArgumentParser()
    # Dataset options.
    parser.add_argument('--data_root', type=str, default='./datasets/data', help='path to Dataset')
    parser.add_argument('--dataset', type=str, default='voc', choices=['lvis', 'voc', 'cityscapes', 'ade20k', 'coco'], help='Name of dataset')
    parser.add_argument('--num_channels', type=int, default=6, help='num channels in last layer (default: None)')
    parser.add_argument('--num_neighbours', type=int, default=6, help='num of neighbours for softmax (default: None)')
    parser.add_argument('--num_classes', type=int, default=21, help='num classes in the dataset (default: None)')
    # Model options.
    parser.add_argument('--model', type=str, default='deeplabv3plus_mobilenet', choices=['deeplabv3plus_resnet50', 'deeplabv3plus_resnet101', 'deeplabv3plus_mobilenet'], help='model name')
    parser.add_argument('--separable_conv', action='store_true', default=False, help='apply separable conv to decoder and aspp')
    parser.add_argument('--output_stride', type=int, default=16, choices=[8, 16])
    parser.add_argument('--reduce_dim', action='store_true', default=False)
    parser.add_argument('--test_only', action='store_true', default=False)
    parser.add_argument('--freeze_backbone', action='store_true', default=False)
    parser.add_argument('--save_val_results', action='store_true', default=False, help='save segmentation results to "./results"')
    # Optimization options.
    # NOTE(review): type=int with a float default (200000.0) -- the default
    # stays a float unless supplied on the CLI; confirm this is intended.
    parser.add_argument('--total_itrs', type=int, default=200000.0, help='epoch number (default: 30k)')
    parser.add_argument('--lr', type=float, default=0.01, help='learning rate (default: 0.01)')
    parser.add_argument('--temp', type=float, default=20.0, help='multiplying factor for norm loss')
    parser.add_argument('--lr_policy', type=str, default='poly', choices=['poly', 'step', 'multi_poly'], help='learning rate scheduler policy')
    parser.add_argument('--step_size', type=int, default=10000)
    parser.add_argument('--crop_val', action='store_true', default=False, help='crop validation (default: False)')
    parser.add_argument('--batch_size', type=int, default=16, help='batch size (default: 16)')
    parser.add_argument('--val_batch_size', type=int, default=4, help='batch size for validation (default: 4)')
    parser.add_argument('--crop_size', type=int, default=513)
    # Checkpointing / loss / misc.
    parser.add_argument('--ckpt', default=None, type=str, help='restore from checkpoint')
    parser.add_argument('--continue_training', action='store_true', default=False)
    parser.add_argument('--loss_type', type=str, default='nn_cross_entropy', choices=['cross_entropy', 'nn_cross_entropy'], help='loss type (default: False)')
    parser.add_argument('--gpu_id', type=str, default='0', help='GPU ID')
    parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay (default: 1e-4)')
    parser.add_argument('--random_seed', type=int, default=1, help='random seed (default: 1)')
    parser.add_argument('--print_interval', type=int, default=10, help='print interval of loss (default: 10)')
    parser.add_argument('--val_interval', type=int, default=100, help='epoch interval for eval (default: 100)')
    parser.add_argument('--download', action='store_true', default=False, help='download datasets')
    parser.add_argument('--year', type=str, default='2012', choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
    # Visdom visualization options.
    parser.add_argument('--enable_vis', action='store_true', default=False, help='use visdom for visualization')
    parser.add_argument('--vis_port', type=str, default='13570', help='port for visdom')
    parser.add_argument('--vis_env', type=str, default='main', help='env for visdom')
    parser.add_argument('--checkpoint_dir', type=str, default='checkpoints', help='directory to save checkpoints')
    parser.add_argument('--vis_num_samples', type=int, default=15, help='number of samples for visualization (default: 8)')
    return parser
class _freq_encoder(Function):
    """Autograd wrapper around the CUDA frequency (positional) encoder
    kernels exposed by ``_backend``.

    NOTE(review): the bare ``_fwd(cast_inputs=torch.float32)`` and ``_bwd``
    expressions before the methods below look like decorators whose '@'
    (and a name prefix, presumably torch AMP's custom_fwd/custom_bwd) was
    lost in formatting; confirm against the original source.
    """

    _fwd(cast_inputs=torch.float32)
    def forward(ctx, inputs, degree, output_dim):
        # inputs: [B, input_dim]; moved to GPU and made contiguous for the kernel.
        if (not inputs.is_cuda):
            inputs = inputs.cuda()
        inputs = inputs.contiguous()
        (B, input_dim) = inputs.shape
        outputs = torch.empty(B, output_dim, dtype=inputs.dtype, device=inputs.device)
        _backend.freq_encode_forward(inputs, B, input_dim, degree, output_dim, outputs)
        # Save tensors and dims needed to run the backward kernel.
        ctx.save_for_backward(inputs, outputs)
        ctx.dims = [B, input_dim, degree, output_dim]
        return outputs

    _bwd
    def backward(ctx, grad):
        grad = grad.contiguous()
        (inputs, outputs) = ctx.saved_tensors
        (B, input_dim, degree, output_dim) = ctx.dims
        grad_inputs = torch.zeros_like(inputs)
        _backend.freq_encode_backward(grad, outputs, B, input_dim, degree, output_dim, grad_inputs)
        # No gradients for the non-tensor args (degree, output_dim).
        return (grad_inputs, None, None)
def _test_mpi(info, sdfg, dtype):
    """Compile and run a broadcast SDFG under MPI; every rank must end up with rank 0's data."""
    from mpi4py import MPI as MPI4PY
    world = MPI4PY.COMM_WORLD
    my_rank = world.Get_rank()
    num_ranks = world.Get_size()
    if num_ranks < 2:
        raise ValueError('This test is supposed to be run with at least two processes!')
    # Compile one rank at a time, barrier-separated, so the build directory
    # is never touched by two processes concurrently.
    compiled = None
    for turn in range(num_ranks):
        if turn == my_rank:
            compiled = sdfg.compile()
        world.Barrier()
    length = 128
    buffer = np.full(length, my_rank, dtype=dtype)
    root = np.array([0], dtype=np.int32)
    compiled(x=buffer, root=root, n=length)
    # After the broadcast, all ranks must hold root's (rank 0's) values.
    expected = np.full(length, 0, dtype=dtype)
    if not np.allclose(buffer, expected):
        raise ValueError('The received values are not what I expected.')
def register_Ns3CsParamVectorTlvValue_methods(root_module, cls):
    """Register Python bindings for ns3::CsParamVectorTlvValue on `cls`.

    Generated-style binding code: declares the copy constructor, default
    constructor, and the virtual Copy/Deserialize methods.
    """
    # Copy constructor taking a const reference.
    cls.add_constructor([param('ns3::CsParamVectorTlvValue const &', 'arg0')])
    # Default constructor.
    cls.add_constructor([])
    # virtual CsParamVectorTlvValue* Copy() const
    cls.add_method('Copy', 'ns3::CsParamVectorTlvValue *', [], is_const=True, is_virtual=True)
    # virtual uint32_t Deserialize(Buffer::Iterator start, uint64_t valueLength)
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')], is_virtual=True)
    return
def cauchy_conj_components_lazy(v, z, w, type=1):
    """Cauchy kernel sum over conjugate pole pairs, evaluated with pykeops LazyTensors.

    Splits v and w into real/imaginary parts and builds the numerator and
    denominator of the conjugate-symmetric Cauchy sum lazily, so the N-dim
    reduction happens inside keops without materializing the (L, N) kernel.
    `type` selects between two algebraically equivalent formulations.
    Returns the reduced result with the trailing singleton dimension removed.
    NOTE(review): assumes z is purely imaginary (only z.imag is used) —
    consistent with callers evaluating on the imaginary axis; confirm.
    """
    (v, z, w) = _broadcast_dims(v, z, w)
    (v_r, v_i) = (v.real.contiguous(), v.imag.contiguous())
    (w_r, w_i) = (w.real.contiguous(), w.imag.contiguous())
    z_i = z.imag.contiguous()
    # Lift components to LazyTensors with shapes (..., 1, N, 1) for v/w and
    # (..., L, 1, 1) for z, so keops broadcasts over the (L, N) grid.
    v_r = LazyTensor(rearrange(v_r, '... N -> ... 1 N 1'))
    v_i = LazyTensor(rearrange(v_i, '... N -> ... 1 N 1'))
    w_r = LazyTensor(rearrange(w_r, '... N -> ... 1 N 1'))
    w_i = LazyTensor(rearrange(w_i, '... N -> ... 1 N 1'))
    z_i = LazyTensor(rearrange(z_i, '... L -> ... L 1 1'))
    if (type == 1):
        # Expanded real/imag form of v~ / (z - w) + conj pair.
        num = ((((- v_r) * w_r) - (v_i * w_i)) + ((1j * z_i) * v_r))
        denom = ((((w_r ** 2) + (w_i ** 2)) - (z_i ** 2)) - ((2j * w_r) * z_i))
    else:
        # Alternative form factoring through z = -w_r + i*z_i.
        z = ((- w_r) + (1j * z_i))
        num = ((v_r * z) - (v_i * w_i))
        denom = ((z * z) + (w_i ** 2))
    r = (num / denom)
    # Reduce over the N (pole) axis; factor 2 accounts for the conjugate pair.
    r = (2 * r.sum(dim=(len(z_i.shape) - 1)))
    return r.squeeze((- 1))
def combine_paths(*args, **kws):
    """Glob-expand the cross-product join of the given path components.

    Each positional argument is a glob pattern or a list of patterns; empty
    arguments are dropped. With one argument, returns the union of its glob
    matches; with two, globs every os.path.join(a0, a1) combination; with
    more, reduces pairwise via recursion. Returns a flat list of matching
    paths (only paths that exist, since glob does the matching).
    """
    # Normalize: drop falsy args, wrap bare strings into single-item lists.
    r = []
    for a in args:
        if (not a):
            continue
        if is_string(a):
            a = [a]
        r.append(a)
    args = r
    if (not args):
        return []
    if (len(args) == 1):
        # Union of glob matches for each pattern in the single list.
        result = reduce((lambda a, b: (a + b)), map(glob, args[0]), [])
    elif (len(args) == 2):
        # Cross product of the two lists, joined then globbed.
        result = []
        for a0 in args[0]:
            for a1 in args[1]:
                result.extend(glob(os.path.join(a0, a1)))
    else:
        # Collapse the first two lists, then recurse on the remainder.
        # (The combined flat list of strings is re-normalized by the
        # wrapping step at the top of the recursive call.)
        result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))
    log.debug('(paths: %s)', ','.join(result))
    return result
def makestr(node):
    """Render an AST node (or list / str / bytes / scalar) as an s-expression string.

    AST nodes become "(TypeName (field child)...)" with "-TypeName-" marking
    empty nodes; lists become "(list ...)" or '' when nothing rendered;
    leaves are tagged with their type name.
    """
    if isinstance(node, ast.AST):
        label = typename(node)
        rendered_fields = []
        for field_name, field_value in ast.iter_fields(node):
            child = makestr(field_value)
            if child:
                rendered_fields.append(' (' + field_name + ' ' + child + ')')
        if rendered_fields:
            return '(' + label + ''.join(rendered_fields) + ')'
        # No renderable fields: mark the node as empty.
        return '(' + label + ' -' + label + '-' + ')'
    elif isinstance(node, list):
        children = [makestr(item) for item in node]
        children = [c for c in children if c]
        if not children:
            return ''
        return '(list ' + ' '.join(children) + ')'
    elif isinstance(node, str):
        return '(str ' + escape(node) + ')'
    elif isinstance(node, bytes):
        return '(bytes ' + escape(str(node)) + ')'
    else:
        return '(' + typename(node) + ' ' + str(node) + ')'
def rank_eval(gt_items, pred_items):
    """Retrieval metrics (R@1/5/10, mAP@10, median rank) with jackknife estimates.

    gt_items maps each caption to its single ground-truth filename;
    pred_items maps each caption to a ranked list of predicted filenames.
    Returns a dict of jackknife point estimates.

    BUG FIX(review): the original returned {'': r1, '': r5, '': r10, '': map, ...}
    — four duplicate empty-string keys, so only the last survived. Restored
    distinct metric names.
    """
    R1, R5, R10, mAP10, med_rank = [], [], [], [], []
    for i, cap in enumerate(gt_items):
        gt_fname = gt_items[cap]
        pred_fnames = pred_items[cap]
        preds = np.asarray([(gt_fname == pred) for pred in pred_fnames])
        # Rank of the (single) correct item; raises ValueError if it is
        # missing from the predictions entirely (as in the original).
        rank_value = min([idx for (idx, retrieval_success) in enumerate(preds) if retrieval_success])
        R1.append(np.sum(preds[:1], dtype=float))
        R5.append(np.sum(preds[:5], dtype=float))
        R10.append(np.sum(preds[:10], dtype=float))
        # 1-based positions of hits within the top 10.
        positions = np.arange(1, 11, dtype=float)[(preds[:10] > 0)]
        med_rank.append(rank_value)
        if len(positions) > 0:
            precisions = np.divide(np.arange(1, (len(positions) + 1), dtype=float), positions)
            avg_precision = np.sum(precisions, dtype=float)
            mAP10.append(avg_precision)
        else:
            mAP10.append(0.0)
    # Jackknife point estimates (confidence intervals discarded).
    (r1_estimate, _, _, _) = jackknife.jackknife_stats(np.asarray(R1), np.mean, 0.95)
    (r5_estimate, _, _, _) = jackknife.jackknife_stats(np.asarray(R5), np.mean, 0.95)
    (r10_estimate, _, _, _) = jackknife.jackknife_stats(np.asarray(R10), np.mean, 0.95)
    (map_estimate, _, _, _) = jackknife.jackknife_stats(np.asarray(mAP10), np.mean, 0.95)
    (medrank_estimate, _, _, _) = jackknife.jackknife_stats(np.asarray(med_rank), np.median, 0.95)
    return {'R1': r1_estimate, 'R5': r5_estimate, 'R10': r10_estimate,
            'mAP10': map_estimate, 'medRank': medrank_estimate}
def save_results(args, sentences, predictions, idx2label, queries_text):
    """Score predictions per class and overall, print accuracies, and dump JSON results."""
    per_label_hits = defaultdict(list)
    example2pred = {}
    for i, (sentence, pred) in enumerate(zip(sentences, predictions)):
        gold = int(sentence['gold'])
        pred = int(pred)
        per_label_hits[gold].append(gold == pred)
        example2pred[i] = {
            'input': sentence['text'],
            'gold': idx2label[gold],
            'pred': idx2label[pred],
            'uid': sentence['uid'],
            'exidx': sentence['exidx'],
        }
    cls2acc = {}
    all_hits = []
    for label_idx, hits in per_label_hits.items():
        label_name = queries_text[label_idx]
        all_hits.extend(hits)
        acc = len([h for h in hits if h == True]) / len(hits)
        cls2acc[label_name] = acc
        print(f'Label: {label_name}, Accuracy: {acc}, for {len(hits)} examples.')
    print()
    total_acc = len([h for h in all_hits if h == True]) / len(all_hits)
    cls2acc['total'] = total_acc
    print(f'Total: {total_acc}, for {len(all_hits)} examples.')
    # Persist per-example predictions and per-class accuracies.
    if not os.path.exists(f'results_prompting/{args.dataset}/'):
        os.makedirs(f'results_prompting/{args.dataset}/')
    with open(f'results_prompting/{args.dataset}/{args.paradigm}_{args.model}_{args.seed}_example2preds.json', 'w') as f:
        json.dump(example2pred, f)
    with open(f'results_prompting/{args.dataset}/{args.paradigm}_{args.model}_{args.seed}_cls2acc.json', 'w') as f:
        json.dump(cls2acc, f)
def create_unique_name(name, list_names):
    """Return `name`, appending underscores until it is absent from `list_names`."""
    candidate = name
    while candidate in list_names:
        candidate = candidate + '_'
    return candidate
def _upgrade_greater_than(old_constraint):
    """Convert a legacy GreaterThan constraint dict into new-style constraint dicts.

    Column-vs-column comparisons become one Inequality; scalar comparisons
    become one ScalarInequality per column. Returns [] (with a warning) when
    both sides list multiple columns, which cannot be upgraded automatically.
    """
    scalar = old_constraint.get('scalar')
    high = old_constraint.get('high')
    low = old_constraint.get('low')
    strict = old_constraint.get('strict', False)
    high_is_string = isinstance(high, str)
    low_is_string = isinstance(low, str)

    if scalar is None:
        # Column-vs-column: only a 1:1 pairing maps onto Inequality.
        high_has_multiple = isinstance(high, list) and len(high) != 1
        low_has_multiple = isinstance(low, list) and len(low) != 1
        high_column_name = f"'{high}'" if high_is_string else high
        low_column_name = f"'{low}'" if low_is_string else low
        if high_has_multiple or low_has_multiple:
            warnings.warn(f"Unable to upgrade the GreaterThan constraint specified for 'high' {high_column_name} and 'low' {low_column_name}. Manually add {Inequality.__name__} constraints to capture this logic.")
            return []
        return [{
            'constraint_name': Inequality.__name__,
            'high_column_name': high if high_is_string else high[0],
            'low_column_name': low if low_is_string else low[0],
            'strict_boundaries': strict,
        }]

    # Scalar comparisons: one ScalarInequality per affected column.
    if scalar == 'low':
        columns = [high] if high_is_string else high
        relation = '>' if strict else '>='
        value = low
    else:
        columns = [low] if low_is_string else low
        relation = '<' if strict else '<='
        value = high
    return [
        {'constraint_name': ScalarInequality.__name__, 'column_name': column,
         'relation': relation, 'value': value}
        for column in columns
    ]
class AutoModelForSequenceClassification(object):
    """Factory for sequence-classification models; not meant to be instantiated.

    Use the `from_config`/`from_pretrained` classmethods, which dispatch on
    the config's class via MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.

    BUG FIX(review): `from_config` and `from_pretrained` took `cls` as their
    first parameter but were missing the @classmethod decorator, so calling
    them on the class passed the config as `cls`. Decorators restored.
    """

    def __init__(self):
        raise EnvironmentError('AutoModelForSequenceClassification is designed to be instantiated using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or `AutoModelForSequenceClassification.from_config(config)` methods.')

    @classmethod
    def from_config(cls, config):
        """Instantiate (without loading weights) the model class mapped to `config`'s type."""
        for (config_class, model_class) in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()))))

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Load pretrained weights for the model class mapped to the (resolved) config's type."""
        config = kwargs.pop('config', None)
        if not isinstance(config, PretrainedConfig):
            # No explicit config: resolve one from the model name/path.
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        for (config_class, model_class) in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()))))
class st_gcn(nn.Module):
    """Spatial-temporal GCN block: graph conv, node-mean pooling, then an LSTM.

    BUG FIX(review): `forward` contained a leftover debug `print(x)` that
    dumped the full activation tensor every call; removed.
    """

    def __init__(self, in_channels, out_channels, kernel_size, device, stride=1, dropout=0.5, residual=True, batch_size=64):
        super().__init__()
        print('Dropout={}'.format(dropout))
        # kernel_size = (temporal_kernel, spatial_kernel); temporal must be odd
        # so the 'same' padding below is symmetric.
        assert len(kernel_size) == 2
        assert (kernel_size[0] % 2) == 1
        padding = ((kernel_size[0] - 1) // 2, 0)
        self.gcn = ConvTemporalGraphical(in_channels, out_channels, kernel_size[1])
        self.lstm_layer = fMRI_LSTM(64, 64, 1, batch_size=batch_size)
        self.batch_size = batch_size
        # NOTE(review): self.tcn is built but never used in forward(); kept
        # for checkpoint/state-dict compatibility — confirm before removing.
        self.tcn = nn.Sequential(nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), nn.Conv2d(out_channels, out_channels, (kernel_size[0], 1), (stride, 1), padding), nn.BatchNorm2d(out_channels), nn.Dropout(dropout, inplace=True))
        self.device = device
        if not residual:
            self.residual = (lambda x: 0)
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = (lambda x: x)
        else:
            # 1x1 projection to match channels/stride for the shortcut.
            self.residual = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=(stride, 1)), nn.BatchNorm2d(out_channels))
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, A):
        """Apply graph conv over (x, A), pool the node axis, and run the LSTM.

        Returns (lstm_output, A); A is passed through unchanged.
        """
        res = self.residual(x)
        x, A = self.gcn(x, A)
        # Average over the node (graph-vertex) dimension.
        x = torch.mean(x, dim=3)
        x = x + res
        x = x.permute(0, 2, 1)
        # Re-initialize the LSTM hidden state each forward pass.
        self.lstm_layer.hidden = self.lstm_layer.init_hidden(batch_size=self.batch_size)
        x = self.lstm_layer(x)
        return x, A
class Model(nn.Module):
    """Tiny test module: stores a target shape and maps x to the pair (x + 1, x + 2)."""

    def __init__(self, new_shape):
        super().__init__()
        # Kept as an attribute only; forward() does not reshape.
        self.new_shape = new_shape

    def forward(self, x):
        incremented = x + 1
        incremented_twice = x + 2
        return incremented, incremented_twice
def test_order_by_generator(mock_database):
    """The ORDER BY generator should emit 6 tagged single-column queries with questions."""
    table_name = 'example_table'
    generated_sql = OrderByGenerator(mock_database).sql_generate(table_name)
    assert 'ORDERBY-SINGLE' in generated_sql['sql_tags']
    assert len(generated_sql['queries']) == 6
    # The ascending order on col1 must appear with its natural-language question.
    expected_query = f'SELECT * FROM "{table_name}" ORDER BY "col1" ASC'
    expected_question = f'Show all data ordered by "col1" in ascending order for the table "{table_name}"'
    assert expected_query in generated_sql['queries']
    assert expected_question in generated_sql['questions']
def get_all_model_names():
    """Collect every model class name registered in the HF auto-model name mappings.

    Scans the PyTorch, TensorFlow, and Flax auto-modules for
    *_MAPPING_NAMES constants and gathers their values (strings or
    lists/tuples of strings). Returns a sorted list of unique names.
    """
    names = set()
    framework_prefixes = ('MODEL_', 'TF_MODEL_', 'FLAX_MODEL_')
    for module_name in ('modeling_auto', 'modeling_tf_auto', 'modeling_flax_auto'):
        module = getattr(transformers.models.auto, module_name, None)
        if module is None:
            # Framework not installed / module absent — skip it.
            continue
        for attr_name in dir(module):
            if not (attr_name.endswith('_MAPPING_NAMES') and attr_name.startswith(framework_prefixes)):
                continue
            mapping = getattr(module, attr_name)
            if mapping is None:
                continue
            for entry in mapping.values():
                if isinstance(entry, (list, tuple)):
                    names.update(entry)
                elif isinstance(entry, str):
                    names.add(entry)
    return sorted(names)
class VizdoomEnvMultiplayer(VizdoomEnv):
    """Multiplayer ViZDoom env: one process per player, rank 0 hosts the game.

    Extends VizdoomEnv with host/join networking, per-episode position
    histograms (for exploration bookkeeping), and item-collection info.
    """

    def __init__(self, level, player_id, port, num_players, skip_frames, level_map='map01', bin_resolution=32):
        super().__init__(level, skip_frames=skip_frames, level_map=level_map)
        self.port = port
        self.player_id = player_id  # 0 == server/host, others join
        self.num_players = num_players
        self.bin_resolution = bin_resolution  # world units per histogram bin
        self.timestep = 0
        self.reward = 0.0
        self.update_state = True
        (self.current_histogram, self.previous_histogram) = (None, None)
        # Build position histograms only when the scenario declares map bounds.
        if self.coord_limits:
            X = (self.coord_limits[2] - self.coord_limits[0])
            Y = (self.coord_limits[3] - self.coord_limits[1])
            self.x_bins = int((X / self.bin_resolution))
            self.y_bins = int((Y / self.bin_resolution))
            self.current_histogram = np.zeros((self.x_bins, self.y_bins), dtype=np.int32)
            self.previous_histogram = np.zeros_like(self.current_histogram)
            # Cumulative visit counts across all episodes.
            self.visit_counts = np.zeros((self.x_bins, self.y_bins), dtype=np.int32)

    def _is_server(self):
        # Player 0 hosts the multiplayer game.
        return (self.player_id == 0)

    def _ensure_initialized(self, mode='algo'):
        """Lazily create and configure the DoomGame (host or join) on first use."""
        if self.initialized:
            return
        self.game = DoomGame()
        scenarios_dir = os.path.join(os.path.dirname(__file__), 'scenarios')
        self.game.load_config(os.path.join(scenarios_dir, CONFIGS[self.level][0]))
        # Spread the per-step living reward across skipped frames.
        live_rew = self.game.get_living_reward()
        self.game.set_living_reward((live_rew / self.skip_frames))
        # 'algo' uses a small training resolution; 'watch' a larger viewing one.
        if (mode == 'algo'):
            (self.screen_w, self.screen_h, self.channels) = (160, 120, 3)
            self.screen_resolution = ScreenResolution.RES_160X120
        elif (mode == 'watch'):
            (self.screen_w, self.screen_h, self.channels) = (400, 300, 3)
            self.screen_resolution = ScreenResolution.RES_400X300
        self.calc_observation_space()
        self.game.set_screen_resolution(self.screen_resolution)
        # NOTE(review): np's random_integers is deprecated; kept as-is here.
        self.game.set_seed(self.rng.random_integers(0, ((2 ** 32) - 1)))
        self.game.set_window_visible(False)
        if self._is_server():
            # Host the game for num_players participants.
            self.game.add_game_args(f'-host {self.num_players} -netmode 0 -port {self.port} ')
        else:
            self.game.add_game_args(f'-join 127.0.0.1:{self.port}')
        self.game.add_game_args(f'+name Player{self.player_id}')
        self.game.set_mode(Mode.PLAYER)
        self.game.init()
        self.initialized = True

    def reset(self, mode='algo'):
        """Start a new episode and return the initial HWC observation."""
        self._ensure_initialized(mode)
        self.timestep = 0
        self.full_step = True
        self.game.new_episode()
        # Swap histograms so the previous episode's map is retained,
        # and clear the one for the new episode.
        if ((self.current_histogram is not None) and (self.previous_histogram is not None)):
            swap = self.current_histogram
            self.current_histogram = self.previous_histogram
            self.previous_histogram = swap
            self.current_histogram.fill(0)
        self.state = self.game.get_state()
        img = self.state.screen_buffer
        # ViZDoom returns CHW; convert to HWC.
        return np.transpose(img, (1, 2, 0))

    def _get_info(self, eps=1e-08):
        """Collect normalized position/velocity/orientation and item-pickup features."""
        # Positions normalized to [0, 1] within the map bounds.
        x_pos = ((self.game.get_game_variable(GameVariable.POSITION_X) - self.coord_limits[0]) / (self.coord_limits[2] - self.coord_limits[0]))
        y_pos = ((self.game.get_game_variable(GameVariable.POSITION_Y) - self.coord_limits[1]) / (self.coord_limits[3] - self.coord_limits[1]))
        # Velocity scaled by 7.5 — presumably max run speed; confirm.
        x_vel = (self.game.get_game_variable(GameVariable.VELOCITY_X) / 7.5)
        y_vel = (self.game.get_game_variable(GameVariable.VELOCITY_Y) / 7.5)
        orient = (((self.game.get_game_variable(GameVariable.CAMERA_ANGLE) / 360) * 2) * np.pi)
        sin_orient = np.sin(orient)
        cos_orient = np.cos(orient)
        # USER1/USER2 encode which player collected each item (-1 = nobody,
        # 2 appears to mean "both"/special — see the all-ones encoding below).
        item1_coll = int(self.game.get_game_variable(GameVariable.USER1))
        item2_coll = int(self.game.get_game_variable(GameVariable.USER2))
        if (item1_coll == 2):
            item1_onehot = np.ones(2)
        else:
            item1_onehot = np.zeros(2)
            if (item1_coll != (- 1)):
                item1_onehot[item1_coll] = 1
        if (item2_coll == 2):
            item2_onehot = np.ones(2)
        else:
            item2_onehot = np.zeros(2)
            if (item2_coll != (- 1)):
                item2_onehot[item2_coll] = 1
        n_found_treasures = (int((item1_coll in (self.player_id, 2))) + int((item2_coll in (self.player_id, 2))))
        # eps keeps a position of exactly 1.0 inside the last bin.
        x_ind = int(((x_pos - eps) * self.x_bins))
        y_ind = int(((y_pos - eps) * self.y_bins))
        x_onehot = np.zeros(self.x_bins)
        x_onehot[x_ind] = 1
        y_onehot = np.zeros(self.y_bins)
        y_onehot[y_ind] = 1
        return {'pos': (x_pos, y_pos), 'vel': (x_vel, y_vel), 'orient': (sin_orient, cos_orient), 'item1': item1_onehot, 'item2': item2_onehot, 'hist_inds': [x_ind, y_ind], 'onehot_pos': np.concatenate((x_onehot, y_onehot)),
                'n_found_treasures': n_found_treasures}

    def _update_histogram(self, info):
        """Increment the per-episode and cumulative visit counters for the current cell."""
        if (self.current_histogram is None):
            return
        (x_ind, y_ind) = info['hist_inds']
        self.current_histogram[(x_ind, y_ind)] += 1
        self.visit_counts[(x_ind, y_ind)] += 1

    def step(self, action):
        """Advance one tick (multiplayer-synchronized) and return (obs, reward, done, info)."""
        self._ensure_initialized()
        info = {'num_frames': self.skip_frames}
        # Accept either a discrete action index or a full button vector.
        if (type(action) is int):
            act = np.zeros(self.action_space.n)
            act[action] = 1
            act = np.uint8(act)
            act = act.tolist()
        elif (type(action) is np.ndarray):
            act = action.tolist()
        self.game.set_action(act)
        # advance_action(1, True): one tic, updating the game state.
        self.game.advance_action(1, True)
        # Accumulate reward across tics; flushed on full steps below.
        self.reward += self.game.get_last_reward()
        self.timestep += 1
        state = self.game.get_state()
        done = self.game.is_episode_finished()
        if (not done):
            observation = np.transpose(state.screen_buffer, (1, 2, 0))
            info.update(self._get_info())
            self._update_histogram(info)
            info.update({'visit_counts': self.visit_counts})
            self.last_info = info
            self.last_obs = observation
        else:
            # Episode over: the engine no longer has a state; reuse the last one.
            info = self.last_info
            observation = self.last_obs
        reward = self.reward
        if self.full_step:
            self.reward = 0.0
        return (observation, reward, done, info)

    def get_obs(self):
        """Current HWC frame, or the last frame once the episode has finished."""
        if (not self.game.is_episode_finished()):
            state = self.game.get_state()
            observation = np.transpose(state.screen_buffer, (1, 2, 0))
        else:
            observation = self.last_obs
        return observation
def smart_tokenizer_and_embedding_resize(special_tokens_dict: Dict, tokenizer: transformers.PreTrainedTokenizer, model: transformers.PreTrainedModel):
    """Add special tokens and resize the model's embeddings to match.

    Newly created embedding rows (input and output) are initialized to the
    mean of the pre-existing rows instead of random values.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))
    if num_new_tokens <= 0:
        return
    input_matrix = model.get_input_embeddings().weight.data
    output_matrix = model.get_output_embeddings().weight.data
    # Mean of the original rows becomes the init for every new row.
    input_mean = input_matrix[:-num_new_tokens].mean(dim=0, keepdim=True)
    output_mean = output_matrix[:-num_new_tokens].mean(dim=0, keepdim=True)
    input_matrix[-num_new_tokens:] = input_mean
    output_matrix[-num_new_tokens:] = output_mean
class BasicBlockV2(nn.Module):
    """Pre-activation (ResNet v2) basic block with optional CBAM attention.

    BN -> ReLU -> conv3x3 -> BN -> ReLU -> conv3x3, added to a shortcut.
    The very first block of the first layer skips the leading BN (its input
    is already normalized); the ReLU is applied unconditionally, matching
    the original ordering.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, is_first_block_of_first_layer=False, use_cbam=False):
        super(BasicBlockV2, self).__init__()
        self.is_first_block_of_first_layer = is_first_block_of_first_layer
        if not is_first_block_of_first_layer:
            self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.relu = nn.ReLU(True)
        self.downsample = downsample
        self.stride = stride
        self.cbam = CBAM(planes) if use_cbam else None

    def forward(self, x):
        # Pre-activation: BN (skipped in the first block) then ReLU.
        preact = x if self.is_first_block_of_first_layer else self.bn1(x)
        preact = self.relu(preact)
        # Shortcut: projection sees the pre-activated tensor; identity uses x.
        if self.downsample is not None:
            identity = self.downsample(preact)
        else:
            identity = x
        out = self.conv1(preact)
        out = self.relu(self.bn2(out))
        out = self.conv2(out)
        if self.cbam is not None:
            out = self.cbam(out)
        return out + identity
def merge_input_batches(input_batches: List[InputBatch], max_num_samples: Optional[int]=None) -> InputBatch:
    """Concatenate several InputBatch objects into one, optionally truncating to a sample budget.

    Tensor attributes are concatenated along dim 0, int attributes are taken
    from the first batch, `ray_indices` are offset by the cumulative ray
    counts before concatenation. When `max_num_samples` is given and
    exceeded, all attributes are truncated at the ray boundary containing
    that sample. Recomputes `unique_frame_numbers` at the end.
    """
    final_input_batch = InputBatch()
    # Merge every attribute except ray_indices (handled separately below).
    for (key, val) in vars(input_batches[0]).items():
        if (key != 'ray_indices'):
            if (val is None):
                setval = None
            elif isinstance(val, torch.Tensor):
                setval = torch.cat([getattr(inp_batch, key) for inp_batch in input_batches], dim=0)
            elif isinstance(val, int):
                # Scalar metadata: keep the first batch's value.
                setval = getattr(input_batches[0], key)
            else:
                raise RuntimeError('Unknown data type in the input_batches!')
            setattr(final_input_batch, key, setval)
    if (input_batches[0].ray_indices is not None):
        # Shift each batch's ray indices by the rays accumulated so far.
        final_ray_indices = [input_batches[0].ray_indices]
        previous_accumulated_batch_size = 0
        for i in range(1, len(input_batches)):
            previous_accumulated_batch_size += input_batches[(i - 1)].num_rays
            final_ray_indices.append((input_batches[i].ray_indices + previous_accumulated_batch_size))
        final_input_batch.ray_indices = torch.cat(final_ray_indices, dim=0)
    if (max_num_samples is not None):
        num_rays = final_input_batch.num_rays
        num_samples = final_input_batch.num_samples
        if (num_samples > max_num_samples):
            # Ray index owning the first sample past the budget; everything
            # from that ray onward is dropped.
            final_ray_cutoff = final_input_batch.ray_indices[max_num_samples]
            for (key, val) in vars(final_input_batch).items():
                if isinstance(val, torch.Tensor):
                    if (key == 'ray_masks'):
                        # NOTE(review): masks are kept while their running
                        # count stays below the cutoff — assumes ray_masks
                        # aligns with rays via cumsum; confirm.
                        setval = val[(val.cumsum(0) < final_ray_cutoff)]
                    elif (val.shape[0] == num_rays):
                        # Per-ray tensors: plain prefix truncation.
                        setval = val[:final_ray_cutoff]
                    elif (val.shape[0] == num_samples):
                        # Per-sample tensors: keep samples of surviving rays.
                        setval = val[(final_input_batch.ray_indices < final_ray_cutoff)]
                    setattr(final_input_batch, key, setval)
    if (final_input_batch.frame_numbers is not None):
        # Deduplicate frame ids into an (N, 1) column tensor.
        final_input_batch.unique_frame_numbers = torch.unique(final_input_batch.frame_numbers, sorted=False, return_inverse=False).view((- 1), 1)
    return final_input_batch
def main():
    """Regenerate scipy.special's generated Cython sources when inputs changed."""
    parser = optparse.OptionParser(usage=(__doc__ or '').strip())
    options, args = parser.parse_args()
    if args:
        parser.error('invalid number of arguments')

    here = os.path.dirname(__file__)
    src_files = (
        os.path.abspath(__file__),
        os.path.abspath(os.path.join(here, 'functions.json')),
        os.path.abspath(os.path.join(here, 'add_newdocs.py')),
    )
    dst_files = ('_ufuncs.pyx', '_ufuncs_defs.h', '_ufuncs_cxx.pyx', '_ufuncs_cxx.pxd', '_ufuncs_cxx_defs.h', 'cython_special.pyx', 'cython_special.pxd')

    os.chdir(BASE_DIR)
    # Skip regeneration when every output is newer than every input.
    if all_newer(src_files, dst_files):
        print('scipy/special/_generate_pyx.py: all files up-to-date')
        return

    ufuncs = []
    fused_funcs = []
    with open('functions.json') as data:
        functions = json.load(data)
    for func_name, signature in functions.items():
        ufuncs.append(Ufunc(func_name, signature))
        fused_funcs.append(FusedFunc(func_name, signature))
    generate_ufuncs('_ufuncs', '_ufuncs_cxx', ufuncs)
    generate_fused_funcs('cython_special', '_ufuncs', fused_funcs)
def test_sampler_init_gpu_when_not_available(esm6, mock_no_gpu):
    """Constructing the sampler with device='gpu' must raise when no GPU is present."""
    with pytest.raises(Exception):
        esm_sampler.ESM_sampler(esm6, device='gpu')
def compute_derivedSF(shortAxis, longAxis, area, perimt, MinDiameter, MaxDiameter, hull_area, hull_perimtr):
    """Derive particle shape factors from basic geometric measurements.

    Returns (esf, csf, sf1, sf2, elg, cvx, cmpt):
    ellipse ratio, circularity, two diameter ratios, elongation,
    convexity, and hull compactness.
    """
    four_pi = 4 * np.pi
    esf = shortAxis / longAxis             # ellipse shape factor
    csf = four_pi * area / perimt ** 2     # circularity (1.0 for a circle)
    sf1 = shortAxis / MaxDiameter
    sf2 = MinDiameter / MaxDiameter
    elg = MaxDiameter / MinDiameter        # elongation
    cvx = np.sqrt(area / hull_area)        # convexity
    # NOTE: hull perimeter is not squared here — matches the original formula.
    cmpt = four_pi * area / hull_perimtr
    return (esf, csf, sf1, sf2, elg, cvx, cmpt)
# BUG FIX(review): the original began with a bare `.parametrize(` — the
# `@pytest.mark` decorator prefix was lost, leaving a syntax error. Restored.
@pytest.mark.parametrize('test_x, expected', [
    (tf.constant([[0.0, 0.0]]), tf.constant([[0., 0.]])),
    (tf.constant([[0.5, 1.0]]), tf.constant([[0., 0.9873655]])),
    # Batched inputs: leading batch dimensions must be preserved.
    (tf.constant([[[0.5, 1.0]], [[0.0, 0.0]]]), tf.constant([[[0., 0.9873655]], [[0., 0.]]])),
    (tf.constant([[[0.5, 1.0], [0.0, 0.0]]]), tf.constant([[[0., 0.9873655], [0., 0.]]])),
])
def test_vlmop2_has_expected_output(test_x: TensorType, expected: TensorType) -> None:
    """VLMOP2 objective matches precomputed reference values, including batched shapes."""
    f = VLMOP2(2).objective
    npt.assert_allclose(f(test_x), expected, rtol=1e-05)
def average_precision(r):
    """Average precision for a binary relevance vector; 0.0 when nothing is relevant.

    Nonzero entries of `r` are treated as relevant; precision@k is taken at
    each relevant position and averaged.
    """
    relevance = np.asarray(r) != 0
    precisions = [precision_at_k(relevance, k + 1) for k in range(relevance.size) if relevance[k]]
    if not precisions:
        return 0.0
    return np.mean(precisions)
def _find_single_yield_expression(node):
    """Return the sole yield-statement entry found in `node`, or (None, None) otherwise."""
    yields = _find_yield_statements(node)
    if len(yields) == 1:
        return yields[0]
    # Zero or multiple yields: caller expects an empty pair.
    return (None, None)
def create_train_model(model_creator, hparams, scope=None, num_workers=1, jobid=0, extra_args=None):
    """Build the TF1 training graph: vocab tables, sharded dataset iterator, and model.

    Returns a TrainModel namedtuple bundling the graph, model, iterator, and
    the skip-count placeholder used to fast-forward the dataset on resume.
    """
    # Parallel corpus files: <train_prefix>.<src> / <train_prefix>.<tgt>.
    src_file = ('%s.%s' % (hparams.train_prefix, hparams.src))
    tgt_file = ('%s.%s' % (hparams.train_prefix, hparams.tgt))
    src_vocab_file = hparams.src_vocab_file
    tgt_vocab_file = hparams.tgt_vocab_file
    graph = tf.Graph()
    with graph.as_default(), tf.container((scope or 'train')):
        (src_vocab_table, tgt_vocab_table) = vocab_utils.create_vocab_tables(src_vocab_file, tgt_vocab_file, hparams.share_vocab)
        src_dataset = tf.data.TextLineDataset(src_file)
        tgt_dataset = tf.data.TextLineDataset(tgt_file)
        # Lets callers skip already-consumed examples when resuming training.
        skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)
        # Input pipeline pinned to CPU; shards across workers by jobid.
        with tf.device('CPU:0'):
            iterator = iterator_utils.get_iterator(src_dataset, tgt_dataset, src_vocab_table, tgt_vocab_table, batch_size=hparams.batch_size, sos=hparams.sos, eos=hparams.eos, random_seed=hparams.random_seed, num_buckets=hparams.num_buckets, src_max_len=hparams.src_max_len, tgt_max_len=hparams.tgt_max_len, skip_count=skip_count_placeholder, num_shards=num_workers, shard_index=jobid)
        # Optional custom device placement for the model itself.
        model_device_fn = None
        if extra_args:
            model_device_fn = extra_args.model_device_fn
        with tf.device(model_device_fn):
            model = model_creator(hparams, iterator=iterator, mode=tf.contrib.learn.ModeKeys.TRAIN, source_vocab_table=src_vocab_table, target_vocab_table=tgt_vocab_table, scope=scope, extra_args=extra_args)
    return TrainModel(graph=graph, model=model, iterator=iterator, skip_count_placeholder=skip_count_placeholder)
class LeftRegularBand(UniqueRepresentation, Parent):
    """Example finite semigroup: the left regular band generated by an alphabet.

    Elements are duplicate-free strings over the alphabet; the product of x
    and y keeps x and appends the letters of y not already present in x.
    """

    def __init__(self, alphabet=('a', 'b', 'c', 'd')):
        self.alphabet = alphabet
        Parent.__init__(self, category=Semigroups().Finite().FinitelyGenerated())

    def _repr_(self):
        return ('An example of a finite semigroup: the left regular band generated by %s' % (self.alphabet,))

    def product(self, x, y):
        """x * y: x followed by the letters of y not already in x."""
        assert (x in self)
        assert (y in self)
        x = x.value
        y = y.value
        return self((x + ''.join((c for c in y if (c not in x)))))

    # BUG FIX(review): the original had a bare `_method` line here (NameError
    # at class creation); restored to Sage's `@cached_method` decorator.
    @cached_method
    def semigroup_generators(self):
        """The family of one-letter generators."""
        return Family([self(i) for i in self.alphabet])

    def an_element(self):
        return self(''.join((self.alphabet[2:] + self.alphabet[0:2])))

    class Element(ElementWrapper):
        # Elements wrap plain strings, compared by value.
        wrapped_class = str
        __lt__ = ElementWrapper._lt_by_value
def get_evaluation_metrics(inputs, targets):
    """Batch-summed SSIM, PSNR, and RMSE between two image batches (0-255 range)."""
    ssim_total = tf.reduce_sum(tf.image.ssim(inputs, targets, max_val=255))
    psnr_total = tf.reduce_sum(tf.image.psnr(inputs, targets, max_val=255))
    rmse_total = tf.reduce_sum(RMSE(inputs, targets))
    return ssim_total, psnr_total, rmse_total
def prewitt(image, mask=None, *, axis=None, mode='reflect', cval=0.0):
    """Prewitt edge-magnitude filter.

    Thin wrapper over _generic_edge_filter using the Prewitt smoothing
    weights; `axis` selects a single-axis derivative (all axes combined when
    None), `mode`/`cval` control boundary handling, and `mask` zeroes the
    response outside the region of interest via _mask_filter_result.
    """
    output = _generic_edge_filter(image, smooth_weights=PREWITT_SMOOTH, axis=axis, mode=mode, cval=cval)
    output = _mask_filter_result(output, mask)
    return output
def test_deprecate_parameter():
    """deprecate_parameter emits a FutureWarning, naming the replacement when given."""
    # Without a replacement: generic deprecation message.
    with pytest.warns(FutureWarning, match='is deprecated from'):
        deprecate_parameter(Sampler(), '0.2', 'a')
    # With a replacement parameter 'b': message points the user to it.
    with pytest.warns(FutureWarning, match="Use 'b' instead."):
        deprecate_parameter(Sampler(), '0.2', 'a', 'b')
def update_preprocessing_parameters(args):
    """Attach dataset-specific NILM preprocessing tables to `args` and return it.

    For the selected dataset (REDD low-freq, UK-DALE, or REFIT) this sets:
    - cutoff: max power (W) to clip each channel to,
    - threshold: on/off power threshold (W) per appliance,
    - min_on / min_off: minimum on/off durations (in samples) used to
      debounce appliance state sequences,
    - c0: per-appliance loss-weighting constants,
    plus the sliding-window stride and the list of house indices to load.
    """
    if (args.dataset_code == 'redd_lf'):
        args.cutoff = {'aggregate': 6000, 'refrigerator': 400, 'washer_dryer': 3500, 'microwave': 1800, 'dishwasher': 1200}
        args.threshold = {'refrigerator': 50, 'washer_dryer': 20, 'microwave': 200, 'dishwasher': 10}
        args.min_on = {'refrigerator': 10, 'washer_dryer': 300, 'microwave': 2, 'dishwasher': 300}
        args.min_off = {'refrigerator': 2, 'washer_dryer': 26, 'microwave': 5, 'dishwasher': 300}
        args.c0 = {'refrigerator': 1e-06, 'washer_dryer': 0.001, 'microwave': 1.0, 'dishwasher': 1.0}
    elif (args.dataset_code == 'uk_dale'):
        args.cutoff = {'aggregate': 6000, 'kettle': 3100, 'fridge': 300, 'washing_machine': 2500, 'microwave': 3000, 'dishwasher': 2500, 'toaster': 3100}
        args.threshold = {'kettle': 2000, 'fridge': 50, 'washing_machine': 20, 'microwave': 200, 'dishwasher': 10, 'toaster': 1000}
        args.min_on = {'kettle': 2, 'fridge': 10, 'washing_machine': 300, 'microwave': 2, 'dishwasher': 300, 'toaster': 2000}
        args.min_off = {'kettle': 0, 'fridge': 2, 'washing_machine': 26, 'microwave': 5, 'dishwasher': 300, 'toaster': 0}
        args.c0 = {'kettle': 1.0, 'fridge': 1e-06, 'washing_machine': 0.01, 'microwave': 1.0, 'dishwasher': 1.0, 'toaster': 1.0}
    elif (args.dataset_code == 'refit'):
        # REFIT uses capitalized appliance names.
        args.cutoff = {'Aggregate': 10000, 'Kettle': 3000, 'Fridge-Freezer': 1700, 'Washing_Machine': 2500, 'Microwave': 1300, 'Dishwasher': 2500, 'TV': 80}
        args.threshold = {'Kettle': 2000, 'Fridge-Freezer': 5, 'Washing_Machine': 20, 'Microwave': 200, 'Dishwasher': 10, 'TV': 10}
        args.min_on = {'Kettle': 2, 'Fridge-Freezer': 10, 'Washing_Machine': 10, 'Microwave': 2, 'Dishwasher': 300, 'TV': 2}
        args.min_off = {'Kettle': 0, 'Fridge-Freezer': 2, 'Washing_Machine': 26, 'Microwave': 5, 'Dishwasher': 300, 'TV': 0}
        args.c0 = {'Kettle': 1.0, 'Fridge-Freezer': 1e-06, 'Washing_Machine': 0.01, 'Microwave': 1.0, 'Dishwasher': 1.0, 'TV': 1.0}
    # Stride (in samples) of the sliding window; REDD is sampled differently.
    args.window_stride = (120 if (args.dataset_code == 'redd_lf') else 240)
    # Houses available per dataset (REDD: 6, UK-DALE: 5, REFIT: 21).
    args.house_indicies = ([1, 2, 3,
                            4, 5, 6] if (args.dataset_code == 'redd_lf') else ([1, 2, 3, 4, 5] if (args.dataset_code == 'uk_dale') else [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]))
    return args
class RMTorch(MeshTorchLayer):
    """Rectangular MZI mesh layer (torch): builds a RectangularMeshModel and wraps it."""

    def __init__(self, units: int, num_layers: int=None, hadamard: bool=False, basis: str=DEFAULT_BASIS, bs_error: float=0.0, theta_init: Union[(str, tuple, np.ndarray)]='haar_rect', phi_init: Union[(str, tuple, np.ndarray)]='random_phi', gamma_init: Union[(str, tuple, np.ndarray)]='random_gamma'):
        # Assemble the mesh model, then delegate everything to the base layer.
        mesh_model = RectangularMeshModel(units, num_layers, hadamard, bs_error,
                                          basis, theta_init, phi_init, gamma_init)
        super(RMTorch, self).__init__(mesh_model)
def kernel_gaussian(x, ls, z=None):
    """Isotropic Gaussian (RBF) kernel exp(-||x - z||^2 / (2 * ls**2)).

    When z is None the kernel is evaluated at (x, x), i.e. returns 1.0.
    """
    if z is None:
        z = x
    squared_distance = np.sum((x - z) ** 2)
    return np.exp(-squared_distance / (2 * ls ** 2))
def update_config():
    """Parse DeiT-style training CLI arguments and merge them into the global config.

    Builds the full argparse parser (model, optimizer, schedule, augmentation,
    distillation, dataset, runtime options), prints help and exits when no
    arguments are given, then applies --config_file and trailing `opts`
    overrides via cfg_from_file / cfg_from_list. Returns the parsed args.
    """
    import argparse, sys
    parser = argparse.ArgumentParser(description='Classification model training')
    parser.add_argument('--config_file', type=str, default=None, required=True, help='Optional config file for params')
    # --- Model parameters ---
    parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL', help='Name of model to train')
    parser.add_argument('--input-size', default=224, type=int, help='images input size')
    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)')
    # --- Model EMA ---
    parser.add_argument('--model-ema', action='store_true')
    parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
    parser.set_defaults(model_ema=True)
    parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
    parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
    # --- Optimizer parameters ---
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"')
    parser.add_argument('--opt-eps', default=1e-08, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.05, help='weight decay (default: 0.05)')
    # --- Learning rate schedule parameters ---
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', help='LR scheduler (default: "cosine"')
    parser.add_argument('--lr', type=float, default=0.0005, metavar='LR', help='learning rate (default: 5e-4)')
    parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages')
    parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)')
    parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)')
    parser.add_argument('--warmup-lr', type=float, default=1e-06, metavar='LR', help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min-lr', type=float, default=1e-05, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--decay-epochs', type=float, default=30, metavar='N', help='epoch interval to decay LR')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10')
    parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)')
    # --- Augmentation parameters ---
    parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)')
    # NOTE(review): the stray `" + "` inside the help string and the tuple
    # wrapping reproduce the original (upstream DeiT) source verbatim.
    (parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)'),)
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--repeated-aug', action='store_true')
    parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
    parser.set_defaults(repeated_aug=True)
    # --- Random erase parameters ---
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split')
    # --- Mixup / CutMix parameters ---
    parser.add_argument('--mixup', type=float, default=0.8, help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=1.0, help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
    parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup-prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup-switch-prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup-mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
    # --- Distillation parameters ---
    parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL', help='Name of teacher model to train (default: "regnety_160"')
    parser.add_argument('--teacher-path', type=str, default='')
    parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help='')
    parser.add_argument('--distillation-alpha', default=0.5, type=float, help='')
    parser.add_argument('--distillation-tau', default=1.0, type=float, help='')
    # --- Dataset / runtime parameters ---
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')
    parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str, help='dataset path')
    parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'INAT', 'INAT19'], type=str, help='Image Net dataset path')
    parser.add_argument('--inat-category', default='name', choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'], type=str, help='semantic granularity')
    parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    # Trailing positional overrides consumed by cfg_from_list below.
    parser.add_argument('opts', help='see config.py for all options', default=None, nargs=argparse.REMAINDER)
    # No CLI arguments at all: show usage and bail out.
    if (len(sys.argv) == 1):
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    # Apply config-file values first, then command-line `opts` overrides.
    if (args.config_file is not None):
        cfg_from_file(args.config_file)
    if (args.opts is not None):
        cfg_from_list(args.opts)
    return args
def _select_rand_weights(weight_idx=0, transforms=None):
    """Return normalized sampling probabilities for RandAugment transforms.

    Args:
        weight_idx: index of the weight set to use; only set 0 is defined.
        transforms: transform names to weight; falls back to the module-level
            ``_RAND_TRANSFORMS`` when falsy.

    Returns:
        numpy array of probabilities summing to 1, aligned with ``transforms``.
    """
    transforms = transforms or _RAND_TRANSFORMS
    # Only a single predefined weight table exists so far.
    assert weight_idx == 0
    weights = np.array([_RAND_CHOICE_WEIGHTS_0[name] for name in transforms])
    return weights / np.sum(weights)
def Res50_Deeplab(num_classes=21):
    """Build a DeepLab segmentation model on a ResNet-50 backbone.

    Args:
        num_classes: number of output classes (default 21, PASCAL VOC).

    Returns:
        A ``ResNet`` instance using ``Bottleneck`` blocks with the
        standard ResNet-50 stage layout [3, 4, 6, 3].
    """
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes)
def register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, cls):
    # Generated pybindgen registration for ns3::DefaultDeleter<ns3::EventImpl>:
    # the order of add_constructor/add_method calls determines the order of
    # the emitted bindings, so do not reorder.
    # Default constructor: ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::EventImpl > const &', 'arg0')])
    # Static member: void ns3::DefaultDeleter<ns3::EventImpl>::Delete(ns3::EventImpl * object)
    cls.add_method('Delete', 'void', [param('ns3::EventImpl *', 'object')], is_static=True)
    return
def compute_clip_loss(img, text):
    """Compute a CLIP-guided loss: lower when `img` matches `text` better.

    Args:
        img: image batch tensor; resized to CLIP's expected 224x224 input.
        text: a single prompt string, tokenized with the CLIP tokenizer.

    Returns:
        Scalar-per-image tensor ``100 / logits``: the image/text similarity
        logits grow as the image matches the prompt, so the loss shrinks.

    NOTE(review): relies on module-level ``clip``, ``clip_model`` and
    ``device`` being defined elsewhere in this file.
    """
    # F.upsample_bilinear is deprecated; interpolate with mode='bilinear'
    # and align_corners=True is its documented exact replacement.
    img = torch.nn.functional.interpolate(
        img, size=(224, 224), mode='bilinear', align_corners=True)
    tokenized_text = clip.tokenize([text]).to(device)
    img_logits, _text_logits = clip_model(img, tokenized_text)
    return (1 / img_logits) * 100
def get_topic_summary_dict(topics):
    """Map 1-based topic numbers (as strings) to summary entries.

    Args:
        topics: iterable of per-topic word data.

    Returns:
        ``{'1': {'name': '', 'words': <data>}, '2': ...}`` — one entry per
        topic, with an empty name placeholder for later labeling.
    """
    return {
        str(topic_num): {'name': '', 'words': topic_words}
        for topic_num, topic_words in enumerate(topics, start=1)
    }
def maple(model, data):
    """Build an attribution function backed by a MAPLE explainer.

    Args:
        model: fitted model exposing ``predict``.
        data: background dataset handed to the explainer.

    Returns:
        A callable that, given inputs ``X``, constructs a fresh
        ``other.MapleExplainer`` and returns its attributions
        (without multiplying by the input).
    """
    def _attributions(X):
        # A new explainer is built per call, matching the original behavior.
        explainer = other.MapleExplainer(model.predict, data)
        return explainer.attributions(X, multiply_by_input=False)
    return _attributions
class TestActivationCheckpointing(unittest.TestCase):
    """Verify that activation checkpointing is metric-neutral."""

    def test_activation_checkpointing_does_not_change_metrics(self):
        """Train a tiny transformer with and without --checkpoint-activations
        and assert the logged train/valid losses are identical."""
        base_flags = [
            '--encoder-layers', '2', '--decoder-layers', '2',
            '--encoder-embed-dim', '8', '--decoder-embed-dim', '8',
            '--restore-file', 'x.pt', '--log-format', 'json',
            '--log-interval', '1', '--max-update', '2',
        ]

        def _run_training(extra_flags):
            # Capture log records so the metric entries can be compared.
            with self.assertLogs() as captured:
                train_translation_model(
                    data_dir, 'transformer_iwslt_de_en',
                    base_flags + extra_flags, run_validation=True,
                    extra_valid_flags=['--log-format', 'json'])
            return captured.records

        with tempfile.TemporaryDirectory('test_transformer_with_act_cpt') as data_dir:
            create_dummy_data(data_dir, num_examples=20)
            preprocess_translation_data(data_dir)
            ckpt_logs = _run_training(['--checkpoint-activations'])
            baseline_logs = _run_training([])
            assert len(baseline_logs) == len(ckpt_logs)
            # Checkpointing recomputes activations but must not change math.
            for split, metric in (('train', 'train_loss'), ('valid', 'valid_loss')):
                baseline_stats = read_last_log_entry(baseline_logs, split)
                ckpt_stats = read_last_log_entry(ckpt_logs, split)
                assert baseline_stats[metric] == ckpt_stats[metric]
def get_default_temp_dir():
    """Return the default scratch directory for sepp: <system tmp>/sepp.

    The original called ``tempfile.gettempdir()`` only for its side effect of
    populating the ``tempfile.tempdir`` module global, then read that global.
    Using the return value directly is equivalent and does not depend on the
    side effect.
    """
    return os.path.join(tempfile.gettempdir(), 'sepp')
def test_optimize_freeze_check():
    """Frozen variables must keep their value while free ones are optimized.

    Minimizing ((x + y)**2 + 3) with y frozen at 1 should drive x to -1
    and leave y untouched.
    """
    with goos.OptimizationPlan() as plan:
        free_var = goos.Variable([1])
        frozen_var = goos.Variable([1])
        frozen_var.freeze()
        objective = (free_var + frozen_var) ** 2 + 3
        goos.opt.scipy_minimize(objective, method='L-BFGS-B')
        plan.run()
        assert free_var.get().array == -1
        assert frozen_var.get().array == 1
class BertConfig(PretrainedConfig):
    """Configuration holder for a BERT model.

    Stores the hyperparameters that define the architecture; extra keyword
    arguments are forwarded to ``PretrainedConfig``.
    """

    pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12,
                 num_attention_heads=12, intermediate_size=3072,
                 hidden_act='gelu', hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=2, initializer_range=0.02,
                 layer_norm_eps=1e-12, **kwargs):
        super(BertConfig, self).__init__(**kwargs)
        # Embedding / vocabulary sizes.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        # Transformer encoder geometry.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        # Regularization.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        # Initialization / normalization.
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class LuminosityInner(ProcessingPlasmaProperty):
    """Blackbody luminosity emitted at the innermost shell boundary."""

    outputs = ('luminosity_inner',)

    def calculate(r_inner, t_inner):
        # NOTE: no `self` parameter — this follows the plasma-property
        # calculate() convention used by the framework.
        # Stefan-Boltzmann law: L = 4*pi*R^2 * sigma_sb * T^4, using the
        # radius of the innermost shell.
        prefactor = (4 * np.pi) * const.sigma_sb.cgs
        luminosity = (prefactor * (r_inner[0] ** 2)) * (t_inner ** 4)
        return luminosity.to('erg/s')
class TaskProcessor(object):
    """Prepares a classification/regression task's data for fairseq training.

    Pipeline (see ``prepare``): write raw label/input files per split,
    encode inputs with SentencePiece, then binarize with fairseq-preprocess.
    Optionally resamples training records per label.
    """

    def __init__(self, task: BaseTask, data_path: str, output_path: str, model_path: str, resample: str=None):
        # task: provides read()/spec() for the dataset being processed.
        # resample: "label:factor,label:factor" string, parsed into a dict.
        self.task: BaseTask = task
        self.data_path: str = data_path
        self.model_path = model_path
        self.output_path = output_path
        self.task_output_path = os.path.join(self.output_path, task.spec().output_path())
        self.resample = self._parse_resample_string(resample)
        # Optional prefix for CLI binaries (e.g. a virtualenv bin dir).
        self.cli_path = os.environ.get('CLI_PATH', '')
        if (not os.path.exists(self.task_output_path)):
            os.makedirs(self.task_output_path, exist_ok=True)

    def _parse_resample_string(self, resample) -> Optional[Dict]:
        """Parse "label:factor,..." into {label: float factor}; None if empty."""
        if (not resample):
            return None
        values = [val.strip() for val in resample.split(',')]
        res = {}
        for value in values:
            # Entries without a colon are silently ignored.
            if (not (':' in value)):
                continue
            # NOTE(review): maxsplit=2 allows a stray second colon; only the
            # first two fields are used. maxsplit=1 would express the intent
            # more directly — confirm before changing.
            keyval = value.split(':', maxsplit=2)
            key = keyval[0].strip()
            val = float(keyval[1].strip())
            res[key] = val
        return res

    def prepare(self):
        """Run the full preparation pipeline for all splits."""
        self._prepare_split('train')
        self._prepare_split('dev')
        self._prepare_split('test')
        self._spm_encode()
        self._fairseq_preprocess()

    def _resampling_wrapper(self, data_path, split):
        """Yield task records, over/under-sampling per-label.

        factor < 1: keep the record with probability `factor`.
        factor > 1: duplicate the record; extra copies are buffered in
        `resampled` and emitted after the pass so duplicates are not adjacent.
        NOTE(review): for factor > 1 the record is yielded once before the
        loop AND again at idx == 0, i.e. a factor of 2.5 emits ~3.5 copies on
        average — looks like an off-by-one; confirm intended multiplicity.
        """
        resampled = []
        for record in self.task.read(data_path, split):
            label = record.label
            resampling_value = self.resample.get(label)
            if (resampling_value is None):
                # No factor configured for this label: pass through.
                (yield record)
            elif (resampling_value < 1):
                # Undersample: keep with probability = factor.
                if (random.random() <= resampling_value):
                    (yield record)
            elif (resampling_value > 1):
                (yield record)
                decimal_part = (resampling_value % 1)
                whole_part = int(resampling_value)
                for idx in range(whole_part):
                    if (idx == 0):
                        (yield record)
                    else:
                        resampled.append(record)
                # Fractional part: one more copy with that probability.
                if (random.random() <= decimal_part):
                    resampled.append(record)
            else:
                # factor == 1: unchanged.
                (yield record)
        # Emit buffered duplicates at the end of the stream.
        for record in resampled:
            (yield record)

    def _prepare_split(self, split: str):
        """Write <split>.label and <split>.raw.inputN files for one split."""
        if ((split == 'dev') and self.task.spec().no_dev_set):
            return
        num_inputs: int = self.task.spec().num_inputs
        # One extra output stream for the label column.
        num_outputs: int = (num_inputs + 1)
        outputs: List[TextIO] = self._open_outputs(split, num_inputs)
        # Only the train split is resampled, and only when configured.
        reader = (self._resampling_wrapper if ((split == 'train') and (self.resample is not None)) else self.task.read)
        for record in reader(self.data_path, split):
            rec_out = [record.label]
            rec_out.extend(record.inputs)
            assert (len(rec_out) == num_outputs), rec_out
            for idx in range(num_outputs):
                # Test split may legitimately have no labels (idx 0).
                if ((rec_out[idx] is None) and (idx == 0) and (split == 'test')):
                    continue
                outputs[idx].write(rec_out[idx])
                outputs[idx].write('\n')
        for output in outputs:
            output.close()

    def _open_outputs(self, split: str, num_inputs: int) -> List[TextIO]:
        """Open the label file plus one raw-input file per input column."""
        res = []
        res.append(open(os.path.join(self.task_output_path, (split + '.label')), 'w', encoding='utf-8'))
        for idx in range(num_inputs):
            file_name = ((split + '.raw.input') + str(idx))
            res.append(open(os.path.join(self.task_output_path, file_name), 'w', encoding='utf-8'))
        return res

    def _spm_encode(self):
        """SentencePiece-encode every *.raw.input* file in the output dir."""
        for file in os.listdir(self.task_output_path):
            if ('.raw.input' in file):
                self._spm_encode_file(os.path.join(self.task_output_path, file))

    def _spm_encode_file(self, file_path: str):
        """Encode one raw input file to its .input counterpart (<=510 pieces)."""
        output_path = file_path.replace('.raw.input', '.input')
        spm_model = os.path.join(self.model_path, 'sentencepiece.bpe.model')
        spm_encode(file_path, output_path, spm_model, threads=1, encode_ids=False, max_length=510)

    def _fairseq_preprocess(self):
        """Binarize all inputs (and labels, unless regression) into *-bin."""
        num_inputs: int = self.task.spec().num_inputs
        output_path: str = (self.task_output_path + '-bin')
        # Start from a clean binarized directory.
        if os.path.exists(output_path):
            shutil.rmtree(output_path)
        for input_idx in range(num_inputs):
            self._fairseq_preprocess_input(input_idx, output_path)
        if (self.task.spec().task_type == 'regression'):
            # Regression targets are raw floats; copy instead of binarizing.
            self._copy_labels(output_path)
        else:
            self._fairseq_preprocess_labels(output_path)

    def _copy_labels(self, output_path: str):
        """Copy raw label files into the binarized dir (dev becomes 'valid')."""
        destdir = os.path.join(output_path, 'label')
        os.mkdir(destdir)
        shutil.copy(os.path.join(self.task_output_path, 'train.label'), os.path.join(destdir, 'train.label'))
        shutil.copy(os.path.join(self.task_output_path, 'dev.label'), os.path.join(destdir, 'valid.label'))

    def _fairseq_preprocess_input(self, input_idx: int, output_path: str):
        """Binarize one input column into <output_path>/inputN."""
        input_name = f'input{input_idx}'
        destdir = os.path.join(output_path, input_name)
        self._run_fairseq_preprocess(input_name, destdir)

    def _fairseq_preprocess_labels(self, output_path: str):
        """Binarize the label column into <output_path>/label."""
        input_name = 'label'
        destdir = os.path.join(output_path, input_name)
        self._run_fairseq_preprocess(input_name, destdir)

    def _run_fairseq_preprocess(self, input_name: str, destdir: str):
        """Invoke the fairseq-preprocess CLI for one column.

        Builds --trainpref/--validpref/--testpref options per available
        split; labels never have a test split, and the model's source
        dictionary is reused for non-label columns.
        """
        cpus = multiprocessing.cpu_count()
        cmd = [f'{self.cli_path}fairseq-preprocess', '--only-source', '--workers', str(cpus), '--destdir', destdir]
        for split in ('train', 'dev', 'test'):
            if ((input_name == 'label') and (split == 'test')):
                continue
            if ((split == 'dev') and self.task.spec().no_dev_set):
                continue
            file_name = ((split + '.') + input_name)
            pref = os.path.join(self.task_output_path, file_name)
            # fairseq calls the dev split "valid".
            option = ('--validpref' if (split == 'dev') else f'--{split}pref')
            cmd.append(option)
            cmd.append(pref)
        if (not (input_name == 'label')):
            # Reuse the pretrained model's vocabulary for input columns.
            dict_path: str = os.path.join(self.model_path, 'dict.txt')
            cmd.append('--srcdict')
            cmd.append(dict_path)
        logging.info('running %s', cmd.__repr__())
        subprocess.run(cmd)
def eliminate_overlapping_entities(entities_list):
    """Drop entities whose targets overlap any earlier entity's targets.

    Earlier entries in ``entities_list`` take precedence: an entity is
    removed when any of its ``targets`` appears in the ``targets`` of an
    entity that precedes it in the list.

    Args:
        entities_list: dicts with at least 'targets' (iterable) and 'ent_id'.

    Returns:
        The surviving entities, in their original order.
    """
    removed_ids = set()
    for idx, candidate in enumerate(entities_list):
        overlaps_earlier = any(
            target in earlier['targets']
            for earlier in entities_list[:idx]
            for target in candidate['targets']
        )
        if overlaps_earlier:
            removed_ids.add(candidate['ent_id'])
    return [entity for entity in entities_list if entity['ent_id'] not in removed_ids]
def test_streaming_mean():
    """StreamingMean.value must equal np.mean of all samples added so far."""
    stream = batcher.StreamingMean()
    samples = list(range(10, 20))
    for count, sample in enumerate(samples, start=1):
        stream.add(sample)
        # Check the running mean after every single addition.
        assert stream.value == np.mean(samples[:count])
def encode_dataset(input_file, w_map, c_map):
    """Encode a corpus file into word/character id sequences for training.

    Each token contributes one word id repeated once per character (aligned
    with the character ids), separated by connector symbols; a mask marks
    connector positions with 1 and in-token positions with 0.

    Args:
        input_file: path to the raw corpus file read by ``read_corpus``.
        w_map: word -> id map with '<s>', '<unk>', '< >', '<\\n>' specials.
        c_map: char -> id map with the same special symbols.

    Returns:
        List of [word_ids, char_ids, mask, line_indices, tokens] per line.
    """
    with open(input_file, 'r') as fin:
        raw_lines = fin.readlines()
    line_idx, features = read_corpus(raw_lines)

    # Special symbols: start, unknown, connector (space), padding (newline).
    w_st, w_unk, w_con, w_pad = w_map['<s>'], w_map['<unk>'], w_map['< >'], w_map['<\n>']
    c_st, c_unk, c_con, c_pad = c_map['<s>'], c_map['<unk>'], c_map['< >'], c_map['<\n>']

    dataset = []
    for f_idx, f_l in zip(line_idx, features):
        assert f_l
        assert f_l[0] != '<s>'
        assert f_l[-1] != '<eof>'

        # Every sequence opens with the start symbol plus a connector.
        word_ids = [w_st, w_con]
        char_ids = [c_st, c_con]
        mask = [0, 1]
        for token in f_l:
            # Fall back to the lowercased form before giving up as <unk>.
            w_id = w_map.get(token, w_map.get(token.lower(), w_unk))
            word_ids.extend([w_id] * len(token))
            word_ids.append(w_con)
            char_ids.extend(c_map.get(ch, c_unk) for ch in token)
            char_ids.append(c_con)
            mask.extend([0] * len(token))
            mask.append(1)

        # Terminate each sequence with the padding symbol (mask 0).
        word_ids.append(w_pad)
        char_ids.append(c_pad)
        mask.append(0)

        positions = f_idx + [f_idx[-1] + 1]
        dataset.append([word_ids, char_ids, mask, positions, f_l])
    return dataset