""" Generate data to simulate a study, e.g. to test ingest or download features. DESCRIPTION This data is similar to real data in structure and basic statistics (e.g. sparseness), but is biologically meaningless. EXAMPLES # Generate 3 dense matrix files, 25 MB each python make_toy_data.py # Generate 6 dense matrix files, 2 MB each python make_toy_data.py --num-files 6 --size-per-file 2_MiB # Generate 1 raw counts dense matrix file, 2 MB python make_toy_data.py --num-files 1 --size-per-file 2_MiB --raw-count # Generate 1 dense matrix file named AB_meso.txt, 2 GB in raw size, then compress it python make_toy_data.py --num-files 1 --filename-leaf 'meso' --size-per-file 2_GiB --gzip # Generate 1 group of files with sparse matrix files, dense matrix files, metadata and cluster files python make_toy_data.py --num-files 1 --filename-leaf 'portal' --num-cells 1000 --num-genes 20 --matrix-types sparse dense --visualize # Generate 1 group of files with sparse matrix files, dense matrix files, metadata and cluster files using preloaded barcodes and gene names python make_toy_data.py --num-files 1 --filename-leaf 'portal' --num-cells 1000 --num-genes 20 --matrix-types sparse dense --visualize --preloaded-genes path_to_preloaded_genes --preloaded-barcodes path_to_preloaded_barcoded """ from random import randrange import argparse import multiprocessing import gzip import shutil import os import sys from functools import partial from scipy.stats import lognorm import numpy as np sys.path.append('.') sys.path.append('genomes') from genomes.genome_annotations import GenomeAnnotations def split_seq(li, cols=5): """ Chunk an array into an array of len cols + 1 (last element is remainder elements) http://code.activestate.com/recipes/425397/ :param li: list to chunk :param cols: number of chunks :return: chunked 2d list """ start = 0 for i in range(cols): stop = start + len(li[i::cols]) yield li[start:stop] start = stop def fetch_genes(preloaded_genes, num_rows, output_dir): """ Retrieve names (i.e. HUGO symbols) for all given for a species from Ensembl GTF :return: List of gene symbols, list of gene IDs, number of rows """ genes = [] print('Getting gene list') if preloaded_genes: with open(preloaded_genes) as f: # read the genes and gene ids lines = f.readlines() ids = [ [l.strip() for l in line.split()][0] for line in lines if len(line) > 2 ] genes = [ [l.strip() for l in line.split()][1] for line in lines if len(line) > 2 ] # if --num_genes param is higher than the number of genes you tried to preload, lower it if num_rows > len(genes): print( 'Not enough genes in preloaded file, reducing gene number to', len(genes), ) num_rows = len(genes) genes = genes[:num_rows] ids = ids[:num_rows] print('Preloaded', '{:,}'.format(len(genes)), 'genes') return genes, ids, num_rows else: # To consider: Add --species as a CLI argument scp_species = [['Homo sapiens', 'human', '9606']] gtfs = GenomeAnnotations( local_output_dir=output_dir, scp_species=scp_species ).fetch_gtfs() gtf_filename = gtfs[0][0] with gzip.open(gtf_filename, mode='rt') as f: lines = f.readlines() for line in lines: if line[0] == '#': continue columns = line.split('\t') feature_type = columns[2] # gene, transcript, exon, etc. 
if feature_type != 'gene': continue raw_attrs = [x.strip() for x in columns[8].split(';')] raw_attrs[-1] = raw_attrs[-1].replace('";', '') attrs = {} for raw_attr in raw_attrs: split_attr = raw_attr.split() if len(split_attr) < 2: continue attrs[split_attr[0]] = split_attr[1].strip('"') gene_id = attrs['gene_id'] gene_name = attrs['gene_name'] if 'gene_name' in attrs else gene_id if gene_name in genes: print(f'Duplicate gene name: {gene_name}, skipping') continue genes.append(gene_name) # if --num-genes param is greater than the number of genes you tried to # load, then decrease it if num_rows > len(genes): print('Not enough genes in GTF, reducing gene number to', len(genes)) num_rows = len(genes) genes = genes[:num_rows] ids = ['FAKE00' + str(i) for i in range(num_rows)] return genes, ids, num_rows def fetch_cells( prefix, num_rows, num_columns, bytes_per_file, preloaded_barcodes, visualize, sparse ): """ Retrieve/ Generate cell names :param prefix: String of two uppercase letters, e.g. "AB" :return: dense matrix header, list of barcodes, and number of columns """ print('Generating matrix') letters = ['A', 'B', 'C', 'D'] # Generate header barcodes = [] header = 'GENE\t' # if we have a preloaded barcodes file, read it in, otherwise generate # the random barcodes if preloaded_barcodes: with open(preloaded_barcodes) as f: # load preloaded barcodes/cell names lines = f.readlines() barcodes = [line.strip() for line in lines if len(line) > 2] if num_columns > len(barcodes): # if user param --num-barcodes is higher than the number in the # preloaded file, drop it down print( 'Not enough barcodes in preloaded file, reducing barcode number to', len(barcodes), ) num_columns = len(barcodes) if visualize and num_columns % 8 != 0: # if we want to create cluster files, we have 8 clusters, so # drop down the number of barcodes to a multiple of 8 num_columns -= num_columns % 8 print( 'Visualization relies on having 8 subclusters, reducing number of cells/columns to', num_columns, ) barcodes = barcodes[:num_columns] print('Preloaded', '{:,}'.format(len(barcodes)), 'cells') # make the header header += '\t'.join(barcodes) else: # if no preloaded barcodes, randomly generate them if visualize and num_columns % 8 != 0: num_columns -= num_columns % 8 print( 'Visualization relies on having 8 subclusters, reducing number of cells/columns to', num_columns, ) for i in range(num_columns): random_string = '' for j in range(1, 16): # Generate a 16-character string of random combinations of # letters A, B, C, and D ri1 = randrange(0, 4) # Random integer between 0 and 3, inclusive random_string += letters[ri1] ri2 = str(randrange(1, 9)) ri3 = str(randrange(1, 9)) barcode = 'Foobar' + prefix + ri2 + '_BazMoo_' + ri3 + random_string + '-1' if sparse: barcodes = barcodes + [barcode] header += barcode + '\t' if i % 10000 == 0 and i > 0: print('Created', '{:,}'.format(i), 'cell headers') header = header print('Generated cell headers') return header, barcodes def get_signature_content( prefix, num_rows, num_columns, max_write_size, is_explicit_num_columns, bytes_per_file, preloaded_barcodes, visualize, sparse, crush, genes, raw_count, ): """ Generates "signature" data, incorporating a given prefix. 
:return: generator for rows of dense matrix and expression scores for sparse matrix, barcodes and num_chunks """ # get the header and barcodes for writing first row of dense matrix, # writing barcodes.tsv file header, barcodes = fetch_cells( prefix, num_rows, num_columns, bytes_per_file, preloaded_barcodes, visualize, sparse, ) # num_chunks is how many rows of the dense matrix we write at a time # (basically) depending on the max_write_size, +1 in case it is 0 num_chunks = round((num_rows * num_columns) // max_write_size) + 1 # Return a generator so we can use a somewhat constant amount of RAM return row_generator, barcodes, num_chunks def generate_metadata_and_cluster(barcodes): """ Generates cluster and metadata files randomly for visualization in the portal :param barcodes: list of cell names :return: metadata file content, cluster file content """ # file heaeders metadata_header = 'NAME\tCLUSTER\tSUBCLUSTER\nTYPE\tgroup\tgroup\n' cluster_header = 'NAME\tX\tY\tZ\nTYPE\tnumeric\tnumeric\tnumeric\n' # clusters- P means positive, N means negative (For X Axis values) clusters = np.asarray(['P', 'N']) # subclusters- P means positive, N means negative (For X Y Z axis) subclusters = np.asarray(['PPP', 'PPN', 'PNP', 'PNN', 'NPP', 'NPN', 'NNP', 'NNN']) # make a var for bar length for convenience bar_length = len(barcodes) # reshape the barcodes to make generating the files easier barcodes_arr = np.asarray(barcodes).reshape(bar_length, 1) # generate the labels for cluster and subcluster cluster_length = bar_length / 2 subcluster_length = bar_length / 8 cluster_groups = np.repeat(clusters, cluster_length).reshape(bar_length, 1) sub_cluster_groups = np.repeat(subclusters, subcluster_length).reshape( bar_length, 1 ) # metadata table rows are barcode, cluster_group, sub_cluster_group metadata_table = np.concatenate( (barcodes_arr, cluster_groups, sub_cluster_groups), axis=1 ) print('Generating cluster coordinates') # generate random coordinate values, but accurately, so P in a dimension # has a positive value, while N has a negative value # round the random numbers to 4 digits cluster_coords = np.round(np.random.uniform(size=(bar_length, 3)), 4) x_mod = np.repeat([1, -1], cluster_length) y_mod = np.repeat([1, -1, 1, -1], cluster_length / 2) z_mod = np.repeat([1, -1, 1, -1, 1, -1, 1, -1], subcluster_length) # multiply the dimension sign arrays by the random numbers to properly cluster print('Modifiying cluster coordinates') mods = np.asarray([x_mod, y_mod, z_mod]).T cluster_coords *= mods # cluster table row is barcode, X, Y, Z cluster_table = np.concatenate((barcodes_arr, cluster_coords), axis=1) # join the tables into strings (tab seperated) and add the proper headers print('Generating cluster and metadata strings') metadata_string = metadata_header + '\n'.join( ['\t'.join(row) for row in metadata_table] ) cluster_string = cluster_header + '\n'.join( ['\t'.join(row) for row in cluster_table] ) return metadata_string, cluster_string def pool_processing( filename_leaf, sparse, dense, visualize, crush, gzip_files, num_rows, num_columns, preloaded_genes, preloaded_barcodes, max_write_size, is_explicit_num_columns, bytes_per_file, genes, ids, output_dir, raw_count, prefix, ): """ Function called by each CPU core in our pool of available CPUs. 
""" # potential file names stem = os.path.join(output_dir, f"{prefix}_toy_data_{filename_leaf}") dense_name = stem + '.txt' genes_name = stem + '.genes.tsv' barcodes_name = stem + '.barcodes.tsv' matrix_name = stem + '.matrix.mtx' cluster_name = stem + '.cluster.txt' metadata_name = stem + '.metadata.txt' # get list of files we are creating files_to_write = [] if sparse: files_to_write = files_to_write + [matrix_name, genes_name, barcodes_name] if dense: files_to_write = files_to_write + [dense_name] if visualize: files_to_write = files_to_write + [metadata_name, cluster_name] # delete existing files-- since we append files we don't want to append # to existing ones print('Deleting existing files with same name') for file in files_to_write: if os.path.exists(file): os.remove(file) # get the generator function and num chunks for the given barcodes/genes # (if any preloaded, otherwise randomly generate/get from NCBI) row_generator, barcodes, num_chunks = get_signature_content( prefix, num_rows, num_columns, max_write_size, is_explicit_num_columns, bytes_per_file, preloaded_barcodes, visualize, sparse, crush, genes, raw_count, ) # make a var for bar length for convenience bar_len = len(barcodes) # WRITE FILES if sparse: # write the genes.tsv file for sparse matrix with open(genes_name, 'w+') as g: print('Writing gene file') # row format: (tab delimited) gene_id gene_name [g.write(ids[i] + '\t' + genes[i] + '\n') for i in range(num_rows)] # write the barcodes.tsv file for sparse matrix with open(barcodes_name, 'w+') as b: print('Writing barcodes') # row format: barcode_name b.write('\n'.join(barcodes)) # We write the sparse matrix and dense matrix at the same time using the # row generator (because we want to make sure our expression scores are # the same for [cell, gene]) if sparse: print('Writing sparse matrix') if dense: print('Writing dense matrix') if sparse or dense: # helpful stat tracking # nuumber of expressions cores exprs_written = 0 # number of times we had to write to a file num_writes = 0 # we will have to do num_chunks writes total print('Number of writes:', '{:,}'.format(num_chunks)) # iterate through the generator # Generate sparse string header sparse_str = '%%MatrixMarket matrix coordinate integer general\n' sparse_str += ' '.join( [ str(num_rows), str(bar_len), str(round(num_rows * num_columns * (1 - crush))), '\n', ] ) # the row generator returns content (string of joined dense matrix # rows) and exprs (1d array of random expression scores that is gene, # barcode sorted) for content, exprs in row_generator(): # write part of dense matrix if user said to if dense: # append to content string to the dense matrix file with open(dense_name, 'a+') as f: print(f'Writing to dense matrix, @size: {len(content)}') f.write(content) # write part of sparse matrix if user said to if sparse: # append sparse matrix rows to the sparse matrix with open(matrix_name, 'a+') as m: # this step is computationally expensive so tell the user print('Creating sparse matrix string') # we output it sorted by gene and then barcode # sparse matrix format: gene_num, barcode_num, expr (space seperated) for i, expr in enumerate(exprs): # only write the values with actual expression if expr > 0: # generate the gene num and barcode numbers gene_num = str(((i + exprs_written) // num_columns) + 1) barcode_num = str((i % num_columns) + 1) # join the row by space and add it to the string to write line = ' '.join([gene_num, barcode_num, str(expr) + '\n']) sparse_str += line # write the multiple rows strings print( 
'Writing', '{:,}'.format(i + 1), 'scores, @ size:', '{:,}'.format(len(sparse_str)), ) m.write(sparse_str) # reset the string sparse_str = '' # keep track of number of scores written exprs_written += len(exprs) # keep track of number of writes to files, inform user num_writes += 1 print('Writes completed:', num_writes) # if user specified in --visualize param, write the cluster and metadata files if visualize: print('Writing metadata file') metadata_string, cluster_string = generate_metadata_and_cluster(barcodes) with open(metadata_name, 'w+') as md: md.write(metadata_string) print('Writing cluster file') with open(cluster_name, 'w+') as c: c.write(cluster_string) # cleanup step: inform user of what files we wrote [print('Wrote file:', file) for file in files_to_write] # if user said to in --gzip param, gzip and overwrite file if gzip_files: for file in files_to_write: print('Gzipping:', file) with open(file, 'rb') as f_in: with gzip.open(file + '.gz', 'wb') as f_out: shutil.copyfileobj(f_in, f_out) def parse_filesize_string(filesize_string): """ Returns number of bytes specified in a human-readable filesize string :param filesize_string: Filesize string, e.g. '300_MiB' :return: num_bytes: Integer number of bytes, e.g. 307200000 """ fss = filesize_string.split('_') # e.g. ['300', 'MB'] filesize_value = float(fss[0]) # e.g. 300.0 filesize_unit_symbol = fss[1][0] # e.g. 'M' # Unit prefix: binary multiplier (in scientific E-notation) unit_multipliers = {'': 1, 'K': 1.024e3, 'M': 1.024e6, 'G': 1.024e9, 'T': 1.024e12} filesize_unit_multiplier = unit_multipliers[filesize_unit_symbol] num_bytes = int(filesize_value * filesize_unit_multiplier) return num_bytes def main(): """Enables running via module or CLI """ args = create_parser().parse_args() make_toy_data(args) if __name__ == "__main__": main()
""" Based on https://gist.github.com/felixkreuk/8d70c8c1507fcaac6197d84a8a787fa0 """ import spur env = { 'USE_SIMPLE_THREADED_LEVEL3': '1', 'OMP_NUM_THREADS': '1', } ts = '/PATH-TO-DIR/ts-1.0/ts' def parallelize(nodes_list, all_runs_args, run_script, on_gpu=False, dry_run=False): """ Running on a list of given servers, a bunch of experiments. Assumes that can connect automatically to the servers :param nodes_list: :param all_runs_args: :param run_script: :param on_gpu: :param dry_run: allows to simply print the intended experiments, and not actually run them :return: """ # assumes automatic connection w/o password connections = [spur.SshShell(hostname=node, username="USERNAME") for node in nodes_list] # ┌──────────────┐ # │ execute tasks│ # └──────────────┘ for sub_exp_idx, combination in enumerate(all_runs_args): args_str = f"{ts} sh {run_script}" for item in combination: args_str += f" {item}" if on_gpu: gpu_id = sub_exp_idx % 4 args_str += f" cuda:0" node_id = sub_exp_idx // 4 % len(nodes_list) env['CUDA_VISIBLE_DEVICES'] = f"{gpu_id}" env['TS_SOCKET'] = f"/tmp/yanai_gpu_{gpu_id}" print(args_str.split(" "), node_id, gpu_id) else: node_id = sub_exp_idx % len(nodes_list) print(args_str.split(" "), node_id) if not dry_run: connections[node_id].run(args_str.split(" "), update_env=env) print(f"==> running {len(all_runs_args)} experiments")
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

import pandas as pd
import numpy as np
import seaborn as sns
from sklearn import preprocessing
from sklearn import metrics
from sklearn.metrics import r2_score
import warnings
import pickle

# import matplotlib.pyplot as plt
# %matplotlib inline

# In[ ]:

df = pd.read_csv("HR_comma_sep.csv")

# In[ ]:

df1 = df.drop(['last_evaluation', 'Department', 'promotion_last_5years'], axis='columns')

# In[ ]:

# encode the categorical salary column as integers
n_sal = preprocessing.LabelEncoder()
df1['salary'] = n_sal.fit_transform(df1['salary'])

# In[ ]:

# note: 'average_montly_hours' is the column's actual (misspelled) name in the dataset
X = df1[['satisfaction_level', 'number_project', 'average_montly_hours',
         'time_spend_company', 'Work_accident', 'salary']].values
Y = df1['left'].values

# In[ ]:

from sklearn.model_selection import train_test_split

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)

# In[ ]:

from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier

# In[ ]:

# find the best k for KNN by test-set accuracy
Ks = 40
mean_acc = np.zeros((Ks - 1))
for n in range(1, Ks):
    # Train Model and Predict
    neigh = KNeighborsClassifier(n_neighbors=n).fit(X_train, Y_train)
    yhat = neigh.predict(X_test)
    mean_acc[n - 1] = metrics.accuracy_score(Y_test, yhat)

# print("The Highest accuracy was with", mean_acc.max(), "with k=", mean_acc.argmax() + 1)

# In[ ]:

m1 = LogisticRegression(max_iter=500)
m2 = LinearRegression()
m3 = SVC()
m4 = KNeighborsClassifier(n_neighbors=1)

# In[ ]:

Model1 = m1.fit(X_train, Y_train)
Model2 = m2.fit(X_train, Y_train)
Model3 = m3.fit(X_train, Y_train)
Model4 = m4.fit(X_train, Y_train)

# In[ ]:

p1 = m1.predict(X_test)
p2 = m2.predict(X_test)
p3 = m3.predict(X_test)
p4 = m4.predict(X_test)

# print("Accuracy of Logistic Regression :", metrics.accuracy_score(Y_test, p1))
# print("Linear Regression R2-score: %.2f" % r2_score(p2, Y_test))
# print("Accuracy of svc :", metrics.accuracy_score(Y_test, p3))
# print("Accuracy of KNN :", metrics.accuracy_score(Y_test, p4))

a1 = metrics.accuracy_score(Y_test, p1)
a2 = r2_score(p2, Y_test)
a3 = metrics.accuracy_score(Y_test, p3)
a4 = metrics.accuracy_score(Y_test, p4)

# In[ ]:

pickle.dump(Model1, open('log_model.pkl', 'wb'))
pickle.dump(Model2, open('lin_model.pkl', 'wb'))
pickle.dump(Model3, open('svc_model.pkl', 'wb'))
pickle.dump(Model4, open('knn_model.pkl', 'wb'))

# In[ ]:

Y_test

# In[ ]:

# plt.scatter(p2, Y_test, color='blue')
# plt.plot(X_train, m2.coef_ * X_train + m2.intercept_, color='red')  # regression line: y = (slope * x) + intercept
# # plt.xlabel("Engine size")
# # plt.ylabel("Emission")
# plt.show()

# In[ ]:

# sns.pairplot(df1, hue='left')
# warnings.filterwarnings("ignore")

# In[ ]:

df1.to_csv('plot.csv')
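# NOTE: the three trade implementations are not part of this fragment. The
# sketches below are hypothetical reconstructions, written only to be
# consistent with the example outputs printed further down (each returns a
# (sell_day, buy_day) tuple). They are not copied from Computer Science
# Distilled.

def trade_dp(prices):
    """O(n) time, O(n) space: precompute the cheapest buy day up to each day."""
    best_buy = [0] * len(prices)
    for day in range(1, len(prices)):
        previous = best_buy[day - 1]
        best_buy[day] = day if prices[day] < prices[previous] else previous
    sell_day, buy_day = 0, 0
    for day in range(len(prices)):
        if prices[day] - prices[best_buy[day]] > prices[sell_day] - prices[buy_day]:
            sell_day, buy_day = day, best_buy[day]
    return sell_day, buy_day


def trade_kadane(prices):
    """O(n) time, O(1) space -- buggy: it never pairs sell_day with the
    matching buy day, so buy_day can stay stale (see Example 2)."""
    sell_day, buy_day, min_day = 0, 0, 0
    for day in range(1, len(prices)):
        if prices[day] < prices[min_day]:
            min_day = day
        if prices[day] - prices[min_day] > prices[sell_day] - prices[buy_day]:
            sell_day = day  # bug: forgets `buy_day = min_day`
    return sell_day, buy_day


def trade_kadane_correct(prices):
    """O(n) time, O(1) space: updates buy_day together with sell_day."""
    sell_day, buy_day, min_day = 0, 0, 0
    for day in range(1, len(prices)):
        if prices[day] < prices[min_day]:
            min_day = day
        if prices[day] - prices[min_day] > prices[sell_day] - prices[buy_day]:
            sell_day, buy_day = day, min_day
    return sell_day, buy_day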
# Here are three different implementations of algorithms that solve the best
# trade problem.
# trade_dp is of time complexity O(n) and similar space complexity.
# trade_kadane is of time complexity O(n) and space complexity O(1).
# trade_kadane_correct is of time complexity O(n) and space complexity O(1).

# NOTE: trade_kadane is not correct in the form found in the book
# Computer Science Distilled. As shown in the second example, the
# output of the trade_kadane algorithm is obviously incorrect.

# Example 1
prices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(f"{prices=}, {len(prices)=}")
print(f"{trade_dp(prices)=}")              # Output "(9, 0)"
print(f"{trade_kadane(prices)=}")          # Output "(9, 0)"
print(f"{trade_kadane_correct(prices)=}")  # Output "(9, 0)"

# Example 2
prices = [3, 3, 1, 2, 5, 7, 11, 9]
print(f"{prices=}, {len(prices)=}")
print(f"{trade_dp(prices)=}")              # Output "(6, 2)"
print(f"{trade_kadane(prices)=}")          # Output "(6, 0)" -- WRONG!
print(f"{trade_kadane_correct(prices)=}")  # Output "(6, 2)" -- correct again!

# Example 3
prices = [3, 4, 40, 3, 1, 2, 5, 7, 11, 9]
print(f"{prices=}, {len(prices)=}")
print(f"{trade_dp(prices)=}")              # Output "(2, 0)"
print(f"{trade_kadane(prices)=}")          # Output "(2, 0)"
print(f"{trade_kadane_correct(prices)=}")  # Output "(2, 0)"
#!/usr/bin/env python
# coding: utf-8

import os
import sys
import time
import re
import json
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import color
import argparse

parser = argparse.ArgumentParser(description='This script is a task automizer.')
parser.add_argument('basepath', action='store', type=str,
                    help='Directory path where you want to automize.')
parser.add_argument('mode', action='store', type=str,
                    help='Name of configure file written your tasks.')
parser.add_argument('-r', '--recursive', action='store_true')
args = parser.parse_args()

import tasks

configure = []
if args.mode in tasks.bacon:
    configure = tasks.bacon[args.mode]

# os.path.expanduser is needed here: open() does not expand '~' by itself
try:
    with open(os.path.expanduser('~/.fastener.%s.json' % args.mode)) as f:
        configure = json.load(f)
except IOError:
    pass

try:
    with open(os.path.expanduser('~/.config/fastener/%s.json' % args.mode)) as f:
        configure = json.load(f)
except IOError:
    pass

BASEDIR = '.'
RECURSIVE = True
if args.basepath is not None:
    BASEDIR = args.basepath
if args.recursive is not None:
    RECURSIVE = args.recursive


# NOTE: ChangeHandler is referenced below but its definition is missing from
# this fragment; this minimal placeholder (an assumption, not the original
# code) just logs filesystem events so the script can run.
class ChangeHandler(FileSystemEventHandler):
    def on_any_event(self, event):
        print(event.event_type, event.src_path)


if __name__ == "__main__":
    while 1:
        event_handler = ChangeHandler()
        observer = Observer()
        observer.schedule(event_handler, BASEDIR, RECURSIVE)
        observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
The MetadataWizard(pymdwizard) software was developed by the U.S. Geological
Survey Fort Collins Science Center.
See: https://github.com/usgs/fort-pymdwizard for current project source code
See: https://usgs.github.io/fort-pymdwizard/ for current user documentation
See: https://github.com/usgs/fort-pymdwizard/tree/master/examples
    for examples of use in other scripts

License:            Creative Commons Attribution 4.0 International (CC BY 4.0)
                    http://creativecommons.org/licenses/by/4.0/

PURPOSE
------------------------------------------------------------------------------
Provide a pyqt widget for the FGDC component with a shortname matching this
file's name.


SCRIPT DEPENDENCIES
------------------------------------------------------------------------------
    This script is part of the pymdwizard package and is not intended to be
    used independently. All pymdwizard package requirements are needed.

    See imports section for external packages used in this script as well as
    inter-package dependencies


U.S. GEOLOGICAL SURVEY DISCLAIMER
------------------------------------------------------------------------------
This software has been approved for release by the U.S. Geological Survey
(USGS). Although the software has been subjected to rigorous review, the USGS
reserves the right to update the software as needed pursuant to further
analysis and review. No warranty, expressed or implied, is made by the USGS
or the U.S. Government as to the functionality of the software and related
material nor shall the fact of release constitute any such warranty.
Furthermore, the software is released on condition that neither the USGS nor
the U.S. Government shall be held liable for any damages resulting from its
authorized or unauthorized use.

Any use of trade, product or firm names is for descriptive purposes only and
does not imply endorsement by the U.S. Geological Survey.

Although this information product, for the most part, is in the public
domain, it also contains copyrighted material as noted in the text.
Permission to reproduce copyrighted items for other than personal use must be
secured from the copyright owner.
------------------------------------------------------------------------------
"""
import requests

from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QDialog
from PyQt5.QtGui import QStandardItemModel
from PyQt5.QtGui import QStandardItem
from PyQt5.QtGui import QFont

from pymdwizard.core import utils
from pymdwizard.gui.ui_files import UI_ThesaurusSearch

try:
    from urllib.parse import quote
except ImportError:
    from urllib import quote


# NOTE: the ThesaurusSearch widget class itself is not included in this
# fragment; this stub (an assumption, not the original code) only keeps the
# module-level test launch below well-defined.
class ThesaurusSearch(QDialog):
    pass


if __name__ == "__main__":
    utils.launch_widget(ThesaurusSearch, "Thesaurus Search testing")
#!/usr/bin/python

with open('passes.txt') as fh:
    lines = fh.readlines()

# B/F encode the row bits and R/L the column bits, so each boarding pass is
# one 10-bit binary number (the seat ID)
lines = [line.replace('B', '1') for line in lines]
lines = [line.replace('F', '0') for line in lines]
lines = [line.replace('R', '1') for line in lines]
lines = [line.replace('L', '0') for line in lines]

nums = [int(line, 2) for line in lines]
print(max(nums))

missing = range(0, 2 ** 10)
missing = [seat for seat in missing if seat not in nums]
print(missing)
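# A quicker follow-up (a sketch, same 'passes.txt' assumption as above): put
# the taken seat IDs in a set so each membership test is O(1), and only scan
# between the lowest and highest seen seat, where the one missing seat lies.
taken = set(nums)
print([seat for seat in range(min(taken), max(taken) + 1) if seat not in taken])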
import csv
import sys

REGISTERED_LABEL = 'Registered'
ATTENDEE_LABEL = 'Attendee'


# NOTE: process() is not defined in this fragment. The stub below is a
# placeholder (an assumption, not the original implementation) so the
# script's entry point is at least well-defined.
def process(*args):
    raise NotImplementedError('original process() implementation not included')


if __name__ == '__main__':
    args = sys.argv
    # pad the optional sixth argument so the indexing below never fails
    if len(args) == 6:
        args.append(None)
    process(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4],
            sys.argv[5], sys.argv[6])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#  tests/thread_safety.py
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following disclaimer
#    in the documentation and/or other materials provided with the
#    distribution.
#  * Neither the name of the project nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import functools
import queue
import random
import threading
import unittest

import rule_engine.ast as ast
import rule_engine.engine as engine
import rule_engine.errors as errors

__all__ = ('ThreadSafetyTests',)
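# NOTE: the ThreadSafetyTests class named in __all__ is not part of this
# fragment. Below is a hypothetical minimal version (not the project's
# original tests): it evaluates one shared Rule from several threads at
# once and checks that every evaluation completed.
class ThreadSafetyTests(unittest.TestCase):
    def test_concurrent_matches(self):
        rule = engine.Rule('value % 2 == 0')
        results = queue.Queue()

        def worker():
            for _ in range(100):
                results.put(rule.matches({'value': random.randint(0, 10)}))

        threads = [threading.Thread(target=worker) for _ in range(8)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        # 8 threads x 100 evaluations each, none lost or deadlocked
        self.assertEqual(results.qsize(), 800)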
#coding:utf-8
#
# id:           bugs.core_3362_complex
# title:        Cursors should ignore changes made by the same statement
# description:
#               This test verifies PSQL issues that were accumulated in miscellaneous tickets.
#
# tracker_id:   CORE-3362
# min_versions: ['3.0.1']
# versions:     3.0.1
# qmid:         None

import pytest
from firebird.qa import db_factory, isql_act, Action

# version: 3.0.1
# resources: None

substitutions_1 = [('[ \t]+', ' '), ('line: [\\d]+[,]{0,1} col: [\\d]+', '')]

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

test_script_1 = """
    -- see also:
    -- https://www.sql.ru/forum/1319017/obnovlenie-zapisi-po-kursoru
    -- Discussed 13.11.2019 with hvlad and dimitr (related to CORE-5794)
    recreate table test (
        id int not null
        ,data1 int
        ,data2 int
        ,data3 int
        ,data4 int
    );

    set term ^;
    create or alter procedure sp_set_ctx(a_point varchar(20), a_data1 int, a_data2 int, a_data3 int, a_data4 int) as
    begin
        -- Store values of cursor fields in the context variable which name is passed here as 'a_point'.
        rdb$set_context( 'USER_SESSION', a_point,
            coalesce(cast( a_data1 as char(6)),'#null#')
            || ' ' || coalesce(cast( a_data2 as char(6)),'#null#')
            || ' ' || coalesce(cast( a_data3 as char(6)),'#null#')
            || ' ' || coalesce(cast( a_data4 as char(6)),'#null#')
        );
    end
    ^
    create or alter procedure sp_test1a as
    begin
        -- ::: NOTE :::
        -- Only IMPLICIT cursors are stable in 3.0+.
        -- #############
        -- Do _NOT_ try to check following statements using explicit cursor
        -- (i.e. OPEN <C>; FETCH ...; CLOSE <C>)
        for
            select t.id, t.data1, t.data2, t.data3, t.data4
            from test t
            where t.id = 1
            as cursor c
        do begin
            execute procedure sp_set_ctx('point_0', c.data1, c.data2, c.data3, c.data4 );

            update test t set t.data1 = 100001 where current of c;
            -- make "photo" of all cursor fields:
            execute procedure sp_set_ctx('point_1', c.data1, c.data2, c.data3, c.data4 );

            -- at this point value of c.data1 remains NULL from cursor POV because
            -- "UPDATE WHERE CURRENT OF C" sees record as it was no changes at all:
            update test t set t.data2 = 100002 where current of c;
            -- make "photo" of all cursor fields:
            execute procedure sp_set_ctx('point_2', c.data1, c.data2, c.data3, c.data4 );

            -- at this point value of c.data1 and c.data2 remain NULL from cursor POV because
            -- "UPDATE WHERE CURRENT OF C" sees record as it was no changes at all:
            update test t set t.data3 = 100003 where current of c;
            -- make "photo" of all cursor fields:
            execute procedure sp_set_ctx('point_3', c.data1, c.data2, c.data3, c.data4 );

            delete from test t where current of c;

            -- this must prevent following UPDATE from execution
            -- this must fail with "no current record for fetch operation":
            update test t set t.data4 = 100004 where current of c;

            execute procedure sp_set_ctx('point_4', c.data1, c.data2, c.data3, c.data4 );
        end
    end
    ^
    set term ;^
    commit;

    insert into test (id) values (1);
    commit;

    set bail off;
    set list on;

    execute procedure sp_test1a;

    select * from test;

    select mon$variable_name as ctx_name, mon$variable_value ctx_value
    from mon$context_variables
    where mon$attachment_id = current_connection;
"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

expected_stdout_1 = """
    ID                              1
    DATA1                           <null>
    DATA2                           <null>
    DATA3                           <null>
    DATA4                           <null>

    CTX_NAME                        point_0
    CTX_VALUE                       #null# #null# #null# #null#

    CTX_NAME                        point_1
    CTX_VALUE                       100001 #null# #null# #null#

    CTX_NAME                        point_2
    CTX_VALUE                       #null# 100002 #null# #null#

    CTX_NAME                        point_3
    CTX_VALUE                       #null# #null# 100003 #null#
"""

expected_stderr_1 = """
    Statement failed, SQLSTATE = 22000
    no current record for fetch operation
    -At procedure 'SP_TEST1A'
"""


@pytest.mark.version('>=3.0.1')
# NOTE: the decorated test function is missing from this fragment; the body
# below follows the usual firebird-qa pattern and is an assumption, not the
# original code.
def test_1(act_1: Action):
    act_1.expected_stdout = expected_stdout_1
    act_1.expected_stderr = expected_stderr_1
    act_1.execute()
    assert act_1.clean_stderr == act_1.clean_expected_stderr
    assert act_1.clean_stdout == act_1.clean_expected_stdout
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

'''Tests for helper methods in webcompat/helpers.py.'''

import os.path
import sys
import unittest

# Add webcompat module to import path
sys.path.append(os.path.realpath(os.pardir))
import webcompat

from webcompat.helpers import format_link_header
from webcompat.helpers import get_browser_name
from webcompat.helpers import get_browser
from webcompat.helpers import get_name
from webcompat.helpers import get_os
from webcompat.helpers import get_str_value
from webcompat.helpers import get_version_string
from webcompat.helpers import normalize_api_params
from webcompat.helpers import parse_link_header
from webcompat.helpers import rewrite_and_sanitize_link
from webcompat.helpers import rewrite_links
from webcompat.helpers import sanitize_link

ACCESS_TOKEN_LINK = '<https://api.github.com/repositories/17839063/issues?per_page=50&page=3&access_token=12345>; rel="next", <https://api.github.com/repositories/17839063/issues?access_token=12345&per_page=50&page=4>; rel="last", <https://api.github.com/repositories/17839063/issues?per_page=50&access_token=12345&page=1>; rel="first", <https://api.github.com/repositories/17839063/issues?per_page=50&page=1&access_token=12345>; rel="prev"'  # nopep8
GITHUB_ISSUES_LINK_HEADER = '<https://api.github.com/repositories/17839063/issues?per_page=50&page=3>; rel="next", <https://api.github.com/repositories/17839063/issues?per_page=50&page=4>; rel="last", <https://api.github.com/repositories/17839063/issues?per_page=50&page=1>; rel="first", <https://api.github.com/repositories/17839063/issues?per_page=50&page=1>; rel="prev"'  # nopep8
REWRITTEN_ISSUES_LINK_HEADER = '</api/issues?per_page=50&page=3>; rel="next", </api/issues?per_page=50&page=4>; rel="last", </api/issues?per_page=50&page=1>; rel="first", </api/issues?per_page=50&page=1>; rel="prev"'  # nopep8
GITHUB_SEARCH_LINK_HEADER = '<https://api.github.com/search/issues?q=taco&page=2>; rel="next", <https://api.github.com/search/issues?q=taco&page=26>; rel="last"'  # nopep8
REWRITTEN_SEARCH_LINK_HEADER = '</api/issues/search?q=taco&page=2>; rel="next", </api/issues/search?q=taco&page=26>; rel="last"'  # nopep8
GITHUB_COMMENTS_LINK_HEADER = '<https://api.github.com/repositories/17839063/issues/398/comments?page=2>; rel="next", <https://api.github.com/repositories/17839063/issues/398/comments?page=4>; rel="last"'  # nopep8
REWRITTEN_COMMENTS_LINK_HEADER = '</api/issues/398/comments?page=2>; rel="next", </api/issues/398/comments?page=4>; rel="last"'  # nopep8
PARSED_LINKED_HEADERS = [
    {'link': 'https://api.github.com/repositories/17839063/issues?per_page=50&page=3', 'rel': 'next'},  # nopep8
    {'link': 'https://api.github.com/repositories/17839063/issues?per_page=50&page=4', 'rel': 'last'},  # nopep8
    {'link': 'https://api.github.com/repositories/17839063/issues?per_page=50&page=1', 'rel': 'first'},  # nopep8
    {'link': 'https://api.github.com/repositories/17839063/issues?per_page=50&page=1', 'rel': 'prev'},  # nopep8
]
FIREFOX_UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:48.0) Gecko/20100101 Firefox/48.0'  # nopep8
FIREFOX_MOBILE_UA_OLD = 'Mozilla/5.0 (Android; Mobile; rv:40.0) Gecko/40.0 Firefox/40.0'  # nopep8
FIREFOX_MOBILE_UA = 'Mozilla/5.0 (Android 6.0.1; Mobile; rv:40.0) Gecko/40.0 Firefox/40.0'  # nopep8
FIREFOX_TABLET_UA = 'Mozilla/5.0 (Android 4.4; Tablet; rv:41.0) Gecko/41.0 Firefox/41.0'  # nopep8
SAFARI_UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11) AppleWebKit/601.1.39 (KHTML, like Gecko) Version/9.0 Safari/601.1.39'  # nopep8
SAFARI_MOBILE_UA = 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_1_4 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10B350 Safari/8536.25'  # nopep8
SAFARI_TABLET_UA = 'Mozilla/5.0 (iPad; CPU OS 5_1_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B206 Safari/7534.48.3'  # nopep8
CHROME_UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2720.0 Safari/537.36'  # nopep8
CHROME_MOBILE_UA = 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Mobile Safari/535.19'  # nopep8
CHROME_TABLET_UA = 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Safari/535.19'  # nopep8

if __name__ == '__main__':
    unittest.main()
# pylint: disable=missing-docstring
import unittest

import numpy as np
import tensorflow as tf
import tf_encrypted as tfe

if __name__ == '__main__':
    unittest.main()
"""To check if your projects are up-to-date, you can let dfetch check it. For each project the local version (based on tag or revision) will be compared against the available version. If there are new versions available this will be shown. """ import argparse import os import dfetch.commands.command import dfetch.manifest.manifest import dfetch.manifest.validate import dfetch.project import dfetch.util from dfetch.log import get_logger logger = get_logger(__name__) class Check(dfetch.commands.command.Command): """Check all projects for updates. Check all project to see if there are any new updates. """ @staticmethod def create_menu(subparsers: "argparse._SubParsersAction") -> None: """Add the parser menu for this action.""" dfetch.commands.command.Command.parser(subparsers, Check) def __call__(self, args: argparse.Namespace) -> None: """Perform the check.""" manifest, path = dfetch.manifest.manifest.get_manifest() with dfetch.util.util.in_directory(os.path.dirname(path)): exceptions = [] for project in manifest.projects: try: dfetch.project.make(project).check_for_update() except RuntimeError as exc: exceptions += [str(exc)] if exceptions: raise RuntimeError("\n".join(exceptions))
from django.db import models
from djmoney.models.fields import MoneyField
from phonenumber_field.modelfields import PhoneNumberField
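# The imports above are all this fragment contains. A hypothetical model
# sketching how they are typically combined (the model and field names are
# illustrative, not from the original file):
class Vendor(models.Model):
    name = models.CharField(max_length=100)
    # stores and validates international phone numbers
    phone = PhoneNumberField(blank=True)
    # stores an amount plus its currency in paired columns
    hourly_rate = MoneyField(max_digits=10, decimal_places=2, default_currency='USD')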
#!/usr/bin/env python

# Copyright (C) 2017 Udacity Inc.
#
# This file is part of Robotic Arm: Pick and Place project for Udacity
# Robotics nano-degree program
#
# All Rights Reserved.

# Author: Harsh Pandya

# import modules
import rospy
import tf
from kuka_arm.srv import *
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import Pose
from mpmath import *
from sympy import *
import numpy as np
from numpy import array
from sympy import symbols, cos, sin, pi, sqrt, atan2

# Create symbol table
q1, q2, q3, q4, q5, q6, q7 = symbols('q1:8')  # theta
d1, d2, d3, d4, d5, d6, d7 = symbols('d1:8')  # Link Offset
a0, a1, a2, a3, a4, a5, a6 = symbols('a0:7')  # distance between z(i)_axis and z(i-1)_axis
alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6 = symbols('alpha0:7')  # Angle between Z(i-1) and Z(i)

DH_table = {alpha0: 0,     a0: 0,      d1: 0.75,
            alpha1: -90.0, a1: 0.35,   d2: 0,     q2: q2 - 90.0,
            alpha2: 0,     a2: 1.25,   d3: 0,
            alpha3: -90.0, a3: -0.054, d4: 1.5,
            alpha4: 90.0,  a4: 0,      d5: 0,
            alpha5: -90.0, a5: 0,      d6: 0,
            alpha6: 0,     a6: 0,      d7: 0.303, q7: 0}

ROT_EE = Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])


# Modified DH Transformation matrix Function
# NOTE: TM_Generator is used below but missing from this fragment. This is
# the standard modified-DH link transform, added as a sketch so the chain
# can be built; it is not necessarily the author's original code.
def TM_Generator(alpha, a, d, q):
    return Matrix([
        [cos(q),              -sin(q),             0,            a],
        [sin(q) * cos(alpha), cos(q) * cos(alpha), -sin(alpha),  -sin(alpha) * d],
        [sin(q) * sin(alpha), cos(q) * sin(alpha), cos(alpha),   cos(alpha) * d],
        [0,                   0,                   0,            1],
    ])


# Create individual transformation matrices
T0_1 = TM_Generator(alpha0, a0, d1, q1).subs(DH_table)
T1_2 = TM_Generator(alpha1, a1, d2, q2).subs(DH_table)
T2_3 = TM_Generator(alpha2, a2, d3, q3).subs(DH_table)
T3_4 = TM_Generator(alpha3, a3, d4, q4).subs(DH_table)
T4_5 = TM_Generator(alpha4, a4, d5, q5).subs(DH_table)
T5_6 = TM_Generator(alpha5, a5, d6, q6).subs(DH_table)
T6_G = TM_Generator(alpha6, a6, d7, q7).subs(DH_table)

T0_G = T0_1 * T1_2 * T2_3 * T3_4 * T4_5 * T5_6 * T6_G

# Fixing gripper rotation: Z axis by 180 degrees and Y axis by -90 degrees
r, p, y = symbols('r p y')

x_rot = Matrix([[1, 0, 0],
                [0, cos(r), -sin(r)],
                [0, sin(r), cos(r)]])  # ROLL

y_rot = Matrix([[cos(p), 0, sin(p)],
                [0, 1, 0],
                [-sin(p), 0, cos(p)]])  # PITCH

z_rot = Matrix([[cos(y), -sin(y), 0],
                [sin(y), cos(y), 0],
                [0, 0, 1]])  # YAW

Rot_Fixed = z_rot.subs(y, radians(180)) * y_rot.subs(p, radians(-90))
ROT_Error = z_rot * y_rot * x_rot
ROT_EE = ROT_Error * Rot_Fixed


def IK_server():
    # placeholder: the original ROS service entry point is not included in
    # this fragment
    raise NotImplementedError


if __name__ == "__main__":
    IK_server()
# -*- coding: utf-8 -*-
"""
    app.utils
    ~~~~~~~~~

    Provides misc utility functions
"""
from __future__ import (
    absolute_import, division, print_function, unicode_literals)

from json import loads, dumps

try:
    from json.decoder import JSONDecodeError
except ImportError:
    JSONDecodeError = ValueError

try:
    from time import monotonic
except ImportError:
    from time import time as monotonic

from ast import literal_eval
from datetime import datetime as dt, timedelta
from functools import wraps

import requests
import pygogo as gogo

from flask import make_response, request
from dateutil.relativedelta import relativedelta
from http.client import responses
from meza import fntools as ft
from app import cache
from builtins import *  # noqa  # pylint: disable=unused-import

logger = gogo.Gogo(__name__, monolog=True).logger

# https://baconipsum.com/?paras=5&type=meat-and-filler&make-it-spicy=1
BACON_IPSUM = [
    'Spicy jalapeno bacon ipsum dolor amet prosciutto bresaola ball chicken.',
    'Alcatra officia enim, labore eiusmod kielbasa pancetta turducken.',
    'Aliqua pork loin picanha turducken proident.',
    'Qui meatloaf fatback cillum meatball tail duis short ribs commodo.',
    'Ball tip non salami meatloaf in, tri-tip dolor filet mignon.',
    'Leberkas tenderloin ball tip sirloin, ad culpa drumstick laborum.',
    'Porchetta eiusmod pastrami voluptate pig kielbasa jowl occaecat.',
    'Shank landjaeger andouille ea, in drumstick prosciutto bacon excepteur.',
    'Prosciutto alcatra minim elit, fugiat ut sausage beef tri-tip.',
    'Non culpa irure magna turkey short loin filet mignon.',
    'Chuck prosciutto laborum cupidatat shank pariatur ribeye in elit tempor.',
    'Dolor pig ham hock officia picanha chuck sed shankle dolore.',
    'Short ribs non ea beef ball tip, shoulder dolore.',
    'Tri-tip leberkas excepteur nisi sunt turducken deserunt.',
    'Turducken picanha doner, eiusmod short loin et fatback short ribs bacon.',
    'Tongue magna esse brisket cupim fugiat adipisicing aute veniam picanha.',
    'Pastrami hamburger prosciutto labore veniam pork loin voluptate.',
    'Venison excepteur pork ground round sausage est mollit.',
    'Biltong sunt bresaola porchetta excepteur porchetta.',
    'Consequat alcatra jowl commodo, anim incididunt officia beef tail.',
    'Ground round deserunt in, tri-tip kielbasa ball tip ex.',
    'Labore est cow nulla kielbasa, turducken ham adipisicing mollit kevin.',
    'Cillum in shank leberkas occaecat ea andouille.'
]


def jsonify(status=200, indent=2, sort_keys=True, **kwargs):
    """ Creates a jsonified response. Necessary because the default
    flask.jsonify doesn't correctly handle sets, dates, or iterators

    Args:
        status (int): The status code (default: 200).
        indent (int): Number of spaces to indent (default: 2).
        sort_keys (bool): Sort response dict by keys (default: True).
        kwargs (dict): The response to jsonify.

    Returns:
        (obj): Flask response
    """
    options = {'indent': indent, 'sort_keys': sort_keys, 'ensure_ascii': False}
    kwargs['status'] = responses[status]
    json_str = dumps(kwargs, cls=ft.CustomEncoder, **options)
    response = make_response((json_str, status))
    response.headers['Content-Type'] = 'application/json; charset=utf-8'
    response.headers['mimetype'] = 'application/json'
    response.last_modified = dt.utcnow()
    response.add_etag()
    return response


def parse(string):
    """ Parses a string into an equivalent Python object

    Args:
        string (str): The string to parse

    Returns:
        (obj): equivalent Python object

    Examples:
        >>> parse('True')
        True
        >>> parse('{"key": "value"}')
        {'key': 'value'}
    """
    if string.lower() in {'true', 'false'}:
        return loads(string.lower())
    else:
        try:
            return literal_eval(string)
        except (ValueError, SyntaxError):
            return string


def make_cache_key(*args, **kwargs):
    """ Creates a memcache key for a url and its query parameters

    Returns:
        (obj): Flask request url
    """
    return request.url


def fmt_elapsed(elapsed):
    """ Generates a human readable representation of elapsed time.

    Args:
        elapsed (float): Number of elapsed seconds.

    Yields:
        (str): Elapsed time value and unit

    Examples:
        >>> formatted = fmt_elapsed(1000)
        >>> next(formatted) == '16 minutes'
        True
        >>> next(formatted) == '40 seconds'
        True
    """
    # http://stackoverflow.com/a/11157649/408556
    # http://stackoverflow.com/a/25823885/408556
    attrs = ['years', 'months', 'days', 'hours', 'minutes', 'seconds']
    delta = relativedelta(seconds=elapsed)

    for attr in attrs:
        value = getattr(delta, attr)

        if value:
            yield '%d %s' % (value, attr[:-1] if value == 1 else attr)


# https://gist.github.com/glenrobertson/954da3acec84606885f5
# http://stackoverflow.com/a/23115561/408556
# https://github.com/pallets/flask/issues/637
def cache_header(max_age, **ckwargs):
    """ Add Flask cache response headers based on max_age in seconds.

    If max_age is 0, caching will be disabled.
    Otherwise, caching headers are set to expire in now + max_age seconds

    Example usage:

        @app.route('/map')
        @cache_header(60)
        def index():
            return render_template('index.html')
    """
    # NOTE: the decorator body was lost in extraction; the reconstruction
    # below is a hedged sketch following the gists linked above: cache the
    # view via the app's cache object and stamp Cache-Control headers on
    # the response.
    def decorator(view):
        f = cache.cached(max_age, **ckwargs)(view)

        @wraps(f)
        def wrapper(*args, **wkwargs):
            response = f(*args, **wkwargs)
            response.cache_control.max_age = max_age

            if max_age == 0:
                # caller asked for caching to be disabled
                response.cache_control.no_store = True

            return response

        return wrapper

    return decorator
# Code Listing #6

"""
Example with collections.deque for rotating sequences
"""

from collections import deque


def rotate_seq1(seq1, n):
    """ Rotate a sequence right by n """
    # E.g: rotate([1,2,3,4,5], 2) => [4,5,1,2,3]
    k = len(seq1) - n
    return seq1[k:] + seq1[:k]


def rotate_seq2(seq1, n):
    """ Rotate a sequence right by n using deque """
    # deque.rotate(n) with positive n rotates to the right, matching
    # rotate_seq1 above
    d = deque(seq1)
    d.rotate(n)
    return d
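# Quick check (a usage sketch, not part of the original listing): both
# versions agree once the deque is turned back into a list.
if __name__ == '__main__':
    seq = [1, 2, 3, 4, 5]
    assert rotate_seq1(seq, 2) == [4, 5, 1, 2, 3]
    assert list(rotate_seq2(seq, 2)) == rotate_seq1(seq, 2)
    print(rotate_seq1(seq, 2), list(rotate_seq2(seq, 2)))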
### Basic NxN sliding puzzle solver

import numpy as np

# Switch pieces: move blank piece 16 up, down, left, or right if possible
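# A minimal sketch of the move step the comment above describes (the
# original solver body is not included in this fragment, so this function
# and its name are hypothetical). The blank is tile 16 on a 4x4 board; the
# blank swaps with a neighbour when the move stays on the board.
def move_blank(board, direction):
    """Return a new board with the blank (16) moved, or None if impossible."""
    moves = {'up': (-1, 0), 'down': (1, 0), 'left': (0, -1), 'right': (0, 1)}
    row, col = np.argwhere(board == 16)[0]
    dr, dc = moves[direction]
    r, c = row + dr, col + dc
    if not (0 <= r < board.shape[0] and 0 <= c < board.shape[1]):
        return None
    new_board = board.copy()
    new_board[row, col], new_board[r, c] = new_board[r, c], new_board[row, col]
    return new_board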
#!/usr/bin/env python

from my_devices import cisco3, arista1
from napalm import get_network_driver


# NOTE: main() is not defined in this fragment. The sketch below is an
# assumption (not the original code): it presumes each my_devices entry is a
# dict holding a 'device_type' driver name plus the connection arguments
# NAPALM expects, opens each device, and prints the gathered facts.
def main():
    for device_dict in (cisco3, arista1):
        device_args = dict(device_dict)  # don't mutate the imported dict
        driver = get_network_driver(device_args.pop('device_type'))
        with driver(**device_args) as device:
            print(device.get_facts())


if __name__ == '__main__':
    main()
import pytest

from dvc.cli import main
from dvc.lock import Lock, LockError
# Generated by Django 3.1.2 on 2021-03-23 11:31

from django.db import migrations, models
'''9. Write a Python program to get unique values from a list. '''

my_list = [10, 20, 30, 40, 20, 50, 60, 40]
print("Original List : ", my_list)

# a set keeps one copy of each value; converting back gives a list of uniques
my_set = set(my_list)
my_new_list = list(my_set)
print("List of unique numbers : ", my_new_list)
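# A note beyond the exercise (this addition is not part of the original): a
# set does not preserve the input order. dict.fromkeys keeps each value's
# first-seen position while still dropping duplicates:
print("Order-preserving uniques : ", list(dict.fromkeys(my_list)))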
"""Tests for the config module. NOTE: If you intend to import Config and monkeypatch the config dir (you should, to make it work) you need to ensure that you only import Config inside your test function. Otherwise xdg.XDG_CONFIG_HOME will, in the automated environment, fail to be evaluated and the tests will crash. """ import pytest import os import importlib import yaml # TODO: Instead of this mess, try to mock the entire xdg module. Its # problem is currently that all the code runs module-scope. Changing the # xdg module could work too. ### Fixtures ### @pytest.fixture def _config_reload_yield_home(_monkeypatch_environment): """Reload the config module for testing and yield $HOME. This uses the _monkeypatch_environment fixture to ensure that $HOME is monkeypatched before we get into any of this. """ # Important to reload xdg before config, so the changes in xdg # propagate import cdparacord.xdg import cdparacord.config importlib.reload(cdparacord.xdg) importlib.reload(cdparacord.config) yield _monkeypatch_environment @pytest.fixture def mock_temp_home(_config_reload_yield_home): """Ensure a fake homedir exists.""" yield _config_reload_yield_home @pytest.fixture def mock_uncreatable_config_dir(_config_reload_yield_home): """Ensure a config dir cannot be created. This involves creating an XDG_CONFIG_HOME that cannot be written to. """ from cdparacord import xdg # Create unreadable dir os.mkdir(xdg.XDG_CONFIG_HOME, 0o000) yield _config_reload_yield_home # Make it usable again so we don't error when the cleanup starts # deleting these directories os.chmod(xdg.XDG_CONFIG_HOME, 0o700) @pytest.fixture def _create_config_file_paths(_config_reload_yield_home): """Yield the configuration file path. TODO: Some way of keeping sync with the config filename in Config in case it changes? """ from cdparacord import xdg config_dir = os.path.join(xdg.XDG_CONFIG_HOME, 'cdparacord') config_file = os.path.join(config_dir, 'config.yaml') os.makedirs(config_dir) # Open and close the file, creating it os.close(os.open(config_file, os.O_WRONLY | os.O_CREAT | os.O_EXCL)) yield (config_dir, config_file) @pytest.fixture def mock_config_file(_create_config_file_paths): """Ensure the config file can be read.""" config_dir, config_file = _create_config_file_paths yield config_file @pytest.fixture def mock_unreadable_config_file(_create_config_file_paths): """Ensure the config file cannot be read.""" config_dir, config_file = _create_config_file_paths # Make config file forbidden to read os.chmod(config_file, 0o000) yield config_file # Make the tempfile accessible again os.chmod(config_file, 0o600) ### Tests ### def test_create_config(mock_temp_home): """Try creating configuration dir in an empty home directory.""" from cdparacord import config c = config.Config() def test_get_encoder(mock_temp_home): """Try getting the value of 'encoder' from a default configuration.""" from cdparacord import config c = config.Config() # Would error if we couldn't find it c.get('encoder') def test_fail_to_get_variable(mock_temp_home): """Try to fail getting a nonexistent value from defaults.""" from cdparacord import config c = config.Config() with pytest.raises(KeyError): c.get('nonextant') def test_fail_to_create_config_dir(mock_uncreatable_config_dir): """Try to fail to create a configuration directory. Specifically, the fixture sets up permissions so we're not allowed to create the directory. 
""" from cdparacord import config with pytest.raises(config.ConfigError): c = config.Config() def test_read_config_file(mock_config_file): """Try to read a configuration file.""" from cdparacord import config config_file = mock_config_file # Setup our expectations var_name = 'editor' expected_value = 'probably-not-a-real-editor' # Write them to the file with open(config_file, 'w') as f: yaml.safe_dump({var_name: expected_value}, f) c = config.Config() # We should get the value in the file assert c.get(var_name) == expected_value def test_read_invalid_config(mock_config_file): """Try to fail to read a valid configuration from file.""" from cdparacord import config config_file = mock_config_file with open(config_file, 'w') as f: yaml.safe_dump(["wrong"], f) with pytest.raises(config.ConfigError): c = config.Config() def test_fail_to_open_config_file(mock_unreadable_config_file): """Try to fail to open a configuration file. Specifically, the fixture sets up permission so we're not allowed to open the file, even though it exists. """ from cdparacord import config with pytest.raises(config.ConfigError): c = config.Config() def test_ensure_default_encoder_keys_are_strings(mock_temp_home): """Test default encoder configuration.""" from cdparacord import config c = config.Config() assert len(c.get('encoder')) == 1 for encoder in c.get('encoder'): encoder_params = c.get('encoder')[encoder] # If it's not a list something's wrong assert type(encoder_params) is list for item in encoder_params: # And the params should be strings assert type(item) is str def test_ensure_default_postaction_keys_are_strings(mock_temp_home): """Test default encoder configuration.""" from cdparacord import config c = config.Config() for post_action in ('post_rip', 'post_encode', 'post_finished'): for action in c.get(post_action): assert len(action) == 1 for action_key in action: action_params = action[action_key] # If it's not a list something's wrong assert type(action_params) is list for item in action_params: # And the params should be strings assert type(item) is str
#!/usr/bin/env python
import random

x = 1
seq = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
for i in seq:
    x = random.randint(0, 1)
    x_str = str(x)
    print("x_str" + " is " + x_str)
print("Done!")
from enum import Enum
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt

data_path = "data/raw/titanic.csv"
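# The fragment stops after defining data_path. A minimal usage sketch
# (assuming the CSV exists at that path with the usual Titanic columns,
# including 'Age'):
df = pd.read_csv(data_path)
print(df.shape)
df["Age"].plot.hist(bins=30)
plt.show()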
import sys
import os.path
import shutil
import glob

import pyspark
from pyspark import SparkConf, SparkContext, SQLContext
from pprint import pprint

try:
    from awsglue.transforms import *
    from awsglue.utils import getResolvedOptions
    from awsglue.context import GlueContext
    from awsglue.dynamicframe import DynamicFrame
    from awsglue.job import Job
    import boto3
except Exception as e:
    print('local dev')
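# Typical next step when the Glue imports do resolve (a sketch of the
# standard Glue boilerplate; not part of the original fragment). The guard
# keeps the local-dev path working when awsglue is unavailable.
try:
    sc = SparkContext.getOrCreate()
    glue_context = GlueContext(sc)
    spark = glue_context.spark_session
except NameError:
    pass  # GlueContext never imported: running outside AWS Glue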
[ 11748, 25064, 198, 11748, 28686, 13, 6978, 198, 11748, 4423, 346, 198, 11748, 15095, 198, 11748, 279, 893, 20928, 198, 6738, 279, 893, 20928, 1330, 17732, 18546, 11, 17732, 21947, 11, 16363, 21947, 198, 6738, 279, 4798, 1330, 279, 4798, ...
2.95302
149
from django.shortcuts import render
from django.db.models import Q
from api.v1.tools.paginator import customPagination
# serializers imports
from django.utils import timezone
from datetime import datetime
from django.db import DatabaseError, transaction
from django.db import IntegrityError, transaction
from django.conf import settings
from rest_framework.decorators import api_view
from .serializers import UsePackageSerializer, PostPricingSerializer, PricingSerializer, PaidPackageSerializer, UserCartSerializer, PostCartSerializer
# rest_frameworks imports
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
# model imports
from pricing.models import Pricing, UserCart, TotalBill, PackageBill, PaidPackageInfo
from django.contrib.auth.decorators import login_required
# custom response format
from api.v1.ResponseFormat import responseFormat
from listing.models import PaidPackageInfo, DraftPackage, Listing


# package transaction
@api_view(['POST'])
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 1330, 1195, 198, 6738, 40391, 13, 85, 16, 13, 31391, 13, 79, 363, 20900, 1330, 2183, 47, 363, 1883, 198, 2, 11389, 11341, 17944, 198, 6738...
3.823529
272
from pyramda.function.curry import curry
import builtins


@curry
[ 6738, 12972, 859, 6814, 13, 8818, 13, 66, 16682, 1330, 34611, 198, 11748, 3170, 1040, 628, 198, 31, 66, 16682, 198 ]
3.142857
21
# Generated by Django 2.1.4 on 2018-12-13 18:29

from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 16, 13, 19, 319, 2864, 12, 1065, 12, 1485, 1248, 25, 1959, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
import numpy as np
import matplotlib.pyplot as plt
import mdtraj as md

from scattering.scattering import structure_factor
from scattering.utils.io import get_fn
[ 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 45243, 9535, 73, 355, 45243, 198, 198, 6738, 45765, 13, 1416, 16475, 1330, 4645, 62, 31412, 198, 6738, 45765, 13, 26791, 13, 952, ...
3.521739
46
from time import sleep

collors = {
    "red": '\033[31m',
    "green": '\033[32m',
    "yellow": '\033[33m',
    "purple": '\033[35m',
    "blue": '\033[34m',
    "end": '\033[m'
}


def inputInt(txt):
    """
    A function that will validate whether a number is an integer or not.

    :param txt: The text to be shown just like the standard input
    :return: A callable constant as an integer number.
    """
    while True:
        number = str(input(txt)).strip()
        if number.strip() == '' or number.isalpha():
            print(f'{collors["red"]}Invalid integer number, try again!{collors["end"]}')
        else:
            return int(number)


def fileCheck(file):
    """
    It checks if the file, given as the parameter file, already exists or not.
    If there's no such file, it creates a new one, called as the same
    parameter name given.

    :param file: The file to be checked
    """
    try:
        file_manager = open(file, 'rt')
    except FileNotFoundError:
        file_manager = open(file, 'at+')
        print(f'{collors["green"]}{file} was successfully created!{collors["end"]}')
    else:
        file_manager.close()


def newBook(file):
    """
    Made by two basic inputs, it will be given as input the name and age,
    and it will add on the file given as parameter.

    :param file: The file to be worked.
    """
    print(f'{collors["purple"]}New book: {collors["end"]}')
    while True:
        name = input('Type the name: ').strip().title()
        name_validation = name.strip().replace(' ', '')
        if not name_validation.isalpha():
            print(f'{collors["red"]}Invalid name, try again!{collors["end"]}')
        else:
            break
    age = inputInt('Type the age: ')
    with open(file, 'at') as file_manager:
        file_manager.write(f'{name};{age}\n')
    print(f'{collors["yellow"]}Registering {name}...{collors["end"]}')
    sleep(1)
    print(f'{collors["green"]}{name} was successfully booked!{collors["end"]}')
    sleep(1)


def printList(file):
    """
    Read the file given, and print the components of it, in a formatted way.

    :param file: file to be read.
    """
    print(f'{collors["yellow"]}Loading list...{collors["end"]}')
    sleep(1)
    print('-' * 40)
    print(f'{collors["green"]}Booked list: {collors["end"]}'.center(40))
    print('-' * 40)
    with open(file, 'rt') as file_manager:
        for line in file_manager:
            data = line.split(';')
            data[1] = data[1].replace('\n', '')
            print(f'{data[0]:<30}{data[1]} Years')
    sleep(1)


def mainMenu(file):
    """
    Show a menu on the screen, giving the options that you have. After you
    choose an option, it will unchain a distinct block of code, from 1 to 3.
    As shown, 1 to book a new person, 2 to show the booked list and 3 to
    leave.

    :param file: file to be worked.
    """
    while True:
        print('-' * 40)
        print('Main menu: '.center(40))
        print('-' * 40)
        print(f'{collors["yellow"]}1{collors["end"]} - {collors["blue"]}Register a new person;{collors["end"]}')
        print(f'{collors["yellow"]}2{collors["end"]} - {collors["blue"]}Show the booked list;{collors["end"]}')
        print(f'{collors["yellow"]}3{collors["end"]} - {collors["blue"]}Exit.{collors["end"]}')
        print('-' * 40)
        while True:
            cmd = inputInt('Your command: ')
            if cmd > 3:
                print(f'{collors["red"]}Invalid command, try again!{collors["end"]}')
            else:
                break
        if cmd == 1:
            newBook(file)
        if cmd == 2:
            printList(file)
        if cmd == 3:
            print(f'{collors["green"]}See you soon!{collors["end"]}')
            break
[ 6738, 640, 1330, 3993, 198, 26000, 669, 796, 1391, 198, 220, 220, 220, 366, 445, 1298, 705, 59, 44427, 58, 3132, 76, 3256, 198, 220, 220, 220, 366, 14809, 1298, 705, 59, 44427, 58, 2624, 76, 3256, 198, 220, 220, 220, 366, 36022, 1...
2.297181
1,632
import unittest

import redis
import time
from concurrent.futures import ProcessPoolExecutor

r = redis.Redis(host='localhost', port=6379)
k = 'cupid-sim_sim_dmcqm_lhmx_sku_rec_faiss_item_vec_scene102002_v1_s_d_d100_e100-watcher'
v = {'id': 'e71bc544-7fa5-11ea-8249-8c85909d35fg', 'status': 0}
r.hmset(k, v)
a = r.hgetall(k)
print(a)
# 101001100=r.hmget(k,).hgetall(k)
[ 11748, 555, 715, 395, 198, 198, 11748, 2266, 271, 198, 11748, 640, 198, 6738, 24580, 13, 69, 315, 942, 1330, 10854, 27201, 23002, 38409, 198, 198, 81, 796, 2266, 271, 13, 7738, 271, 7, 4774, 11639, 36750, 3256, 2493, 28, 21, 29088, ...
2.043956
182
# -*- coding: utf-8 -*-
from ocelot.logger import create_log, create_detailed_log
import json
import logging
import os
import pytest
import tempfile

base_logger = logging.getLogger('ocelot')
detailed_logger = logging.getLogger('ocelot-detailed')
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 267, 5276, 313, 13, 6404, 1362, 1330, 2251, 62, 6404, 11, 2251, 62, 15255, 6255, 62, 6404, 198, 11748, 33918, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 129...
2.850575
87
## This program extracts a phrase-pairs from the word alignments of a parallel corpus ##

import optparse, sys, os, logging, time
from types import *
from zgc import zgc
from itertools import izip

# Constants
weight_rules = False    # When distributing the unit-count among the rules, should it be weighted by the # of rule occurrences

# Global Variables
rule_indx = 1
srcWrds = []
tgtWrds = []
srcSentlen = 0
tgtSentlen = 0
ruleDict = {}           # dictionary of rules for each sentence, ruleDict[(src, tgt)] = estimated rule count (1.0 for initial phrase pairs at the beginning)
alignDoD = {}           # Dict of dict to store fwd alignments
revAlignDoD = {}        # Dict of dict to store rev alignments
tgtCntDict = {}
ruleIndxCntDict = {}
fAlignDoD = {}
rAlignDoD = {}


def readSentAlign():
    'Reads the input phrase span file for src & tgt sentences, alignment and initial phrases'

    global opts, ruleDoD
    global ruleDict, phrPairLst
    global srcWrds, tgtWrds, srcSentlen, tgtSentlen

    file_indx = opts.file_prefix
    dDir = opts.datadir
    oDir = opts.outdir
    if not dDir.endswith("/"): dDir += "/"
    if not oDir.endswith("/"): oDir += "/"
    srcFile = dDir + file_indx + '.' + opts.src
    tgtFile = dDir + file_indx + '.' + opts.tgt
    alignFile = dDir + file_indx + '.' + opts.alg
    outFile = oDir + file_indx + '.out'
    outTgtFile = oDir + 'tgt.' + file_indx + '.out'

    sent_count = 0
    phrLst = []
    aTupLst = []
    totalTime = 0

    print "Using the maximum phrase length :", opts.max_phr_len
    print "Enforcing tight phrase-pairs constraint :", opts.tight_phrases_only
    print "Reading the src file :", srcFile
    print "Reading the tgt file :", tgtFile
    print "Reading the alignment file :", alignFile

    srcF = open(srcFile, 'r')
    tgtF = open(tgtFile, 'r')
    alignF = open(alignFile, 'r')
    for src_sent, tgt_sent, alignLine in izip(srcF, tgtF, alignF):
        align = alignLine.strip()
        srcWrds = src_sent.strip().split()
        srcSentlen = len(srcWrds)
        tgtWrds = tgt_sent.strip().split()
        tgtSentlen = len(tgtWrds)

        for align_pos in align.split():
            m = align_pos.split('-')
            e = -1 if m[0] == 'Z' else int(m[0])
            f = -1 if m[1] == 'Z' else int(m[1])
            aTupLst.append((e, f))
            try:                # Store forward alignments
                alignDoD[m[0]][m[1]] = 1
            except KeyError:
                alignDoD[m[0]] = {}
                alignDoD[m[0]][m[1]] = 1
            try:                # Store reverse alignments
                revAlignDoD[m[1]][m[0]] = 1
            except KeyError:
                revAlignDoD[m[1]] = {}
                revAlignDoD[m[1]][m[0]] = 1

        align_tree = zgc(opts.max_phr_len)
        phrPairLst = align_tree.getAlignTree(srcSentlen, tgtSentlen, aTupLst)
        if not opts.tight_phrases_only:
            phrPairLst = addLoosePhrases(phrPairLst)
        if opts.max_phr_len >= srcSentlen and not ((0, srcSentlen-1), (0, tgtSentlen-1)) in phrPairLst:
            phrPairLst.append(((0, srcSentlen-1), (0, tgtSentlen-1)))

        for ppair in phrPairLst:
            unaligned_edge = False
            # If the boundary term of source or target phrase has an unaligned word, ignore the phrase-pair
            # Earlier bug fixed on March '09
            # Unless the tight-phrase options is set to False
            if not alignDoD.has_key( str(ppair[0][0]) ) or not revAlignDoD.has_key( str(ppair[1][0]) ) or \
               not alignDoD.has_key( str(ppair[0][1]) ) or not revAlignDoD.has_key( str(ppair[1][1]) ):
                if opts.tight_phrases_only: continue
            if abs(ppair[1][0] - ppair[1][1]) >= opts.max_phr_len: continue
            if abs(ppair[0][0] - ppair[0][1]) >= opts.max_phr_len: continue
            init_phr_pair = (' '.join( [str(x) for x in xrange(ppair[0][0], ppair[0][1]+1)] ), \
                             ' '.join( [str(x) for x in xrange(ppair[1][0], ppair[1][1]+1)] ) )
            if init_phr_pair in ruleDict:
                ruleDict[init_phr_pair] += 1.0
            else:
                ruleDict[init_phr_pair] = 1.0

        # Create a dict of dict for storing initial phrase pairs (tuples of source and target spans)
        # For every extracted phrase pair call the function compFeatureCounts() to:
        #   i. convert the word positions in the phrase pair into lexical entries, and
        #   ii. find the alignment for the phrase pair and compute the joint count p(s, t)
        for rule in ruleDict.keys():
            compFeatureCounts(rule)

        # Clear the variables at the end of current sentence
        alignDoD.clear()
        revAlignDoD.clear()
        ruleDict.clear()
        del aTupLst[:]

        sent_count += 1
        if sent_count % 2000 == 0:
            print "Sentences processed : %6d ..." % sent_count

    # Write the rule counts, forward and reverse alignments to files
    with open(outFile, 'w') as oF:
        for rule in sorted( ruleIndxCntDict.iterkeys() ):
            r_indx, rule_count = ruleIndxCntDict[rule]
            f_alignments = ' ## '.join( fAlignDoD[r_indx].keys() )
            r_alignments = ' ## '.join( rAlignDoD[r_indx].keys() )
            oF.write( "%s ||| %g ||| %s ||| %s\n" % (rule, rule_count, r_alignments, f_alignments) )

    with open(outTgtFile, 'w') as tF:
        for tgt in sorted( tgtCntDict.iterkeys() ):
            tF.write( "%s ||| %g\n" % (tgt, tgtCntDict[tgt]) )

    return None


def compFeatureCounts(rule):
    'Convert to lexical phrase and find the alignment for the entries in the phrase. Also compute feature counts P(s|t), P(t|s), P_w(s|t) and P_w(t|s)'

    global srcWrds, tgtWrds
    global fAlignDoD, rAlignDoD
    srcLexLst = []
    tgtLexLst = []
    alignLst = []
    sPosLst = rule[0].split()
    tPosLst = rule[1].split()

    # Convert the word positions in source side of the rule to corresponding lexemes
    item_indx = 0
    for s_tok in sPosLst:
        if s_tok.startswith('X__'):
            srcLexLst.append(s_tok)
        else:
            srcLexLst.append(srcWrds[int(s_tok)])
        # Find the forward alignment for the lexemes in the rule
        alignment = getFwrdAlignment(item_indx, s_tok, tPosLst)
        alignLst.append(alignment)
        #if len(alignment) > 0:
        #    alignLst.append(alignment)
        item_indx += 1
    fAlignment = ' '.join(alignLst)

    # Convert the word positions in target side of the rule to corresponding lexemes
    del alignLst[:]
    item_indx = 0
    for t_tok in tPosLst:
        if t_tok.startswith('X__'):
            tgtLexLst.append(t_tok)
        else:
            tgtLexLst.append(tgtWrds[int(t_tok)])
        # Find the reverse alignment for the lexemes in the rule
        alignment = getRvrsAlignment(item_indx, t_tok, sPosLst)
        alignLst.append(alignment)
        #if len(alignment) > 0:
        #    alignLst.append(alignment)
        item_indx += 1
    rAlignment = ' '.join(alignLst)

    # Get the lexical rule and add its count from the current sentence to total count so far
    curr_rindx = updateRuleCount(' '.join(srcLexLst), ' '.join(tgtLexLst), rule)

    # Update forward and reverse alignment dicts
    f_align_indx = getAlignIndex(fAlignment)
    r_align_indx = getAlignIndex(rAlignment)
    if not fAlignDoD.has_key(curr_rindx):
        fAlignDoD[curr_rindx] = {}
        rAlignDoD[curr_rindx] = {}
    if not fAlignDoD[curr_rindx].has_key(f_align_indx):
        fAlignDoD[curr_rindx][f_align_indx] = 1
    if not rAlignDoD[curr_rindx].has_key(r_align_indx):
        rAlignDoD[curr_rindx][r_align_indx] = 1


def updateRuleCount(mc_src, mc_tgt, rule):
    ''' Updates phrase and target counts '''

    global rule_indx, ruleDict, ruleIndxCntDict, tgtCntDict
    if not tgtCntDict.has_key(mc_tgt):
        tgtCntDict[mc_tgt] = 0
    tgtCntDict[mc_tgt] += ruleDict[rule]

    mc_key = mc_src + ' ||| ' + mc_tgt    # ' ||| ' is the delimiter separating items in the key/value
    if ruleIndxCntDict.has_key(mc_key):
        curr_rindx, curr_cnt = ruleIndxCntDict[mc_key]
        ruleIndxCntDict[mc_key] = ( curr_rindx, curr_cnt + ruleDict[rule] )
    else:
        ruleIndxCntDict[mc_key] = (rule_indx, ruleDict[rule])
        curr_rindx = rule_indx
        rule_indx += 1
    return curr_rindx


def getFwrdAlignment(item_indx, s_pos, tPosLst):
    'Computes the alignment and lexical weights in forward direction'

    alignLst = []
    if alignDoD.has_key(s_pos):
        alignKeyLst = alignDoD[s_pos].keys()
        alignKeyLst.sort()
        for t_pos in alignKeyLst:
            try:
                # Get the alignment and append it to the list
                alignment = str(item_indx) + '-' + str(tPosLst.index(t_pos))
                alignLst.append(alignment)
            except ValueError:
                pass
    else:
        alignLst.append( str(item_indx) + '-Z' )    # 'Z' represents 'NULL' (i.e. word is unaligned)
    return ' '.join(alignLst)


def getRvrsAlignment(item_indx, t_pos, sPosLst):
    'Computes the alignment and lexical weights in reverse direction'

    alignLst = []
    if revAlignDoD.has_key(t_pos):
        alignKeyLst = revAlignDoD[t_pos].keys()
        alignKeyLst.sort()
        for s_pos in alignKeyLst:
            try:
                # Get the alignment and append it to the list
                alignment = str(sPosLst.index(s_pos)) + '-' + str(item_indx)
                alignLst.append(alignment)
            except ValueError:
                pass
    else:
        alignLst.append( 'Z-' + str(item_indx) )    # 'Z' represents 'NULL' (i.e. word is unaligned)
    return ' '.join(alignLst)


if __name__ == '__main__':

    global opts
    optparser = optparse.OptionParser()
    optparser.add_option("-d", "--datadir", dest="datadir", default="corpora", help="data directory (default=corpora)")
    optparser.add_option("-o", "--outdir", dest="outdir", default="phrase-pairs", help="data directory (default=phrase-pairs)")
    optparser.add_option("-p", "--prefix", dest="file_prefix", default="train", help="prefix of parallel data files (default=train)")
    optparser.add_option("-e", "--target", dest="tgt", default="en", help="suffix of English (target language) filename (default=en)")
    optparser.add_option("-f", "--source", dest="src", default="cn", help="suffix of French (source language) filename (default=cn)")
    optparser.add_option("-a", "--alignment", dest="alg", default="align", help="suffix of alignment filename (default=align)")
    optparser.add_option("-l", "--logfile", dest="log_file", default=None, help="filename for logging output")
    optparser.add_option("", "--tightPhrase", dest="tight_phrases_only", default=False, action="store_true", help="extract just tight-phrases (default=False)")
    optparser.add_option("", "--maxPhrLen", dest="max_phr_len", default=7, type="int", help="maximum phrase length (default=7)")
    (opts, _) = optparser.parse_args()

    if opts.log_file:
        logging.basicConfig(filename=opts.log_file, filemode='w', level=logging.INFO)

    xtract_begin = time.time()
    readSentAlign()
    xtrct_time = time.time() - xtract_begin
    sys.stderr.write( "Phrase extraction time :: %1.7g sec\n\n" % (xtrct_time) )
[ 2235, 770, 1430, 32139, 257, 9546, 12, 79, 3468, 422, 262, 1573, 10548, 902, 286, 257, 10730, 35789, 22492, 198, 198, 11748, 2172, 29572, 11, 25064, 11, 28686, 11, 18931, 11, 640, 220, 198, 6738, 3858, 1330, 1635, 198, 6738, 1976, 364...
2.125069
5,413
from __future__ import with_statement

try:
    import simplejson as json
except ImportError:
    import json

import base64
import datetime
import unittest

from flask import g

from flask_peewee.rest import Authentication
from flask_peewee.rest import RestAPI
from flask_peewee.rest import RestResource
from flask_peewee.rest import UserAuthentication
from flask_peewee.tests.base import FlaskPeeweeTestCase
from flask_peewee.tests.test_app import AModel
from flask_peewee.tests.test_app import APIKey
from flask_peewee.tests.test_app import BDetails
from flask_peewee.tests.test_app import BModel
from flask_peewee.tests.test_app import CModel
from flask_peewee.tests.test_app import DModel
from flask_peewee.tests.test_app import EModel
from flask_peewee.tests.test_app import FModel
from flask_peewee.tests.test_app import Message
from flask_peewee.tests.test_app import Note
from flask_peewee.tests.test_app import TestModel
from flask_peewee.tests.test_app import User
from flask_peewee.utils import check_password
from flask_peewee.utils import get_next
from flask_peewee.utils import make_password
[ 6738, 11593, 37443, 834, 1330, 351, 62, 26090, 198, 198, 28311, 25, 198, 220, 220, 220, 1330, 2829, 17752, 355, 33918, 198, 16341, 17267, 12331, 25, 198, 220, 220, 220, 1330, 33918, 198, 198, 11748, 2779, 2414, 198, 11748, 4818, 8079, ...
3.161473
353
""" Unit tests for the asset upload endpoint. """ import json from datetime import datetime from io import BytesIO from unittest import mock from unittest.mock import patch from ddt import data, ddt from django.conf import settings from django.test.utils import override_settings from opaque_keys.edx.keys import AssetKey from opaque_keys.edx.locator import CourseLocator from PIL import Image from pytz import UTC from cms.djangoapps.contentstore.tests.utils import CourseTestCase from cms.djangoapps.contentstore.utils import reverse_course_url from cms.djangoapps.contentstore.views import assets from common.djangoapps.static_replace import replace_static_urls from xmodule.assetstore import AssetMetadata from xmodule.contentstore.content import StaticContent from xmodule.contentstore.django import contentstore from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.django import modulestore from xmodule.modulestore.xml_importer import import_course_from_xml TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT MAX_FILE_SIZE = settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB * 1000 ** 2 FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy() FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) class AssetsTestCase(CourseTestCase): """ Parent class for all asset tests. """ def upload_asset(self, name="asset-1", asset_type='text'): """ Post to the asset upload url """ asset = self.get_sample_asset(name, asset_type) response = self.client.post(self.url, {"name": name, "file": asset}) return response def get_sample_asset(self, name, asset_type='text'): """ Returns an in-memory file of the specified type with the given name for testing """ sample_asset = BytesIO() sample_file_contents = b"This file is generated by python unit test" if asset_type == 'text': sample_asset.name = f'{name}.txt' sample_asset.write(sample_file_contents) elif asset_type == 'image': image = Image.new("RGB", size=(50, 50), color=(256, 0, 0)) image.save(sample_asset, 'jpeg') sample_asset.name = f'{name}.jpg' elif asset_type == 'opendoc': sample_asset.name = f'{name}.odt' sample_asset.write(sample_file_contents) sample_asset.seek(0) return sample_asset class BasicAssetsTestCase(AssetsTestCase): """ Test getting assets via html w/o additional args """ def test_relative_url_for_split_course(self): """ Test relative path for split courses assets """ with modulestore().default_store(ModuleStoreEnum.Type.split): module_store = modulestore() course_id = module_store.make_course_key('edX', 'toy', '2012_Fall') import_course_from_xml( module_store, self.user.id, TEST_DATA_DIR, ['toy'], static_content_store=contentstore(), target_id=course_id, create_if_not_present=True ) course = module_store.get_course(course_id) filename = 'sample_static.html' html_src_attribute = f'"/static/{filename}"' asset_url = replace_static_urls(html_src_attribute, course_id=course.id) url = asset_url.replace('"', '') base_url = url.replace(filename, '') self.assertIn(f"/{filename}", url) resp = self.client.get(url) self.assertEqual(resp.status_code, 200) # simulation of html page where base_url is up-to asset's main directory # and relative_path is dom element with its src relative_path = 'just_a_test.jpg' # browser append relative_path with base_url absolute_path = base_url + relative_path self.assertIn(f"/{relative_path}", absolute_path) resp = self.client.get(absolute_path) self.assertEqual(resp.status_code, 200) class PaginationTestCase(AssetsTestCase): """ Tests the pagination of assets 
returned from the REST API. """ def test_json_responses(self): """ Test the ajax asset interfaces """ self.upload_asset("asset-1") self.upload_asset("asset-2") self.upload_asset("asset-3") self.upload_asset("asset-4", "opendoc") # Verify valid page requests self.assert_correct_asset_response(self.url, 0, 4, 4) self.assert_correct_asset_response(self.url + "?page_size=2", 0, 2, 4) self.assert_correct_asset_response( self.url + "?page_size=2&page=1", 2, 2, 4) self.assert_correct_sort_response(self.url, 'date_added', 'asc') self.assert_correct_sort_response(self.url, 'date_added', 'desc') self.assert_correct_sort_response(self.url, 'display_name', 'asc') self.assert_correct_sort_response(self.url, 'display_name', 'desc') self.assert_correct_filter_response(self.url, 'asset_type', '') self.assert_correct_filter_response(self.url, 'asset_type', 'OTHER') self.assert_correct_filter_response( self.url, 'asset_type', 'Documents') self.assert_correct_filter_response( self.url, 'asset_type', 'Documents,Images') self.assert_correct_filter_response( self.url, 'asset_type', 'Documents,OTHER') self.assert_correct_text_search_response(self.url, 'asset-1.txt', 1) self.assert_correct_text_search_response(self.url, 'asset-1', 1) self.assert_correct_text_search_response(self.url, 'AsSeT-1', 1) self.assert_correct_text_search_response(self.url, '.txt', 3) self.assert_correct_text_search_response(self.url, '2', 1) self.assert_correct_text_search_response(self.url, 'asset 2', 1) self.assert_correct_text_search_response(self.url, '2 asset', 1) self.assert_correct_text_search_response(self.url, '*.txt', 0) self.assert_correct_asset_response(self.url + "?text_search=", 0, 4, 4) #Verify invalid request parameters self.assert_invalid_parameters_error(self.url, 'asset_type', 'edX') self.assert_invalid_parameters_error(self.url, 'asset_type', 'edX, OTHER') self.assert_invalid_parameters_error(self.url, 'asset_type', 'edX, Images') # Verify querying outside the range of valid pages self.assert_correct_asset_response( self.url + "?page_size=2&page=-1", 0, 2, 4) self.assert_correct_asset_response( self.url + "?page_size=2&page=2", 2, 2, 4) self.assert_correct_asset_response( self.url + "?page_size=3&page=1", 3, 1, 4) self.assert_correct_asset_response( self.url + "?page_size=1&page=5&asset_type=OTHER", 0, 1, 1) self.assert_correct_asset_response( self.url + "?page_size=1&page=5&asset_type=Images", 5, 0, 0) @mock.patch('xmodule.contentstore.mongo.MongoContentStore.get_all_content_for_course') def test_mocked_filtered_response(self, mock_get_all_content_for_course): """ Test the ajax asset interfaces """ asset_key = self.course.id.make_asset_key( AssetMetadata.GENERAL_ASSET_TYPE, 'test.jpg') upload_date = datetime(2015, 1, 12, 10, 30, tzinfo=UTC) thumbnail_location = [ 'c4x', 'edX', 'toy', 'thumbnail', 'test_thumb.jpg', None] mock_get_all_content_for_course.return_value = [ [ { "asset_key": asset_key, "displayname": "test.jpg", "contentType": "image/jpg", "url": "/c4x/A/CS102/asset/test.jpg", "uploadDate": upload_date, "id": "/c4x/A/CS102/asset/test.jpg", "portable_url": "/static/test.jpg", "thumbnail": None, "thumbnail_location": thumbnail_location, "locked": None } ], 1 ] # Verify valid page requests self.assert_correct_filter_response(self.url, 'asset_type', 'OTHER') def assert_correct_asset_response(self, url, expected_start, expected_length, expected_total): """ Get from the url and ensure it contains the expected number of responses """ resp = self.client.get(url, HTTP_ACCEPT='application/json') json_response = 
json.loads(resp.content.decode('utf-8')) assets_response = json_response['assets'] self.assertEqual(json_response['start'], expected_start) self.assertEqual(len(assets_response), expected_length) self.assertEqual(json_response['totalCount'], expected_total) def assert_correct_sort_response(self, url, sort, direction): """ Get from the url w/ a sort option and ensure items honor that sort """ resp = self.client.get( url + '?sort=' + sort + '&direction=' + direction, HTTP_ACCEPT='application/json') json_response = json.loads(resp.content.decode('utf-8')) assets_response = json_response['assets'] self.assertEqual(sort, json_response['sort']) self.assertEqual(direction, json_response['direction']) name1 = assets_response[0][sort] name2 = assets_response[1][sort] name3 = assets_response[2][sort] if direction == 'asc': self.assertLessEqual(name1, name2) self.assertLessEqual(name2, name3) else: self.assertGreaterEqual(name1, name2) self.assertGreaterEqual(name2, name3) def assert_correct_filter_response(self, url, filter_type, filter_value): """ Get from the url w/ a filter option and ensure items honor that filter """ filter_value_split = filter_value.split(',') if filter_value else [] requested_file_extensions = [] all_file_extensions = [] for requested_filter in filter_value_split: if requested_filter == 'OTHER': for file_type in settings.FILES_AND_UPLOAD_TYPE_FILTERS: all_file_extensions.extend(file_type) else: file_extensions = settings.FILES_AND_UPLOAD_TYPE_FILTERS.get( requested_filter, None) if file_extensions is not None: requested_file_extensions.extend(file_extensions) resp = self.client.get( url + '?' + filter_type + '=' + filter_value, HTTP_ACCEPT='application/json') json_response = json.loads(resp.content.decode('utf-8')) assets_response = json_response['assets'] self.assertEqual(filter_value_split, json_response['assetTypes']) if filter_value != '': content_types = [asset['content_type'].lower() for asset in assets_response] if 'OTHER' in filter_value_split: for content_type in content_types: # content_type is either not any defined type (i.e. OTHER) or is a defined type (if multiple # parameters including OTHER are used) self.assertTrue( content_type in requested_file_extensions or content_type not in all_file_extensions ) else: for content_type in content_types: self.assertIn(content_type, requested_file_extensions) def assert_invalid_parameters_error(self, url, filter_type, filter_value): """ Get from the url w/ invalid filter option(s) and ensure error is received """ resp = self.client.get( url + '?' 
+ filter_type + '=' + filter_value, HTTP_ACCEPT='application/json') self.assertEqual(resp.status_code, 400) def assert_correct_text_search_response(self, url, text_search, number_matches): """ Get from the url w/ a text_search option and ensure items honor that search query """ resp = self.client.get( url + '?text_search=' + text_search, HTTP_ACCEPT='application/json') json_response = json.loads(resp.content.decode('utf-8')) assets_response = json_response['assets'] self.assertEqual(text_search, json_response['textSearch']) self.assertEqual(len(assets_response), number_matches) text_search_tokens = text_search.split() for asset_response in assets_response: for token in text_search_tokens: self.assertIn(token.lower(), asset_response['display_name'].lower()) @ddt class UploadTestCase(AssetsTestCase): """ Unit tests for uploading a file """ @data( (int(MAX_FILE_SIZE / 2.0), "small.file.test", 200), (MAX_FILE_SIZE, "justequals.file.test", 200), (MAX_FILE_SIZE + 90, "large.file.test", 413), ) @mock.patch('cms.djangoapps.contentstore.views.assets.get_file_size') class DownloadTestCase(AssetsTestCase): """ Unit tests for downloading a file. """ @patch('xmodule.modulestore.mixed.MixedModuleStore.find_asset_metadata') def test_pickling_calls(self, patched_find_asset_metadata): """ Tests if assets are not calling find_asset_metadata """ patched_find_asset_metadata.return_value = None self.client.get(self.uploaded_url, HTTP_ACCEPT='text/html') self.assertFalse(patched_find_asset_metadata.called) class AssetToJsonTestCase(AssetsTestCase): """ Unit test for transforming asset information into something we can send out to the client via JSON. """ @override_settings(LMS_ROOT_URL="https://lms_root_url") class LockAssetTestCase(AssetsTestCase): """ Unit test for locking and unlocking an asset. """ def test_locking(self): """ Tests a simple locking and unlocking of an asset in the toy course. """ def verify_asset_locked_state(locked): """ Helper method to verify lock state in the contentstore """ asset_location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/sample_static.html') content = contentstore().find(asset_location) self.assertEqual(content.locked, locked) def post_asset_update(lock, course): """ Helper method for posting asset update. """ content_type = 'application/txt' upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC) asset_location = course.id.make_asset_key('asset', 'sample_static.html') url = reverse_course_url( 'assets_handler', course.id, kwargs={'asset_key_string': str(asset_location)} ) resp = self.client.post( url, # pylint: disable=protected-access json.dumps(assets._get_asset_json( "sample_static.html", content_type, upload_date, asset_location, None, lock)), "application/json" ) self.assertEqual(resp.status_code, 201) return json.loads(resp.content.decode('utf-8')) # Load the toy course. module_store = modulestore() course_items = import_course_from_xml( module_store, self.user.id, TEST_DATA_DIR, ['toy'], static_content_store=contentstore(), verbose=True ) course = course_items[0] verify_asset_locked_state(False) # Lock the asset resp_asset = post_asset_update(True, course) self.assertTrue(resp_asset['locked']) verify_asset_locked_state(True) # Unlock the asset resp_asset = post_asset_update(False, course) self.assertFalse(resp_asset['locked']) verify_asset_locked_state(False) class DeleteAssetTestCase(AssetsTestCase): """ Unit test for removing an asset. 
""" def setUp(self): """ Scaffolding """ super().setUp() self.url = reverse_course_url('assets_handler', self.course.id) # First, upload something. self.asset_name = 'delete_test' self.asset = self.get_sample_asset(self.asset_name) response = self.client.post(self.url, {"name": self.asset_name, "file": self.asset}) self.assertEqual(response.status_code, 200) self.uploaded_url = json.loads(response.content.decode('utf-8'))['asset']['url'] self.asset_location = AssetKey.from_string(self.uploaded_url) self.content = contentstore().find(self.asset_location) def test_delete_asset(self): """ Tests the happy path :) """ test_url = reverse_course_url( 'assets_handler', self.course.id, kwargs={'asset_key_string': str(self.uploaded_url)}) resp = self.client.delete(test_url, HTTP_ACCEPT="application/json") self.assertEqual(resp.status_code, 204) def test_delete_image_type_asset(self): """ Tests deletion of image type asset """ image_asset = self.get_sample_asset(self.asset_name, asset_type="image") thumbnail_image_asset = self.get_sample_asset('delete_test_thumbnail', asset_type="image") # upload image response = self.client.post(self.url, {"name": "delete_image_test", "file": image_asset}) self.assertEqual(response.status_code, 200) uploaded_image_url = json.loads(response.content.decode('utf-8'))['asset']['url'] # upload image thumbnail response = self.client.post(self.url, {"name": "delete_image_thumb_test", "file": thumbnail_image_asset}) self.assertEqual(response.status_code, 200) thumbnail_url = json.loads(response.content.decode('utf-8'))['asset']['url'] thumbnail_location = StaticContent.get_location_from_path(thumbnail_url) image_asset_location = AssetKey.from_string(uploaded_image_url) content = contentstore().find(image_asset_location) content.thumbnail_location = thumbnail_location contentstore().save(content) with mock.patch('opaque_keys.edx.locator.CourseLocator.make_asset_key') as mock_asset_key: mock_asset_key.return_value = thumbnail_location test_url = reverse_course_url( 'assets_handler', self.course.id, kwargs={'asset_key_string': str(uploaded_image_url)}) resp = self.client.delete(test_url, HTTP_ACCEPT="application/json") self.assertEqual(resp.status_code, 204) def test_delete_asset_with_invalid_asset(self): """ Tests the sad path :( """ test_url = reverse_course_url( 'assets_handler', self.course.id, kwargs={'asset_key_string': "/c4x/edX/toy/asset/invalid.pdf"} ) resp = self.client.delete(test_url, HTTP_ACCEPT="application/json") self.assertEqual(resp.status_code, 404) def test_delete_asset_with_invalid_thumbnail(self): """ Tests the sad path :( """ test_url = reverse_course_url( 'assets_handler', self.course.id, kwargs={'asset_key_string': str(self.uploaded_url)}) self.content.thumbnail_location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/invalid') contentstore().save(self.content) resp = self.client.delete(test_url, HTTP_ACCEPT="application/json") self.assertEqual(resp.status_code, 204)
[ 37811, 198, 26453, 5254, 329, 262, 11171, 9516, 36123, 13, 198, 37811, 628, 198, 11748, 33918, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 33245, 1330, 2750, 4879, 9399, 198, 6738, 555, 715, 395, 1330, 15290, 198, 6738, 555, 715...
2.271842
8,630
# -*- coding: utf-8 -*- u"""PyTest for :mod:`sirepo.template.srw.py` :copyright: Copyright (c) 2018 RadiaSoft LLC. All Rights Reserved. :license: http://www.apache.org/licenses/LICENSE-2.0.html """ from __future__ import absolute_import, division, print_function import pytest import zipfile from pykern import pkunit from sirepo.template import template_common
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 84, 37811, 20519, 14402, 329, 1058, 4666, 25, 63, 82, 557, 7501, 13, 28243, 13, 27891, 86, 13, 9078, 63, 198, 198, 25, 22163, 4766, 25, 15069, 357, 66, 8, 2864, 5325...
2.889764
127
# -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.

from viper.common.abstracts import Command
from viper.core.database import Database
from viper.core.session import __sessions__


class Tags(Command):
    """
    This command is used to modify the tags of the opened file.
    """
    cmd = "tags"
    description = "Modify tags of the opened file"
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 770, 2393, 318, 636, 286, 34517, 532, 3740, 1378, 12567, 13, 785, 14, 8903, 525, 12, 30604, 14, 8903, 525, 198, 2, 4091, 262, 2393, 705, 43, 2149, 24290, 6, 329, ...
3.090278
144
# Closure

print('==========Closure==========')


# Nested function with a closure.
# NOTE: hitung() is called below but was never defined in the original
# snippet; the counter closure here is an assumed minimal definition,
# added only so the example runs.
def hitung():
    jumlah = 0

    def tambah():
        nonlocal jumlah
        jumlah += 1
        return jumlah

    return tambah


hasil_hitung = hitung()
print(hasil_hitung())
print(hasil_hitung())
print(hasil_hitung())
print('')

# Ordinary nested function
hitung()
hitung()
hitung()
[ 2, 1012, 516, 495, 198, 198, 4798, 10786, 2559, 855, 2601, 516, 495, 2559, 855, 11537, 198, 198, 2, 376, 2150, 13396, 275, 364, 283, 648, 2853, 1030, 16512, 198, 198, 10134, 346, 62, 17945, 2150, 796, 2277, 2150, 3419, 198, 198, 479...
2.42
100
import rdkit
import molecule_utils
from collections import Iterable
import pandas as pd
from src import setting

degrees = [0, 1, 2, 3, 4, 5]


class Node(object):
    """
    Class representing a graph node.

    Args:
        ntype (string): Node type
        ext_id (string): External identifier
        data: Node payload (default None)
    """

    def _add_neighbor(self, neighbors, new_neighbors):
        """
        Add neighbor(s) for the node.

        Args:
            neighbors (Node or an iterable of Node): Old neighbor(s).
            new_neighbors (Node or an iterable of Node): Neighbor(s) to add.
            undirected (bool): If the edge is undirected (default False).
        """
        if isinstance(new_neighbors, Node):
            new_neighbors = [new_neighbors]
        if isinstance(new_neighbors, Iterable) and \
                all([isinstance(node, Node) for node in new_neighbors]):
            neighbors.update(new_neighbors)
        else:
            raise ValueError("`neighbors` has to be either a Node object \
                or an iterable of Node objects!")


if __name__ == '__main__':
    smiles = ['CCC1=C/C(=C/2\C=CNN2)/C(=O)C=C1OCCCCCC(C)(C)C3=NNN=N3',
              'CCCCCC(=O)OC1=C(SC2=CC=CC=C2N3C1=CC=C3)C4=CC=CC=C4']
    molecules = Molecules(smiles)
    print(len(molecules.get_neighbor_idx_by_degree('atom', 3)))
    print(molecules.get_neighbor_idx_by_batch('atom'))
[ 11748, 374, 67, 15813, 198, 11748, 27756, 62, 26791, 198, 6738, 17268, 1330, 40806, 540, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 12351, 1330, 4634, 198, 198, 13500, 6037, 796, 685, 15, 11, 352, 11, 362, 11, 513, 11, 604, 11...
2.161043
652
#
# MIT License
#
# Copyright (c) 2022 GT4SD team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""TorchDrug GraphAF training utilities."""

import ast
import logging
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional

from torchdrug.core import Engine
from torchdrug.layers import distribution
from torchdrug.models import RGCN, GraphAF
from torchdrug.tasks import AutoregressiveGeneration

# isort: off
import torch
from torch import optim
from torch import nn

# isort: on
from ....cli.argument_parser import eval_lambda
from ...core import TrainingPipelineArguments
from .. import DATASET_FACTORY
from ..core import TorchDrugTrainingPipeline

logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())

"""
Necessary because torchdrug silently overwrites the default nn.Module.
This is quite invasive and causes significant side-effects in the rest of the
code. See: https://github.com/DeepGraphLearning/torchdrug/issues/77
"""
nn.Module = nn._Module  # type: ignore


class TorchDrugGraphAFTrainingPipeline(TorchDrugTrainingPipeline):
    """TorchDrug GraphAF training pipelines."""

    def train(  # type: ignore
        self,
        training_args: Dict[str, Any],
        model_args: Dict[str, Any],
        dataset_args: Dict[str, Any],
    ) -> None:
        """Generic training function for training a (GraphAF) model. For details see:
        Shi, Chence, et al. "GraphAF: a Flow-based Autoregressive Model for Molecular
        Graph Generation". International Conference on Learning Representations
        (ICLR), 2020.

        Args:
            training_args: training arguments passed to the configuration.
            model_args: model arguments passed to the configuration.
            dataset_args: dataset arguments passed to the configuration.
        """
        try:
            params = {**training_args, **dataset_args, **model_args}
            model_path = params["model_path"]
            training_name = params["training_name"]
            dataset_name = params["dataset_name"]
            logger.info(f"Model with name {training_name} starts.")

            model_dir = Path(model_path).joinpath(training_name)
            os.makedirs(model_dir, exist_ok=True)

            # Set up the dataset here
            joint_dataset_args = {
                "verbose": params.get("verbose", 1),
                "lazy": params.get("lazy", False),
                "transform": eval_lambda(params.get("transform", "lambda x: x")),
                "node_feature": params.get("node_feature", "default"),
                "edge_feature": params.get("edge_feature", "default"),
                "graph_feature": params.get("graph_feature", None),
                "with_hydrogen": params.get("with_hydrogen", False),
                "kekulize": not params.get("no_kekulization", False),
            }
            if dataset_name not in DATASET_FACTORY.keys():
                raise ValueError(
                    f"Dataset {dataset_name} is not supported. Choose from "
                    f"{DATASET_FACTORY.keys()}"
                )
            if dataset_name != "custom":
                # This is a native TorchDrug dataset
                dataset = DATASET_FACTORY[dataset_name](
                    path=params["dataset_path"], **joint_dataset_args
                )
            else:
                # User brought their own dataset
                dataset = DATASET_FACTORY["custom"](
                    file_path=params["file_path"],
                    target_fields=[params["target_field"]],
                    smiles_field=params.get("smiles_field", "smiles"),
                    **joint_dataset_args,
                )

            hidden_dims = ast.literal_eval(params["hidden_dims"])
            num_atom_type = dataset.num_atom_type
            num_bond_type = dataset.num_bond_type

            model = RGCN(
                input_dim=params.get("input_dim", num_atom_type),
                num_relation=params.get("num_relation", num_bond_type),
                hidden_dims=hidden_dims,
                batch_norm=params.get("batch_norm", True),
                edge_input_dim=params.get("edge_input_dim", None),
                short_cut=params.get("short_cut", False),
                activation=params.get("activation", "relu"),
                concat_hidden=params.get("concat_hidden", False),
                readout=params.get("readout", "sum"),
            )
            task = params.get("task")
            if dataset_name == "custom" and task and params["target_field"] != task:
                raise ValueError(
                    "If custom dataset is used & task is specified, then target_field "
                    "has to be set s.t. it extracts the task/property of interest. "
                    f"Not task={task} and target_field={params['target_field']}"
                )
            criterion = ast.literal_eval(params["criterion"])
            if "ppo" in criterion.keys() and (
                params["no_kekulization"] or params["node_feature"] != "symbol"
            ):
                # See torchdrug issue: https://github.com/DeepGraphLearning/torchdrug/issues/77
                raise ValueError(
                    "For property optimiz. leave `no_kekulization` at the default ("
                    "False) & set `node_feature` to `symbol` and not: "
                    f"{params['no_kekulization']} and `{params['node_feature']}`."
                )

            # Model prior/flow initialization
            node_prior = distribution.IndependentGaussian(
                torch.zeros(num_atom_type), torch.ones(num_atom_type)
            )
            edge_prior = distribution.IndependentGaussian(
                torch.zeros(num_bond_type + 1), torch.ones(num_bond_type + 1)
            )
            node_flow = GraphAF(
                model, node_prior, num_layer=params.get("num_node_flow_layers", 12)
            )
            edge_flow = GraphAF(
                model,
                edge_prior,
                num_layer=params.get("num_edge_flow_layers", 12),
                use_edge=not params.get("no_edge", False),
            )

            task = AutoregressiveGeneration(
                node_flow,
                edge_flow,
                task=params.get("task", None),
                max_edge_unroll=params.get("max_edge_unroll", 12),
                max_node=params.get("max_node", 38),
                criterion=ast.literal_eval(params["criterion"]),
                num_node_sample=params.get("num_node_sample", -1),
                num_edge_sample=params.get("num_edge_sample", -1),
                agent_update_interval=params.get("agent_update_interval", 10),
                gamma=params.get("gamma", 0.9),
                reward_temperature=params.get("reward_temperature", 1.0),
                baseline_momentum=params.get("baseline_momentum", 0.9),
            )

            optimizer = optim.Adam(
                task.parameters(), lr=params.get("learning_rate", 1e-5)
            )
            device = (0,) if torch.cuda.is_available() else None
            solver = Engine(
                task,
                dataset,
                None,  # validation data
                None,  # test data
                optimizer,
                batch_size=params.get("batch_size", 16),
                log_interval=params.get("log_interval", 100),
                scheduler=params.get("scheduler", None),
                gpus=params.get("gpus", device),
                gradient_interval=params.get("gradient_interval", 1),
                num_worker=params.get("num_worker", 0),
            )
            # Necessary since we have re-assigned nn.Module to the native torch.nn.Module
            # rather than the torchdrug-overwritten version.
            solver.model.device = solver.device

            weight_paths = sorted(list(model_dir.glob("*.pkl")), key=os.path.getmtime)
            if len(weight_paths) > 0:
                solver.load(
                    weight_paths[-1], load_optimizer=params.get("load_optimizer", False)
                )
                logger.info(f"Restored existing model from {weight_paths[-1]}")
                logger.info(
                    "To avoid this, set `training_name` & `model_path` to a new folder."
                )

            epochs = params.get("epochs", 10)
            solver.train(num_epoch=epochs)

            # Save model
            task_name = f"task={params.get('task')}_" if params.get("task") else ""
            data_name = "data=" + (
                dataset_name + "_" + params["file_path"].split(os.sep)[-1].split(".")[0]
                if dataset_name == "custom"
                else dataset_name
            )
            solver.save(
                model_dir.joinpath(
                    f"graphaf_data={data_name}_{task_name}epoch={epochs}.pkl"
                )
            )
        except Exception:
            logger.exception(
                "Exception occurred while running TorchDrugGraphAFTrainingPipeline"
            )


@dataclass
class TorchDrugGraphAFModelArguments(TrainingPipelineArguments):
    """Arguments pertaining to model instantiation."""

    __name__ = "model_args"

    hidden_dims: str = field(
        default="[128, 128]",
        metadata={"help": "Dimensionality of each hidden layer"},
    )
    batch_norm: bool = field(
        default=False, metadata={"help": "Whether the RGCN uses batch normalization"}
    )
    edge_input_dim: Optional[int] = field(
        default=None, metadata={"help": "Dimension of edge features"}
    )
    short_cut: bool = field(
        default=False, metadata={"help": "Whether the RGCN uses a short cut"}
    )
    activation: str = field(
        default="relu", metadata={"help": "Activation function for RGCN"}
    )
    concat_hidden: bool = field(
        default=False,
        metadata={
            "help": "Whether hidden representations from all layers are concatenated"
        },
    )
    num_node_flow_layers: int = field(
        default=12,
        metadata={"help": "Number of layers in the node flow GraphAF model"},
    )
    num_edge_flow_layers: int = field(
        default=12,
        metadata={"help": "Number of layers in the edge flow GraphAF model"},
    )
    no_edge: bool = field(
        default=False,
        metadata={
            "help": "Whether to use edge features in the edge GraphAF model. Per "
            "default, edges are used."
        },
    )
    readout: str = field(
        default="sum",
        metadata={"help": "RGCN Readout function. Either `sum` or `mean`"},
    )
    max_edge_unroll: int = field(
        default=12,
        metadata={
            "help": "max node id difference. Inferred from training data if not provided"
        },
    )
    max_node: int = field(
        default=38,
        metadata={
            "help": "max number of node. Inferred from training data if not provided."
        },
    )
    criterion: str = field(
        default="{'nll': 1.0}",
        metadata={
            "help": "training criterion. Available criteria are `nll` and `ppo` for"
            " regular training and property optimization respectively. If dict, the "
            "keys are criterions and values are the corresponding weights. If list, "
            "both criteria are used with equal weights."
        },
    )
    num_node_sample: int = field(
        default=-1,
        metadata={"help": "Number of node samples per graph."},
    )
    num_edge_sample: int = field(
        default=-1,
        metadata={"help": "Number of edge samples per graph."},
    )
    agent_update_interval: int = field(
        default=10,
        metadata={
            "help": "Update the agent every n batches (similar to gradient accumulation)"
        },
    )
    gamma: float = field(
        default=0.9,
        metadata={"help": "Reward discount rate"},
    )
    reward_temperature: float = field(
        default=1.0,
        metadata={
            "help": "Temperature for the reward (larger -> higher mean reward)"
            "lower -> higher maximal reward."
        },
    )
    baseline_momentum: float = field(
        default=0.9,
        metadata={"help": "Momentum for value function baseline"},
    )
[ 2, 198, 2, 17168, 13789, 198, 2, 198, 2, 15069, 357, 66, 8, 33160, 7963, 19, 10305, 1074, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 198, 2, 286, 428, 3788, 290, 3917...
2.204351
6,068
res = 0

for i in range(1, int(input())+1):
    res += i

print(res)
[ 411, 796, 657, 198, 198, 1640, 1312, 287, 2837, 7, 16, 11, 493, 7, 15414, 28955, 10, 16, 2599, 198, 220, 220, 220, 581, 15853, 1312, 198, 198, 4798, 7, 411, 8 ]
2.125
32
from selenium import webdriver
from selenium.webdriver.common.keys import Keys


driver = webdriver.Firefox()
driver.get("https://marketplace-dev.allizom.org/")
driver.implicitly_wait(12)
driver.find_element_by_xpath("//*[@id='site-header']/mkt-header-child-toggle[1]").click()
driver.implicitly_wait(2)
driver.find_element_by_xpath("//*[@id='search-q']").send_keys("Hello" + Keys.RETURN)
assert "Hello | Firefox Marketplace" in driver.title
driver.close()
[ 6738, 384, 11925, 1505, 1330, 3992, 26230, 198, 6738, 384, 11925, 1505, 13, 12384, 26230, 13, 11321, 13, 13083, 1330, 26363, 628, 198, 26230, 796, 3992, 26230, 13, 13543, 12792, 3419, 198, 26230, 13, 1136, 7203, 5450, 1378, 10728, 5372, ...
2.803681
163
#!/usr/bin/env python3.7
import os

BLOCK_SIZE = 256

P = [
    (0, [1, 7]),
    (1, [0, 8]),
    (0, [5, 3]),
    (1, [8, 6]),
    (0, [3, 9]),
    (1, [4, 0]),
    (0, [9, 1]),
    (1, [6, 2]),
    (0, [7, 5]),
    (1, [2, 4]),
]

if __name__ == "__main__":
    pt1 = int(bytes.hex(os.urandom(BLOCK_SIZE // 8)), 16)
    pt2 = int(bytes.hex(os.urandom(BLOCK_SIZE // 8)), 16)

    C1 = CbC(pt1, 99)
    C2 = CbC(pt2, 99)

    ct1 = C1.encrypt(pt2)
    ct2 = C2.encrypt(pt1)

    print(pt1, ct1)
    print(pt2, ct2)
    print(C1.decrypt(ct1))

    solve()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 13, 22, 198, 11748, 28686, 198, 198, 9148, 11290, 62, 33489, 796, 17759, 198, 198, 47, 796, 685, 198, 220, 220, 220, 357, 15, 11, 685, 16, 11, 767, 46570, 198, 220, 220, 220, 357, ...
1.68693
329
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .take import take
from .compress import compress
from .extract import extract
from .choose import choose, TensorChoose
from .unravel_index import unravel_index, TensorUnravelIndex
from .nonzero import nonzero, TensorNonzero
from .flatnonzero import flatnonzero
from .slice import TensorSlice

_install()
del _install
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 7358, 12, 7908, 41992, 4912, 31703, 12052, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 36...
3.594796
269
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# @File     : token.py
# @Created  : 2020/10/23 5:59 PM
# @Software : PyCharm
#
# @Author   : Liu.Qi
# @Contact  : liuqi_0725@aliyun.com
#
# @Desc     : token api
# -------------------------------------------------------------------------------

from flask import current_app, request, Blueprint
from tokenmanager.core.utils import E400, render_json
from tokenmanager.core.Security import create_token, verify_token, get_pub_key
from vanaspyhelper.LoggerManager import log

token = Blueprint("token", __name__)


@token.route('/.well-known/jwks.json')
def _jwks():
    """
    Return the signing public key
    :return: json
    """
    log.info("Handling request: fetch public key")
    key = {
        "alg": current_app.config['JWT_ALGORITHM'],  # algorithm
        "e": "AQAB",
        "n": get_pub_key(),  # public key
        "kty": "RSA",
        "use": "Signature"  # purpose
    }
    log.info("Handling request: public key fetched")
    return render_json(key)


@token.route("/oauth/token", methods=['POST'])
def generate_token():
    """
    Generate a token
    :return: json
    """
    try:
        data = request.json
        log.info("Handling request: create token. Data: %s ", data)
        grant_type = data.get('grant_type', '')
        client_id = data.get('client_id', '')
        signature = data.get('signature', '')
        timestamp = int(data.get('timestamp', '0'))

        # Build the token
        res_json = create_token(client_id, signature, timestamp, grant_type)
        log.info("Handling request: create token. Result: %s ", res_json)
        return render_json(res_json)
    except Exception as e3:
        log.error("Handling request: create token. Error: %s ", str(e3))
        return E400(str(e3))


@token.route('/verify_token', methods=['POST'])
def verify_token_data():
    """
    Verify a token
    :return:
    """
    try:
        # The request body must be JSON, using double quotes "
        data = request.json
        log.info("Handling request: verify token. Data: %s ", data)
        token = data['access_token']
        audience = data.get('client_id', '')

        res_json = verify_token(token, audience)
        log.info("Handling request: verify token. Result: %s ", res_json)
        return render_json(res_json)
    except Exception as e:
        log.error("Handling request: verify token. Error: %s ", str(e))
        return E400(str(e))
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 16529, 24305, 198, 2, 2488, 8979, 220, 220, 220, 220, 1058, 11241, 13, 9078, 198, 2, 2488, 41972, 220, 1058, 12131, 14, 940, 14, 1954, 642, 25, 3270, 220, 10310, ...
1.936295
1,193
import smart_imports

smart_imports.all()
[ 198, 11748, 4451, 62, 320, 3742, 198, 198, 27004, 62, 320, 3742, 13, 439, 3419, 628, 628 ]
2.705882
17
import sys, logging
from etl import run_etl


if __name__ == '__main__':
    logging.basicConfig(
        filename='data/etl.log',
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%d-%b-%y %H:%M:%S',
        level=logging.INFO
    )
    # The original called an undefined main(); run_etl is the only imported
    # entry point, so it is assumed to be the intended call.
    run_etl()
[ 11748, 25064, 11, 18931, 198, 6738, 2123, 75, 1330, 1057, 62, 316, 75, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1330, 18931, 628, 220, 220, 220, 18931, 13, 35487, 16934, 7, 198, 220, 22...
2.033557
149
# test if the skim is as expected
# test the isolation distribution of (loose) leptons, show max isolation value!
# print list of found triggers
# print list of not found triggers
[ 2, 9288, 611, 262, 39080, 318, 355, 2938, 198, 2, 9288, 262, 15133, 6082, 286, 357, 5439, 577, 8, 443, 457, 684, 11, 905, 3509, 15133, 1988, 0, 198, 2, 4798, 1351, 286, 1043, 20022, 198, 2, 4798, 1351, 286, 407, 1043, 20022 ]
4.069767
43
# -*- coding: utf-8 -*-

import asyncio
import time
from bls_py import bls
from enum import IntEnum

from .bitcoin import address_to_script
from .dash_msg import (DSPoolStatusUpdate, DSMessageIDs, ds_msg_str,
                       ds_pool_state_str, DashDsaMsg, DashDsiMsg, DashDssMsg)
from .dash_tx import str_ip, CTxIn, CTxOut
from .util import bfh


PRIVATESEND_QUEUE_TIMEOUT = 30
PRIVATESEND_SESSION_MSG_TIMEOUT = 40


class PSDenoms(IntEnum):
    '''Denoms values designated in P2P protocol'''
    D10 = 1
    D1 = 2
    D0_1 = 4
    D0_01 = 8
    D0_001 = 16


class MixSessionTimeout(Exception):
    """Thrown when waiting for next message from MN is timed out"""


class MixSessionPeerClosed(Exception):
    """Thrown when waiting for next message from MN, and MN closes session"""


class PSMixSession:
    '''P2P session with mixing masternode'''

    @property

    def verify_ds_msg_sig(self, ds_msg):
        '''Verify BLS signature of dsq message from masternode
        based on SML entry for masternode from protx_list'''
        if not self.sml_entry:
            return False
        mn_pub_key = self.sml_entry.pubKeyOperator
        pubk = bls.PublicKey.from_bytes(mn_pub_key)
        sig = bls.Signature.from_bytes(ds_msg.vchSig)
        msg_hash = ds_msg.msg_hash()
        aggr_info = bls.AggregationInfo.from_msg_hash(pubk, msg_hash)
        sig.set_aggregation_info(aggr_info)
        return bls.BLS.verify(sig)

    def verify_final_tx(self, tx, denominate_wfl):
        '''Verify final tx from dsf message'''
        inputs = denominate_wfl.inputs
        outputs = denominate_wfl.outputs
        icnt = 0
        ocnt = 0
        for i in tx.inputs():
            if i.prevout.to_str() in inputs:
                icnt += 1
        for o in tx.outputs():
            if o.address in outputs:
                ocnt += 1
        if icnt == len(inputs) and ocnt == len(outputs):
            return True
        else:
            return False

    async def send_dsa(self, pay_collateral_tx):
        '''Send dsa message to join or create mixing queue'''
        msg = DashDsaMsg(self.denom, pay_collateral_tx)
        await self.dash_peer.send_msg('dsa', msg.serialize())
        self.logger.debug(f'{self.wfl_lid}: dsa sent')

    async def send_dsi(self, inputs, pay_collateral_tx, outputs):
        '''Send dsi message containing inputs to mix, output addresses'''
        scriptSig = b''
        sequence = 0xffffffff
        vecTxDSIn = []
        for i in inputs:
            prev_h, prev_n = i.split(':')
            prev_h = bfh(prev_h)[::-1]
            prev_n = int(prev_n)
            vecTxDSIn.append(CTxIn(prev_h, prev_n, scriptSig, sequence))
        vecTxDSOut = []
        for o in outputs:
            scriptPubKey = bfh(address_to_script(o))
            vecTxDSOut.append(CTxOut(self.denom_value, scriptPubKey))
        msg = DashDsiMsg(vecTxDSIn, pay_collateral_tx, vecTxDSOut)
        await self.dash_peer.send_msg('dsi', msg.serialize())
        self.logger.debug(f'{self.wfl_lid}: dsi sent')

    async def send_dss(self, signed_inputs):
        '''Send dss message containing signed inputs of dsf message final tx'''
        msg = DashDssMsg(signed_inputs)
        await self.dash_peer.send_msg('dss', msg.serialize())

    async def read_next_msg(self, denominate_wfl, timeout=None):
        '''Read next msg from msg_queue, process and return (cmd, res) tuple'''
        try:
            if timeout is None:
                timeout = PRIVATESEND_SESSION_MSG_TIMEOUT
            res = await asyncio.wait_for(self.msg_queue.get(), timeout)
        except asyncio.TimeoutError:
            raise MixSessionTimeout('Session Timeout, Reset')
        if not res:  # dash_peer is closed
            raise MixSessionPeerClosed('peer connection closed')
        elif type(res) == Exception:
            raise res
        cmd = res.cmd
        payload = res.payload
        if cmd == 'dssu':
            res = self.on_dssu(payload)
            return cmd, res
        elif cmd == 'dsq':
            self.logger.debug(f'{self.wfl_lid}: dsq read: {payload}')
            res = self.on_dsq(payload)
            return cmd, res
        elif cmd == 'dsf':
            self.logger.debug(f'{self.wfl_lid}: dsf read: {payload}')
            res = self.on_dsf(payload, denominate_wfl)
            return cmd, res
        elif cmd == 'dsc':
            self.logger.wfl_ok(f'{self.wfl_lid}: dsc read: {payload}')
            res = self.on_dsc(payload)
            return cmd, res
        else:
            self.logger.debug(f'{self.wfl_lid}: unknown msg read, cmd: {cmd}')
            return None, None

    def on_dssu(self, dssu):
        '''Process dssu message from masternode, containing state update'''
        session_id = dssu.sessionID
        if not self.session_id:
            if session_id:
                self.session_id = session_id
        if self.session_id != session_id:
            raise Exception(f'Wrong session id {session_id},'
                            f' was {self.session_id}')
        self.state = dssu.state
        self.msg_id = dssu.messageID
        self.entries_count = dssu.entriesCount

        state = ds_pool_state_str(self.state)
        msg = ds_msg_str(self.msg_id)
        if (dssu.statusUpdate == DSPoolStatusUpdate.ACCEPTED
                and dssu.messageID != DSMessageIDs.ERR_QUEUE_FULL):
            self.logger.debug(f'{self.wfl_lid}: dssu read:'
                              f' state={state}, msg={msg},'
                              f' entries_count={self.entries_count}')
        elif dssu.statusUpdate == DSPoolStatusUpdate.ACCEPTED:
            raise Exception('MN queue is full')
        elif dssu.statusUpdate == DSPoolStatusUpdate.REJECTED:
            raise Exception(f'Get reject status update from MN: {msg}')
        else:
            raise Exception(f'Unknown dssu statusUpdate: {dssu.statusUpdate}')

    def on_dsq(self, dsq):
        '''Process dsq messages broadcasted from masternodes,
        and inform about existing queue states'''
        denom = dsq.nDenom
        if denom != self.denom:
            raise Exception(f'Wrong denom in dsq msg: {denom},'
                            f' session denom is {self.denom}.')
        # signature verified in dash_peer on receiving dsq message for session
        # signature not verified for dsq with fReady not set (go to recent dsq)
        if not dsq.fReady:  # additional check
            raise Exception('Get dsq with fReady not set')
        if self.fReady:
            raise Exception('Another dsq on session with fReady set')
        self.masternodeOutPoint = dsq.masternodeOutPoint
        self.fReady = dsq.fReady
        self.nTime = dsq.nTime

    def on_dsf(self, dsf, denominate_wfl):
        '''Process dsf message from masternode, containing final tx to sign'''
        session_id = dsf.sessionID
        if self.session_id != session_id:
            raise Exception(f'Wrong session id {session_id},'
                            f' was {self.session_id}')
        if not self.verify_final_tx(dsf.txFinal, denominate_wfl):
            raise Exception('Wrong txFinal')
        return dsf.txFinal

    def on_dsc(self, dsc):
        '''Process dsc message from masternode, which indicates
        mixing session is complete'''
        session_id = dsc.sessionID
        if self.session_id != session_id:
            raise Exception(f'Wrong session id {session_id},'
                            f' was {self.session_id}')
        msg_id = dsc.messageID
        if msg_id != DSMessageIDs.MSG_SUCCESS:
            raise Exception(ds_msg_str(msg_id))
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 30351, 952, 198, 11748, 640, 198, 6738, 698, 82, 62, 9078, 1330, 698, 82, 198, 6738, 33829, 1330, 2558, 4834, 388, 198, 198, 6738, 764, 35395, 1330, 2209, ...
2.09011
3,640
from time import time

from sso.providers.identity_provider import IdentityProvider
from sso.providers.oauth2.views import OAuth2CallbackView, OAuth2LoginView


class OAuth2Provider(IdentityProvider):
    """The `OAuth2Provider` provides a generic identity provider that uses
    the OAuth 2.0 protocol as a means for authenticating a user.

    OAuth scopes are configured through the oauth_scopes class property,
    however may be overridden using the ``config['oauth_scopes']`` object.
    """

    oauth_access_token_url = ''
    oauth_authorize_url = ''
    refresh_token_url = ''

    oauth_scopes = ()

    @staticmethod
[ 6738, 640, 1330, 640, 198, 198, 6738, 264, 568, 13, 15234, 4157, 13, 738, 414, 62, 15234, 1304, 1330, 27207, 29495, 198, 6738, 264, 568, 13, 15234, 4157, 13, 12162, 1071, 17, 13, 33571, 1330, 440, 30515, 17, 47258, 7680, 11, 440, 30...
3.08867
203
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 4818, 8079, 198, 6738, 5366, 13, 9945, 1330, 20613, 198, 6738, 5366, 13, 85, 17, 1330, 6060, 44, 4254, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198 ]
2.976744
43
import pigpio
import math

minPulse = 500  # us
maxPulse = 2500  # us
minAngle = -math.pi / 2
maxAngle = math.pi / 2

pi = pigpio.pi()
[ 198, 11748, 12967, 79, 952, 198, 11748, 10688, 198, 198, 1084, 47, 9615, 796, 5323, 1303, 514, 198, 9806, 47, 9615, 796, 33507, 1303, 514, 198, 1084, 13450, 293, 796, 532, 11018, 13, 14415, 1220, 362, 198, 9806, 13450, 293, 796, 10688...
2.368421
57
from tkinter import *
import db
import sqlite3
import pycep_correios
# from validacoes import *
from tkinter import messagebox as mb

janela = Tk()
janela.title('Veterinario')
janela.geometry('350x350+200+100')
janela.resizable(width=False, height=False)

photo2 = PhotoImage(file='logo.png')
logo2 = Label(janela, image=photo2, bg='#292929')
logo2.pack(padx=0, pady=0)

loged = Label(janela, text='Seja Bem Vindo!', font=('', 20, 'bold'),
              bg='#3CB371', fg='white', pady=20)
loged.pack(side=BOTTOM, fill=X)

# Creates the subwindow menu
# Creates Labels and Entries
# Creates Buttons
# Creates the Pessoa (person) subwindow
# Creates the Animal subwindow
# Creates the Veterinário (veterinarian) subwindow

Interface()  # Instantiates the subwindow menu

janela.mainloop()
[ 6738, 256, 74, 3849, 1330, 1635, 198, 11748, 20613, 198, 11748, 44161, 578, 18, 198, 11748, 12972, 344, 79, 62, 10215, 260, 4267, 198, 2, 422, 4938, 330, 3028, 1330, 1635, 198, 6738, 256, 74, 3849, 1330, 3275, 3524, 355, 285, 65, 62...
2.254237
354
##---------------------------------------------------------------------------
# R1_realistic.py
# G.Urbancic
#
# Takes raw .dat files:
#   (1) removes NaN values and replace by interpolation
#   (2) determines whether or not there is bad data during the day
#       [x,y,z,T] < [35,35,35,25]
##---------------------------------------------------------------------------

import numpy as np
import glob

# path to ascii raw data
datdir = '/Users/gabin/Documents/ABOA/DATA/CSAT_2m/'

# ascii file names
fnames = np.sort(np.array(glob.glob(datdir + '2m*.dat')))
print("number of data files:" + str(len(fnames)))

size = 20
maxm = [35, 35, 35, 25]

for fname in fnames:
    Ddate = fname.replace(datdir, "")
    Ddate = Ddate.replace("_0000.dat", "")
    print("*************************************")
    print(Ddate)
    print("*************************************")

    # Read data
    [time, x, y, z, T] = np.transpose(np.genfromtxt(fname, usecols=(0, 3, 5, 7, 9)))

    b_std = [0, 0, 0, 0]
    b_mean = [0, 0, 0, 0]
    j = 0
    for D in [x, y, z, T]:
        # Calculate mean
        # correct for NaN values
        msk_nans = np.isnan(D)
        if len(msk_nans[msk_nans]) > 0:
            xdata = np.arange(len(D))
            D[msk_nans] = np.interp(xdata[msk_nans],
                                    xdata[~msk_nans], D[~msk_nans])

        dat = np.array([D[x:x+size] for x in range(len(D) - size + 1)])
        std = np.std(dat, axis=-1)
        mean = np.mean(dat, axis=-1)

        for i in std:
            if i < 0.0001:
                b_std[j] = b_std[j] + 1
        b_std[j] = b_std[j] / len(std) * 100

        for i in mean:
            if i > maxm[j]:
                b_mean[j] = b_mean[j] + 1
        b_mean[j] = b_mean[j] / len(mean) * 100

        j = j + 1

    print('std max error: ' + str(max([b_std[0], b_std[1], b_std[2], b_std[3]])))
    print('mean max error: ' + str(max([b_mean[0], b_mean[1], b_mean[2], b_mean[3]])))
[ 2235, 10097, 32284, 198, 2, 371, 16, 62, 5305, 2569, 13, 9078, 220, 198, 2, 402, 13, 16692, 65, 1192, 291, 220, 198, 2, 198, 2, 33687, 8246, 764, 19608, 3696, 25, 220, 198, 2, 220, 220, 220, 357, 16, 8, 20694, 11013, 45, 3815, ...
2.017544
969
import time
import os

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms.functional as F

from .datasets import BojaDataSet
from .._file_utils import get_highest_numbered_file
from .._image_utils import draw_bboxes
from .. import _models
from .._s3_utils import s3_bucket_exists, s3_download_highest_numbered_file
from .._settings import (
    DEFAULT_LOCAL_DATA_DIR,
    DEFAULT_S3_DATA_DIR,
    LABEL_FILE_NAME,
    IMAGE_DIR_NAME,
    ANNOTATION_DIR_NAME,
    MANIFEST_DIR_NAME,
    MODEL_STATE_DIR_NAME,
    MANIFEST_FILE_TYPE,
    MODEL_STATE_FILE_TYPE,
    NETWORKS,
)

matplotlib.use("TKAgg")


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--local_data_dir",
        type=str,
        default=DEFAULT_LOCAL_DATA_DIR,
        help="Local data directory.",
    )
    parser.add_argument(
        "--manifest_path",
        type=str,
    )
    parser.add_argument("--model_path", type=str)
    parser.add_argument(
        "--s3_bucket_name",
        type=str,
    )
    parser.add_argument(
        "--s3_data_dir",
        type=str,
        default=DEFAULT_S3_DATA_DIR,
        help="Prefix of the s3 data objects.",
    )
    parser.add_argument(
        "--network",
        type=str,
        choices=NETWORKS,
        default=NETWORKS[0],
        help="The neural network to use for object detection",
    )
    parser.add_argument(
        "--threshold",
        type=float,
        default=0.5,
        help="The threshold above which to display predicted bounding boxes",
    )

    args = parser.parse_args()

    main(args)
[ 11748, 640, 198, 11748, 28686, 198, 198, 11748, 2603, 29487, 8019, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 11748, 28034, 10178, 13, 7645, 23914, 13, 45124, ...
2.301105
724
from Jumpscale import j

JSBASE = j.application.JSBaseClass
[ 6738, 449, 8142, 38765, 1330, 474, 628, 198, 41, 16811, 11159, 796, 474, 13, 31438, 13, 20120, 14881, 9487, 628 ]
3.1
20
"""Console backend""" import logging from unittest.mock import patch from notifications.providers import BaseNotificationProvider from .synchronous import SynchronousBackend
[ 37811, 47581, 30203, 37811, 198, 198, 11748, 18931, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 198, 6738, 19605, 13, 15234, 4157, 1330, 7308, 3673, 2649, 29495, 198, 198, 6738, 764, 28869, 11413, 516, 1330, 16065, 11413, 51...
4.045455
44
import time
from datetime import datetime

from file_utils import load_app_info


if __name__ == '__main__':
    main()
[ 11748, 640, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 6738, 2393, 62, 26791, 1330, 3440, 62, 1324, 62, 10951, 628, 628, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1388, 3419, ...
2.906977
43
import os
from typing import List

from ..package_spec import PackageSpec
from ..python_version import AvailablePythonVersion
from ..step_builder import CommandStepBuilder
from ..utils import BuildkiteLeafStep, BuildkiteStep, CommandStep, GroupStep
[ 11748, 28686, 198, 6738, 19720, 1330, 7343, 198, 198, 6738, 11485, 26495, 62, 16684, 1330, 15717, 22882, 198, 6738, 11485, 29412, 62, 9641, 1330, 14898, 37906, 14815, 198, 6738, 11485, 9662, 62, 38272, 1330, 9455, 8600, 32875, 198, 6738, ...
4.114754
61
import json
import sys
from collections import defaultdict

cut = True
best_other_alg = 'zlib'
if len(sys.argv) > 1 and 'b95' in sys.argv[1]:
    best_other_alg = 'b95'
elif len(sys.argv) > 1 and 'b11' in sys.argv[1]:
    best_other_alg = 'b11'
elif len(sys.argv) > 1:
    assert 'zlib' in sys.argv[1]
sub_item = 6
combo_scores = defaultdict(lambda: 0)
data_list = []
zlib_other_list = []
score_record = []
for line in sys.stdin:
    try:
        if cut:
            line = line[line.find(':')+1:]
        raw = json.loads(line)
        b11_cost = raw['b11'][0]
        b95_cost = raw['b95'][0]
        zlib_cost = raw['zlib'][0]
        other_cost = raw[best_other_alg][0]
        if raw['~raw']*.995 < zlib_cost:
            continue
        clist = raw['~'][sub_item]
        data_list.append(clist)
        zlib_other_list.append((zlib_cost, other_cost))
        for k0 in range(len(clist) - 1):
            for k1 in range(k0 + 1, len(clist)):
                key = (k0, k1)
                score = min(clist[k0][0], clist[k1][0], other_cost, zlib_cost)
                combo_scores[key] += score
    except Exception:
        continue
best_combo = min([(v, k[0], k[1]) for k, v in combo_scores.iteritems()])
score_record.append(best_combo[0])
best_elements = [best_combo[1], best_combo[2]]
print 'partial', best_elements, 'score', score_record
sys.stdout.flush()
combo_scores = defaultdict(lambda: 0)
for (sample, other) in zip(data_list, zlib_other_list):
    for k in range(len(sample)):
        combo_scores[k] += min(sample[best_elements[0]][0],
                               sample[best_elements[1]][0],
                               sample[k][0], other[0], other[1])
best_val = min([(v, k) for k, v in combo_scores.iteritems()])
score_record.append(best_val[0])
best_elements.append(best_val[1])
print 'partial', best_elements, 'score', score_record
sys.stdout.flush()
combo_scores = defaultdict(lambda: 0)
for (sample, other) in zip(data_list, zlib_other_list):
    for k in range(len(sample)):
        combo_scores[k] += min(sample[best_elements[0]][0],
                               sample[best_elements[1]][0],
                               sample[best_elements[2]][0],
                               sample[k][0], other[0], other[1])
best_val = min([(v, k) for k, v in combo_scores.iteritems()])
score_record.append(best_val[0])
best_elements.append(best_val[1])
print 'partial', best_elements, 'score', score_record
sys.stdout.flush()
combo_scores = defaultdict(lambda: 0)
for (sample, other) in zip(data_list, zlib_other_list):
    for k in range(len(sample)):
        combo_scores[k] += min(sample[best_elements[0]][0],
                               sample[best_elements[1]][0],
                               sample[best_elements[2]][0],
                               sample[best_elements[3]][0],
                               sample[k][0], other[0], other[1])
best_val = min([(v, k) for k, v in combo_scores.iteritems()])
score_record.append(best_val[0])
best_elements.append(best_val[1])
print 'partial', best_elements, 'score', score_record
sys.stdout.flush()
combo_scores = defaultdict(lambda: 0)
for (sample, other) in zip(data_list, zlib_other_list):
    for k in range(len(sample)):
        combo_scores[k] += min(sample[best_elements[0]][0],
                               sample[best_elements[1]][0],
                               sample[best_elements[2]][0],
                               sample[best_elements[3]][0],
                               sample[best_elements[4]][0],
                               sample[k][0], other[0], other[1])
best_val = min([(v, k) for k, v in combo_scores.iteritems()])
score_record.append(best_val[0])
best_elements.append(best_val[1])
print 'partial', best_elements, 'score', score_record
sys.stdout.flush()
combo_scores = defaultdict(lambda: 0)
prescient_score = 0
for (sample, other) in zip(data_list, zlib_other_list):
    prescient_score += min(min(x[0] for x in sample), min(other))
    for k in range(len(sample)):
        combo_scores[k] += min(sample[best_elements[0]][0],
                               sample[best_elements[1]][0],
                               sample[best_elements[2]][0],
                               sample[best_elements[3]][0],
                               sample[best_elements[4]][0],
                               sample[best_elements[5]][0],
                               sample[k][0], other[0], other[1])
best_val = min([(v, k) for k, v in combo_scores.iteritems()])
score_record.append(best_val[0])
best_elements.append(best_val[1])
print best_elements, 'score', score_record, 'best', prescient_score
[ 11748, 33918, 198, 11748, 25064, 198, 6738, 17268, 1330, 4277, 11600, 198, 8968, 796, 6407, 198, 13466, 62, 847, 62, 14016, 796, 705, 89, 8019, 6, 198, 361, 18896, 7, 17597, 13, 853, 85, 8, 1875, 352, 290, 705, 65, 3865, 6, 287, 2...
1.952421
2,354
import matplotlib.pyplot as plt

file1 = open("results.txt", "r")
file1 = file1.readlines()

vertices = []
s_times = []
p_times = []

for line in file1:
    line = line.split()
    if line[0][0] == 'S':
        s_times.append(float(line[2]))
        vertices.append(int(line[1]))
    elif line[0][0] == 'P':
        p_times.append(float(line[2]))

plt.scatter(vertices, s_times, label="sequential program")
plt.scatter(vertices, p_times, label="parallel program")
plt.title('Time taken for max-cut via Ising Annealing')
plt.legend()
plt.xlabel('Number of vertices')
plt.ylabel('Time taken in seconds')
plt.show()
[ 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 198, 7753, 16, 796, 1280, 7203, 43420, 13, 14116, 1600, 366, 81, 4943, 198, 7753, 16, 796, 2393, 16, 13, 961, 6615, 3419, 198, 198, 1851, 1063, 796, 17635, 198, 82, 62, ...
2.357692
260
from math import floor, ceil, sqrt, log2
from libraries import Digits
[ 6738, 10688, 1330, 4314, 11, 2906, 346, 11, 19862, 17034, 11, 2604, 17, 198, 6738, 12782, 1330, 7367, 896, 628 ]
3.55
20
""" Two words are “rotate pairs” if you can rotate one of them and get the other (see rotate_word in Exercise 8.5). Write a program that reads a wordlist and finds all the rotate pairs. """ from base_functions import make_wordlist, make_worddict, in_bisect, my_rotate_word def rotate_pairs(word_list: list) -> dict: """Takes a word list and finds all the words that can be obtained through the rotation of others. """ pairs = {} for word in word_list: lenght = len(word) + 1 for rotation in range(1, lenght): rotated = my_rotate_word(word, rotation) if in_bisect(word_list, rotated): pairs[dict] = rotated return pairs def rotate_pairs_v2(word_dict: dict) -> dict: """Takes a word list and finds all the words that can be obtained through the rotation of others. """ pairs = {} for word in word_dict: # lenght = len(word) + 1 for rotation in range(1, 14): rotated = my_rotate_word(word, rotation) if rotated in word_dict: if pairs.get(word) == None: pairs[word] = [rotated] else: pairs[word] += [rotated] return pairs if __name__ == '__main__': # words_list = make_wordlist() # pairs = rotate_pairs(words_list) # print(pairs) words_dict = make_worddict() pairs_v2 = rotate_pairs_v2(words_dict) print(pairs_v2)
[ 37811, 198, 7571, 2456, 389, 564, 250, 10599, 378, 14729, 447, 251, 611, 345, 460, 23064, 530, 286, 606, 290, 651, 262, 584, 220, 198, 7, 3826, 23064, 62, 4775, 287, 32900, 807, 13, 20, 737, 198, 16594, 257, 1430, 326, 9743, 257, ...
2.294656
655
from datetime import datetime
from string import Template
from typing import Optional, Union

import discord
from redbot.core import Config, commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import (
    bold,
    humanize_list,
    humanize_number,
    inline,
)

from .abc import CompositeMetaClass
from .commands import Commands
from .falxclass import Allowance
from .listeners import Listeners

DEFAULT_LEAVING_TEXT = (
    "Someone on your server invited me ($bot_name) but your server is not whitelisted. "
    "In order to add me in your server, you are required to contact my owner.\nFor that "
    "reason, I will be leaving your server until you get whitelisted, then you'll be able to "
    "invite me again!"
)
DEFAULT_GUILD_SETTINGS = {
    "is_allowed": False,
    "author": None,
    "added_at": None,
    "reason": None,
    "is_brut": True,
}
DEFAULT_GLOBAL_SETTINGS = {
    "notification_channel": None,
    "leaving_message": DEFAULT_LEAVING_TEXT,
    "autoremove": True,
    "enabled": True,
}


class Falx(commands.Cog, Commands, Listeners, name="Falx", metaclass=CompositeMetaClass):
    """
    Automatic guild manager.

    This cog acts as a guild approval system. Only the bot's owner(s) can use
    these commands. Each guild must be whitelisted before inviting the bot,
    otherwise the bot will automatically leave the server.
    If the bot leaves a guild that has been whitelisted before, their guild
    will be removed from the whitelist and will require a new validation.
    """

    def get_approve_color(self, left_guild: bool) -> discord.Color:
        """
        Get a colour for the embed if the guild is left.
        """
        return discord.Color.red() if left_guild else discord.Color.green()

    async def should_leave_guild(self, guild: discord.Guild) -> bool:
        """
        Determine if the guild should be left.
        """
        if self.is_enabled:
            guild_info = await Allowance.from_guild(guild, self.config)
            return guild_info.is_allowed
        return False
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 4731, 1330, 37350, 198, 6738, 19720, 1330, 32233, 11, 4479, 198, 198, 11748, 36446, 198, 6738, 2266, 13645, 13, 7295, 1330, 17056, 11, 9729, 198, 6738, 2266, 13645, 13, 7295, 13, 13645, 133...
2.853793
725
import inspect
import rlcompleter


if __name__ == '__main__':
    Klass().foo()
[ 11748, 10104, 198, 11748, 374, 75, 785, 1154, 353, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 14770, 562, 22446, 21943, 3419, 198 ]
2.5625
32
import os
import pathlib

import pytest

from ecranner.config.config import (
    YAMLLoader, EnvFileLoader, FileLoader,
    load_yaml, load_dot_env
)
from ecranner.config.exceptions import (
    ConfigurationError, EnvFileNotFoundError,
    ConfigurationNotFoundError
)


@pytest.fixture()
[ 11748, 28686, 198, 11748, 3108, 8019, 198, 11748, 12972, 9288, 198, 6738, 9940, 2596, 1008, 13, 11250, 13, 11250, 1330, 357, 198, 220, 220, 220, 575, 2390, 3069, 1170, 263, 11, 2039, 85, 8979, 17401, 11, 9220, 17401, 11, 198, 220, 220...
2.820755
106
import deepspeech
import numpy
import os
import wget

from typing import List

from speechless.utils.storage import make_cache_dir_rel
from speechless.processing.tokenization import EditToken

SCORER_URL = 'https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.scorer'
MODEL_URL = 'https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.pbmm'

DEEPSPEECH_CACHE_DIR = make_cache_dir_rel('deepspeech')
SCORER_FILE = str(DEEPSPEECH_CACHE_DIR / os.path.basename(SCORER_URL))
MODEL_FILE = str(DEEPSPEECH_CACHE_DIR / os.path.basename(MODEL_URL))


def transcript_count_words(transcript: deepspeech.CandidateTranscript) -> dict:
  """Count occurrences of individual words in transcript

  This function counts occurrences of each separate word. It returns a
  dictionary, in which keys correspond to words and values to the number of
  occurrences.

  Args:
      transcript (deepspeech.CandidateTranscript): Transcript for the counting.

  Returns:
      dict: A dictionary, containing numbers of occurrences.
  """
  tokens = transcript.tokens
  words = dict()
  for token in tokens:
    word = token.text
    print(word)
    if word not in words:
      words.update({word: 1})
    else:
      value = words.get(word)
      words.update({word: value + 1})
  return words


def transcript_to_string(transcript: deepspeech.CandidateTranscript) -> str:
  """Convert transcript to string

  Args:
      transcript (deepspeech.CandidateTranscript): Transcript to convert.

  Returns:
      str: Concatenated text from all the tokens from the transcript.
  """
  tokens = transcript.tokens
  s = ''
  for token in tokens:
    s += token.text
  return s


def string_count_words(string: str) -> dict:
  """Count occurrences of individual words in a given string

  This function counts occurrences of each separate word. It returns a
  dictionary, in which keys correspond to words and values to the number of
  occurrences.

  Args:
      string (str): String for the counting.

  Returns:
      dict: A dictionary, containing numbers of occurrences.
  """
  word_list = string.split()
  words = {}
  for word in word_list:
    if word not in words:
      words.update({word: 1})
    else:
      value = words.get(word)
      words.update({word: value + 1})
  return words


def transcript_to_edit_tokens(transcript: deepspeech.CandidateTranscript) -> List[EditToken]:
  """Create a list of EditTokens from transcript

  Args:
      transcript (transcript: deepspeech.CandidateTranscript): Transcript to convert.

  Returns:
      List[EditToken]: List with tokens
  """
  tokens = []
  start_i = -1
  for i, token in enumerate(transcript.tokens[:-1]):
    if start_i == -1:
      start_i = i
    if token.text == ' ' and start_i != i:
      tokens.append(
          EditToken(''.join([t.text for t in transcript.tokens[start_i:i]]),
                    transcript.tokens[start_i].start_time, token.start_time))
      start_i = -1
  tokens.append(
      EditToken(
          ''.join([t.text for t in transcript.tokens[start_i:]]),
          transcript.tokens[start_i].start_time,
          transcript.tokens[-1].start_time +
          (transcript.tokens[-1].start_time - transcript.tokens[-2].start_time)))
  return tokens


def speech_to_text(audio: numpy.array) -> List[EditToken]:
  """Perform a speech to text transcription

  Args:
      audio (numpy.array): A 16-bit, mono raw audio signal.

  Returns:
      List[EditToken]: A transcript containing recognized words and their
          timestamps.
  """
  get_deepspeech_resources()
  model = deepspeech.Model(MODEL_FILE)
  model.enableExternalScorer(SCORER_FILE)
  return transcript_to_edit_tokens(model.sttWithMetadata(audio).transcripts[0])


def remove_characters(s: str, characters: str) -> str:
  """Remove given characters from string

  Args:
      s (str): String to remove characters from.
      characters (str): Character set containing characters to remove.

  Returns:
      str: Copy of a given string, with specified characters removed.
  """
  for c in characters:
    s = s.replace(c, '')
  return s


def load_and_adjust_script(file: str) -> str:
  """Load text from file and adjust it for comparison with transcript

  Args:
      file (str): Path to file.

  Returns:
      str: Adjusted text.
  """
  content = ''
  with open(file, encoding='UTF-8') as f:
    content = f.read()
  content = content.lower()
  content = content.replace('\n', ' ')
  content = remove_characters(content, ',.?!:"*()')
  content = content.replace('  ', ' ')
  return content


def test(transcript: deepspeech.CandidateTranscript, compare_to: str) -> float:
  """Test transcription accuracy by comparing it with another text

  Args:
      transcript (deepspeech.CandidateTranscript): Transcript to test.
      compare_to (str): Path to text file to use for comparison.

  Returns:
      float: Value from range <0, 1>, where 1 represents complete similarity.
  """
  # dictionary1 = transcript_count_words(transcript)
  text = transcript_to_string(transcript)
  dictionary1 = string_count_words(text)
  dictionary2 = string_count_words(load_and_adjust_script(compare_to))

  result = 1.0
  length = len(dictionary2)
  for key, value in dictionary1.items():
    value2 = dictionary2.get(key, 0)
    if value != value2:
      result = result * \
          (1.0 - (abs(value - value2)/max(value, value2)/length))
  return result
[ 11748, 2769, 45862, 198, 11748, 299, 32152, 198, 11748, 28686, 198, 11748, 266, 1136, 198, 198, 6738, 19720, 1330, 7343, 198, 198, 6738, 4046, 1203, 13, 26791, 13, 35350, 1330, 787, 62, 23870, 62, 15908, 62, 2411, 198, 6738, 4046, 1203,...
2.891791
1,876
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

import click

from jina.clients import py_client


@click.command()
@click.argument("text")
@click.option("--host", default="localhost")
@click.option("--top_k", "-k", default=5)


if __name__ == "__main__":
    main()
[ 834, 22163, 4766, 834, 796, 366, 15269, 357, 66, 8, 12131, 449, 1437, 9552, 15302, 13, 1439, 2489, 10395, 526, 198, 834, 43085, 834, 796, 366, 25189, 4891, 12, 17, 13, 15, 1, 198, 198, 11748, 3904, 198, 198, 6738, 474, 1437, 13, 5...
2.817391
115
'''
ref at
https://github.com/aakankshaws/Python/blob/master/weather_data_bs4.py
https://medium.com/@aakankshaws/using-beautifulsoup-requests-to-scrape-weather-data-9c6e9d317800

Using BeautifulSoup, requests to scrape weather data, then save it into a
timestamped csv file
'''
import requests
from bs4 import BeautifulSoup

# SF next 10 day weather
page = requests.get("https://weather.com/weather/tenday/l/USCA0987:1:US")
content = page.content
soup = BeautifulSoup(content, "html.parser")

l = []
all = soup.find("div", {"class": "locations-title ten-day-page-title"}).find("h1").text
table = soup.find_all("table", {"class": "twc-table"})
for items in table:
    for i in range(len(items.find_all("tr")) - 1):
        d = {}
        try:
            d["day"] = items.find_all("span", {"class": "date-time"})[i].text
            d["date"] = items.find_all("span", {"class": "day-detail"})[i].text
            d["desc"] = items.find_all("td", {"class": "description"})[i].text
            d["temp"] = items.find_all("td", {"class": "temp"})[i].text
            d["precip"] = items.find_all("td", {"class": "precip"})[i].text
            d["wind"] = items.find_all("td", {"class": "wind"})[i].text
            d["humidity"] = items.find_all("td", {"class": "humidity"})[i].text
        except:
            d["day"] = "None"
            d["date"] = "None"
            d["desc"] = "None"
            d["temp"] = "None"
            d["precip"] = "None"
            d["wind"] = "None"
            d["humidity"] = "None"
        # print("")
        l.append(d)

import pandas
import time

timestr = time.strftime("%Y%m%d-%H%M%S")
df = pandas.DataFrame(l)
print(df)
# filename = SF_weather20190202-211748.csv
cvs_filename = "D:\\github\\walter-repo\\coding\\python\\SF_weather-" + timestr + ".csv"
df.to_csv(cvs_filename)

'''
      date      day              desc  ... precip    temp        wind
0    FEB 2  Tonight   Rain/Wind Early  ...   100%   --50°  SSE 28 mph
1    FEB 3      Sun         Rain/Wind  ...    80%  53°47°  WSW 22 mph
2    FEB 4      Mon         Rain/Wind  ...    90%  49°43°  WSW 20 mph
3    FEB 5      Tue     AM Light Rain  ...    70%  49°40°  WNW 11 mph
4    FEB 6      Wed  AM Clouds/PM Sun  ...     0%  51°42°     N 8 mph
5    FEB 7      Thu     Mostly Cloudy  ...    10%  52°43°   SSW 6 mph
6    FEB 8      Fri        PM Showers  ...    40%  51°45°   SSW 9 mph
7    FEB 9      Sat           Showers  ...    40%  52°44°   SW 13 mph
8   FEB 10      Sun        AM Showers  ...    40%  52°43°   SW 11 mph
9   FEB 11      Mon     Partly Cloudy  ...    10%  52°44°  NNE 11 mph
10  FEB 12      Tue           Showers  ...    40%  54°44°    SE 9 mph
11  FEB 13      Wed           Showers  ...    50%  54°45°    S 11 mph
12  FEB 14      Thu           Showers  ...    50%  54°45°  SSW 11 mph
13  FEB 15      Fri           Showers  ...    40%  54°45°   SW 12 mph
14  FEB 16      Sat           Showers  ...    60%  54°44°   SW 13 mph

[15 rows x 7 columns]

Process finished with exit code 0
'''
[ 7061, 6, 198, 5420, 379, 3740, 1378, 12567, 13, 785, 14, 64, 461, 2283, 71, 8356, 14, 37906, 14, 2436, 672, 14, 9866, 14, 23563, 62, 7890, 62, 1443, 19, 13, 9078, 198, 5450, 1378, 24132, 13, 785, 14, 31, 64, 461, 2283, 71, 8356,...
1.91545
1,644
import io  # needed for io.BytesIO below; missing from the original imports
import cv2
import matplotlib
import numpy as np
from matplotlib import pyplot as plt

# * syn where to set this
# must use 'Agg' to plot out onto image
matplotlib.use("Agg")


####
def fig2data(fig, dpi=180):
    """Convert a Matplotlib figure to a 4D numpy array with RGBA channels
    and return it.

    Args:
        fig: a matplotlib figure

    Return: a numpy 3D array of RGBA values
    """
    buf = io.BytesIO()
    fig.savefig(buf, format="png", dpi=dpi)
    buf.seek(0)
    img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
    buf.close()
    img = cv2.imdecode(img_arr, 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img


####
####
####
__converter_dict = {"scalar": _Scalar, "conf_mat": _ConfusionMatrix, "image": _Image}

####
[ 11748, 269, 85, 17, 198, 11748, 2603, 29487, 8019, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 2603, 29487, 8019, 1330, 12972, 29487, 355, 458, 83, 198, 198, 2, 1635, 6171, 810, 284, 900, 428, 198, 2, 1276, 779, 705, 46384, 6, 28...
2.355623
329
import csv
import pandas
import sys
import json
from tabulate import tabulate
import argparse
import math

import plotting_utils as utils


TEXFILE_TEMPLATE = r"""
{###ENTRIES PLACEHOLDER###}
"""

ENTRY_TEMPLATE = r"""
\addplot[only marks, color={col{###TOOL NAME PLACEHOLDER###}},
         mark={###TOOL MARK PLACEHOLDER###}]
coordinates {{###POINTS PLACEHOLDER###}};
"""

TIME_LIMIT = 600000


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('datafiles', nargs="+")
    parser.add_argument("-o", "--outputfile", type=str, required=True)
    parser.add_argument("-t", "--tools", nargs="+", type=str, required=True,
                        help="Which tools to plot.")
    args = parser.parse_args()

    tex_filepath = args.outputfile

    collected_data = {}
    # read input
    for filepath in args.datafiles:
        json_obj = utils.read_json_from_file(filepath)
        json_obj = [unify_name(entry) for entry in json_obj]
        json_obj = [entry for entry in json_obj if entry.get("error", "") == ""]
        data = group_entries(json_obj, lambda entry: entry["sampleName"])
        for method_name in set(data.keys()) | set(collected_data.keys()):
            entry_list = collected_data.get(method_name, [])
            entry_list += data.get(method_name, [])
            collected_data[method_name] = entry_list

    # print("\n".join([str(x) for x in tool1_data]))
    write_texfile(tex_filepath, collected_data, args.tools)
[ 11748, 269, 21370, 198, 11748, 19798, 292, 198, 11748, 25064, 198, 11748, 33918, 198, 6738, 7400, 5039, 1330, 7400, 5039, 198, 11748, 1822, 29572, 198, 11748, 10688, 198, 11748, 29353, 62, 26791, 355, 3384, 4487, 628, 198, 198, 51, 6369, ...
2.448445
611
from Core.Assertions.Assertion import Assert, APIAssert
[ 6738, 7231, 13, 8021, 861, 507, 13, 8021, 861, 295, 1330, 2195, 861, 11, 3486, 3539, 824, 861, 628 ]
3
19
import interactions

import config
from tools.embed import (
    create_info_embed,
    create_error_embed,
    create_review_embeds)
from tools import check, database, component
[ 198, 11748, 12213, 198, 198, 11748, 4566, 198, 6738, 4899, 13, 20521, 1330, 357, 198, 220, 220, 220, 2251, 62, 10951, 62, 20521, 11, 220, 198, 220, 220, 220, 2251, 62, 18224, 62, 20521, 11, 220, 198, 220, 220, 220, 2251, 62, 19023, ...
2.8
70
from spada.methods import get_switches
from spada.network import gene_network

import os
import pickle
import pytest

scriptPath = os.path.realpath(__file__)
dataPath = os.path.dirname(scriptPath) + "/../../data/"

g = get_switches.GetSwitches(dataPath + 'annotation.pklz')
g.run(dataPath + 'switches')
gn = g._genes
txs = g._txs
[ 6738, 599, 4763, 13, 24396, 82, 1330, 651, 62, 2032, 9249, 198, 6738, 599, 4763, 13, 27349, 1330, 9779, 62, 27349, 198, 198, 11748, 28686, 198, 11748, 2298, 293, 198, 11748, 12972, 9288, 198, 198, 12048, 15235, 796, 28686, 13, 6978, 1...
2.713115
122
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI node_services API fixtures and tests.

Copyright (c) 2021 Cisco and/or its affiliates.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION

pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.1',
                                reason='version does not match')


@pytest.mark.node_services

@pytest.mark.node_services

@pytest.mark.node_services

@pytest.mark.node_services

@pytest.mark.node_services

@pytest.mark.node_services

@pytest.mark.node_services

@pytest.mark.node_services

@pytest.mark.node_services

@pytest.mark.node_services
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 7390, 26858, 31007, 13798, 17614, 10139, 62, 30416, 7824, 34609, 290, 5254, 13, 198, 198, 15269, 357, 66, 8, 33448, 28289, 290, 14, 273, 663, 29116, 13, 198, 198,...
3.434211
532
#! -*- encoding: utf-8 -*-
from __future__ import unicode_literals

import pytest


@pytest.mark.django_db


@pytest.mark.django_db
[ 2, 0, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 11748, 12972, 9288, 628, 198, 31, 9078, 9288, 13, 4102, 13, 28241, 14208, 62, 9945, 628, 198, ...
2.392857
56
#!/usr/bin/python3

# Let's use textgenrnn to train a natural language
# model with Lord Andy works!

# But first, imports
from textgenrnn import textgenrnn

# We need to instantiate a `textgenrnn` object
textgen = textgenrnn()

# Let's train the model with the script for Love Never Dies
textgen.reset()
textgen.train_from_largetext_file('scripts/love-never-dies.txt',
                                  new_model=True,
                                  word_level=True,
                                  num_epochs=1)

# Here goes nothing!
textgen.generate_to_file('generated/love-never-ever-dies.md', temperature=0.75)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 198, 2, 3914, 338, 779, 2420, 5235, 81, 20471, 284, 4512, 257, 3288, 3303, 198, 2, 2746, 351, 4453, 12382, 2499, 0, 198, 198, 2, 887, 717, 11, 17944, 198, 6738, 2420, 5235, 81, 20471,...
2.373494
249
from django.urls import path

from common import views

app_name = 'common'
urlpatterns = [
    path('health-check/', views.health_check, name='health-check')
]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 198, 6738, 2219, 1330, 5009, 198, 198, 1324, 62, 3672, 796, 705, 11321, 6, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 10786, 13948, 12, 9122, 14, 3256, 5009, 13, 1394...
2.927273
55
load("@io_bazel_rules_kotlin//kotlin:kotlin.bzl", "kt_jvm_library") load("@io_bazel_rules_kotlin//kotlin:kotlin.bzl", "kt_jvm_test") load(":runtime_resources.bzl", "runtime_resources") def grab_android_local_test( name, srcs, deps, associates = [], custom_package = "", **kwargs): """A macro that generates test targets to execute all android library unit tests. Usage: The macro creates a single build target to compile all Android unit test classes and then creates multiple parallel test targets for each Test class. The name of the test class is derived from test class name and location of the file on disk. The macro adds a mocked Android jar to compile classpath similar to Android Gradle Plugin's testOptions.unitTests.returnDefaultValues = true feature. The macro assumes Kotlin is used and will use rules_kotlin's kt_jvm_library to compile test sources with mocked android.jar on the classpath. The test will be executed with java_test. Executing via Robolectric is currently not supported. Args: name: name for the test target, srcs: the test sources under test. deps: the build dependencies to use for the generated the android local test target and all valid arguments that you want to pass to the android_local_test target associates: associates target to allow access to internal members from the main Kotlin target """ runtime_resources_name = name + "-runtime-resources" runtime_resources( name = runtime_resources_name, deps = deps, ) _gen_test_targets( test_compile_rule_type = kt_jvm_library, test_runner_rule_type = native.java_test, name = name, srcs = srcs, associates = associates, deps = deps + [":" + runtime_resources_name], test_compile_deps = [ "@grab_bazel_common//tools/test:mockable-android-jar", ], test_runtime_deps = [ "@grab_bazel_common//tools/test:mockable-android-jar", "@com_github_jetbrains_kotlin//:kotlin-reflect", ], **kwargs ) def grab_kt_jvm_test( name, srcs, deps, associates = [], **kwargs): """A macro that generates test targets to execute all Kotlin unit tests. Usage: The macro creates a single build target to compile all unit test classes and then creates multiple parallel test targets for each Test class. The name of the test class is derived from test class name and location of the file disk. Args: name: name for the test target, srcs: the test sources under test. deps: the build dependencies to use for the generated the android local test target and all valid arguments that you want to pass to the android_local_test target associates: associates target to allow access to internal members from the main Kotlin target """ _gen_test_targets( test_compile_rule_type = kt_jvm_library, test_runner_rule_type = native.java_test, name = name, srcs = srcs, associates = associates, deps = deps, test_compile_deps = [], test_runtime_deps = [ "@com_github_jetbrains_kotlin//:kotlin-reflect", ], **kwargs ) def _gen_test_targets( test_compile_rule_type, test_runner_rule_type, name, srcs, deps, test_compile_deps, test_runtime_deps, associates = [], **kwargs): """A macro to auto generate and compile target and runner targets for tests. Usage: The macro works under certain assumptions and only works for Kotlin files. The macro builds all test sources in a single target specified by test_compile_rule_type and then generates parallel runner targets with test_runner_rule_type. In order for this to function correctly, the Kotlin test file and the class name should be the same and package name of test class should mirror the location of the file on disk. 
The root source set path must be either src/main/java or src/main/kotlin (this can be made configurable in the future). Args: test_compile_rule_type: The rule type that will be used for compiling test sources test_runner_rule_type: The rule type that will be used for running test targets name: name of the target srcs: All test sources, mixed Java and Kotlin are supported during build phase but only Kotlin is supported in runner phase. deps: All dependencies required for building test sources test_compile_deps: Any dependencies required for the build target. test_runtime_deps: Any dependencies required for the test runner target. associates: The list of associate targets to allow access to internal members. """ test_build_target = name + "_build" test_compile_rule_type( name = test_build_target, srcs = srcs, deps = test_compile_deps + deps, associates = associates, testonly = True, ) test_names = [] for src in srcs: if src.endswith("Test.kt") or src.endswith("Tests.kt"): # src/test/java/com/grab/test/TestFile.kt path_split = src.rpartition("/") # [src/test/java/com/grab/test,/,TestFile.kt] test_file = path_split[2] # Testfile.kt test_file_name = test_file.split(".")[0] # Testfile # Find package name from path path = path_split[0] # src/main/java/com/grab/test test_package = "" if path.find("src/test/java/") != -1 or path.find("src/test/kotlin/") != -1: # TODO make this path configurable path = path.split("src/test/java/")[1] if path.find("src/test/java/") != -1 else path.split("src/test/kotlin/")[1] # com/grab/test test_class = path.replace("/", ".") + "." + test_file_name # com.grab.test.TestFile test_target_name = test_class.replace(".", "_") test_names.append(test_target_name) test_runner_rule_type( name = test_class.replace(".", "_"), test_class = test_class, runtime_deps = test_runtime_deps + [ ":" + test_build_target, ], jvm_flags = [ "-Xverify:none", ], **kwargs ) if len(test_names) >= 0: native.test_suite( name = name, tests = test_names, )
[ 2220, 7203, 31, 952, 62, 65, 41319, 62, 38785, 62, 74, 313, 2815, 1003, 74, 313, 2815, 25, 74, 313, 2815, 13, 65, 48274, 1600, 366, 21841, 62, 73, 14761, 62, 32016, 4943, 198, 2220, 7203, 31, 952, 62, 65, 41319, 62, 38785, 62, 7...
2.426816
2,767
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, log_loss
import time
import sys
import warnings

if not sys.warnoptions:
    warnings.simplefilter("ignore")

sub_part = int(sys.argv[1])
path_train = sys.argv[2]
path_test = sys.argv[3]
path_val = sys.argv[4]

train_arr = np.genfromtxt(path_train, delimiter=',')
train_arr = train_arr[2:, 1:].copy()
X_train = train_arr[:, :-1].astype(int)
Y_train = train_arr[:, -1].astype(int)

val_arr = np.genfromtxt(path_val, delimiter=',')
val_arr = val_arr[2:, 1:].copy()
X_val = val_arr[:, :-1].astype(int)
Y_val = val_arr[:, -1].astype(int)

test_arr = np.genfromtxt(path_test, delimiter=',')
test_arr = test_arr[2:, 1:].copy()
X_test = test_arr[:, :-1].astype(int)
Y_test = test_arr[:, -1].astype(int)

if (sub_part == 4):
    dc = DecisionTreeClassifier(max_depth=3, min_samples_split=1150, min_samples_leaf=75)
    dc.fit(X_train, Y_train)
    train_pred = dc.predict(X_train)
    print("Train Accuracy :", accuracy_score(Y_train, train_pred))
    val_pred = dc.predict(X_val)
    print("Val Accuracy :", accuracy_score(Y_val, val_pred))
    test_pred = dc.predict(X_test)
    print("Test Accuracy :", accuracy_score(Y_test, test_pred))
elif (sub_part == 5):
    train1_X = cat_edit(X_train)
    test1_X = cat_edit(X_test)
    val1_X = cat_edit(X_val)
    dc = DecisionTreeClassifier(max_depth=3, min_samples_split=1150, min_samples_leaf=75)
    dc.fit(train1_X, Y_train)
    train_pred = dc.predict(train1_X)
    print("Train Accuracy :", accuracy_score(Y_train, train_pred))
    val_pred = dc.predict(val1_X)
    print("Val Accuracy :", accuracy_score(Y_val, val_pred))
    test_pred = dc.predict(test1_X)
    print("Test Accuracy :", accuracy_score(Y_test, test_pred))
elif (sub_part == 6):
    train1_X = cat_edit(X_train)
    test1_X = cat_edit(X_test)
    val1_X = cat_edit(X_val)
    rmfr = RandomForestClassifier(n_estimators=100, max_features=10, bootstrap=True)
    rmfr.fit(train1_X, Y_train)
    train_pred = rmfr.predict(train1_X)
    print("Train Accuracy :", accuracy_score(Y_train, train_pred))
    val_pred = rmfr.predict(val1_X)
    print("Val Accuracy :", accuracy_score(Y_val, val_pred))
    test_pred = rmfr.predict(test1_X)
    print("Test Accuracy :", accuracy_score(Y_test, test_pred))
else:
    print("Part not attempted")
[ 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 1341, 35720, 13, 21048, 1330, 26423, 27660, 9487, 7483, 198, 6738, 1341, 35720, 13, 1072, 11306, 1330, 14534, 34605, 9487, 7483, 198, 6738, 1341, 35720, 13, ...
2.45614
969
import cv2
import numpy as np
import pandas as pd
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard
from sklearn.model_selection import train_test_split
import sys

import params

input_size = params.input_size
epochs = params.max_epochs
batch_size = params.batch_size

df_train = pd.read_csv('input/train_masks.csv')
models = params.model_factory
m_names = params.model_names

ids_train = df_train['img'].map(lambda s: s.split('.')[0])

ids_train_split, ids_valid_split = train_test_split(ids_train, test_size=0.2, random_state=42)

callbacks = [EarlyStopping(monitor='val_loss',
                           patience=8,
                           verbose=1,
                           min_delta=1e-4),
             ReduceLROnPlateau(monitor='val_loss',
                               factor=0.1,
                               patience=4,
                               verbose=1,
                               epsilon=1e-4),
             ModelCheckpoint(monitor='val_loss',
                             filepath='weights/best_weights.hdf5',
                             save_best_only=True,
                             save_weights_only=True),
             TensorBoard(log_dir="logs/{}".format(str(sys.argv[1])),
                         histogram_freq=0, batch_size=params.batch_size,
                         write_graph=True, write_grads=True, write_images=True,
                         embeddings_freq=0, embeddings_layer_names=None,
                         embeddings_metadata=None, embeddings_data=None,
                         update_freq='batch')]

models[m_names.index(str(sys.argv[1]))].fit_generator(
    generator=train_generator(),
    steps_per_epoch=np.ceil(float(len(ids_train_split)) / float(batch_size)),
    epochs=epochs,
    verbose=2,
    callbacks=callbacks,
    validation_data=valid_generator(),
    validation_steps=np.ceil(float(len(ids_valid_split)) / float(batch_size)))
[ 11748, 269, 85, 17, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 41927, 292, 13, 13345, 10146, 1330, 12556, 1273, 33307, 11, 44048, 35972, 2202, 3646, 378, 559, 11, 9104, 9787, 4122, 11, 309, 22...
1.695067
1,338
import os
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
from cv2 import VideoWriter, VideoWriter_fourcc

import crawler
from config import config
import text_processing

if __name__ == "__main__":
    main()
    font = ImageFont.truetype("msyh.ttc", 40, encoding="utf-8")
    font.getsize_multiline
[ 11748, 28686, 198, 6738, 350, 4146, 1330, 7412, 11, 7412, 25302, 11, 7412, 23252, 198, 11748, 269, 85, 17, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 269, 85, 17, 1330, 7623, 34379, 11, 7623, 34379, 62, 14337, 535, 198, 198, 11748...
2.754098
122
import unittest
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
import simtk.openmm.app.element as elem


class TestGromacsGroFile(unittest.TestCase):
    """Test the Gromacs GRO file parser"""

    def test_Triclinic(self):
        """Test parsing a file that describes a triclinic box."""
        gro = GromacsGroFile('systems/triclinic.gro')
        self.assertEqual(len(gro.positions), 8)
        expectedPositions = [
            Vec3(1.744, 2.788, 3.162),
            Vec3(1.048, 0.762, 2.340),
            Vec3(2.489, 1.570, 2.817),
            Vec3(1.027, 1.893, 3.271),
            Vec3(0.937, 0.825, 0.009),
            Vec3(2.290, 1.887, 3.352),
            Vec3(1.266, 1.111, 2.894),
            Vec3(0.933, 1.862, 3.490)]*nanometers
        for (p1, p2) in zip(expectedPositions, gro.positions):
            self.assertEqual(p1, p2)
        expectedVectors = [
            Vec3(2.5, 0, 0),
            Vec3(0.5, 3.0, 0),
            Vec3(0.7, 0.9, 3.5)]*nanometers
        for (v1, v2) in zip(expectedVectors, gro.getPeriodicBoxVectors()):
            self.assertEqual(v1, v2)
        self.assertEqual(Vec3(2.5, 3.0, 3.5)*nanometers, gro.getUnitCellDimensions())
        for i in range(4):
            self.assertEqual(elem.chlorine, gro.elements[i])
            self.assertEqual('Cl', gro.atomNames[i])
            self.assertEqual('Cl', gro.residueNames[i])
        for i in range(4, 8):
            self.assertEqual(elem.sodium, gro.elements[i])
            self.assertEqual('Na', gro.atomNames[i])
            self.assertEqual('Na', gro.residueNames[i])


if __name__ == '__main__':
    unittest.main()
[ 11748, 555, 715, 395, 198, 6738, 985, 30488, 13, 9654, 3020, 13, 1324, 1330, 1635, 198, 6738, 985, 30488, 13, 9654, 3020, 1330, 1635, 198, 6738, 985, 30488, 13, 20850, 1330, 1635, 198, 11748, 985, 30488, 13, 9654, 3020, 13, 1324, 13, ...
1.881684
879
from pathlib import Path

import click
from databricks_cli.configure.config import debug_option

from databricks_sync import CONTEXT_SETTINGS, log
from databricks_sync.cmds import templates


@click.command(context_settings=CONTEXT_SETTINGS, help="Initialize export configuration file.")
@click.option('--filename', '-f', required=True,
              help="This is the filename to create the config file for the export.")
@debug_option
@click.pass_context
[ 6738, 3108, 8019, 1330, 10644, 198, 198, 11748, 3904, 198, 6738, 4818, 397, 23706, 62, 44506, 13, 11250, 495, 13, 11250, 1330, 14257, 62, 18076, 198, 198, 6738, 4818, 397, 23706, 62, 27261, 1330, 22904, 13918, 62, 28480, 51, 20754, 11, ...
3.548387
124
from djangorest_routes.config.base import *


DEBUG = env("DEBUG")

ALLOWED_HOSTS += ["djangorest_auth.digitalstade.com", "djangorest_auth.herokuapp.com"]

# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
    "default": dj_database_url.parse(env("DATABASE_URL")),
}

# Application Definition
INSTALLED_APPS += []

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_ROOT = "staticfiles"
STATIC_URL = "/static/"

# EMAIL CONFIGURATION
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "smtp.gmail.com"
EMAIL_HOST_USER = env("EMAIL_HOST_USER")
EMAIL_PORT = env("EMAIL_PORT")
EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER

# HTTPS SETTINGS
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 99999990
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
[ 6738, 42625, 648, 26522, 62, 81, 448, 274, 13, 11250, 13, 8692, 1330, 1635, 628, 198, 30531, 796, 17365, 7203, 30531, 4943, 198, 198, 7036, 3913, 1961, 62, 39, 10892, 50, 15853, 14631, 28241, 648, 26522, 62, 18439, 13, 34725, 301, 671...
2.370283
424
from sklearn.cluster import KMeans
import numpy as np
import logging
import sys
import os
import warnings
from sklearn import metrics
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import adjusted_mutual_info_score

if not sys.warnoptions:
    warnings.simplefilter("ignore")
    os.environ["PYTHONWARNINGS"] = "ignore"  # Also affect subprocesses
[ 6738, 1341, 35720, 13, 565, 5819, 1330, 509, 5308, 504, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 18931, 198, 11748, 25064, 198, 11748, 28686, 198, 11748, 14601, 198, 6738, 1341, 35720, 1330, 20731, 198, 6738, 1341, 35720, 13, 4164,...
3.319328
119
import numpy as np from sklearn.utils.validation import check_is_fitted from sklearn.utils.extmath import randomized_svd from sklearn.utils.validation import check_array from sklearn.base import BaseEstimator, TransformerMixin from numpy.linalg import norm import warnings def _soft_thresh(X, threshold): "Apply soft thresholding to an array" sign = np.sign(X) return np.multiply(sign, np.maximum(np.abs(X) - threshold, 0)) def _sv_thresh(X, threshold, num_svalue): """ Perform singular value thresholding. Parameters --------- X : array of shape [n_samples, n_features] The input array. threshold : float The threshold for the singualar values. num_svalue : int The number of singular values to compute. Returns ------- X_thresh : array of shape [n_samples, n_features] The output after performing singular value thresholding. grater_sv : int The number of singular values of `X` which were greater than `threshold` (U, s, V): tuple The singular value decomposition """ m, n = X.shape U, s, V = randomized_svd(X, num_svalue) greater_sv = np.count_nonzero(s > threshold) s = _soft_thresh(s, threshold) S = np.diag(s) X_thresh = np.dot(U, np.dot(S, V)) return X_thresh, greater_sv, (U, s, V) def rpca(M, lam=None, mu=None, max_iter=1000, eps_primal=1e-7, eps_dual=1e-5, rho=1.6, initial_sv=10, max_mu=1e6, verbose=False): """Implements the Robust PCA algorithm via Principal Component Pursuit [1]_ The Robust PCA algorithm minimizes .. math:: \\lVert L \\rVert_* + \\lambda \\lVert S \\rVert_1 subject to .. math:: M = L + S where :math:`\\lVert X \\rVert_1` is the sum of absolute values of the matrix `X`. The algorithm used for optimization is the "Inexact ALM" method specified in [2]_ Parameters ---------- M : array-like, shape (n_samples, n_features) The input matrix. lam : float, optional The importance given to sparsity. Increasing this parameter will yeild a sparser `S`. If not given it is set to :math:`\\frac{1}{\\sqrt{n}}` where ``n = max(n_samples, n_features)``. mu : float, optional The initial value of the penalty parameter in the Augmented Lagrangian Multiplier (ALM) algorithm. This controls how much attention is given to the constraint in each iteration of the optimization problem. max_iter : int, optional The maximum number of iterations the optimization algortihm will run for. eps_primal : float, optional The threshold for the primal error in the convex optimization problem. If the primal and the dual error fall below ``eps_primal`` and ``eps_dual`` respectively, the algorithm converges. eps_dual : float, optinal The theshold for the dual error in the convex optimzation problem. rho : float, optional The ratio of the paramter ``mu`` between two successive iterations. For each iteration ``mu`` is updated as ``mu = mu*rho``. initial_sv : int, optional The number of singular values to compute during the first iteration. rho_max : float, optional The maximum value that ``rho`` is allowed to take. verbose : bool, optional Whether to print convergence statistics during each iteration. Returns ------- L : array, shape (n_samples, n_features) The low rank component. S : array, shape (n_samples, n_features) The sparse component. (U, s, Vt) : tuple of arrays The singular value decomposition of the ``L`` n_iter : int The number of iterations taken to converge. References ---------- .. [1] : Emmanuel J. Cand`es 1,2, Xiaodong Li, Yi Ma, John Wright4, 2009: Robust Principal Component Analysis? .. 
    [2] : Zhouchen Lin, Minming Chen, Yi Ma, 2013 : The Augmented Lagrange
          Multiplier Method for Exact Recovery of Corrupted Low-Rank Matrices
    """
    # See http://arxiv.org/pdf/1009.5055v3.pdf
    # This implementation follows Algorithm 5 from the paper with minor
    # modifications
    if lam is None:
        lam = 1.0/np.sqrt(max(M.shape))
    d = min(M.shape)
    # See "Choosing Parameters" paragraph in section 4
    mu = 1.25/norm(M, 2)
    # The sparse matrix
    S = np.zeros_like(M)
    # The low rank matrix
    L = np.zeros_like(M)
    # See equation 10
    J = min(norm(M, 2), np.max(np.abs(M)))
    Y = M/J
    M_fro_norm = norm(M, 'fro')
    # This variable tries to predict how many singular values will be
    # required.
    sv = initial_sv
    for iter_ in range(max_iter):
        # See Section 4, paragraph "Order of Updating A and E" to see why
        # the `S` iterate is computed before the `L` iterate.
        S_old = S
        S = _soft_thresh(M - L + (Y/mu), lam/mu)
        L, svp, (U, s, V) = _sv_thresh(M - S + (Y/mu), 1/mu, sv)
        Y = Y + mu*(M - L - S)
        mu_old = mu
        mu = rho*mu
        mu = min(mu, max_mu)
        # See Equation 18
        if svp < sv:
            sv = svp + 1
        else:
            sv = svp + int(round(0.05*d))
        sv = min(sv, M.shape[0], M.shape[1])
        primal_error = norm(M - L - S, 'fro')/M_fro_norm
        dual_error = mu_old*norm(S - S_old, 'fro')/M_fro_norm
        if verbose:
            print('rpca: Iteration %d - Primal Error = %e Dual Error = %e'
                  % (iter_, primal_error, dual_error))
        if primal_error < eps_primal and dual_error < eps_dual:
            break
    else:
        # The loop index never reaches `max_iter`, so a bare
        # `if iter_ >= max_iter` check after the loop could never fire; the
        # for/else clause runs exactly when the loop ends without a break,
        # i.e. without converging.
        warnings.warn('rpca: Failed to converge within %d iterations'
                      % max_iter)
    n_iter = iter_
    return L, S, (U, s, V), n_iter


class RobustPCA(BaseEstimator, TransformerMixin):
    """Implements the Robust PCA algorithm via Principal Component Pursuit [1]_

    Robust PCA is designed to overcome the susceptibility to outliers of the
    classical :py:class:`sklearn.decomposition.PCA` algorithm. The Robust PCA
    algorithm tries to separate out the outliers in the data into the ``S``
    matrix while ``L`` contains the low-rank approximation.

    The Robust PCA algorithm minimizes

    .. math:: \\lVert L \\rVert_* + \\lambda \\lVert S \\rVert_1

    subject to

    .. math:: M = L + S

    where :math:`\\lVert X \\rVert_1` is the sum of absolute values of the
    matrix `X`. The algorithm used for optimization is the "Inexact ALM"
    method specified in [2]_

    Parameters
    ----------
    lam : float, optional
        The importance given to sparsity. Increasing this parameter will
        yield a sparser `S`. If not given it is set to
        :math:`\\frac{1}{\\sqrt{n}}` where ``n = max(n_samples, n_features)``.
    mu : float, optional
        The initial value of the penalty parameter in the Augmented
        Lagrangian Multiplier (ALM) algorithm. This controls how much
        attention is given to the constraint in each iteration of the
        optimization problem.
    max_iter : int, optional
        The maximum number of iterations the optimization algorithm will run
        for.
    eps_primal : float, optional
        The threshold for the primal error in the convex optimization
        problem. If the primal and the dual error fall below ``eps_primal``
        and ``eps_dual`` respectively, the algorithm converges.
    eps_dual : float, optional
        The threshold for the dual error in the convex optimization problem.
    rho : float, optional
        The ratio of the parameter ``mu`` between two successive iterations.
        For each iteration ``mu`` is updated as ``mu = mu*rho``.
    initial_sv : int, optional
        The number of singular values to compute during the first iteration.
    max_mu : float, optional
        The maximum value that ``mu`` is allowed to take.
    verbose : bool, optional
        Whether to print convergence statistics during each iteration.

    Attributes
    ----------
    n_components_ : int
        The rank of the low-rank approximation of the data. This is the same
        as the rank of `low_rank_`.
    components_ : array, [n_components, n_features]
        The principal axes of the low rank subspace that the algorithm chose.
    low_rank_ : array, [n_samples, n_features]
        The low rank approximation of the fitted data. Same as ``L`` in the
        optimization problem.
    n_iter_ : int
        The number of iterations taken to converge.
    """

    # NOTE: no ``__init__`` survived in this copy of the file, yet ``fit``
    # reads ``self.lam`` and friends. The constructor below is a
    # reconstruction; its default values are assumptions chosen to mirror
    # the parameter documentation, not verified against the original source.
    def __init__(self, lam=None, mu=None, max_iter=100, eps_primal=1e-7,
                 eps_dual=1e-5, rho=1.6, initial_sv=10, max_mu=1e6,
                 verbose=False):
        self.lam = lam
        self.mu = mu
        self.max_iter = max_iter
        self.eps_primal = eps_primal
        self.eps_dual = eps_dual
        self.rho = rho
        self.initial_sv = initial_sv
        self.max_mu = max_mu
        self.verbose = verbose

    def fit(self, X, y=None):
        """Fit the model with ``X``.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # ``np.float`` was removed from NumPy; ``np.float64`` is the
        # equivalent concrete dtype.
        X = check_array(X, dtype=np.float64)
        L, S, (U, s, Vt), self.n_iter_ = rpca(
            X, self.lam, self.mu, self.max_iter, self.eps_primal,
            self.eps_dual, self.rho, self.initial_sv, self.max_mu,
            self.verbose)
        self.low_rank_ = L
        r = np.count_nonzero(s)
        self.n_components_ = r
        self.components_ = Vt[:r]
        return self

    def transform(self, X):
        """Reduce dimensions of ``X`` by projecting it into a low rank
        subspace.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
            The projection of ``X`` along the low-rank subspace
        """
        check_is_fitted(self, 'components_')
        X = check_array(X, dtype=np.float64)
        return np.dot(X, self.components_.T)

    def inverse_transform(self, X):
        """Transform data back to its original space.

        Returns an array X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples and
            n_components is the number of components.

        Returns
        -------
        X_original : array, [n_samples, n_features]
            ``X`` projected onto the original space.

        Notes
        -----
        Applying :py:func:`transform` and :py:func:`inverse_transform`
        successively to data should return a low-rank approximation of the
        data in the original feature space.
        """
        check_is_fitted(self, 'components_')
        X = check_array(X, dtype=np.float64)
        return np.dot(X, self.components_)
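# A minimal usage sketch for the estimator above (not part of the original
# module). It assumes the reconstructed ``__init__`` defaults and builds
# synthetic data: an exactly rank-2 matrix corrupted by a few large, sparse
# outliers, which Robust PCA should separate back out.
import numpy as np

rng = np.random.RandomState(0)
M = rng.randn(100, 2).dot(rng.randn(2, 20))     # exactly rank 2
idx = rng.choice(M.size, size=40, replace=False)
M.flat[idx] += 10 * rng.randn(40)               # sparse gross corruption

est = RobustPCA().fit(M)
print(est.n_components_)                        # expected to be close to 2
X_low = est.transform(M)                        # shape (100, n_components_)
M_hat = est.inverse_transform(X_low)            # low-rank reconstruction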
#
# PySNMP MIB module CISCO-WLAN-VLAN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-WLAN-VLAN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:00:14 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
WepKeyType128, CDot11IfMicAlgorithm, CDot11IfWepKeyPermuteAlgorithm = mibBuilder.importSymbols("CISCO-DOT11-IF-MIB", "WepKeyType128", "CDot11IfMicAlgorithm", "CDot11IfWepKeyPermuteAlgorithm")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
ObjectIdentity, ModuleIdentity, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, IpAddress, iso, MibIdentifier, Bits, Integer32, NotificationType, TimeTicks, Counter64, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "ModuleIdentity", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "IpAddress", "iso", "MibIdentifier", "Bits", "Integer32", "NotificationType", "TimeTicks", "Counter64", "Gauge32")
TextualConvention, TruthValue, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TruthValue", "RowStatus", "DisplayString")

class CwvlVlanIdOrZero(TextualConvention, Unsigned32):
    # NOTE: this textual convention is used and exported below but its
    # definition was missing from this copy of the module; the range
    # (0..4095, i.e. a VLAN ID or zero) is an assumption reconstructed from
    # the name and the descriptions that reference it.
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 4095)

ciscoWlanVlanMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 268))
ciscoWlanVlanMIB.setRevisions(('2002-06-12 00:00', '2002-04-04 00:00', '2002-03-07 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: ciscoWlanVlanMIB.setRevisionsDescriptions(('Removed the tkip(3) option from the cwvlWlanEncryptionMode, and added cwvlWlanEncryptionAlgorithm and cwvlWlanWepKeyHashing objects to cwvlWlanVlanTable.', 'Added the tkip(3) option and removed the wepMic option from the cwvlWlanEncryptionMode, and added a cwvlWlanEncryptionMandatory object to cwvlWlanVlanTable.', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoWlanVlanMIB.setLastUpdated('200206120000Z')
if mibBuilder.loadTexts: ciscoWlanVlanMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoWlanVlanMIB.setContactInfo(' Cisco Systems Customer Service Postal: 170 West Tasman Drive, San Jose CA 95134-1706. USA Tel: +1 800 553-NETS E-mail: cs-dot11@cisco.com')
if mibBuilder.loadTexts: ciscoWlanVlanMIB.setDescription('This MIB module provides network management support for device VLAN configuration on IEEE 802.11 wireless LAN. ACRONYMS: AES - Advanced Encryption Standard, an encryption mechanism. MIC - Message Integrity Check. WEP - Wired Equivalent Privacy, an encryption mechanism.')
ciscoWlanVlanMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 268, 1))
cwvlRoamDomainConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 1))
cwvlDot11VlanConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2))
cwvlWlanDot1qEncapEnabled = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 1, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cwvlWlanDot1qEncapEnabled.setReference('IEEE 802.1Q-1998, Section 8.11.9.')
if mibBuilder.loadTexts: cwvlWlanDot1qEncapEnabled.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanDot1qEncapEnabled.setDescription("This object enables and disables IEEE 802.1Q type encapsulation for all VLANs. If this object is set to 'false', then 802.1Q encapsulation is disabled on all interfaces. If this object is set to 'true', then 802.1Q encapsulation is enabled on all interfaces.")
cwvlBridgingNativeVlanId = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 1, 2), CwvlVlanIdOrZero()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cwvlBridgingNativeVlanId.setStatus('current')
if mibBuilder.loadTexts: cwvlBridgingNativeVlanId.setDescription("This object specifies the native VLAN ID for layer 2 bridging. If this object is set to '0', there is no layer 2 bridging native VLAN ID. Setting this object will automatically update the dot1qPvid for all interfaces in the Q-BRIDGE-MIB (if supported) to the same value, provided it is not '0'. The dot1qPvid will be read-only. The dot1qPvid specifies the native VLAN ID on each device interface. If this object is '0', the return value of dot1qPvid is not valid. If the device is attached to a VLAN port of an Ethernet bridge or switch, then the device must have a non-zero native VLAN ID, and that VLAN ID must also match the VLAN ID of the port on the bridge or switch. The native VLAN ID is the default VLAN ID for received frames that are not otherwise associated with a VLAN ID.")
cwvlVoIPVlanEnabled = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 1, 3), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cwvlVoIPVlanEnabled.setStatus('current')
if mibBuilder.loadTexts: cwvlVoIPVlanEnabled.setDescription("This object enables and disables VoIP VLAN functionality for this agent. If this object is set to 'true' and cwvlVoIPVlanId is non-zero, the value of the cwvlVoIPVlanId object is the user-configured VoIP VLAN ID. If this object is set to 'true' and cwvlVoIPVlanId is the CDP VVID, this agent automatically enables the VoIP VLAN when it receives CDP messages with a non-zero VVID field on its root port; otherwise, the VoIP VLAN is disabled. If this object is set to 'false', then the VoIP VLAN is disabled and no station can associate with a VoIP VLAN ID.")
cwvlVoIPVlanId = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 1, 4), CwvlVlanIdOrZero().clone(4095)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cwvlVoIPVlanId.setStatus('current')
if mibBuilder.loadTexts: cwvlVoIPVlanId.setDescription("This object is the VoIP VLAN ID. All VoIP VLAN ID values are non-zero VLAN IDs. A value of '0' is used to represent the CDP VVID.")
cwvlPublicVlanId = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 1, 5), CwvlVlanIdOrZero()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cwvlPublicVlanId.setStatus('current')
if mibBuilder.loadTexts: cwvlPublicVlanId.setDescription("This object is the Public VLAN ID. This VLAN is the only VLAN which may be configured on an 802.11 network interface to not require WEP encryption. All other VLANs require WEP encryption in order to isolate the broadcast domains. If the value of this object is '0', there is no specific VLAN ID for the Public VLAN.")
cwvlWlanVlanTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1), )
if mibBuilder.loadTexts: cwvlWlanVlanTable.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanVlanTable.setDescription('This table contains attributes for configuration and security management of VLANs. Devices can be configured to have multiple VLANs on an interface. VLANs on different interfaces with the same VLAN ID must have the same configuration. Therefore, the attributes of each conceptual row apply to the VLANs of the corresponding VLAN ID on all interfaces.')
cwvlWlanVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1, 1), ).setIndexNames((0, "CISCO-WLAN-VLAN-MIB", "cwvlWlanVlanId"))
if mibBuilder.loadTexts: cwvlWlanVlanEntry.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanVlanEntry.setDescription('Each entry includes parameters to enable a VLAN and to configure encryption and key usage for a particular VLAN.')
cwvlWlanVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1, 1, 1), CwvlVlanIdOrZero())
if mibBuilder.loadTexts: cwvlWlanVlanId.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanVlanId.setDescription('This is the VLAN ID to which the parameters in each conceptual row shall be applied.')
cwvlWlanEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1, 1, 2), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cwvlWlanEnabled.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanEnabled.setDescription("If the value is 'true', this VLAN is enabled on all trunk and hybrid ports. If the value is 'false', this VLAN is disabled on all ports.")
cwvlWlanNUcastKeyRotateInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000000))).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cwvlWlanNUcastKeyRotateInterval.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanNUcastKeyRotateInterval.setDescription("This object specifies the WEP encryption key rotation period. If the value is '0', it indicates no key rotation.")
cwvlWlanEncryptionMode = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("wep", 2), ("aes", 3))).clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cwvlWlanEncryptionMode.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanEncryptionMode.setDescription('The encryption modes used on the VLANs are: none(1) - no encryption, the VLAN itself is used as the security mechanism; wep(2) - WEP encryption; aes(3) - Advanced Encryption Standard.')
cwvlWlanEncryptionMandatory = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1, 1, 5), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cwvlWlanEncryptionMandatory.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanEncryptionMandatory.setDescription("Encryption option for the wep(2) selection of cwvlWlanEncryptionMode: 'true' - WEP encryption is mandatory, 'false' - WEP encryption is optional.")
cwvlWlanMicAlgorithm = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1, 1, 6), CDot11IfMicAlgorithm().clone('micNone')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cwvlWlanMicAlgorithm.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanMicAlgorithm.setDescription('This is the auxiliary MIC type used on WEP-encoded packets for client stations assigned to this VLAN.')
cwvlWlanWepKeyPermuteAlgorithm = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1, 1, 7), CDot11IfWepKeyPermuteAlgorithm().clone('wepPermuteNone')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cwvlWlanWepKeyPermuteAlgorithm.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanWepKeyPermuteAlgorithm.setDescription('This is the function through which the WEP encryption key is permuted between key renewal periods for client stations assigned to this VLAN.')
cwvlWlanWepKeyHashing = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1, 1, 8), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cwvlWlanWepKeyHashing.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanWepKeyHashing.setDescription("This is an optional key hashing for WEP encryption. If the value is 'true', the hashing option is applied. If the value is 'false', the hashing option is not applied to WEP encryption.")
cwvlWlanEncryptionAlgorithm = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("standard", 1), ("cisco", 2))).clone('cisco')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cwvlWlanEncryptionAlgorithm.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanEncryptionAlgorithm.setDescription('This object determines whether Standard IEEE 802.11 or Cisco proprietary AES, MIC, and hashing for WEP encryption is applied. If the value is standard(1), the Standard IEEE 802.11 encryption is applied. If the value is cisco(2), the Cisco proprietary encryption is applied.')
cwvlWlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 1, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cwvlWlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanRowStatus.setDescription("This object is used to create a new row, or to modify or delete an existing row and its VLAN configuration, in this table. A VLAN can only be activated by the agent setting this object to `active'. When it is `active', the VLAN is being used or referenced in other system configurations. A VLAN should only be deleted or taken out of service (by setting this object to `destroy' or `outOfService') if it is not referenced by any associated system configuration.")
cwvlWlanNUcastKeyTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 2), )
if mibBuilder.loadTexts: cwvlWlanNUcastKeyTable.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanNUcastKeyTable.setDescription("This table contains shared WEP keys for all IEEE 802.11 frames transmitted and received over a VLAN identified by the cwvlWlanVlanId if encryption is enabled (i.e., the cwvlWlanEncryptionMode is wep(2) or aes(3)) on the VLAN. If WEP encryption is enabled for the transmitted IEEE 802.11 frames, then the Default Shared WEP key in the set is used to encrypt the transmitted broadcast and multicast frames associated with the cwvlWlanVlanId. Key '1' in the set is the default key. The Default Shared WEP key is also used to encrypt or decrypt unicast frames, associated with the cwvlWlanVlanId, if an individual session key is not defined for the target station address.")
cwvlWlanNUcastKeyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 2, 1), ).setIndexNames((0, "CISCO-WLAN-VLAN-MIB", "cwvlWlanVlanId"), (0, "CISCO-WLAN-VLAN-MIB", "cwvlWlanNUcastKeyIndex"))
if mibBuilder.loadTexts: cwvlWlanNUcastKeyEntry.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanNUcastKeyEntry.setDescription('Each entry contains the key index, key length, and key value. There is a maximum of 4 keys per VLAN or key set. Each key set is indexed by the VLAN ID.')
cwvlWlanNUcastKeyIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4)))
if mibBuilder.loadTexts: cwvlWlanNUcastKeyIndex.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanNUcastKeyIndex.setDescription("This object represents the corresponding 802.11 WEP key index used when transmitting or receiving frames with this key. SNMP table indexing conventions require the table index to be non-zero. Therefore, this object has to be one greater than the actual 802.11 WEP key index. A value of '1' for this object corresponds to a value of '0' for the 802.11 WEP key index.")
cwvlWlanNUcastKeyLen = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 2, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 13))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cwvlWlanNUcastKeyLen.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanNUcastKeyLen.setDescription("This object specifies the length in octets of cwvlWlanNUcastKeyValue. Common values are 5 for a 40-bit WEP key and 13 for a 128-bit WEP key. A value of '0' means that the key is not set.")
cwvlWlanNUcastKeyValue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 2, 1, 3), WepKeyType128()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cwvlWlanNUcastKeyValue.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanNUcastKeyValue.setDescription('This is the WEP secret key value. The agent always returns a zero-length string when this object is read, for security reasons.')
cwvlWlanWepChangeNotifEnabled = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 2, 3), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cwvlWlanWepChangeNotifEnabled.setStatus('current')
if mibBuilder.loadTexts: cwvlWlanWepChangeNotifEnabled.setDescription('Indicates whether ciscoWlanVlanWepChangeNotif notifications will or will not be sent by the agent when WEP keys in the cwvlWlanNUcastKeyTable are changed.')
ciscoWlanVlanMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 268, 0))
ciscoWlanVlanWepChangeNotif = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 268, 0, 1)).setObjects(("CISCO-WLAN-VLAN-MIB", "cwvlWlanNUcastKeyValue"))
if mibBuilder.loadTexts: ciscoWlanVlanWepChangeNotif.setStatus('current')
if mibBuilder.loadTexts: ciscoWlanVlanWepChangeNotif.setDescription('This ciscoWlanVlanWepChangeNotif notification will be sent when the WEP configuration in the cwvlWlanNUcastKeyTable is changed. The cwvlWlanNUcastKeyValue specifies the new key value for a given key for a VLAN. The sending of these notifications can be enabled or disabled via the cwvlWlanWepChangeNotifEnabled object.')
ciscoWlanVlanMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 268, 2))
ciscoWlanVlanMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 268, 2, 1))
ciscoWlanVlanMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 268, 2, 2))
ciscoWlanVlanMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 268, 2, 1, 1)).setObjects(("CISCO-WLAN-VLAN-MIB", "ciscoWlanRoamDomainGroup"), ("CISCO-WLAN-VLAN-MIB", "ciscoWlanVlanNotificationGroup"), ("CISCO-WLAN-VLAN-MIB", "ciscoWlanDot11VlanConfigGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoWlanVlanMIBCompliance = ciscoWlanVlanMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: ciscoWlanVlanMIBCompliance.setDescription('The compliance statement for the ciscoWlanVlanMIBGroups.')
ciscoWlanRoamDomainGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 268, 2, 2, 1)).setObjects(("CISCO-WLAN-VLAN-MIB", "cwvlWlanDot1qEncapEnabled"), ("CISCO-WLAN-VLAN-MIB", "cwvlBridgingNativeVlanId"), ("CISCO-WLAN-VLAN-MIB", "cwvlVoIPVlanEnabled"), ("CISCO-WLAN-VLAN-MIB", "cwvlVoIPVlanId"), ("CISCO-WLAN-VLAN-MIB", "cwvlPublicVlanId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoWlanRoamDomainGroup = ciscoWlanRoamDomainGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoWlanRoamDomainGroup.setDescription('Global VLAN configuration for the wireless LAN roaming domain.')
ciscoWlanDot11VlanConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 268, 2, 2, 2)).setObjects(("CISCO-WLAN-VLAN-MIB", "cwvlWlanEnabled"), ("CISCO-WLAN-VLAN-MIB", "cwvlWlanNUcastKeyRotateInterval"), ("CISCO-WLAN-VLAN-MIB", "cwvlWlanEncryptionMode"), ("CISCO-WLAN-VLAN-MIB", "cwvlWlanEncryptionMandatory"), ("CISCO-WLAN-VLAN-MIB", "cwvlWlanMicAlgorithm"), ("CISCO-WLAN-VLAN-MIB", "cwvlWlanWepKeyPermuteAlgorithm"), ("CISCO-WLAN-VLAN-MIB", "cwvlWlanWepKeyHashing"), ("CISCO-WLAN-VLAN-MIB", "cwvlWlanEncryptionAlgorithm"), ("CISCO-WLAN-VLAN-MIB", "cwvlWlanRowStatus"), ("CISCO-WLAN-VLAN-MIB", "cwvlWlanNUcastKeyLen"), ("CISCO-WLAN-VLAN-MIB", "cwvlWlanNUcastKeyValue"), ("CISCO-WLAN-VLAN-MIB", "cwvlWlanWepChangeNotifEnabled"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoWlanDot11VlanConfigGroup = ciscoWlanDot11VlanConfigGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoWlanDot11VlanConfigGroup.setDescription('Per-VLAN configurations for IEEE 802.11 wireless LAN.')
ciscoWlanVlanNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 268, 2, 2, 3)).setObjects(("CISCO-WLAN-VLAN-MIB", "ciscoWlanVlanWepChangeNotif"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoWlanVlanNotificationGroup = ciscoWlanVlanNotificationGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoWlanVlanNotificationGroup.setDescription('This is the notification group for the CISCO-WLAN-VLAN-MIB.')
mibBuilder.exportSymbols("CISCO-WLAN-VLAN-MIB", cwvlVoIPVlanEnabled=cwvlVoIPVlanEnabled, cwvlWlanWepKeyPermuteAlgorithm=cwvlWlanWepKeyPermuteAlgorithm, cwvlPublicVlanId=cwvlPublicVlanId, ciscoWlanVlanMIBGroups=ciscoWlanVlanMIBGroups, cwvlWlanNUcastKeyIndex=cwvlWlanNUcastKeyIndex, cwvlWlanNUcastKeyValue=cwvlWlanNUcastKeyValue, cwvlWlanVlanId=cwvlWlanVlanId, CwvlVlanIdOrZero=CwvlVlanIdOrZero, ciscoWlanVlanMIBCompliance=ciscoWlanVlanMIBCompliance, ciscoWlanVlanMIBObjects=ciscoWlanVlanMIBObjects, cwvlWlanDot1qEncapEnabled=cwvlWlanDot1qEncapEnabled, ciscoWlanVlanMIB=ciscoWlanVlanMIB, cwvlWlanRowStatus=cwvlWlanRowStatus, cwvlWlanWepKeyHashing=cwvlWlanWepKeyHashing, cwvlWlanEncryptionMandatory=cwvlWlanEncryptionMandatory, cwvlWlanEncryptionAlgorithm=cwvlWlanEncryptionAlgorithm, cwvlWlanWepChangeNotifEnabled=cwvlWlanWepChangeNotifEnabled, ciscoWlanVlanMIBNotifications=ciscoWlanVlanMIBNotifications, cwvlDot11VlanConfig=cwvlDot11VlanConfig, cwvlVoIPVlanId=cwvlVoIPVlanId, cwvlWlanNUcastKeyTable=cwvlWlanNUcastKeyTable, cwvlWlanNUcastKeyEntry=cwvlWlanNUcastKeyEntry, cwvlWlanNUcastKeyLen=cwvlWlanNUcastKeyLen, cwvlRoamDomainConfig=cwvlRoamDomainConfig, cwvlWlanVlanTable=cwvlWlanVlanTable, cwvlWlanNUcastKeyRotateInterval=cwvlWlanNUcastKeyRotateInterval, ciscoWlanVlanMIBCompliances=ciscoWlanVlanMIBCompliances, cwvlWlanMicAlgorithm=cwvlWlanMicAlgorithm, ciscoWlanVlanNotificationGroup=ciscoWlanVlanNotificationGroup, cwvlWlanEnabled=cwvlWlanEnabled, ciscoWlanVlanMIBConformance=ciscoWlanVlanMIBConformance, cwvlWlanEncryptionMode=cwvlWlanEncryptionMode, cwvlBridgingNativeVlanId=cwvlBridgingNativeVlanId, PYSNMP_MODULE_ID=ciscoWlanVlanMIB, ciscoWlanDot11VlanConfigGroup=ciscoWlanDot11VlanConfigGroup, cwvlWlanVlanEntry=cwvlWlanVlanEntry, ciscoWlanVlanWepChangeNotif=ciscoWlanVlanWepChangeNotif, ciscoWlanRoamDomainGroup=ciscoWlanRoamDomainGroup)
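# A minimal sketch (not part of the generated module) of loading the file
# above with pysnmp and resolving one of its OIDs. The directory path and
# saved file name (CISCO-WLAN-VLAN-MIB.py on the MIB search path) are
# assumptions.
from pysnmp.smi import builder, view

mib_builder = builder.MibBuilder()
mib_builder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))  # assumed path
mib_builder.loadModules('CISCO-WLAN-VLAN-MIB')

mib_view = view.MibViewController(mib_builder)
# Look up the module and symbol that own a given OID.
mod_name, sym_name, suffix = mib_view.getNodeLocation((1, 3, 6, 1, 4, 1, 9, 9, 268, 1, 1, 1))
print(mod_name, sym_name)  # CISCO-WLAN-VLAN-MIB cwvlWlanDot1qEncapEnabled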
# Generated by Django 3.1.1 on 2020-11-25 21:12
# pylint: disable=invalid-name

from django.db import migrations
from django.db import models
import django.db.models.deletion
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009  Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#

"""Realize a triangle-shaped fuzzy set."""

__revision__ = "$Id: Triangle.py,v 1.20 2010-10-29 19:24:41 rliebscher Exp $"

from fuzzy.set.Polygon import Polygon
from fuzzy.utils import prop
from fuzzy.Exception import FuzzyException


class Triangle(Polygon):
    r"""Realize a triangle-shaped fuzzy set::

               ______  y_max
                  A
                 /|\
                / | \
               /  |  \
             _/   |   \_  y_min
              |   m   |
              |       |
              alpha|beta

    See also U{http://pyfuzzy.sourceforge.net/demo/set/Triangle.png}
    """

    def __init__(self, m=0.0, alpha=1.0, beta=1.0, y_max=1.0, y_min=0.0):
        """Initialize a triangle-shaped fuzzy set.

        @param y_max: y-value at top of the triangle (1.0)
        @param y_min: y-value outside the triangle (0.0)
        @param m: x-value of top of triangle (0.0)
        @param alpha: distance of left corner to m (1.0)
        @param beta: distance of right corner to m (1.0)
        """
        super(Triangle, self).__init__()
        self._y_max = float(y_max)
        self._y_min = float(y_min)
        self._m = float(m)
        self._alpha = float(alpha)
        self._beta = float(beta)
        self._update()  # update polygon

    # pylint: disable=E0211,W0212
    # NOTE: the fget/fset bodies of the properties below were lost in this
    # copy of the file; without them the ``prop`` helper would build empty
    # properties. They are reconstructed from the class design (each setter
    # also rebuilds the polygon), which is an assumption.
    @prop
    def y_max():  #@NoSelf
        """y-value at top of the triangle
        @type: float"""
        def fget(self):
            return self._y_max
        def fset(self, value):
            self._y_max = float(value)
            self._update()
        return locals()

    @prop
    def y_min():  #@NoSelf
        """y-value outside the triangle
        @type: float"""
        def fget(self):
            return self._y_min
        def fset(self, value):
            self._y_min = float(value)
            self._update()
        return locals()

    @prop
    def m():  #@NoSelf
        """x-value of top of triangle
        @type: float"""
        def fget(self):
            return self._m
        def fset(self, value):
            self._m = float(value)
            self._update()
        return locals()

    @prop
    def alpha():  #@NoSelf
        """distance of left corner to m
        @type: float"""
        def fget(self):
            return self._alpha
        def fset(self, value):
            self._alpha = float(value)
            self._update()
        return locals()

    @prop
    def beta():  #@NoSelf
        """distance of right corner to m
        @type: float"""
        def fget(self):
            return self._beta
        def fset(self, value):
            self._beta = float(value)
            self._update()
        return locals()

    def _update(self):
        """update polygon"""
        p = super(Triangle, self)
        p.clear()
        p.add(self._m - self._alpha, self._y_min)
        p.add(self._m, self._y_max)
        p.add(self._m + self._beta, self._y_min)

    def add(self, x, y, where=Polygon.END):
        """Don't let anyone destroy our triangle."""
        raise FuzzyException()

    def remove(self, x, where=Polygon.END):
        """Don't let anyone destroy our triangle."""
        raise FuzzyException()

    def clear(self):
        """Don't let anyone destroy our triangle."""
        raise FuzzyException()

    def __repr__(self):
        """Return representation of instance.

        @return: representation of instance
        @rtype: string
        """
        return "%s.%s(m=%s, alpha=%s, beta=%s, y_max=%s, y_min=%s)" % (
            self.__class__.__module__,
            self.__class__.__name__,
            self._m,
            self._alpha,
            self._beta,
            self._y_max,
            self._y_min,
        )
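# A short usage sketch (not part of the original file). It assumes the
# pyfuzzy convention that set objects are callable and return the degree of
# membership at a point.
from fuzzy.set.Triangle import Triangle

t = Triangle(m=0.0, alpha=1.0, beta=2.0)
for x in (-1.5, -0.5, 0.0, 1.0, 2.0):
    print(x, t(x))  # expected: 0.0, 0.5, 1.0, 0.5, 0.0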
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2014, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
"""Test Thin Plate Spline transformer in alg/gdal_tps.cpp.

Rewrite of:

https://trac.osgeo.org/gdal/browser/trunk/autotest/alg/tps.py
"""

import unittest

from osgeo import gdal
from osgeo import osr

from autotest2.gcore import gcore_util

if __name__ == '__main__':
  unittest.main()
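# A hedged sketch of the kind of test the file above would contain (the real
# test cases were not captured in this copy; the class and method names are
# assumptions). It builds an in-memory dataset whose GCPs describe an
# identity mapping and checks that a GCP_TPS transformer reproduces it.
import unittest

from osgeo import gdal


class TpsSketchTest(unittest.TestCase):

  def testIdentityTps(self):
    ds = gdal.GetDriverByName('MEM').Create('', 50, 50)
    gcps = [
        gdal.GCP(0, 0, 0, 0, 0),
        gdal.GCP(0, 50, 0, 0, 50),
        gdal.GCP(50, 0, 0, 50, 0),
        gdal.GCP(50, 50, 0, 50, 50),
    ]
    ds.SetGCPs(gcps, '')
    transformer = gdal.Transformer(ds, None, ['METHOD=GCP_TPS'])
    self.assertIsNotNone(transformer)
    success, pnt = transformer.TransformPoint(0, 20, 10)
    self.assertTrue(success)
    self.assertAlmostEqual(pnt[0], 20, places=3)
    self.assertAlmostEqual(pnt[1], 10, places=3)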
import pytest

from tests import utilities


# NOTE: the decorated definitions were truncated in this copy; the bodies
# below are placeholders so the module at least parses. The fixture name is
# inferred from the string passed to ``usefixtures``; everything else is an
# assumption.
@pytest.fixture
def test_states_page():
    return utilities  # placeholder value


@pytest.mark.usefixtures("test_states_page")
class TestStatesPage:
    pass
import inspect
import time
from datetime import timedelta

from django.core.management.base import BaseCommand, CommandError

from corehq.apps.es.registry import get_registry, registry_entry
from corehq.elastic import get_es_export
from pillowtop.es_utils import initialize_index, set_index_reindex_settings

USAGE = """Reindex data from one ES index into another ES index using the Elasticsearch reindex API
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html#reindex-from-remote.
"""
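# A sketch of the underlying Elasticsearch reindex call that USAGE refers to
# (the index names are placeholders, and that get_es_export() returns a
# standard elasticsearch-py client is an assumption suggested by its name).
es = get_es_export()
result = es.reindex(
    body={
        "source": {"index": "old-index-name"},
        "dest": {"index": "new-index-name"},
    },
    wait_for_completion=False,  # returns a task id instead of blocking
)
print(result)  # e.g. {'task': 'nodeid:12345'}; poll it via the tasks API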
#!/usr/bin/python
"""
serialrosbags.py: version 0.2.0

History:
2016/10/13: Initial version; stream multiple rosbags in sync.
"""

import sys

import rosbag
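# A hedged sketch of the module's stated purpose (its implementation body
# was not captured in this copy): interleaving messages from several bags in
# timestamp order. heapq.merge with a key function needs Python >= 3.5.
import heapq

import rosbag


def stream_in_sync(paths, topics=None):
    """Yield (topic, msg, t) tuples across all bags, ordered by time t."""
    bags = [rosbag.Bag(p) for p in paths]
    try:
        streams = [bag.read_messages(topics=topics) for bag in bags]
        for topic, msg, t in heapq.merge(*streams, key=lambda m: m[2]):
            yield topic, msg, t
    finally:
        for bag in bags:
            bag.close()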
import numpy as np
from sklearn import __version__
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted

from .base import BaseFeatureLibrary
from .weak_pde_library import WeakPDELibrary
from .generalized_library import GeneralizedLibrary
from .polynomial_library import PolynomialLibrary


class ParameterizedLibrary(GeneralizedLibrary):
    """
    Parameters
    ----------
    parameter_library : BaseFeatureLibrary, optional (default PolynomialLibrary)
    data_library : BaseFeatureLibrary, optional (default PolynomialLibrary)
    num_features : int, optional (default 3)
    num_parameters : int, optional (default 3)
    """
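# A hedged construction sketch: combining a polynomial library over the
# measured state with one over a control parameter, as the docstring above
# implies. The keyword names follow that docstring; defaults and exact
# semantics in a released pysindy version may differ.
lib = ParameterizedLibrary(
    parameter_library=PolynomialLibrary(degree=1),
    data_library=PolynomialLibrary(degree=2),
    num_parameters=1,
    num_features=2,
)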
import random

# random.random() returns a float drawn uniformly from [0.0, 1.0).
print(random.random())
# random.randint(a, b) includes both endpoints: a two-digit int from 10 to 99.
print(random.randint(10, 99))
# random.randrange(start, stop) excludes stop: an int from 10 to 98.
print(random.randrange(10, 99))
from textfier.core import Dataset

# Defines the input sequences
sentences = ['hey', 'lets go']

# Defines the input labels
labels = [0, 0]

# Creates the dataset
dataset = Dataset(sentences=sentences, labels=labels)

print(dataset.sentences, dataset.labels)
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

import pandas as pd
import sqlite3
import numpy as np
import csv
import sys

print('Libraries imported.')


# In[ ]:

conn = sqlite3.connect('greendots.db')
cur = conn.cursor()


# In[ ]:

# Column names containing spaces must be quoted in SQLite.
SQL1 = """
CREATE TABLE DB_Veins (
    "Codi Veina" TEXT,
    Nom TEXT,
    Barri TEXT
)"""


# In[ ]:

SQL2 = """
CREATE TABLE DB_Barris (
    Districte TEXT,
    "Codi de Barri" INTEGER,
    Barri TEXT,
    "Latitud Barri" REAL,
    "Longitud Barri" REAL
)"""


# In[ ]:

SQL3 = """
CREATE TABLE DB_Comerc (
    ID_Bcn_2019 INTEGER,
    Codi_Grup_Activitat INTEGER,
    Nom_Grup_Activitat TEXT,
    Codi_Activitat_2019 INTEGER,
    Nom_Activitat TEXT,
    Nom_Local TEXT,
    SN_Eix INTEGER,
    Nom_Eix TEXT,
    X_UTM_ETRS89 REAL,
    Y_UTM_ETRS89 REAL,
    Latitud REAL,
    Longitud REAL,
    Direccio_Unica TEXT,
    Nom_Via TEXT,
    Porta INTEGER,
    Codi_Barri INTEGER,
    Nom_Barri TEXT,
    Codi_Districte INTEGER,
    Nom_Districte TEXT,
    Categoria TEXT,
    Telefon INTEGER
)"""


# In[ ]:

# cur.execute(SQL1)
cur.execute(SQL2)
cur.execute(SQL3)
print('Tables created.')


# In[ ]:

# csv.reader handles quoted fields with embedded commas, which a bare
# row.split(',') would break on. The connection stays open until all three
# files are loaded, then is closed once at the end.
with open('DB_Veins.csv', 'r') as file:
    n_records = 0
    for row in csv.reader(file):
        cur.execute('INSERT INTO DB_Veins VALUES(?,?,?)', row)
        n_records = n_records + 1
conn.commit()
print('Done')


# In[ ]:

with open('DB_Barris.csv', 'r') as file:
    n_records = 0
    for row in csv.reader(file):
        cur.execute('INSERT INTO DB_Barris VALUES(?,?,?,?,?)', row)
        n_records = n_records + 1
conn.commit()
print('Done')


# In[ ]:

with open('DB_Comerc.csv', 'r') as file:
    n_records = 0
    for row in csv.reader(file):
        cur.execute('INSERT INTO DB_Comerc VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', row)
        n_records = n_records + 1
conn.commit()
conn.close()
print('Done')
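# An optional tightening of the loads above (a sketch, not from the original
# notebook): executemany() batches the inserts, and the sqlite3 connection
# context manager commits once on success, which is considerably faster for
# large CSVs. csv.reader yields row sequences that sqlite3 accepts directly
# as parameters.
def load_csv(db_path, table, csv_path, n_columns):
    placeholders = ','.join('?' * n_columns)
    with sqlite3.connect(db_path) as batch_conn:
        with open(csv_path, 'r') as f:
            batch_conn.executemany(
                'INSERT INTO %s VALUES(%s)' % (table, placeholders),
                csv.reader(f),
            )

load_csv('greendots.db', 'DB_Barris', 'DB_Barris.csv', 5)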