'\n\timageList = re.compile(imgPattern).findall(divResult)\n\tx=1\n\tfor imageUrl in imageList:\n\t\timageName = \"D:/JDIMG/\"+str(i)+\"-\"+str(x)+\".jpg\"\n\t\timageUrl = \"http://\"+imageUrl\n\t\ttry:\n\t\t\turllib.request.urlretrieve(imageUrl,filename=imageName)\n\t\texcept urllib.error.URLError as e:\n\t\t\tif hasattr(e,\"code\"):\n\t\t\t\tprint(e.code)\n\t\t\tif hasattr(e,\"reason\"):\n\t\t\t\tprint(e.reason)\n\t\t\tx+=1\n\t\t\tcontinue\n\t\tx+=1\n\t\tprint(\"Saved image ---\"+imageUrl+\"--- successfully\")\n\nfor i in range(1,79):\n\turl = 'https://list.jd.com/list.html?cat=9987,653,655&page='+str(i)\n\tcraw(url,i)\n","sub_path":"images-spider/images-spider.py","file_name":"images-spider.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"559122617","text":"import random\n\nclass Game():\n\n    def __init__(self):\n        self.player = True\n\n    def computer_random(self):\n        self.computer_choose = random.randint(1,3)\n        print(\"computer's move: \", self.computer_choose)\n\n    def start_game(self):\n        self.player_choose = int(input(\"please choose your move: 1,2 or 3 (rock, paper, scissors): \"))\n        #self.computer_random()\n\n        # print(self.player)\n\n        if self.player_choose == 1:\n            print(\"You chose: Rock\")\n        elif self.player_choose == 2:\n            print(\"You chose: Paper\")\n        else:\n            print(\"You chose: Scissors\")\n\n\n        while self.player == True:\n\n            if self.computer_choose == self.player_choose:\n                print(\"It's a Draw!\")\n                self.player = False\n\n            # paper beats rock, scissors beats paper, rock beats scissors\n            elif (self.computer_choose, self.player_choose) in [(1, 2), (2, 3), (3, 1)]:\n                print(\"You Win!\")\n                break\n\n            else:\n                print(\"You Lose!\")\n                break\n\n    # def main(self):\n    #     begin = input(\"Shall we begin...? y/n: \")\n    #     if begin == \"y\":\n\nnew_game = Game()\nnew_game.computer_random()\nnew_game.start_game()\n","sub_path":"rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"196901188","text":"import numpy as np\nimport anndata\nfrom scipy.sparse import csr_matrix, hstack\nfrom .. import tools \n\ndef run_pipeline(input_file, output_name, **kwargs):\n\tis_raw = not kwargs['processed']\n\n\tif 'seurat_compatible' not in kwargs:\n\t\tkwargs['seurat_compatible'] = False\n\n\t# load input data\n\tif not kwargs['cite_seq']:\n\t\tadata = tools.read_input(input_file, genome = kwargs['genome'], mode = 'a' if (is_raw or kwargs['subcluster']) else 'r+')\n\telse:\n\t\tdata_dict = tools.read_input(input_file, genome = kwargs['genome'], return_a_dict = True)\n\t\tassert len(data_dict) == 2\n\t\tadata = cdata = None\n\t\tfor genome, data in data_dict.items():\n\t\t\tif genome.startswith('CITE_Seq'):\n\t\t\t\tcdata = data\n\t\t\telse:\n\t\t\t\tadata = data\n\t\tassert adata is not None and cdata is not None\n\tprint(\"Inputs are loaded.\")\n\n\tif kwargs['seurat_compatible']:\n\t\tassert is_raw and kwargs['select_variable_genes'] and kwargs['submat_to_dense']\n\n\t# preprocessing\n\tif is_raw:\n\t\t# make gene names unique\n\t\ttools.update_var_names(adata)\n\t\t# filter out low quality cells/genes\n\t\ttools.filter_data(adata, output_filt = kwargs['output_filt'], plot_filt = kwargs['plot_filt'], plot_filt_figsize = kwargs['plot_filt_figsize'], \\\n\t\t\tmito_prefix = kwargs['mito_prefix'], min_genes = kwargs['min_genes'], max_genes = kwargs['max_genes'], min_umis = kwargs['min_umis'], max_umis = kwargs['max_umis'], \\\n\t\t\tpercent_mito = kwargs['percent_mito'], percent_cells = kwargs['percent_cells'], min_genes_on_raw = kwargs['min_genes_on_raw'])\n\t\tif kwargs['seurat_compatible']:\n\t\t\traw_data = adata.copy() # raw as count\n\t\t# normalize counts and then transform to log space\n\t\ttools.log_norm(adata, kwargs['norm_count'])\n\t\t# estimate bias factors\n\t\tif kwargs['batch_correction']:\n\t\t\ttools.set_group_attribute(adata, kwargs['group_attribute'])\n\t\t\ttools.estimate_adjustment_matrices(adata)\n\telif kwargs['subcluster']:\n\t\tadata = tools.get_anndata_for_subclustering(adata, kwargs['subset_selections'])\n\t\tis_raw = True # get submat and then set is_raw to True\n\n\t# dimension reduction --- select variable genes or not\n\tpca_key = kwargs['pca_key']\n\tif is_raw:\n\t\tif kwargs['select_variable_genes']:\n\t\t\tfilter_result = tools.filter_genes_dispersion(adata, kwargs['batch_correction'])\n\t\t\tadata_c = tools.collect_variable_gene_matrix(adata, filter_result.gene_subset)\n\t\t\tif kwargs['submat_to_dense']:\n\t\t\t\tadata_c.X = adata_c.X.toarray()\n\t\t\tif kwargs['batch_correction']:\n\t\t\t\ttools.correct_batch_effects(adata_c)\n\t\t\n\t\t\t# dimension reduction\n\t\t\tif pca_key == 'X_pca':\n\t\t\t\ttools.run_pca(adata_c, nPC = kwargs['nPC'], random_state = kwargs['random_state'])\n\t\t\telse:\n\t\t\t\ttools.run_rpca(adata_c, nPC = kwargs['nPC'], random_state = kwargs['random_state'])\n\t\t\tadata.obsm[pca_key] = adata_c.obsm[pca_key]\n\t\telse:\n\t\t\tassert pca_key == 'X_rpca'\n\t\t\tif kwargs['batch_correction']:\n\t\t\t\ttools.correct_batch_effects(adata)\n\t\t\ttools.run_rpca(adata, nPC = kwargs['nPC'], random_state = kwargs['random_state'])\n\telse:\n\t\tassert pca_key in adata.obsm.keys()\n\n\t# diffusion map\n\tif is_raw:\n\t\ttools.run_diffmap(adata, pca_key, n_jobs = kwargs['n_jobs'], n_components = kwargs['nDC'], alpha = kwargs['diffmap_alpha'], K = kwargs['diffmap_K'], random_state = kwargs['random_state'], full_speed = kwargs['diffmap_full_speed'])\n\telse:\n\t\tassert 'X_diffmap' in adata.obsm.keys()\n\n\t# 
clustering\n\tif kwargs['run_approx_louvain']:\n\t\t# tools.run_approximated_louvain(adata, 'X_pca', n_jobs = kwargs['n_jobs'], resolution = kwargs['approx_louvain_resolution'], random_state = kwargs['random_state'], n_clusters = kwargs['approx_louvain_nclusters'], n_init = kwargs['approx_louvain_ninit'], class_label = 'approx_pca')\n\t\t# tools.run_approximated_louvain(adata, 'X_diffmap', n_jobs = kwargs['n_jobs'], resolution = kwargs['approx_louvain_resolution'], random_state = kwargs['random_state'], n_clusters = kwargs['approx_louvain_nclusters'], n_init = kwargs['approx_louvain_ninit'], class_label = 'approx_dm')\n\t\ttools.run_approximated_louvain(adata, 'X_dmnorm', n_jobs = kwargs['n_jobs'], resolution = kwargs['approx_louvain_resolution'], random_state = kwargs['random_state'], n_clusters = kwargs['approx_louvain_nclusters'], n_init = kwargs['approx_louvain_ninit'], class_label = 'approx_louvain_labels')\n\n\tif kwargs['run_louvain']:\n\t\ttools.run_louvain(adata, affinity = kwargs['louvain_affinity'], resolution = kwargs['louvain_resolution'], random_state = kwargs['random_state'])\n\n\t# if kwargs['run_kmeans']:\n\t# \ttools.run_kmeans(adata, 'X_diffmap', kwargs['kmeans_n_clusters'], n_jobs = kwargs['n_jobs'], random_state = kwargs['random_state'])\n\t# if kwargs['run_hdbscan']:\n\t# \ttools.run_hdbscan(adata, 'X_diffmap', n_jobs = kwargs['n_jobs'], min_cluster_size = kwargs['hdbscan_min_cluster_size'], min_samples = kwargs['hdbscan_min_samples'])\n\n\t# visualization\n\tif kwargs['run_net_tsne']:\n\t\ttools.run_net_tsne(adata, pca_key, n_jobs = kwargs['n_jobs'], perplexity = kwargs['tsne_perplexity'], random_state = kwargs['random_state'], knn_indices = kwargs['knn_indices'], first_K = kwargs['first_K'])\n\tif kwargs['run_net_fitsne']:\n\t\ttools.run_net_fitsne(adata, pca_key, n_jobs = kwargs['n_jobs'], perplexity = kwargs['tsne_perplexity'], random_state = kwargs['random_state'], knn_indices = kwargs['knn_indices'], first_K = kwargs['first_K'])\n\tif kwargs['run_net_umap']:\n\t\ttools.run_net_umap(adata, pca_key, n_neighbors = kwargs['umap_K'], min_dist = kwargs['umap_min_dist'], spread = kwargs['umap_spread'], random_state = kwargs['random_state'], knn_indices = kwargs['knn_indices'], first_K = kwargs['first_K'])\n\tif kwargs['run_net_fle']:\n\t\ttools.run_net_fle(adata, output_name, n_jobs = kwargs['n_jobs'], K = kwargs['fle_K'], n_steps = kwargs['fle_n_steps'], random_state = kwargs['random_state'], knn_indices = kwargs['knn_indices'], first_K = kwargs['first_K'])\n\n\tif kwargs['run_tsne']:\n\t\ttools.run_tsne(adata, pca_key, n_jobs = kwargs['n_jobs'], perplexity = kwargs['tsne_perplexity'], random_state = kwargs['random_state'])\n\tif kwargs['run_fitsne']:\n\t\ttools.run_fitsne(adata, pca_key, n_jobs = kwargs['n_jobs'], perplexity = kwargs['tsne_perplexity'], random_state = kwargs['random_state'])\n\tif kwargs['run_umap']:\n\t\ttools.run_umap(adata, pca_key, n_neighbors = kwargs['umap_K'], min_dist = kwargs['umap_min_dist'], spread = kwargs['umap_spread'], random_state = kwargs['random_state'])\n\t\t# if kwargs['run_umap_on_diffmap']:\n\t\t# \ttools.run_umap(adata, 'X_diffmap', n_neighbors = kwargs['umap_K'], min_dist = kwargs['umap_min_dist'], spread = kwargs['umap_spread'], random_state = kwargs['random_state'])\n\t\t# \tadata.obsm['X_umap_diffmap'] = adata.obsm['X_umap']\n\tif kwargs['run_fle']:\n\t\ttools.run_force_directed_layout(adata, output_name, n_jobs = kwargs['n_jobs'], K = kwargs['fle_K'], n_steps = kwargs['fle_n_steps'])\n\t\n\t# calculate diffusion-based 
pseudotime from roots\n\tif kwargs['pseudotime'] is not None:\n\t\tassert 'X_diffmap' in adata.obsm.keys()\n\t\ttools.run_pseudotime_calculation(adata, kwargs['pseudotime'])\n\n\t# merge cite-seq data and run t-SNE\n\tif kwargs['cite_seq']:\n\t\tadt_matrix = np.zeros((adata.shape[0], cdata.shape[1]), dtype = 'float32')\n\t\tidx = adata.obs_names.isin(cdata.obs_names)\n\t\tadt_matrix[idx, :] = cdata[adata.obs_names[idx],].X.toarray()\n\n\t\tvar_names = np.concatenate([adata.var_names, ['AD-' + x for x in cdata.var_names]])\n\n\t\tnew_data = anndata.AnnData(X = hstack([adata.X, csr_matrix(adt_matrix)], format = 'csr'), \n\t\t\tobs = adata.obs,\n\t\t\tobsm = adata.obsm,\n\t\t\tuns = adata.uns,\n\t\t\tvar = {'var_names' : var_names,\n\t\t\t\t 'gene_ids' : var_names,\n\t\t\t\t 'n_cells' : np.concatenate([adata.var['n_cells'].values, [0] * cdata.shape[1]]),\n\t\t\t\t 'percent_cells' : np.concatenate([adata.var['percent_cells'].values, [0.0] * cdata.shape[1]]), \n\t\t\t\t 'robust' : np.concatenate([adata.var['robust'].values, [False] * cdata.shape[1]])\n\t\t\t\t })\n\t\tif 'selected' in adata.var:\n\t\t\tnew_data.var['selected'] = np.concatenate([adata.var['selected'].values, [False] * cdata.shape[1]])\n\t\tnew_data.obsm['CITE-Seq'] = adt_matrix\n\t\tadata = new_data\n\t\tprint(\"ADT count matrix is attached.\")\n\n\t\ttools.run_tsne(adata, 'CITE-Seq', n_jobs = kwargs['n_jobs'], perplexity = kwargs['tsne_perplexity'], random_state = kwargs['random_state'], out_basis = 'citeseq_tsne')\n\t\tprint(\"Antibody embedding is done.\")\n\n\n\t# write out results\n\ttools.write_output(adata, output_name)\n\n\tif kwargs['seurat_compatible']:\n\t\tseurat_data = adata.copy()\n\t\tseurat_data.raw = raw_data\n\t\tseurat_data.uns['scale.data'] = adata_c.X\n\t\tseurat_data.uns['scale.data.rownames'] = adata_c.var_names.values\n\t\tseurat_data.write(output_name + \".seurat.h5ad\")\n\n\tif kwargs['output_loom']:\n\t\tadata.write_loom(output_name + \".loom\")\n\n\tprint(\"Results are written.\")\n","sub_path":"scCloud/scCloud/pipeline/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"556890133","text":"#!/usr/bin/env python3\n'''Collects tweet data and saves to .gz file.'''\n\nimport os\nimport json\nimport re\nimport gzip\nimport time\n\nimport TwitterKov\nfrom TwitterKov import error, warning, log\nfrom TwitterKov.util import twitter\n\nfrom http.client import IncompleteRead\nfrom sys import argv\n\noutFile = False\nLOGGING = True\nnumTweets = -1\ntry:\n\toutFile = argv[1]\n\tnumTweets = int(argv[2])\n\tLOGGING = argv[3].lower() in ('1', 'true', 'yes') # bool() of any non-empty string is always True\nexcept (IndexError, ValueError):\n\tpass\nif not outFile:\n\toutFile = '{}.gz'.format(int(time.time()))\n\twarning(\"No output file specified.\")\n\twarning(\"Will write to {}\".format(outFile))\nelif not outFile.endswith('.gz'):\n\toutFile += '.gz'\n\n# Load API keys and get request object\nfn = os.path.join(os.path.dirname(__file__), '../keys.json')\nwith open(fn) as f:\n\tkeys = json.load(f)\nsource = twitter.getSample(**keys)\n\nc = 0\nfn = 'data/'+ outFile\ntweets = []\nlog(\"Tweets written:\")\n\nwith gzip.open(fn, 'wb') as f:\n\twhile numTweets == -1 or c < numTweets:\n\t\ttry:\n\t\t\t# Load the tweet\n\t\t\traw_tweet = source.readline()\n\t\t\ttweet = json.loads(raw_tweet.decode(\"UTF-8\"))\n\t\t\t# Check if English\n\t\t\tif 'lang' in tweet and tweet['lang'] == 'en':\n\t\t\t\tif LOGGING and c % 10 == 0:\n\t\t\t\t\tlog('\\r\\t{}'.format(c))\n\t\t\t\tc += 1\n\t\t\t\tf.write(raw_tweet)\n\t\texcept KeyboardInterrupt as e:\n\t\t\tlog(\"Quitting.\")\n\t\t\tbreak\n\t\texcept IncompleteRead as e:\n\t\t\twarning(\"Incomplete Read\")\n\t\t\tsource = twitter.getSample(**keys)\n\tlog('Done')","sub_path":"TwitterKov/getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"281205659","text":"'''\nCreated on 02/10/2015\n\n@author: djchr\n'''\nimport serial, time, sys, serial.tools.list_ports\n\n\nsys.path.append(\"../api/\")\nfrom fableAPI import FableAPI \n\napi = FableAPI() \napi.setup()\ntime.sleep(1)\napi.terminate()\n\n","sub_path":"Robot_toolboxPy/Ismael_eclipse/Fable-master/pc-software/python/tests/robustConnect.py","file_name":"robustConnect.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"343606955","text":"import pytest\nfrom ui_test.user_flows import select_questionnaire, submit_form\n\n\ndef test_technical_help(browser):\n    select_questionnaire(browser, {\"step1\": \"help\"})\n    assert browser.is_text_present(\"Step 2 of 2\")\n    assert browser.is_text_present(\"Your details\")\n\n\n@pytest.mark.skip(reason=\"Re-enable this test once the sandbox is set up\")\ndef test_success_page_back_link(browser):\n    select_questionnaire(browser, {\"step1\": \"help\"})\n    submit_form(\n        browser,\n        {\n            \"message\": \"Automated test message\",\n            \"name\": \"Export goods\",\n            \"email\": \"somegov@email.com\",\n        },\n    )\n    browser.links.find_by_text(\"find information about exporting to the UK\").click()\n    assert browser.is_text_present(\n        \"What would you like to ask us about or give feedback on?\"\n    )\n","sub_path":"ui_test/specs/test_technical_help.py","file_name":"test_technical_help.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"109417828","text":"from app.models import db, Vault\n\n\ndef seed_vaults():\n    for i in range(1, 14):\n        vault = Vault(\n            user_id=i\n        )\n        db.session.add(vault)\n        db.session.commit()\n\ndef undo_vaults():\n    db.session.execute('TRUNCATE vaults RESTART IDENTITY CASCADE;')\n    db.session.commit()\n\n\n","sub_path":"app/seeds/vaults.py","file_name":"vaults.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"198473233","text":"from enum import Enum\nfrom typing import Callable, List, Optional\n\nfrom sqlalchemy.orm import Query\n\nfrom trailblazer.store.models import Analysis\n\n\ndef filter_analyses_by_id(analyses: Query, analysis_id: int, **kwargs) -> Query:\n \"\"\"Filter analyses by database entry id.\"\"\"\n return analyses.filter(Analysis.id == analysis_id)\n\n\nclass AnalysisFilter(Enum):\n \"\"\"Define Analysis filter functions.\"\"\"\n\n FILTER_BY_ID: Callable = filter_analyses_by_id\n\n\ndef apply_analysis_filter(\n analyses: Query,\n filter_functions: List[Callable],\n analysis_id: Optional[int] = None,\n) -> Query:\n \"\"\"Apply filtering functions and return filtered results.\"\"\"\n for function in filter_functions:\n analyses: Query = function(\n analyses=analyses,\n analysis_id=analysis_id,\n )\n return analyses\n","sub_path":"trailblazer/store/filters/analyses_filters.py","file_name":"analyses_filters.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"387580873","text":"def check_word(w):\n    # i: distance between the paired characters, j: index of the first character\n    for i in range(1,len(w)):\n        seen={}\n        for j in range(len(w)-i):\n            new_word=w[j]+w[j+i]\n            if(new_word in seen):\n                return False\n            seen[new_word]=1\n    return True\n\nwhile(True):\n    word=input()\n    if(word=='*'): break\n    if(len(word)<=2 or check_word(word)):\n        print(word,\"is surprising.\")\n    else:\n        print(word,\"is NOT surprising.\")\n","sub_path":"6. 구현(Simulation)/1972.py","file_name":"1972.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"72200558","text":"#Zooniverse upload module\nimport glob\nimport os\nimport pandas as pd\nfrom panoptes_client import Panoptes, Project, SubjectSet, Subject\nimport rasterio\nfrom rasterio.warp import calculate_default_transform, reproject, Resampling\nfrom PIL import Image\nimport numpy as np\n\nfrom deepforest import deepforest\nfrom deepforest import utilities\n\nimport tile_raster\nimport utils\n\ndef utm_project(path):\n\n    #Everglades UTM Zone\n    dst_crs = 'EPSG:32617'\n\n    with rasterio.open(path) as src:\n        transform, width, height = calculate_default_transform(\n            src.crs, dst_crs, src.width, src.height, *src.bounds)\n        kwargs = src.meta.copy()\n        kwargs.update({\n            'crs': dst_crs,\n            'transform': transform,\n            'width': width,\n            'height': height\n        })\n\n        dest_name = \"{}_projected.tif\".format(os.path.splitext(path)[0])\n\n        with rasterio.open(dest_name, 'w', **kwargs) as dst:\n            for i in range(1, src.count + 1):\n                reproject(\n                    source=rasterio.band(src, i),\n                    destination=rasterio.band(dst, i),\n                    src_transform=src.transform,\n                    src_crs=src.crs,\n                    dst_transform=transform,\n                    dst_crs=dst_crs,\n                    resampling=Resampling.nearest)\n\n    return dest_name\n\ndef is_white(path):\n    d = rasterio.open(path)\n    numpy_image = d.read()\n    left, bottom, right, top = d.bounds\n\n    assert numpy_image.shape[0] == 3\n\n    #Check if image is all white\n    img_reshaped = numpy_image.reshape(-1, 3)\n    white = np.sum(img_reshaped == [255,255,255])/img_reshaped.size\n\n    if white > 0.55:\n        return True\n    else:\n        return False\n\ndef find_files(path):\n    \"\"\"Search and filter images\"\"\"\n    images = {}\n    image_paths = glob.glob(os.path.join(path, \"*.tif\"))\n    counter = 1\n\n    #extract site name\n    site_name = os.path.basename(path)\n\n    for i in image_paths:\n        #Load and get metadata\n        d = rasterio.open(i)\n        numpy_image = d.read()\n        left, bottom, right, top = d.bounds\n\n        #Write as a png\n        basename = os.path.splitext(i)[0]\n        png_name = \"{}.png\".format(basename)\n        img = Image.open(i)\n        img.save(png_name)\n\n        #Create dict\n        #crs = d.crs.to_epsg()\n        crs = None\n        images[png_name] = {\"subject_reference\":counter, \"bounds\":[left,bottom,right,top],\"crs\":crs,\"site\":site_name,\"resolution\":d.res,\"filename\":png_name}\n        counter += 1\n\n    return images\n\n#Create manifests\ndef create_subject_set(everglades_watch, name=\"demo\"):\n    subject_set = SubjectSet()\n    subject_set.links.project = everglades_watch\n    subject_set.display_name = name\n    subject_set.save()\n\n    return subject_set\n\ndef upload(subject_set, images, everglades_watch):\n    \"\"\"Assign images to project\"\"\"\n    new_subjects = []\n\n    print(\"Uploading {} images\".format(len(images)))\n    for filename, metadata in images.items():\n        subject = Subject()\n\n        subject.links.project = everglades_watch\n        subject.add_location(filename)\n\n        subject.metadata.update(metadata)\n\n        #Trigger upload\n        subject.save()\n        new_subjects.append(subject)\n    subject_set.add(new_subjects)\n\ndef screen_blanks(images, model):\n    #Load detection model\n    model = deepforest.deepforest(weights=model)\n    #model.classes_file = utilities.create_classes(\"/orange/ewhite/everglades/Zooniverse/parsed_images/test.csv\") \n    #model.read_classes()\n    screened_images = {}\n    for filename, metadata in images.items():\n        boxes = model.predict_image(filename, return_plot=False)\n\n        #small score filter\n        boxes = boxes[boxes.score > 0.4]\n        \n        if not boxes.empty:\n            #if any([x in boxes.label.unique() for x in [\"Great Blue Heron\",\"Snowy Egret\",\"Wood Stork\",\"Roseate Spoonbill\"]]):\n            screened_images[filename] = metadata\n        else:\n            print(\"Remove {}, screened empty\".format(filename))\n\n    return screened_images\n\ndef main(path, everglades_watch, model=None, save_dir=\"/orange/ewhite/everglades/Zooniverse/\"):\n    \"\"\"Args:\n        path: a .tif to run\n    \"\"\"\n    #Create new directory in save_dir\n    \n    basename = os.path.splitext(os.path.basename(path))[0]\n    event = os.path.basename(os.path.dirname(os.path.dirname(path))).replace(\" \",\"\")\n    basename = \"{}_{}\".format(event,basename)\n    dirname = \"{}/{}\".format(save_dir,basename)\n\n    try:\n        os.mkdir(dirname)\n    except FileExistsError:\n        pass\n        #raise ValueError(\"dirname: {} exists)\".format(dirname))\n\n    #Crop tif\n    #Project from longlat to utm\n    #check if exists\n    #projected_raster_path = \"{}_projected.tif\".format(os.path.splitext(path)[0])\n    #if not os.path.exists(projected_raster_path):\n    #projected_raster_path = utm_project(path)\n\n    saved_file = tile_raster.run(path=path, save_dir=dirname)\n    print(\"Created cropped files at {}\".format(saved_file))\n\n    #Generate metadata\n    images = find_files(saved_file)\n\n    #Screen for blanks\n    if model:\n        screened_images = screen_blanks(images, model)\n        print(\"{} images ready for upload\".format(len(screened_images)))\n    else:\n        screened_images = images\n    \n    #Create a new subject set\n    subject_set = create_subject_set(name=\"{}\".format(basename), everglades_watch=everglades_watch)\n\n    #Upload\n    upload(subject_set, screened_images, everglades_watch)\n\n    return saved_file\n\nif __name__ == \"__main__\":\n\n    #auth\n    everglades_watch = utils.connect()\n    \n    model = \"/orange/ewhite/everglades/Zooniverse/predictions/20210224_121421.h5\"\n\n    #Currently debugging with just one site\n    paths = glob.glob(\"/orange/ewhite/everglades/WadingBirds2020/Raw/Cypress City_03_25_2020/Mapping Photos/*.JPG\")\n    paths = [x for x in paths if \"projected\" not in x]\n    for path in paths:\n        print(path)\n        saved_file = main(path, everglades_watch, model)","sub_path":"Zooniverse/manifest.py","file_name":"manifest.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"645866239","text":"list1 = [1, 23, 34, 54, 213, 2, 6]\nlist2 = [1, 23, 4, 2, 3, 5]\n\ncommon = []\nfor x in list1:\n for y in list2:\n if x == y:\n common.append(x)\n\n\nprint(common)\n\n","sub_path":"LIST/37.py","file_name":"37.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"441630476","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated in Mar 2019\n@author: bbujfalussy - ubalazs317@gmail.com\nA script to read behavioral log files in mouse in vivo virtual reality experiments\n\n\"\"\"\n\nimport numpy as np\nfrom string import *\nimport datetime\nimport time\nimport os\nimport pickle\nimport scipy.stats\nfrom scipy.interpolate import interp1d \nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon\nfrom matplotlib.collections import PatchCollection\nimport sys\nfrom sys import version_info\nimport csv\n\nfrom Stages import *\nfrom Corridors import *\n\ndef nan_divide(a, b, where=True):\n    'division function that returns np.nan where the division is not defined'\n    N = len(a)\n    x = np.zeros(N)\n    x.fill(np.nan)\n    x = np.divide(a, b, out=x, where=where)\n    return x\n\ndef nan_add(a, b):\n    'addition function that handles NANs by replacing them with zero - USE with CAUTION!'\n    a[np.isnan(a)] = 0\n    b[np.isnan(b)] = 0\n    x = np.array(a + b)\n    return x\n\nclass Lap_Data:\n    'common base class for individual laps'\n\n    def __init__(self, name, lap, laptime, position, lick_times, reward_times, corridor, mode, actions, corridor_list, dt=0.01, printout=False):\n        self.name = name\n        self.lap = lap\n\n        self.raw_time = laptime\n        self.raw_position = position\n        self.lick_times = lick_times\n        self.reward_times = reward_times\n        self.corridor = corridor # the ID of the corridor in the given stage; This indexes the corridors in the vector called self.corridors\n        self.corridor_list = corridor_list \n        self.mode = mode # 1 if all elements are recorded in 'Go' mode\n        self.actions = actions\n        self.speed_threshold = 5 ## cm/s; 106.5 cm = 3500 roxels, so roxel/s * 106.5/3500 = cm/s\n        self.corridor_length_roxel = (self.corridor_list.corridors[self.corridor].length - 1024.0) / (7168.0 - 1024.0) * 3500\n\n        self.speed_factor = 106.5 / 3500 ## constant to convert distance from pixel to cm\n        self.corridor_length_cm = self.corridor_length_roxel * self.speed_factor # cm\n\n        self.zones = np.vstack([np.array(self.corridor_list.corridors[self.corridor].reward_zone_starts), np.array(self.corridor_list.corridors[self.corridor].reward_zone_ends)])\n        self.n_zones = np.shape(self.zones)[1]\n        self.preZoneRate = [None, None] # only if 1 lick zone; Compare the 210 roxels just before the zone with the preceding 210 \n\n        self.dt = 0.01 # resampling frequency = 100 Hz\n\n        ####################################################################\n        ## resample time and position with a uniform 100 Hz\n        nbins = int(round(self.corridor_length_roxel / 70))\n        self.bincenters = np.arange(0, self.corridor_length_roxel, 70) + 70 / 2.0\n        \n        if (len(self.raw_time) > 2):\n            F = interp1d(self.raw_time,self.raw_position) \n            start_time = np.ceil(self.raw_time.min()/self.dt)*self.dt\n            end_time = np.floor(self.raw_time.max()/self.dt)*self.dt\n            Ntimes = int(round((end_time - start_time) / self.dt)) + 1\n            self.laptime = np.linspace(start_time, end_time, Ntimes)\n            ppos = F(self.laptime)\n    \n            self.lick_position = F(self.lick_times)\n            self.reward_position = F(self.reward_times)\n    \n            ## smooth the position data with a 50 ms Gaussian kernel\n            # sdfilt = 0.05\n            # xfilt = np.arange(-4*sdfilt, 4*sdfilt+self.dt, self.dt)\n            # filt = np.exp(-(xfilt ** 2) / (2 * (sdfilt**2)))\n            # filt = filt / sum(filt)\n\n            # dx1 = ppos[1] - ppos[0]\n            # dxx1 = ppos[0] - np.arange(20, 0, -1) * dx1\n\n            # dx2 = ppos[-1] - ppos[-2]\n            # dxx2 = ppos[-1] + np.arange(20, 0, -1) * dx2\n\n            # pppos = 
np.hstack([dxx1, ppos, dxx2])\n # pppos = np.hstack([np.repeat(ppos[0], 20), ppos, np.repeat(ppos[-1], 20)])\n # smooth_position = np.convolve(pppos, filt, mode='valid')\n self.smooth_position = ppos\n \n ## calculate the smoothed speed \n speed = np.diff(self.smooth_position) * self.speed_factor / self.dt # roxel [=rotational pixel] / s \n speed_first = 2 * speed[0] - speed[1] # linear extrapolation: x1 - (x2 - x1)\n self.speed = np.hstack([speed_first, speed])\n \n ####################################################################\n ## calculate the lick-rate and the average speed versus location \n bin_counts = np.zeros(nbins)\n for pos in self.smooth_position:\n bin_number = int(pos // 70)\n bin_counts[bin_number] += 1\n self.T_pos = bin_counts * self.dt\n \n lbin_counts = np.zeros(nbins)\n for lpos in self.lick_position:\n lbin_number = int(lpos // 70)\n lbin_counts[lbin_number] += 1\n self.N_licks = lbin_counts\n self.lick_rate = nan_divide(self.N_licks, self.T_pos, where=(self.T_pos > 0.025))\n \n total_speed = np.zeros(nbins)\n for i in range(len(self.smooth_position)):\n ii = int(self.smooth_position[i] // 70)\n total_speed[ii] = total_speed[ii] + self.speed[i]\n total_speed = total_speed * self.dt\n self.ave_speed = nan_divide(total_speed, self.T_pos, where=(self.T_pos > 0.025))\n \n ####################################################################\n ## Calculate the lick rate befor the reward zone - anticipatory licks 210 roxels before zone start\n ## only when the number of zones is 1!\n \n if (self.n_zones == 1):\n \n zone_start = int(self.zones[0][0] * self.corridor_length_roxel)\n lz_posbins = [0, zone_start-420, zone_start-210, zone_start, self.corridor_length_roxel]\n \n lz_bin_counts = np.zeros(4)\n for pos in self.smooth_position:\n bin_number = [ n for n,i in enumerate(lz_posbins) if i>=pos ][0] - 1\n lz_bin_counts[bin_number] += 1\n T_lz_pos = lz_bin_counts * self.dt\n \n lz_lbin_counts = np.zeros(4)\n for lpos in self.lick_position:\n lbin_number = [ n for n,i in enumerate(lz_posbins) if i>=lpos ][0] - 1\n lz_lbin_counts[lbin_number] += 1\n lz_lick_rate = nan_divide(lz_lbin_counts, T_lz_pos, where=(T_lz_pos>0.025))\n self.preZoneRate = [lz_lick_rate[1], lz_lick_rate[2]]\n else:\n self.lick_position = lick_times\n self.reward_position = reward_times\n self.smooth_position = position\n self.speed = np.zeros(len(position))\n self.T_pos = np.zeros(nbins)\n self.N_licks = np.zeros(nbins)\n self.ave_speed = np.zeros(nbins)\n self.lick_rate = np.zeros(nbins)\n self.preZoneRate = np.zeros(2)\n \n\n def plot_tx(self):\n cmap = plt.cm.get_cmap('jet') \n plt.figure(figsize=(6,4))\n plt.plot(self.laptime, self.smooth_position, c=cmap(50))\n plt.plot(self.raw_time, self.raw_position, c=cmap(90))\n\n plt.scatter(self.lick_times, np.repeat(self.smooth_position.min(), len(self.lick_times)), marker=\"|\", s=100, c=cmap(180))\n plt.scatter(self.reward_times, np.repeat(self.smooth_position.min()+100, len(self.reward_times)), marker=\"|\", s=100, c=cmap(230))\n plt.ylabel('position')\n plt.xlabel('time (s)')\n plot_title = 'Mouse: ' + self.name + ' position in lap ' + str(self.lap) + ' in corridor ' + str(self.corridor)\n plt.title(plot_title)\n plt.ylim(0, self.corridor_length_roxel)\n\n plt.show(block=False)\n \n # time = mm.Laps[55].time\n # smooth_position = mm.Laps[55].smooth_position\n # lick_times = mm.Laps[55].lick_times\n # reward_times = mm.Laps[55].reward_times\n # lap = mm.Laps[55].lap\n # corridor = mm.Laps[55].corridor\n # lick_rate = mm.Laps[55].lick_rate\n # 
bincenters = np.arange(0, 3500, 175) + 175 / 2.0\n\n # plt.figure(figsize=(6,4))\n # plt.plot(laptime, smooth_position, c='g')\n\n # plt.scatter(lick_times, np.repeat(smooth_position.min(), len(lick_times)), marker=\"|\", s=100)\n # plt.scatter(reward_times, np.repeat(smooth_position.min()+100, len(reward_times)), marker=\"|\", s=100, c='r')\n # plt.ylabel('position')\n # plt.xlabel('time (s)')\n # plot_title = 'Mouse: ' + name + ' position in lap ' + str(lap) + ' in corridor ' + str(corridor)\n # plt.title(plot_title)\n\n # plt.show(block=False)\n\n def plot_xv(self):\n cmap = plt.cm.get_cmap('jet') \n\n fig, ax = plt.subplots(figsize=(6,4))\n plt.plot(self.smooth_position, self.speed, c=cmap(80))\n plt.step(self.bincenters, self.ave_speed, where='mid', c=cmap(30))\n plt.scatter(self.lick_position, np.repeat(5, len(self.lick_position)), marker=\"|\", s=100, c=cmap(180))\n plt.scatter(self.reward_position, np.repeat(10, len(self.reward_position)), marker=\"|\", s=100, c=cmap(230))\n plt.ylabel('speed (cm/s)')\n plt.ylim([min(0, self.speed.min()), max(self.speed.max(), 30)])\n plt.xlabel('position')\n plot_title = 'Mouse: ' + self.name + ' speed in lap ' + str(self.lap) + ' in corridor ' + str(self.corridor)\n plt.title(plot_title)\n\n\n bottom, top = plt.ylim()\n left = self.zones[0,0] * self.corridor_length_roxel\n right = self.zones[1,0] * self.corridor_length_roxel\n\n polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n ax.add_patch(polygon)\n if (self.n_zones > 1):\n for i in range(1, np.shape(self.zones)[1]):\n left = self.zones[0,i] * self.corridor_length_roxel\n right = self.zones[1,i] * self.corridor_length_roxel\n polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n ax.add_patch(polygon)\n\n ax2 = plt.twinx()\n ax2.step(self.bincenters, self.lick_rate, where='mid', c=cmap(200), linewidth=1)\n ax2.set_ylabel('lick rate (lick/s)', color=cmap(200))\n ax2.tick_params(axis='y', labelcolor=cmap(200))\n ax2.set_ylim([-1,max(2*np.nanmax(self.lick_rate), 20)])\n\n plt.show(block=False) \n\n\n # cmap = plt.cm.get_cmap('jet') \n # smooth_position = mm.Laps[55].smooth_position\n # speed = mm.Laps[55].speed\n # lick_position = mm.Laps[55].lick_position\n # lick_times = mm.Laps[55].lick_times\n # reward_position = mm.Laps[55].reward_position\n # reward_times = mm.Laps[55].reward_times\n # lap = mm.Laps[55].lap\n # corridor = mm.Laps[55].corridor\n # lick_rate = mm.Laps[55].lick_rate\n # ave_speed = mm.Laps[55].ave_speed\n # zones = mm.Laps[0].zones\n # bincenters = np.arange(0, 3500, 175) + 175 / 2.0\n\n # fig, ax = plt.subplots(figsize=(6,4))\n # ax.plot(smooth_position, speed, c=cmap(80))\n # ax.plot(bincenters, ave_speed, c=cmap(30))\n # ax.scatter(lick_position, np.repeat(speed.min(), len(lick_position)), marker=\"|\", s=100, c=cmap(180))\n # ax.scatter(reward_position, np.repeat(speed.min(), len(reward_position)), marker=\"|\", s=100, c=cmap(230))\n # plt.ylabel('speed (roxel/s)')\n # plt.xlabel('position')\n # plot_title = 'Mouse: ' + name + ' speed in lap ' + str(lap) + ' in corridor ' + str(corridor)\n # plt.title(plot_title)\n\n # bottom, top = plt.ylim()\n # left = zones[0,0] * 3500\n # right = zones[1,0] * 3500\n\n # polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n # if (np.shape(zones)[1] > 1):\n # for i in range(1, np.shape(zones)[1]):\n # left = zones[0,i] * 
3500\n # right = zones[1,i] * 3500\n # polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n # ax.add_patch(polygon)\n\n\n # ax2 = plt.twinx()\n # ax2.plot(bincenters, lick_rate, c=cmap(200), linewidth=1)\n # ax2.set_ylabel('lick rate', color=cmap(200))\n # ax2.tick_params(axis='y', labelcolor=cmap(200))\n # ax2.set_ylim([-1,2*max(lick_rate)])\n\n # plt.show(block=False) \n\n\n def plot_txv(self):\n cmap = plt.cm.get_cmap('jet') \n fig, (ax_top, ax_bottom) = plt.subplots(2, 1, figsize=(6,6))\n\n ## first, plot position versus time\n ax_top.plot(self.laptime, self.smooth_position, c=cmap(50))\n ax_top.plot(self.raw_time, self.raw_position, c=cmap(90))\n\n ax_top.scatter(self.lick_times, np.repeat(200, len(self.lick_times)), marker=\"|\", s=100, c=cmap(180))\n ax_top.scatter(self.reward_times, np.repeat(400, len(self.reward_times)), marker=\"|\", s=100, c=cmap(230))\n ax_top.set_ylabel('position')\n ax_top.set_xlabel('time (s)')\n plot_title = 'Mouse: ' + self.name + ' position and speed in lap ' + str(self.lap) + ' in corridor ' + str(self.corridor)\n ax_top.set_title(plot_title)\n ax_top.set_ylim(0, self.corridor_length_roxel + 100)\n\n\n ## next, plot speed versus position\n ax_bottom.plot(self.smooth_position, self.speed, c=cmap(80))\n ax_bottom.step(self.bincenters, self.ave_speed, where='mid', c=cmap(30))\n ax_bottom.scatter(self.lick_position, np.repeat(5, len(self.lick_position)), marker=\"|\", s=100, c=cmap(180))\n ax_bottom.scatter(self.reward_position, np.repeat(10, len(self.reward_position)), marker=\"|\", s=100, c=cmap(230))\n ax_bottom.set_ylabel('speed (cm/s)')\n ax_bottom.set_xlabel('position')\n ax_bottom.set_ylim([min(0, self.speed.min()), max(self.speed.max(), 30)])\n\n bottom, top = plt.ylim()\n left = self.zones[0,0] * self.corridor_length_roxel\n right = self.zones[1,0] * self.corridor_length_roxel\n\n polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n ax_bottom.add_patch(polygon)\n if (self.n_zones > 1):\n for i in range(1, np.shape(self.zones)[1]):\n left = self.zones[0,i] * self.corridor_length_roxel\n right = self.zones[1,i] * self.corridor_length_roxel\n polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n ax_bottom.add_patch(polygon)\n\n ax2 = ax_bottom.twinx()\n ax2.step(self.bincenters, self.lick_rate, where='mid', c=cmap(180), linewidth=1)\n ax2.set_ylabel('lick rate (lick/s)', color=cmap(180))\n ax2.tick_params(axis='y', labelcolor=cmap(180))\n ax2.set_ylim([-1,max(2*np.nanmax(self.lick_rate), 20)])\n\n plt.show(block=False) \n\n\n\nclass anticipatory_Licks:\n 'simple class for containing anticipatory licking data'\n def __init__(self, baseline_rate, anti_rate, corridor):\n nan_rates = np.isnan(baseline_rate) + np.isnan(anti_rate)\n baseline_rate = baseline_rate[np.logical_not(nan_rates)]\n anti_rate = anti_rate[np.logical_not(nan_rates)]\n self.baseline = baseline_rate\n self.anti_rate = anti_rate\n\n self.m_base = np.mean(self.baseline)\n self.m_anti = np.mean(self.anti_rate)\n if (self.m_base < self.m_anti):\n greater = True\n else:\n greater = False\n self.corridor = int(corridor)\n self.test = scipy.stats.wilcoxon(self.baseline, self.anti_rate)\n self.anti = False\n if ((self.test[1] < 0.01 ) & (greater == True)):\n self.anti = True\n\n\nclass Session:\n 'common base class for low level position and licksensor data in a given session'\n\n def 
__init__(self, datapath, date_time, name, task, sessionID=-1, printout=False):\n self.name = name\n self.stage = 0\n self.sessionID = sessionID\n self.stages = []\n\n stagefilename = datapath + task + '_stages.pkl'\n input_file = open(stagefilename, 'rb')\n if version_info.major == 2:\n self.stage_list = pickle.load(input_file)\n elif version_info.major == 3:\n self.stage_list = pickle.load(input_file, encoding='latin1')\n input_file.close()\n\n corridorfilename = datapath + task + '_corridors.pkl'\n input_file = open(corridorfilename, 'rb')\n if version_info.major == 2:\n self.corridor_list = pickle.load(input_file)\n elif version_info.major == 3:\n self.corridor_list = pickle.load(input_file, encoding='latin1')\n input_file.close()\n\n self.Laps = []\n self.n_laps = 0\n\n self.get_stage(datapath, date_time, name, task)\n self.corridors = np.hstack([0, np.array(self.stage_list.stages[self.stage].corridors)])\n\n self.get_lapdata(datapath, date_time, name, task)\n self.test_anticipatory()\n\n def get_lapdata(self, datapath, date_time, name, task):\n\n time_array=[]\n lap_array=[]\n maze_array=[]\n position_array=[]\n mode_array=[]\n lick_array=[]\n action=[]\n\n data_log_file_string=datapath + 'data/' + name + '_' + task + '/' + date_time + '/' + date_time + '_' + name + '_' + task + '_ExpStateMashineLog.txt'\n data_log_file=open(data_log_file_string)\n log_file_reader=csv.reader(data_log_file, delimiter=',')\n next(log_file_reader, None)#skip the headers\n for line in log_file_reader:\n time_array.append(float(line[0]))\n lap_array.append(int(line[1]))\n maze_array.append(int(line[2]))\n position_array.append(int(line[3]))\n mode_array.append(line[6] == 'Go')\n lick_array.append(line[9] == 'TRUE')\n action.append(str(line[14]))\n\n laptime = np.array(time_array)\n pos = np.array(position_array)\n lick = np.array(lick_array)\n lap = np.array(lap_array)\n maze = np.array(maze_array)\n mode = np.array(mode_array)\n N_0lap = 0 # Counting the non-valid laps\n self.n_laps = 0\n\n for i_lap in np.unique(lap):\n y = lap == i_lap # index for the current lap\n\n mode_lap = np.prod(mode[y]) # 1 if all elements are recorded in 'Go' mode\n\n maze_lap = np.unique(maze[y])\n if (len(maze_lap) == 1):\n corridor = self.corridors[int(maze_lap)] # the maze_lap is the index of the available corridors in the given stage\n else:\n corridor = -1\n\n if (corridor > 0):\n t_lap = laptime[y]\n pos_lap = pos[y]\n \n lick_lap = lick[y]\n t_licks = t_lap[lick_lap]\n \n istart = np.where(y)[0][0]\n iend = np.where(y)[0][-1] + 1\n action_lap = action[istart:iend]\n \n reward_indices = [j for j, x in enumerate(action_lap) if x == \"TrialReward\"]\n t_reward = t_lap[reward_indices]\n \n actions = []\n for j in range(len(action_lap)):\n if not((action_lap[j]) in ['No', 'TrialReward']):\n actions.append([t_lap[j], action_lap[j]])\n \n \n # sessions.append(Lap_Data(name, i, t_lap, pos_lap, t_licks, t_reward, corridor, mode_lap, actions))\n self.Laps.append(Lap_Data(self.name, self.n_laps, t_lap, pos_lap, t_licks, t_reward, corridor, mode_lap, actions, self.corridor_list))\n self.n_laps = self.n_laps + 1\n else:\n N_0lap = N_0lap + 1 # grey zone (corridor == 0) or invalid lap (corridor = -1)\n\n def get_stage(self, datapath, date_time, name, task):\n action_log_file_string=datapath + 'data/' + name + '_' + task + '/' + date_time + '/' + date_time + '_' + name + '_' + task + '_UserActionLog.txt'\n action_log_file=open(action_log_file_string)\n log_file_reader=csv.reader(action_log_file, delimiter=',')\n next(log_file_reader, 
None)#skip the headers\n        for line in log_file_reader:\n            if (line[1] == 'Stage'):\n                self.stage = int(round(float(line[2])))\n\n    def test_anticipatory(self):\n        corridor_ids = np.zeros(self.n_laps)\n        for i in range(self.n_laps):\n            corridor_ids[i] = self.Laps[i].corridor # the true corridor ID\n        corridor_types = np.unique(corridor_ids)\n        nrow = len(corridor_types)\n        self.anticipatory = []\n\n        for row in range(nrow):\n            ids = np.where(corridor_ids == corridor_types[row])\n            n_laps = np.shape(ids)[1]\n            n_zones = np.shape(self.Laps[ids[0][0]].zones)[1]\n            if (n_zones == 1):\n                lick_rates = np.zeros([2,n_laps])\n                k = 0\n                for lap in np.nditer(ids):\n                    lick_rates[:,k] = self.Laps[lap].preZoneRate\n                    k = k + 1\n                self.anticipatory.append(anticipatory_Licks(lick_rates[0,:], lick_rates[1,:], corridor_types[row]))\n\n\n    def plot_session(self):\n        ## find the number of different corridors\n        if (self.n_laps > 0):\n            corridor_ids = np.zeros(self.n_laps)\n            for i in range(self.n_laps):\n                corridor_ids[i] = self.Laps[i].corridor\n            corridor_types = np.unique(corridor_ids)\n            nrow = len(corridor_types)\n            nbins = len(self.Laps[0].bincenters)\n            cmap = plt.cm.get_cmap('jet') \n\n            rowHeight = 2\n            if (nrow > 4):\n                rowHeight = 1.5\n            fig, axs = plt.subplots(nrows=nrow, ncols=1, figsize=(8,rowHeight*nrow), squeeze=False)\n            # plt.figure(figsize=(5,2*nrow))\n            speed_color = cmap(30)\n            speed_color_trial = (speed_color[0], speed_color[1], speed_color[2], (0.05))\n\n            lick_color = cmap(200)\n            lick_color_trial = (lick_color[0], lick_color[1], lick_color[2], (0.05))\n\n            for row in range(nrow):\n                # ax = plt.subplot(nrow, 1, row+1)\n                ids = np.where(corridor_ids == corridor_types[row])\n                avespeed = np.zeros(nbins)\n                n_lap_bins = np.zeros(nbins) # number of laps in a given bin (data might be NAN for some laps)\n                n_laps = np.shape(ids)[1]\n                maxspeed = 10\n                for lap in np.nditer(ids):\n                    axs[row,0].step(self.Laps[lap].bincenters, self.Laps[lap].ave_speed, where='mid', c=speed_color_trial)\n                    nans_lap = np.isnan(self.Laps[lap].ave_speed)\n                    avespeed = nan_add(avespeed, self.Laps[lap].ave_speed)\n                    n_lap_bins = n_lap_bins + np.logical_not(nans_lap)\n                    if (max(self.Laps[lap].ave_speed) > maxspeed): maxspeed = max(self.Laps[lap].ave_speed)\n                maxspeed = min(maxspeed, 60)\n                \n                avespeed = nan_divide(avespeed, n_lap_bins, n_lap_bins > 0)\n                axs[row,0].step(self.Laps[lap].bincenters, avespeed, where='mid', c=speed_color)\n                axs[row,0].set_ylim([-1,1.2*maxspeed])\n\n                if (row == 0):\n                    if (self.sessionID >= 0):\n                        plot_title = 'session:' + str(self.sessionID) + ': ' + str(int(n_laps)) + ' laps in corridor ' + str(int(corridor_types[row]))\n                    else:\n                        plot_title = str(int(n_laps)) + ' laps in corridor ' + str(int(corridor_types[row])) \n                else:\n                    plot_title = str(int(n_laps)) + ' laps in corridor ' + str(int(corridor_types[row]))\n\n                if (self.Laps[lap].zones.shape[1] > 0):\n                    bottom, top = axs[row,0].get_ylim()\n                    left = self.Laps[lap].zones[0,0] * self.Laps[lap].corridor_length_roxel\n                    right = self.Laps[lap].zones[1,0] * self.Laps[lap].corridor_length_roxel\n\n                    polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n                    axs[row,0].add_patch(polygon)\n                    n_zones = np.shape(self.Laps[lap].zones)[1]\n                    if (n_zones > 1):\n                        for i in range(1, np.shape(self.Laps[lap].zones)[1]):\n                            left = self.Laps[lap].zones[0,i] * self.Laps[lap].corridor_length_roxel\n                            right = self.Laps[lap].zones[1,i] * self.Laps[lap].corridor_length_roxel\n                            polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n                            axs[row,0].add_patch(polygon)\n                # else: ## test for lick rate changes before the zone\n                #     self.anticipatory = np.zeros([2,n_laps])\n                #     k = 0\n                #     for lap in np.nditer(ids):\n                #         self.anticipatory[:,k] = self.Laps[lap].preZoneRate\n                #         k = k + 1\n                else: # we look for anticipatory licking tests\n                    P_statement = ', anticipatory P value not tested'\n                    for k in range(len(self.anticipatory)):\n                        if (self.anticipatory[k].corridor == corridor_types[row]):\n                            P_statement = ', anticipatory P = ' + str(round(self.anticipatory[k].test[1],5))\n                    plot_title = plot_title + P_statement\n\n                axs[row,0].set_title(plot_title)\n\n                ax2 = axs[row,0].twinx()\n                n_lap_bins = np.zeros(nbins) # number of laps in a given bin (data might be NAN for some laps)\n                maxrate = 10\n                avelick = np.zeros(nbins)\n                for lap in np.nditer(ids):\n                    ax2.step(self.Laps[lap].bincenters, self.Laps[lap].lick_rate, where='mid', c=lick_color_trial, linewidth=1)\n                    nans_lap = np.isnan(self.Laps[lap].lick_rate)\n                    avelick = nan_add(avelick, self.Laps[lap].lick_rate)\n                    n_lap_bins = n_lap_bins + np.logical_not(nans_lap)\n                    if (max(self.Laps[lap].lick_rate) > maxrate): maxrate = max(self.Laps[lap].lick_rate)\n                maxrate = min(maxrate, 20)\n\n                avelick = nan_divide(avelick, n_lap_bins, n_lap_bins > 0)\n                ax2.step(self.Laps[lap].bincenters, avelick, where='mid', c=lick_color)\n                ax2.set_ylim([-1,1.2*maxrate])\n\n\n                if (row==(nrow-1)):\n                    axs[row,0].set_ylabel('speed (cm/s)', color=speed_color)\n                    axs[row,0].tick_params(axis='y', labelcolor=speed_color)\n                    ax2.set_ylabel('lick rate (lick/s)', color=lick_color)\n                    ax2.tick_params(axis='y', labelcolor=lick_color)\n                    axs[row,0].set_xlabel('position (roxel)')\n                else:\n                    axs[row,0].set_xticklabels([])\n                    axs[row,0].tick_params(axis='y', labelcolor=speed_color)\n                    ax2.tick_params(axis='y', labelcolor=lick_color)\n\n            plt.show(block=False)\n        else:\n            fig = plt.figure(figsize=(8,3))\n            # plt.scatter([-4, -3, -2], [2,3,4])\n            plt.title('No data to show')\n            plt.show(block=False)\n\n\n\n\n# #load trigger log \n# datapath = '/Users/ubi/Projects/KOKI/VR/MiceData/'\n# #datapath = 'C:\\Users\\LN-Treadmill\\Desktop\\MouseData\\\\'\n# #datapath = 'C:\\Users\\Treadmill\\Desktop\\RitaNy_MouseData\\\\'\n\n# # date_time = '2019-11-28_19-37-04' # this was OK!\n# date_time = '2019-11-20_08-15-42' # this was not working!\n# # date_time = '2019-11-28_19-01-06' # this was OK!\n# # date_time = '2019-11-27_09-31-56' # this was OK!\n# # date_time = '2019-11-22_13-51-39' # this was OK!\n# name = 'th'\n# task = 'TwoMazes'\n# mm = Session(datapath, date_time, name, task)\n# #\n# #\n# ## # mm.Laps[181].plot_tx()\n# ## # mm.Laps[12].plot_xv()\n# mm.Laps[25].plot_txv()\n# mm.plot_session()\n\n\n# mm.Laps[17].plot_tx()\n# mm.Laps[17].plot_xv()\n# mm.Laps[55].plot_tx()\n# mm.Laps[55].plot_xv()\n\n\n# for i in range(65):\n# mm.Laps[i].plot_tx()\n# raw_input(\"Press Enter to continue...\")\n\n","sub_path":"LogAnal.py","file_name":"LogAnal.py","file_ext":"py","file_size_in_byte":28603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"638925383","text":"import argparse\nimport xnmt.vocabs as vocabs\nimport xnmt.input_readers as input_readers\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input\")\nparser.add_argument(\"surface_vocab_file\")\nparser.add_argument(\"nt_vocab_file\")\nparser.add_argument(\"edg_vocab_file\")\nargs = parser.parse_args()\n\nreader = input_readers.CoNLLToRNNGActionsReader(surface_vocab=vocabs.Vocab(vocab_file=args.surface_vocab_file),\n nt_vocab=vocabs.Vocab(vocab_file=args.nt_vocab_file),\n edg_vocab=vocabs.Vocab(vocab_file=args.edg_vocab_file))\n\nfor tree in reader.read_sents(args.input):\n print(str(tree) + \" NONE()\")\n","sub_path":"script/parse/conll_to_actions.py","file_name":"conll_to_actions.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"344963902","text":"import argparse\nimport os\nfrom azureml.core import Run\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport scispacy\nimport spacy\nimport en_core_sci_lg\nfrom spacy_langdetect import LanguageDetector\n\n\nclass LangDetect:\n    def __init__(self):\n        self.run = Run.get_context()\n        self.args = None\n        self.df = None\n        self.articles_in = 0\n        self.articles_non_en = 0\n        self.nlp = None\n\n        self.get_runtime_arguments()\n\n        self.load_dataset()\n        self.set_nlp_model()\n\n        self.collect_metrics_pre()\n        self.lang_detect()\n        self.log_metrics_post()\n\n        self.output_dataset()\n\n    def get_runtime_arguments(self):\n        print('--- Get Runtime Arguments')\n        parser = argparse.ArgumentParser()\n        parser.add_argument(\n            '--input',\n            type=str,\n            help='Input extract data'\n        )\n        parser.add_argument(\n            '--output',\n            type=str,\n            help='Output extract data'\n        )\n        parser.add_argument(\n            '--max_doc_length',\n            type=int,\n            help='Max doc length'\n        )\n\n        self.args = parser.parse_args()\n\n        print('Input: {}'.format(self.args.input))\n        print('Output: {}'.format(self.args.output))\n        print('Max doc length: {}'.format(self.args.max_doc_length))\n\n    def load_dataset(self):\n        print('--- Load Data')\n        path = self.args.input + \"/processed.csv\"\n        self.df = pd.read_csv(path, dtype={\n            'paper_id': str,\n            'body_text': str,\n            'results': str,\n            'bibliography': str,\n            'subset_source': str,\n            'cord_uid': str,\n            'sha': str,\n            'source': str,\n            'title': str,\n            'doi': str,\n            'pubmed_id': str,\n            'abstract': str,\n            'publish_time': str,\n            'authors': str,\n            'journal': str,\n            'url': str})\n\n        print('Raw Input Specifications')\n        print(self.df.head())\n        print(self.df.columns)\n        print(self.df.shape)\n\n    def set_nlp_model(self):\n        print('--- Set NLP Model')\n        self.nlp = en_core_sci_lg.load(disable=['tagger', 'ner'])\n        self.nlp.max_length = 1000000\n        self.nlp.add_pipe(LanguageDetector(), name='language_detector', last=True)\n\n    def detect_article_lang(self):\n        print('--- Detect Article Language')\n        self.df['text_language'] = self.df.body_text.apply(lambda x: self.nlp(str(x[:int(self.args.max_doc_length)]))._.language['language'])\n        articles_by_lang = self.df['text_language'].value_counts()\n        print(articles_by_lang)\n\n        self.articles_non_en = self.df.loc[self.df[self.df.text_language != 'en'].index].shape[0]\n        print('Number of non-English articles: {}'.format(self.articles_non_en))\n\n        df_temp = pd.DataFrame({'language': articles_by_lang.index, 'Count': articles_by_lang.values})\n        df_temp.sort_values(by=['Count'], inplace=True)\n\n        fig_name = 'Articles by Lang'\n        fig, ax = plt.subplots(figsize=(20, 10))\n        sns.barplot(x='language', y='Count', data=df_temp, palette='husl')\n        plt.xlabel('Language', fontsize=24)\n        plt.ylabel('Articles', fontsize=24)\n        ax.tick_params(axis='both', which='major', labelsize=20)\n        self.run.log_image(fig_name, plot=fig)\n        self.offline_save_fig(fig_name)\n        plt.close()\n\n    def offline_save_fig(self, name):\n        if 'OfflineRun' in self.run._identity:\n            full_path = 'plots/' + name + '.png'\n            plt.savefig(full_path, dpi=300, format='png')\n\n    def drop_non_english_articles(self):\n        print('--- Drop Non-English Articles')\n        self.df = self.df.drop(self.df[self.df.text_language != 'en'].index)\n        print('Number of remaining English language articles: {}'.format(self.df.shape[0]))\n\n    def lang_detect(self):\n        self.detect_article_lang()\n        self.drop_non_english_articles()\n\n    def collect_metrics_pre(self):\n        self.articles_in = len(self.df)\n\n    def log_metrics_post(self):\n        self.run.log('# Articles In', self.articles_in)\n        self.run.log('# Articles Non-En', self.articles_non_en)\n        self.run.log('# Articles Out', len(self.df))\n\n    def output_dataset(self):\n        print('--- Output Dataset')\n        self.df.drop(columns=['text_language'], inplace=True)\n        if not (self.args.output is None):\n            os.makedirs(self.args.output, exist_ok=True)\n            path = self.args.output + \"/processed.csv\"\n            self.df.to_csv(path, index=False)\n            print('Output created: {}'.format(path))\n            print('Column definition of output')\n            print(self.df.columns)\n\n\nif __name__ == \"__main__\":\n    print('--- Language Detection Started')\n    lang_detect = LangDetect()\n    print('--- Language Detection Completed')","sub_path":"code/dataprep/cord19/step_dataprep_lang_detect.py","file_name":"step_dataprep_lang_detect.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"69226405","text":"import math\nimport xlwt\n\nimport utils\nimport numpy as np\n\n\nclass BM25(object):\n\n    def __init__(self, docs, docs_raw):\n        self.docs_raw = docs_raw # raw document list; each item holds id, title, subject, description\n        self.docs = docs # list of documents that have already been tokenized\n        self.docs_num = len(docs) # total number of documents\n        self.avgdl = sum([len(doc) + 0.0 for doc in docs]) / self.docs_num # average document length in words\n        self.docs_f = [] # per-document term counts (one dict per document)\n        self.df = {} # for every word, the number of documents that contain it\n        self.idf = {} # idf value of every word: log( (docs_num - df[word] + 0.5) / (df[word] + 0.5) )\n        self.k1 = 1.5 # tuning parameter\n        self.b = 0.75 # tuning parameter\n        self.init()\n\n    # initialization: compute docs_f, df and idf\n    def init(self):\n        for doc in self.docs:\n            # count how many times each word occurs in this document\n            doc_f = {}\n            for word in doc:\n                doc_f[word] = doc_f.get(word, 0) + 1\n            self.docs_f.append(doc_f)\n            # compute df\n            for word in doc_f.keys():\n                self.df[word] = self.df.get(word, 0) + 1\n        # compute idf\n        for word, word_df in self.df.items():\n            self.idf[word] = math.log(self.docs_num - word_df + 0.5) - math.log(word_df + 0.5)\n\n    # compute the similarity score between query q and the document at index (q is a list of tokenized words)\n    def bm25_score(self, q, index):\n        # all words contained in this document and their frequencies\n        doc_f = self.docs_f[index]\n        # total word count of this document\n        dl = len(self.docs[index])\n        # accumulated relevance score\n        score = 0\n        for word in q:\n            if word not in doc_f:\n                # the document does not contain this word\n                continue\n            else:\n                # the document contains this word: compute and accumulate the relevance score (idf(word) * f(word) * (k1+1)) / (f(word) + k1 * (1-b+b*(dl/avgdl)))\n                score += (self.idf[word] * doc_f[word] * (self.k1 + 1)\n                          / (doc_f[word] + self.k1 * (1 - self.b + self.b * dl / self.avgdl)))\n        return score\n\n    # compute all similarity scores and write the top n results to an Excel file\n    def bm25_score_all(self, q_raw, file_name, n=100000):\n        # preprocess the query\n        q = utils.pretreatment(q_raw)\n        print('q_raw:', q_raw)\n        print('q:', q)\n\n        # compute the similarity between the query and every document\n        scores = []\n        for index in range(self.docs_num):\n            score = self.bm25_score(q, index)\n            scores.append(score)\n        scores = np.array(scores)\n\n        # argsort sorts the array; passing -scores sorts in descending order and returns the original indices in sorted order [index of the largest value, second largest, ...]\n        scores_sort_index = np.argsort(-scores)\n        # save to bm_result.xls\n        workbook = xlwt.Workbook(encoding=\"utf-8\")\n        sheet = workbook.add_sheet(\"sim_result\")\n        # column 1: id\n        sheet.write(0, 0, 'id')\n        # column 2: similarity score\n        sheet.write(0, 1, 'sim_score')\n        # column 3: id\n        sheet.write(0, 2, 'id')\n        # column 4: title\n        sheet.write(0, 3, 'title')\n        # column 5: subject\n        sheet.write(0, 4, 'subject')\n        # column 6: description\n        sheet.write(0, 5, 'description')\n        for i in range(len(scores)):\n            if i < n and scores[scores_sort_index[i]] > 0:\n                doc = self.docs_raw[scores_sort_index[i]]\n                # column 1: id\n                sheet.write(i + 1, 0, str(scores_sort_index[i]+1))\n                # column 2: similarity score\n                sheet.write(i + 1, 1, scores[scores_sort_index[i]])\n                # column 3: id\n                sheet.write(i + 1, 2, doc[0])\n                # column 4: title\n                sheet.write(i + 1, 3, doc[1])\n                # column 5: subject\n                sheet.write(i + 1, 4, doc[2])\n                # column 6: description\n                sheet.write(i + 1, 5, doc[3])\n            else:\n                break\n        workbook.save(file_name)\n\n\n# initialize the BM25 model (class) and save it to the bm25.pkl file\ndef bm25_init():\n    # read the database\n    docs_raw = utils.readDb([\"id\", 'title', 'subject', 'description'])\n    # preprocessing\n    docs = []\n    for doc in docs_raw:\n        doc_str = ''\n        # merge title, subject and description with weights\n        # title\n        for i in range(5):\n            doc_str += doc[1]\n        # subject\n        for i in range(3):\n            doc_str += doc[2]\n        # description\n        for i in range(1):\n            doc_str += doc[3]\n        # preprocess (tokenize, remove stopwords)\n        doc_words = utils.pretreatment(doc_str)\n        docs.append(doc_words)\n        # if len(docs) > 1000:\n        #     break\n    bm25 = BM25(docs, docs_raw)\n    # save the BM25 model (class) to the bm25.pkl file\n    utils.save_model('bm/bm25.pkl', bm25)\n\n\nif __name__ == '__main__':\n\n    # initialize the BM25 model (class) and save it to the bm25.pkl file\n    # bm25_init()\n\n    # load the BM25 model class from bm25.pkl\n    bm25 = utils.read_model('bm25.pkl')\n\n    # the query (Chinese for \"per-capita disposable income of rural residents\")\n    q_raw = '农村居民人均可支配收入'\n\n    # compute similarities with BM25 and write the results\n    bm25.bm25_score_all(q_raw, 'bm_result.xls', 1000)\n","sub_path":"bm/bm.py","file_name":"bm.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"145207543","text":"\"\"\"\nkissim.comparison.fingerprint_distance_generator\n\nDefines the pairwise fingerprint distances for a set of fingerprints.\n\"\"\"\n\nimport datetime\nfrom itertools import repeat\nimport logging\nfrom multiprocessing import cpu_count, Pool\n\nimport numpy as np\nimport pandas as pd\n\nfrom . import FingerprintDistance\n\nlogger = logging.getLogger(__name__)\n\n\nclass FingerprintDistanceGenerator:\n \"\"\"\n Generate fingerprint distances for multiple fingerprint pairs based on their feature distances,\n given a feature weighting scheme.\n Uses parallel computing of fingerprint pairs.\n\n Attributes\n ----------\n distance_measure : str\n Type of distance measure, defaults to scaled Euclidean distance.\n molecule_codes : list of str\n Unique molecule codes associated with all fingerprints (sorted alphabetically).\n kinase_names : list of str\n Unique kinase names associated with all fingerprints (sorted alphabetically).\n feature_weights : None or list of float\n Feature weights of the following form:\n (i) None\n Default feature weights: All features equally distributed to 1/15\n (15 features in total).\n (ii) By feature type (list of 3 floats)\n Feature types to be set in the following order: physicochemical, distances, and\n moments.\n (iii) By feature (list of 15 floats):\n Features to be set in the following order: size, hbd, hba, charge, aromatic, aliphatic,\n sco, exposure,\n distance_to_centroid, distance_to_hinge_region, distance_to_dfg_region,\n distance_to_front_pocket, moment1, moment2, and moment3.\n For (ii) and (iii): All floats must sum up to 1.0.\n data : pandas.DataFrame\n Fingerprint distance and coverage, plus details on both molecule codes associated with\n fingerprint pairs.\n \"\"\"\n\n def __init__(self):\n\n self.distance_measure = None\n self.feature_weights = None\n self.molecule_codes = None\n self.kinase_names = None\n self.data = None\n\n def from_feature_distances_generator(self, feature_distances_generator, feature_weights=None):\n \"\"\"\n Generate fingerprint distances for multiple fingerprint pairs based on their feature\n distances, given a feature weighting scheme.\n Uses parallel computing of fingerprint pairs.\n\n Parameters\n ----------\n feature_distances_generator : kissim.similarity.FeatureDistancesGenerator\n Feature distances for multiple fingerprint pairs.\n feature_weights : None or list of float\n Feature weights of the following form:\n (i) None\n Default feature weights: All features equally distributed to 1/15\n (15 features in total).\n (ii) By feature type (list of 3 floats)\n Feature types to be set in the following order: physicochemical, distances, and\n moments.\n (iii) By feature (list of 15 floats):\n Features to be set in the following order: size, hbd, hba, charge, aromatic,\n aliphatic, sco, exposure, distance_to_centroid, distance_to_hinge_region,\n distance_to_dfg_region, distance_to_front_pocket, moment1, moment2, and moment3.\n For (ii) and (iii): All floats must sum up to 1.0.\n \"\"\"\n\n start = datetime.datetime.now()\n\n logger.info(f\"SIMILARITY: FingerprintDistanceGenerator: {feature_weights}\")\n\n # Set class attributes\n self.distance_measure = feature_distances_generator.distance_measure\n self.feature_weights = feature_weights\n self.molecule_codes = feature_distances_generator.molecule_codes\n self.kinase_names = feature_distances_generator.kinase_names\n\n # Calculate pairwise fingerprint distances\n fingerprint_distance_list = self._get_fingerprint_distance_from_list(\n 
self._get_fingerprint_distance,\n list(feature_distances_generator.data.values()),\n self.feature_weights,\n )\n\n # Format result and save to class attribute\n self.data = pd.DataFrame(\n [\n [i.molecule_pair_code[0], i.molecule_pair_code[1], i.distance, i.bit_coverage]\n for i in fingerprint_distance_list\n ],\n columns=\"molecule_code_1 molecule_code_2 distance coverage\".split(),\n )\n\n end = datetime.datetime.now()\n\n logger.info(f\"Start of fingerprint distance generation: {start}\")\n logger.info(f\"End of fingerprint distance generation: {end}\")\n\n @staticmethod\n def _get_fingerprint_distance_from_list(\n _get_fingerprint_distance, feature_distances_list, feature_weights=None\n ):\n \"\"\"\n Get fingerprint distances based on multiple feature distances\n (i.e. for multiple fingerprint pairs).\n Uses parallel computing.\n\n Parameters\n ----------\n _get_fingerprint_distance : method\n Method calculating fingerprint distance for one fingerprint pair\n (based on their feature distances).\n feature_distances_list : list of kissim.similarity.FeatureDistances\n List of distances and bit coverages between two fingerprints for each of their\n features.\n feature_weights : None or list of float\n Feature weights of the following form:\n (i) None\n Default feature weights: All features equally distributed to 1/15\n (15 features in total).\n (ii) By feature type (list of 3 floats)\n Feature types to be set in the following order: physicochemical, distances, and\n moments.\n (iii) By feature (list of 15 floats):\n Features to be set in the following order: size, hbd, hba, charge, aromatic,\n aliphatic, sco, exposure, distance_to_centroid, distance_to_hinge_region,\n distance_to_dfg_region, distance_to_front_pocket, moment1, moment2, and moment3.\n For (ii) and (iii): All floats must sum up to 1.0.\n\n Returns\n -------\n list of kissim.similarity.FingerprintDistance\n List of distance between two fingerprints, plus details on molecule codes, feature\n weights and feature coverage.\n \"\"\"\n\n # Get start time of computation\n start = datetime.datetime.now()\n logger.info(f\"Calculate pairwise fingerprint distances...\")\n\n # Number of CPUs on machine\n num_cores = cpu_count() - 1\n logger.info(f\"Number of cores used: {num_cores}\")\n\n # Create pool with `num_processes` processes\n pool = Pool(processes=num_cores)\n\n # Apply function to each chunk in list\n fingerprint_distances_list = pool.starmap(\n _get_fingerprint_distance, zip(feature_distances_list, repeat(feature_weights))\n )\n\n # Close and join pool\n pool.close()\n pool.join()\n\n # Get end time of computation\n logger.info(f\"Number of fingerprint distances: {len(fingerprint_distances_list)}\")\n end = datetime.datetime.now()\n\n logger.info(f\"Start: {start}\")\n logger.info(f\"End: {end}\")\n\n return fingerprint_distances_list\n\n @staticmethod\n def _get_fingerprint_distance(feature_distances, feature_weights=None):\n \"\"\"\n Get the fingerprint distance for one fingerprint pair.\n\n Parameters\n ----------\n feature_distances : kissim.similarity.FeatureDistances\n Distances and bit coverages between two fingerprints for each of their features.\n feature_weights : None or list of float\n Feature weights of the following form:\n (i) None\n Default feature weights: All features equally distributed to 1/15\n (15 features in total).\n (ii) By feature type (list of 3 floats)\n Feature types to be set in the following order: physicochemical, distances, and\n moments.\n (iii) By feature (list of 15 floats):\n Features to be 
set in the following order: size, hbd, hba, charge, aromatic,\n aliphatic, sco, exposure, distance_to_centroid, distance_to_hinge_region,\n distance_to_dfg_region, distance_to_front_pocket, moment1, moment2, and moment3.\n For (ii) and (iii): All floats must sum up to 1.0.\n\n Returns\n -------\n kissim.similarity.FingerprintDistance\n Distance between two fingerprints, plus details on molecule codes, feature weights and\n feature coverage.\n \"\"\"\n\n fingerprint_distance = FingerprintDistance()\n fingerprint_distance.from_feature_distances(feature_distances, feature_weights)\n\n return fingerprint_distance\n\n def get_structure_distance_matrix(self, fill=False):\n \"\"\"\n Get fingerprint distances for all structure pairs in the form of a matrix (DataFrame).\n\n Parameters\n ----------\n fill : bool\n Fill or fill not (default) lower triangle of distance matrix.\n\n Returns\n -------\n pandas.DataFrame\n Structure distance matrix.\n \"\"\"\n\n # Initialize matrix\n structure_distance_matrix = pd.DataFrame(\n [], columns=self.molecule_codes, index=self.molecule_codes, dtype=float\n )\n\n # Fill matrix with distance values\n for index, row in self.data.iterrows():\n structure_distance_matrix.loc[row.molecule_code_1, row.molecule_code_2] = row.distance\n\n if fill:\n structure_distance_matrix.loc[\n row.molecule_code_2, row.molecule_code_1\n ] = row.distance\n\n # Fill values on matrix main diagonal to 0.0\n for molecule_code in self.molecule_codes:\n structure_distance_matrix.loc[molecule_code, molecule_code] = 0.0\n\n return structure_distance_matrix\n\n def get_kinase_distance_matrix(self, by=\"minimum\", fill=False):\n \"\"\"\n Extract per kinase pair one distance value from the set of structure pair distance values\n and return these fingerprint distances for all kinase pairs in the form of a matrix\n (DataFrame).\n\n Parameters\n ----------\n by : str\n Condition on which the distance value per kinase pair is extracted from the set of\n distances values per structure pair. Default: Minimum distance value.\n fill : bool\n Fill or fill not (default) lower triangle of distance matrix.\n\n Returns\n -------\n pandas.DataFrame\n Kinase distance matrix.\n \"\"\"\n\n # Initialize matrix\n kinase_distance_matrix = pd.DataFrame(\n [], columns=self.kinase_names, index=self.kinase_names, dtype=float\n )\n\n # Fill matrix with distance values\n for index, row in self._get_kinase_distances(by).iterrows():\n kinase_distance_matrix.loc[index[0], index[1]] = row.distance\n\n if fill:\n kinase_distance_matrix.loc[index[1], index[0]] = row.distance\n\n # Fill values on matrix main diagonal to 0.0 which are NaN\n # (i.e. kinases that have only one structure representative)\n for kinase_name in self.kinase_names:\n if np.isnan(kinase_distance_matrix.loc[kinase_name, kinase_name]):\n kinase_distance_matrix.loc[kinase_name, kinase_name] = 0.0\n\n return kinase_distance_matrix\n\n def _get_kinase_distances(self, by=\"minimum\"):\n \"\"\"\n Extract per kinase pair one distance value from the set of structure pair distance values.\n\n Parameters\n ----------\n by : str\n Condition on which the distance value per kinase pair is extracted from the set of\n distances values per structure pair. 
Default: Minimum distance value.\n\n Returns\n -------\n pandas.DataFrame\n Fingerprint distance and coverage for kinase pairs.\n \"\"\"\n\n # Get distance values for structure pairs\n structure_distances = self._add_kinases_to_fingerprint_distance()\n\n # Group by kinase names\n structure_distances_grouped_by_kinases = structure_distances.groupby(\n by=[\"kinase_1\", \"kinase_2\"], sort=False\n )\n\n # Get distance values per kinase pair based on given condition\n by_terms = \"minimum maximum mean size\".split()\n\n if by == \"minimum\":\n kinase_distances = structure_distances_grouped_by_kinases.min()\n elif by == \"maximum\":\n kinase_distances = structure_distances_grouped_by_kinases.max()\n elif by == \"mean\":\n kinase_distances = structure_distances_grouped_by_kinases.mean()\n elif by == \"size\":\n kinase_distances = structure_distances_grouped_by_kinases.size()\n else:\n raise ValueError(f'Condition \"by\" unknown. Choose from: {\", \".join(by_terms)}')\n\n return kinase_distances\n\n def _add_kinases_to_fingerprint_distance(self):\n \"\"\"\n Add two columns to fingerprint distances for kinase 1 name and kinase 2 name.\n\n Returns\n -------\n pandas.DataFrame\n Fingerprint distance and coverage, plus details on both molecule codes and kinase names\n associated with fingerprint pairs.\n \"\"\"\n\n # Make a copy of distance values per structure pairs\n fingerprint_distance = self.data.copy()\n\n # Add columns for kinase names (kinase pair)\n fingerprint_distance[\"kinase_1\"] = [\n i.split(\"/\")[1].split(\"_\")[0] for i in fingerprint_distance.molecule_code_1\n ]\n fingerprint_distance[\"kinase_2\"] = [\n i.split(\"/\")[1].split(\"_\")[0] for i in fingerprint_distance.molecule_code_2\n ]\n\n return fingerprint_distance\n","sub_path":"kissim/comparison/fingerprint_distance_generator.py","file_name":"fingerprint_distance_generator.py","file_ext":"py","file_size_in_byte":13962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
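The docstrings in the record above describe three interchangeable feature-weight schemes (None, 3 type weights, or 15 per-feature weights summing to 1.0). As an illustration of how a 3-float scheme could expand to the 15 features listed there, assuming each type weight is split evenly across its features (8 physicochemical, 4 distances, 3 moments, as enumerated in the docstring; whether kissim expands them exactly this way is an assumption):

def expand_feature_weights(weights_by_type):
    # hypothetical helper: spread each feature-type weight evenly over its
    # features (8 physicochemical, 4 distances, 3 moments)
    counts = [8, 4, 3]
    assert abs(sum(weights_by_type) - 1.0) < 1e-9, "weights must sum to 1.0"
    expanded = []
    for weight, n in zip(weights_by_type, counts):
        expanded.extend([weight / n] * n)
    return expanded

# expand_feature_weights([1/3, 1/3, 1/3]) -> 15 floats that sum to 1.0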
+{"seq_id":"610458197","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef open_url(keyword):\n payload = {'q':keyword,'sort':'sale-desc'}\n url = 'https://s.taobao.com/search'\n headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\n\n res = requests.get(url, params=payload, headers=headers)\n return res\n\ndef main():\n keyword = input('请输入关键词:')\n res = open_url(keyword)\n\n with open(\"items.txt\",'wt',encoding='utf-8') as f:\n f.write(res.text)\n\nif __name__ == '__main__':\n main()\n","sub_path":"淘宝/sales.py","file_name":"sales.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"226388409","text":"from fortress.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom fortress.slithir.operations import Nop\n\n\nclass VoidConstructor(AbstractDetector):\n\n ARGUMENT = \"void-cst\"\n HELP = \"Constructor called not implemented\"\n IMPACT = DetectorClassification.LOW\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/fortress/wiki/Detector-Documentation#void-constructor\"\n\n WIKI_TITLE = \"Void constructor\"\n WIKI_DESCRIPTION = \"Detect the call to a constructor that is not implemented\"\n WIKI_RECOMMENDATION = \"Remove the constructor call.\"\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract A{}\ncontract B is A{\n constructor() public A(){}\n}\n```\nWhen reading `B`'s constructor definition, we might assume that `A()` initiates the contract, but no code is executed.\"\"\"\n\n def _detect(self):\n \"\"\"\"\"\"\n results = []\n for c in self.contracts:\n cst = c.constructor\n if cst:\n\n for constructor_call in cst.explicit_base_constructor_calls_statements:\n for node in constructor_call.nodes:\n if any(isinstance(ir, Nop) for ir in node.irs):\n info = [\"Void constructor called in \", cst, \":\\n\"]\n info += [\"\\t- \", node, \"\\n\"]\n\n res = self.generate_result(info)\n\n results.append(res)\n return results\n","sub_path":"fortress/detectors/operations/void_constructor.py","file_name":"void_constructor.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"14589071","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import has_permissions\nimport time\nimport random\nfrom random import randint\n\nmembers_list = []\n\nclass Roulette(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @has_permissions(administrator=True)\n @commands.command()\n async def rr(self, ctx, add, member : discord.Member = None):\n\n self.add = \"add\"\n message_author = ctx.message.author.display_name\n\n if not member:\n await ctx.send(\"Tu dois renseigner un joueur !\")\n elif not member.display_name in members_list:\n\n members_list.append(member.display_name)\n print(members_list)\n await ctx.send(f'{member.display_name} a bien été ajouté à la liste de participants !')\n await ctx.send(f'Liste de joueurs : {members_list}, Taille de la liste : {len(members_list)}')\n\n @has_permissions(administrator=True)\n @commands.command()\n async def rrstart(self, ctx):\n\n if len(members_list) <= 1:\n await ctx.send(\"Il n'y a pas assez de participants !\")\n return\n\n timer = 5\n\n while timer != 0:\n\n await ctx.send(f'La roulette russe commence dans : {timer}')\n\n time.sleep(1)\n\n timer -= 1\n\n if timer == 0:\n while len(members_list) != 1:\n await ctx.send(\"Here we go! :ye:\")\n await ctx.send(\"Qui aura droit au chatiment divin ? :pensive:\")\n time.sleep(20)\n member_dead = members_list[randint(0, len(members_list)-1)]\n await ctx.send(f'Aïe | ||**{member_dead}**|| est mort.') \n members_list.remove(member_dead)\n await ctx.send(f'**Joueurs restants | {members_list}')\n time.sleep(15)\n\n if len(members_list) == 1:\n await ctx.send(f'Roulette Russe | **{members_list[0]}** a gagné la partie ! @here')\n\n\n\ndef setup(client):\n client.add_cog(Roulette(client))","sub_path":"cogs/roulette_russe.py","file_name":"roulette_russe.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"210464902","text":"from django.contrib import admin\nfrom .forms import CustomUserCreationForm\nfrom django.utils.html import format_html\nfrom hospital.models import Hospital\nfrom vehicle.models import Vehicle,VehicleCategory\nfrom django.urls import path\nfrom django.conf.urls import include, url\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import gettext, gettext_lazy as _\nfrom django.utils.translation import ugettext_lazy\nfrom user.admin import admin_site\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.apps import apps\nclass HospitalAdmin(admin.ModelAdmin):\n list_display_links = None\n change_form_template = 'admin/hospital/change_form.html'\n change_list_template = 'admin/hospital/change_list.html'\n form = CustomUserCreationForm\n model = Hospital\n list_display = ('full_name','email', 'phone','address','status','Action')\n list_filter = ('status',)\n list_per_page = 5 #For Pagination\n\n fieldsets = (\n (None, {'fields': ('full_name','email','phone','address','status','password')}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('full_name','email','phone','status','password', 'is_active')}\n ),\n )\n search_fields = ('email',)\n ordering = ('-id',)\n \n def Action(self, obj):\n if(obj.status == 3):\n delete = ''\n edit = ''\n add = ''\n else: \n delete = '
' % (\n obj.id, obj.id)\n add = '
' % (\n obj.id)\n edit = '
' % (obj.id,obj.id)\n\n view = '
' % (\n obj.id)\n \n return format_html(view + delete + edit + add)\n \n def get_urls(self):\n urls = super().get_urls()\n my_urls = [\n url('^view/(?P
\\d+)/$', self.hospital_view),\n url('^add_vehicle/(?P\\d+)/$', self.vehicle_add),\n ]\n return my_urls + urls\n \n def extra_context(self, request):\n context = admin_site.each_context(request)\n context['opts'] = Hospital._meta\n context['site_title'] = ugettext_lazy('Hospital')\n return context\n\n\n @method_decorator(login_required())\n def vehicle_add(self, request,hospital_id):\n context = self.extra_context(request)\n context['title'] = 'Add Vehicle'\n context['data'] = Hospital.objects.get(id=hospital_id)\n context['category'] = VehicleCategory.objects.all()\n return TemplateResponse(request, 'admin/hospital/add_vehicle.html', context=context)\n\n @method_decorator(login_required())\n def hospital_view(self, request,hospital_id):\n context = self.extra_context(request)\n context['title'] = 'Hospital User Details'\n context['userDetail'] = Hospital.objects.get(id=hospital_id)\n context['Vehicle'] = Vehicle.objects.filter(user_id = hospital_id).filter(user_type = 1)\n context['site_title'] = ugettext_lazy('Hospital')\n return TemplateResponse(request, 'admin/hospital_view.html', context=context)\n\nadmin_site.register(Hospital,HospitalAdmin)\n","sub_path":"gelmeko/hospital/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"177087716","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom ProyectoPruebaDjango.apps.categories.forms import CategoryForm\nfrom ProyectoPruebaDjango.apps.categories.models import Category, CategoryModelForm\n\n# Create your views here.\n\n#ejemplo de uso de save() - Para crear o actualizar categoria\ndef create(request):\n\n #create a model Category instance\n category = Category(name='Categoria 1', description='Categoria creada desde modelo')\n #invoke the save() method to create/save the record\n #NO record id reference, so a create operation is made and the reference is updated with id\n category.save()\n\n #change field on instance\n category.name = 'Categoria 2'\n\n print(\"Se creo la categoria con id = \" + str(category.id))\n\n #invoke the save() method ti update/save record\n #record has id reference from prior save() call, so operation is update.\n #category.save()\n category.save(update_fields=['name']) #actualiza solo el campo 'name'. Por defecto Django actualiza todos los campos\n\n print(\"Se actualizo la categoria con id = \" + str(category.id))\n\n return HttpResponse('Categoria creada con exito') #imprime directamente en pagina\n\n#ejemplo de uso de get() para recuperar UN SOLO registro de categoria\ndef search(request, category_id):\n\n try:\n category = Category.objects.get(id=category_id)\n #category = Category.objects.get(name__contains='PHP') #error si get() recupera mas de un registro\n # category, createdOK = Category.objects.get_or_create(name=\"PHP\") #si no recupera, crea un registro\n #usa get y save combinados\n return HttpResponse(\"Se cargo de la base de datos la categoria: %s\" % category.name)\n except ObjectDoesNotExist:\n return HttpResponse(\"No se encontro la categoria con id = %s\" % category_id)\n\n #prueba con input(). La pagina queda cargando hasta recibir el input\n # try:\n # id_input = input(\"Ingrese el id de la categoria a buscar: \")\n # category = Category.objects.get(id=int(id_input))\n # return HttpResponse(\"Se cargo de la base de datos la categoria: %s\" % category.name)\n # except:\n # return HttpResponse(\"No se encontro la categoria con id = %s\" % id_input)\n\n#ejempl de uso de update()\ndef update(request, category_id):\n cant = Category.objects.filter(id=category_id).update(name='Programacion general') #si no se filtrara por primay key\n # (o campo unique), se podrian recuperar varios registros y update() los actualizaria a TODOS\n # filter() retorna un query set (se ve uso en la siguiente view (funcion index))\n return HttpResponse(\"Se actualizo el nombre de: %d categoria/s\" % cant)\n\n# ejemplo de uso de all(), filter() y in_builk() - para listar todas las categirias\ndef index(request):\n categories = Category.objects.all()\n #categories = Category.objects.filter(name='PHP') #recupero categorias con filtro (SELECT ... 
WHERE name = \"PHP\")\n #categories = Category.objects.in_bulk() #retorna un diccionario con los registros, no un queryset\n #print(categories.query) #permite ver la consulta SQL\n #Artificio: obtiene una lista de categorias (diccionarios) a partir del queryset de all() o filter()\n # Esto se hace para imprimir con el HttpResponse()\n data = [{'id': category.id, 'name': category.name} for category in categories]\n return HttpResponse(str(data))\n\n# views para trabajar con formularios \n\n# create\ndef create_form(request):\n # crear categoria con model form (la clase del form se guarda en models.py junto al modelo)\n if request.method == 'POST':\n # POST, generate form with data from request\n form = CategoryModelForm(request.POST) #se instancia el form con los datos enviados por el\n # usuario por si se debe reenviar al mismo con los datos por no ser validado\n # check if it's valid\n if form.is_valid():\n # Insert into DB\n form.save()\n # redirect to a new URL\n return HttpResponse('Categoria almacenada correctamente en la BD')\n else:\n # GET, generate unbound (blank) form\n form = CategoryModelForm()\n return render(request, 'categories/create.html', {'form':form})\n\n# crear categoria con form standalone (independiente). La clase del form usada se guada en forms.py\n\"\"\" if request.method == \"POST\":\n # POST, generate form with data from request\n form = CategoryForm(request.POST)\n if form.is_valid:\n # process data, insert into DB, generate email, etc\n\n # redirect to a new URL\n return HttpResponse(\"Categoria agregada correctamente\")\n\n else:\n # GET, generate blank form\n form = CategoryForm(initial={'description':'Descripcion opcional'}) \n return render(request, 'categories/create.html', {'form':form}) \"\"\"","sub_path":"ProyectoPruebaDjango/apps/categories/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"387739547","text":"# coding:utf-8\n\nimport pygame\nfrom pygame.locals import *\nfrom sys import exit\nfrom consts import *\nfrom gobang import GoBang\nfrom render import GameRender\n\n# from gobang_ai import GobangAI\n\nif __name__ == '__main__':\n gobang = GoBang()\n render = GameRender(gobang)\n # 先给AI留个接口\n # ai = GobangAI(gobang, ChessboardState.WHITE)\n result = ChessboardState.EMPTY\n enable_ai = False\n\n while True:\n # 捕捉pygame事件\n for event in pygame.event.get():\n # 退出程序\n if event.type == QUIT:\n exit()\n elif event.type == MOUSEBUTTONDOWN:\n # 成功着棋\n if render.one_step():\n result = gobang.get_chess_result()\n else:\n continue\n if result != ChessboardState.EMPTY:\n break\n if enable_ai:\n # ai.one_step()\n result = gobang.get_chess_result()\n else:\n render.change_state()\n\n # 绘制\n render.draw_chess()\n render.draw_mouse()\n\n if result != ChessboardState.EMPTY:\n render.draw_result(result)\n\n # 刷新\n pygame.display.update()","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"287825094","text":"\"\"\"\ninput: [1, 2, 3, 4, 5], k = 2\n\noutput: [3, 4, 5, 1, 2]\n\n\n\ninput: [0, 1, 2, 3, 4], k = 1\n\noutput: [1, 2, 3, 4, 0]\n\"\"\"\n\ndef solution(li:list, k:int):\n for i in range(k):\n first = li[0]\n li = li[1:]\n li.append(first)\n return li\n\ndef solution2(li:list, k:int):\n for i in range(k):\n first = li[0]\n length = len(li)\n for index in range(0,li-1):\n li[index] = li[index+1]\n li[-1] = first\n return li\n \ndef solution3(li:list, k:int):\n k = k % len(li)\n first = li[0:k]\n second = li[k:]\n li = second + first\n return li\n\n ","sub_path":"algorithim/src/list_rotation_by_k.py","file_name":"list_rotation_by_k.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"119280306","text":"import sys\nimport pickle\nimport math\nimport random\n\nfrom BloomFilter import BestBloomFilter, BloomFilter\nfrom utils import *\n\nsys.path.append(\"../lib\")\n\n\nclass PLBF(object):\n \"\"\"\n Practical learned bloom filter use gbdt as classifier\n \"\"\"\n def __init__(self, model, data, using_Fpr=True, fp_rate=0.01, total_size=100000, model_size=int(70 * 1024 * 8),\n is_train=True):\n self.model = model\n self.threshold = 0.9\n self.using_Fpr = using_Fpr\n self.is_train = is_train\n (s1, s2) = split_negatives(data, 0.7)\n if self.is_train:\n self.fit(data.positives, data.negatives)\n else:\n self.model.load_model()\n if using_Fpr:\n self.fp_rate = float(fp_rate)\n self.create_best_bloom_filter(data, s2)\n else:\n self.m = total_size - model_size\n self.create_bloom_filter(data, s2)\n\n def check(self, item):\n if self.model.predict(item) > self.threshold:\n return True\n return self.bloom_filter.check(item)\n\n def create_best_bloom_filter(self, data, test_negatives):\n print(\"Creating bloom filter\")\n self.get_threshold(test_negatives, data)\n print(\"model threshold: %f\" % self.threshold)\n\n false_negatives = []\n preds = self.model.predicts(data.positives)\n for i in range(len(data.positives)):\n if preds[i] <= self.threshold:\n false_negatives.append(data.positives[i])\n print(\"Number of false negatives at bloom time\", len(false_negatives))\n self.bloom_filter = BestBloomFilter(len(false_negatives), self.fp_rate / 2)\n for fn in false_negatives:\n self.bloom_filter.add(fn)\n print(\"Created bloom filter\")\n print(\"hash function K: \", self.bloom_filter.hash_count)\n print(\"bBF memory size: \", self.bloom_filter.size)\n\n def fit(self, positives, negatives):\n shuffled = shuffle_for_training(negatives, positives)\n self.model.fit(shuffled[0], shuffled[1])\n print(\"Done fitting\")\n\n # add data to test\n def get_threshold(self, test_negatives, data):\n fp_index = math.ceil((len(test_negatives) * (1 - self.fp_rate / 2)))\n predictions = self.model.predicts(test_negatives)\n predictions.sort()\n\n \"\"\"\n import pandas as pd\n excel_data = pd.DataFrame(predictions)\n writer = pd.ExcelWriter('preds.xlsx') # 写入Excel文件\n excel_data.to_excel(writer, float_format='%.5f')\n writer.save()\n writer.close()\n \n # ---------------------------------------------\n predictions = self.model.predicts(test_negatives[0:10])\n print('test negatives:', predictions)\n result1 = list()\n result2 = list()\n for i in range(10):\n result1.append(self.model.predict(test_negatives[i]))\n for i in range(10):\n result2.append(self.model.predict(data.positives[i]))\n print('negative keys test: ', result1)\n print('positive keys test: ', result2)\n exit()\n # \"\"\"\n self.threshold = predictions[fp_index]\n\n","sub_path":"Code/PLBF-GBDT/PLBF.py","file_name":"PLBF.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"507130587","text":"import json\n\n# some JSON:\nx = '{\"Name\":\"Rana\", \"Age\":23, \"City\":\"Riyadh\"}'\ny = json.loads(x)\n# the result is a Python dictionary:\nprint(y[\"Age\"])\n\n\nx = {\"Name\": \"Rana\", \"Age\": 23, \"City\": \"Riyadh\"}\n# convert into json\ny = json.dumps(x)\n# the result is a json string\nprint(y)\n\n\nx = {\n \"name\": \"Rana\",\n \"age\": 23,\n \"married\": False,\n \"divorced\": False,\n \"children\": None,\n \"pets\": True,\n \"cars\": [\n {\"model\": \"BMW 230\", \"mpg\": 27.5},\n {\"model\": \"Ford Edge\", \"mpg\": 24.1}\n ]\n}\ny = json.dumps(x)\nprint(y)\n","sub_path":"Day55.py","file_name":"Day55.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"640037168","text":"import tensorflow as tf\nimport utils.shapenet_provider as sp\nimport utils.matplot_viewer as mpv\nimport cfgs.pointnet_config as pn_cfg\nimport models.pointnet_cls as model\nimport os\nimport numpy as np\n\n# Download shapenet data\nsp.download_data()\n# Get pointnet config\ncfg = pn_cfg.get_pointnet_config()\n\n\ndef show_pc():\n for idx in range(len(cfg.train_files)):\n temp_data = sp.load_h5(cfg.train_files[idx])\n temp_labels = temp_data[1]\n # print(temp_labels)\n for j in range(3, len(temp_data[0])):\n mpv.show_pointcloud_fromarray(temp_data[0][j], cfg.label_names[temp_labels[j][0]])\n\n\ndef train():\n with tf.Graph().as_default():\n with tf.device('/gpu:' + str(cfg.gpu_idx)):\n pc_pl, labels_pl = model.get_inputs_pl(cfg.batch_size, cfg.point_num)\n is_training_pl = tf.placeholder(tf.bool, shape=())\n print(is_training_pl)\n batch = tf.Variable(0)\n bn_decay = get_bn_decay(batch)\n pred, end_points = model.get_model(pc_pl, is_training_pl, bn_decay=bn_decay)\n loss = model.get_loss(pred, labels_pl, end_points)\n correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))\n accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(cfg.batch_size)\n learning_rate = get_learning_rate(batch)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(loss, global_step=batch)\n saver = tf.train.Saver()\n # Create a session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n config.log_device_placement = False\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n sess.run(init, {is_training_pl: True})\n ops = {'pc_pl': pc_pl,\n 'labels_pl': labels_pl,\n 'is_training_pl': is_training_pl,\n 'pred': pred,\n 'loss': loss,\n 'train_op': train_op,\n 'step': batch}\n for epoch in range(cfg.max_epoch):\n print('**** EPOCH %03d ****' % epoch)\n\n train_one_epoch(sess, ops)\n\n # Save the variables to disk.\n if epoch % 50 == 0:\n save_path = saver.save(sess, os.path.join(cfg.model_dir, \"model%03d.ckpt\" % epoch))\n print(\"Model saved in file: %s\" % save_path)\n\n\ndef get_learning_rate(batch):\n learning_rate = tf.train.exponential_decay(\n cfg.base_lr, # Base learning rate.\n batch * cfg.batch_size, # Current index into the dataset.\n cfg.decay_step, # Decay step.\n cfg.decay_rate, # Decay rate.\n staircase=True)\n learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!\n return learning_rate\n\n\ndef get_bn_decay(batch):\n bn_momentum = tf.train.exponential_decay(\n 0.5,\n batch*cfg.batch_size,\n float(cfg.decay_step),\n 0.5,\n staircase=True)\n bn_decay = tf.minimum(0.99, 1 - bn_momentum)\n return bn_decay\n\n\ndef train_one_epoch(sess, ops):\n \"\"\" ops: dict mapping from string to tf ops \"\"\"\n is_training = True\n\n # Shuffle train files\n train_file_idxs = np.arange(0, len(cfg.train_files))\n np.random.shuffle(train_file_idxs)\n\n for fn in range(len(cfg.train_files)):\n print('----' + str(fn) + '-----')\n current_data, current_label = sp.loadDataFile(cfg.train_files[train_file_idxs[fn]])\n current_data = current_data[:, 0:cfg.point_num, :]\n current_data, current_label, _ = sp.shuffle_data(current_data, np.squeeze(current_label))\n current_label = np.squeeze(current_label)\n\n file_size = current_data.shape[0]\n num_batches = file_size // cfg.batch_size\n\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n\n for batch_idx in range(num_batches):\n start_idx = batch_idx * cfg.batch_size\n end_idx = (batch_idx + 1) * 
cfg.batch_size\n\n # Augment batched point clouds by rotation and jittering\n rotated_data = sp.rotate_point_cloud(current_data[start_idx:end_idx, :, :])\n jittered_data = sp.jitter_point_cloud(rotated_data)\n feed_dict = {ops['pc_pl']: jittered_data,\n ops['labels_pl']: current_label[start_idx:end_idx],\n ops['is_training_pl']: is_training, }\n step, _, loss_val, pred_val = sess.run([ops['step'],\n ops['train_op'],\n ops['loss'],\n ops['pred']],\n feed_dict=feed_dict)\n pred_val = np.argmax(pred_val, 1)\n correct = np.sum(pred_val == current_label[start_idx:end_idx])\n total_correct += correct\n total_seen += cfg.batch_size\n loss_sum += loss_val\n\n print('mean loss: %f' % (loss_sum / float(num_batches)))\n print('accuracy: %f' % (total_correct / float(total_seen)))\n\n\ntrain()\n","sub_path":"scripts/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
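get_learning_rate() in the record above passes batch * cfg.batch_size as the global step to tf.train.exponential_decay with staircase=True. A plain-Python sketch of the same schedule, for readers without TF1 at hand (the example cfg numbers in the comment are assumptions, not taken from the config file):

def decayed_lr(base_lr, step, batch_size, decay_step, decay_rate):
    # staircase=True floors the exponent, so the LR drops in discrete steps
    exponent = (step * batch_size) // decay_step
    return max(base_lr * (decay_rate ** exponent), 0.00001)  # clipped as in the code

# e.g. base_lr=0.001, decay_rate=0.7, decay_step=200000, batch_size=32:
# the LR is multiplied by 0.7 every 6250 optimizer steps, floored at 1e-5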
+{"seq_id":"492866183","text":"import os\nimport utils\nimport argparse\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import DataLoader\n# dataset\nfrom data import SignalDataset\n\n# call model\nfrom model import Baseline\nimport train\n# base config\n\n\ndef config():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--data_dir', type=str,\n default='./data', help='signal data dir')\n parser.add_argument('--num_epoch', type=int, default=10, help='epoch')\n parser.add_argument('--lr', type=int, default=1e-3, help='learning rate')\n parser.add_argument('--batch_size', type=int,\n default=16, help='data batch size')\n parser.add_argument('--model', type=str,\n default='Baseline', help='train model')\n parser.add_argument('--save_dir', type=str,\n default='./results', help='output data dir')\n parser.add_argument('--save_name', type=str,\n default='', help='manual name')\n parser.add_argument('--ngpu', type=int, default=1,\n help='Multi gpu training ')\n parser.add_argument('--device', type=None, default=torch.device('cuda:1'),\n help='cuda device index')\n parser.add_argument('--mode', type=str, choices=['3000', '1000'], help='data numbers')\n parser.add_argument('--num_worker', type=int, default=4, help='num workers')\n\n args = parser.parse_args()\n return args\n\n\ndef split_weight(net):\n \"\"\"\n split weights into categories\n one : conv, linear layer => decay\n others : bn weights, bias \n \"\"\"\n\n decay = []\n no_decay = []\n\n for m in net.modules():\n if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):\n decay.append(m.weight)\n\n if m.bias is not None:\n no_decay.append(m.bias)\n else:\n # class에 인자가 있는지 확인\n if hasattr(m, 'weight'):\n no_decay.append(m.weight)\n if hasattr(m, 'bias'):\n no_decay.append(m.bias)\n\n assert len(list(net.parameters())) == len(decay) + len(no_decay)\n # net.parameters() 형태로 반환\n return [dict(params=decay), dict(params=no_decay, weight_decay=0)]\n\n\ndef main():\n args = config()\n # args, inner variable\n device = args.device\n batch_size = args.batch_size\n num_workers = args.num_worker\n torch.backends.cudnn.benchmark = True\n\n # data loader - already SignalDataset to cuda\n # dataset : dictionary train, dev, test\n datasets = {}\n dataloaders = {}\n\n for k in ['train', 'eval', 'test']:\n datasets[k] = SignalDataset(k, args.data_dir)\n dataloaders[k] = DataLoader(\n datasets[k], args.batch_size, shuffle=True, num_workers=4)\n if k == 'test':\n dataloaders[k] = DataLoader(\n datasets[k], args.batch_size, shuffle=False, num_workers=4)\n\n # model load\n\n if args.ngpu > 1:\n print(f\"Model Build....{args.model}\")\n model = args.model().to(device)\n torch.nn.DataParallel(model)\n else:\n print(f\"Model Build....{args.model}\")\n model = Baseline().to(device)\n\n\n # criterion\n\n criterion = nn.BCEWithLogitsLoss()\n # criterion = nn.MSELoss()\n\n # optimizer\n # adam default => le =1e-3 , betas : 0.9, 0.999 eps=1e-8, weight decay=0\n params = split_weight(model)\n #optimizer = optim.Adam(params)\n optimizer = optim.Adamax(params, lr=args.lr)\n # scheduler\n scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)\n\n # Train\n best_model = train.train(dataloaders, model, criterion, optimizer, scheduler, args)\n\n # Test\n #test_loss, test_pred = test(dataloaders, model, criterion, optimizer, scheduler, args)\n\n if not os.path.exists(args.save_dir):\n os.mkdir(args.save_dir)\n\n # save\n\n\nif __name__ == 
'__main__':\n main()\n","sub_path":"Datathon/BIOSIG/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
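main() above feeds the two parameter groups from split_weight() straight into Adamax; because the second group pins weight_decay=0, any decay set at the optimizer level reaches only the conv/linear weights. A short usage sketch reusing the imports from main.py (the 1e-4 value is illustrative):

model = Baseline()
params = split_weight(model)
# the optimizer-level weight_decay applies only to the first group;
# biases and BN parameters keep the weight_decay=0 override
optimizer = optim.Adamax(params, lr=1e-3, weight_decay=1e-4)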
+{"seq_id":"459202508","text":"import logging\nimport time\n\nimport pytest\n\nfrom common.utils import resize_browser\n\nlogger = logging.getLogger(__name__)\n\n@pytest.fixture(scope='function')\ndef browser(module_browser, base_url, request):\n resize_browser(browser=module_browser, resolution=request.param)\n time.sleep(0.5)\n module_browser.get(base_url)\n if module_browser.is_desktop():\n module_browser.click(xpath=\"//a[@id='best-expense-video-id']\")\n else:\n module_browser.click(xpath=\"//div[contains(@class, 'sticky-cta-mobile')]/a\")\n time.sleep(1)\n return module_browser\n\ndef submit_getdemo_form(browser, email=None, firstname=None, lastname=None, phone=None, company_size=None, agree=None):\n if email:\n browser.input(xpath=\"//input[@name='email']\", keys=email)\n if firstname:\n browser.input(xpath=\"//input[@name='firstname']\", keys=firstname)\n if lastname:\n browser.input(xpath=\"//input[@name='lastname']\", keys=lastname)\n if phone:\n browser.input(xpath=\"//input[@name='phone']\", keys=phone)\n if company_size:\n browser.click(xpath=\"//input[@id='number_of_employees']\")\n browser.click(xpath=f\"//li[@data-value='{company_size}']\")\n if agree:\n browser.click(xpath='//div[contains(@class, \"custom-checkbox\")]')\n browser.click(xpath='//button[text()=\"Get a demo\"]')\n\n@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)\ndef test_bad_email(browser):\n submit_getdemo_form(browser, email='foo')\n e = browser.find(xpath=\"//label[@for='demo-email'][@class='error']\")\n assert e and e.is_displayed(), 'No error displayed for invalid email'\n\n@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)\ndef test_missing_firstname(browser):\n submit_getdemo_form(browser, email='megatron@fyle.in')\n e = browser.find(xpath=\"//label[@for='demo-first-name'][@class='error demo-first-name-error']\")\n assert e and e.is_displayed(), 'No error displayed for missing firstname'\n\n@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)\ndef test_success(browser):\n submit_getdemo_form(browser, email='megatron@fyle.in', firstname='Megatron', lastname='Transformer', phone='123456789', company_size='Under 5', agree=True)\n time.sleep(2)\n e = browser.find(xpath=\"//h3[contains(text(), 'Thank')]\")\n assert e and e.is_displayed(), 'Not displaying thank you message'\n","sub_path":"homepage/test_getdemo.py","file_name":"test_getdemo.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"407542214","text":"\nfrom . import it_planning_app\nfrom .get_incoming import *\nfrom .it_planning import create_graph\nfrom .node_add import add_node\nfrom .node_edit import edit_node\nfrom .node_remove import remove_node\nfrom flask import render_template, session, jsonify, redirect, url_for, request\nfrom login.decorators import is_logged_in, it_allowed\n\n\n@it_planning_app.route('/itPlanning', methods=[\"POST\", \"GET\"])\n@is_logged_in\n@it_allowed\ndef it_planning():\n \"\"\"\n Для пользоватеей с it_role = 1 и 2 будут строиться полные графы\n :return:\n \"\"\"\n ack = create_graph()\n return ack\n\n\n@it_planning_app.route('/itPlanningBubles', methods=[\"GET\"])\n@is_logged_in\n@it_allowed\ndef itPlanningBubles():\n role = request.args.get('role', default=0, type=int)\n scenario = request.args.get('scenario', default=0, type=int)\n if role == session['it_role'] and scenario:\n return render_template('it_planning/itPlanningBubles.html')\n return redirect(url_for('it_planning_app.itPlanningMenu'))\n\n\n@it_planning_app.route('/itPlanningMenu', methods=['POST', 'GET'])\n@is_logged_in\n@it_allowed\ndef itPlanningMenu():\n if request.method == 'POST':\n print(dict(session))\n return jsonify(access=dict(session))\n return render_template('it_planning/itPlanningMenu.html')\n\n\n@it_planning_app.route('/itPlanningChange', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef redacting_node():\n incoming_data = get_edit_incoming()\n return edit_node('commands', incoming_data)\n\n\n@it_planning_app.route('/itPlanningAdd', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef adding_node():\n incoming_data = get_add_incoming()\n return add_node('commands', incoming_data)\n\n\n@it_planning_app.route('/itPlanningRemove', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef removing_node():\n incoming_data = get_remove_incoming()\n return remove_node('commands', incoming_data)\n\n\n@it_planning_app.route('/itPlanningChange_goals', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef redacting_goals_node():\n incoming_data = get_edit_incoming()\n return edit_node(\"goals\", incoming_data)\n\n\n@it_planning_app.route('/itPlanningAdd_goals', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef adding_goals_node():\n incoming_data = get_add_incoming()\n return add_node(\"goals\", incoming_data)\n\n\n@it_planning_app.route('/itPlanningRemove_goals', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef removing_goals_node():\n incoming_data = get_remove_incoming()\n return remove_node(\"goals\", incoming_data)\n","sub_path":"it_planning/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"73614618","text":"'''\nThis class contains functions to\n1) run_MCMC(): execute the MCMC algorithm for SIR models,and save results\n2) load_data(): load nummerical results (the Markov Chain)\n3) report(): compute the marginal likelihood\n4) remove_burning (): remove first smaples of the chain\n5) plot (): for visualize the obtain results\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom SIRparamIden import SirIden as SirIden\nfrom SIRparamIden import simpleSirIden as simpleSirIden\nfrom xSIRparamIden import hyperSirIden as hyperSirIden\nfrom uqVisual import uqPlot\nimport genLib as genLib\nfrom AlgorithmPram import AlgorithmParam as AlgParam\n\n\nclass postProcessing(AlgParam):\n def __init__(self,country_region,model, **kwargs):\n AlgParam.__init__(self, country_region)\n self.model = model\n if model == 'SIR':\n self.SIR = SirIden(country_region= self.country_region) \n elif model =='simSIR':\n self.SIR = simpleSirIden(country_region= self.country_region) \n #gammaSize = 1\n elif model == 'hyperSIR':\n self.SIR = hyperSirIden(initT, deltaT, N, n_MC = self.n_MC, kernel_std_ratio = self.kernel_std_ratio, country_region= self.country_region, observation_deltaT = 14) \n \n if 'loadInit' in kwargs.keys():\n self.loadInit = kwargs['loadInit']\n self.ithrun = kwargs['ithrun']\n if self.loadInit:\n self.filename = model +country_region + str(self.ithrun)\n self.loadfile = model +country_region + str(self.ithrun-1)\n if self.ithrun == 1:\n self.loadfile = model +country_region\n else: self.loadInit = 0\n if self.loadInit == 0:\n self.filename = model + country_region\n f = plt.figure()\n self.SIR.LikelihoodFunc.plotLikelihood()\n f.savefig(self.result_dir+'likelihoodfunction.pdf',bbox_inches='tight')\n self.observationTime4Val = self.SIR.time[np.arange(5,self.SIR.time.size, 20, dtype = int )]\n \n\n def run_mcmc (self,**kwargs):\n #### Perform MCMC\n if self.loadInit == 0:\n np.random.seed(100)\n x_init = self.SIR.prior_pdf_rvs()\n print (x_init)\n print(self.SIR.x2theta_x0(x_init))\n x_init[0:self.SIR.theta_dim//2] = np.log(0.2)\n x_init[self.SIR.theta_dim//2:self.SIR.theta_dim] = np.log(0.2)\n else:\n x_prev = self.load_data(self.result_dir+self.loadfile)\n x_init = np.mean(x_prev, axis = 0)\n x = self.SIR.run_MCMC(x_init = x_init)\n #### End MCMC process\n \n np.save(self.result_dir+self.filename+'.npy', x)\n xremovedBurning = self.remove_burning(x)\n self.report(xremovedBurning)\n self.plot(xremovedBurning)\n return xremovedBurning\n def report(self,xremovedBurning, **kwargs):\n scaledBC, logscaledConst = self.SIR.modelBayesCriterion(xremovedBurning, observationTime = self.observationTime4Val)\n print ('marginal Likelihood', np.exp(np.log(scaledBC) + logscaledConst)) \n if self.loadInit:\n filename = self.model + self.country_region + str(self.ithrun) + 'Cont.csv' \n else:\n filename = self.model + self.country_region +'.csv' \n genLib.report(filename, model = self.model, country_region = self.country_region, initT = self.SIR.initT, N = self.SIR.N,\n n_MC = self.SIR.n_MC,\n deltaT = self.SIR.deltaT, observation_deltaT = self.SIR.observation_deltaT,\n scaledBC= scaledBC, logscaledConst= logscaledConst,\n #executionTime = self.SIR.executionTime,\n marginalLikelihood = np.exp(np.log(scaledBC) + logscaledConst),x_mean = xremovedBurning.mean(axis =0))\n ####\n def remove_burning(self,x):\n return x[-self.n_MC_keep:,:]\n def load_data(self,loadfile):\n x = np.load(loadfile+'.npy')\n xremovedBurning = self.remove_burning(x)\n return xremovedBurning\n def plot(self,x,qtPlot 
= 0.95): \n x_mean = x.mean(axis = 0)\n if self.model != 'hyperSIR':\n logthetaM, x0M = self.SIR.x2theta_x0(x_mean)\n else:\n logthetaM, x0M, hyperParam = self.SIR.x2theta_x0(x_mean)\n \n print (x_mean)\n fxmean = self.SIR.interpolate_theta(logthetaM)\n plt.figure('beta')\n plt.plot(self.SIR.model.time,(fxmean[0,:]))\n plt.figure('gamma')\n plt.plot(self.SIR.model.time,(fxmean[1,:]))\n \n plt.figure('mean value') \n self.SIR.plot(x_mean)\n plt.xlim ([0, self.SIR.time_4_eval_marginal_likelihood.max() + self.SIR.initT])\n plt.ylim([0,self.SIR.data.confirmed.max()])\n plt.legend()\n \n f= plt.figure('ACF Beta')\n steps = np.arange(0,1000,1,dtype = int)\n uqPlot.autocorrelation(x[:,0],steps)\n f.savefig(self.result_dir+self.model + self.country_region + \"ACFBeta.pdf\", bbox_inches='tight')\n plt.show()\n \n \n \n #Posprocessing data \n active_cases_posterior_samples = np.zeros(shape = (x.shape[0], self.SIR.model.time.size ))# infection and recovered\n total_infections_posterior_samples = np.zeros(shape = (x.shape[0], self.SIR.model.time.size ))# infection and recovered\n beta = np.zeros(shape = (x.shape[0], self.SIR.time_node.size ))# infection and recovered\n gamma = np.zeros(shape = (x.shape[0],self.SIR.time_node.size ))# infection and recovered = np.zeros(shape = (x.shape[0],SIR.time_node.size ))# infection and recovered\n if self.model == 'hyperSIR':\n alpha = np.zeros(shape = (x.shape[0],self.SIR.priorHyperParamDim))# infection and recovered\n \n for i in range(x.shape[0]):\n if self.model != 'hyperSIR':\n theta, x0 = self.SIR.x2theta_x0(x[i,:])\n else:\n theta, x0, hyperParam = self.SIR.x2theta_x0(x[i,:])\n if self.model == 'SIR':\n beta[i,:] = np.exp(theta[0,:])\n gamma[i,:] = np.exp(theta[1,:])\n elif self.model =='simSIR':\n beta[i,:] = np.exp(theta[0:-1])\n gamma[i,:] = np.exp(theta[-1])*np.ones_like(gamma[i,:]) \n elif self.model == 'hyperSIR':\n beta[i,:] = np.exp(theta[0,:])\n gamma[i,:] = np.exp(theta[1,:])\n alpha[i,:] = hyperParam\n \n self.SIR.model.theta = self.SIR.interpolate_theta(theta)\n self.SIR.model.initCondition= x0\n self.SIR.model.eval()\n active_cases_posterior_samples[i,:] = self.SIR.model.state[1,:]\n total_infections_posterior_samples[i,:] = self.SIR.model.state[1,:] + self.SIR.model.state[2,:]\n f=plt.figure('Active cases')\n uqPlot.quantilePlot(active_cases_posterior_samples, self.SIR.model.time + self.SIR.initT, qtPlot)\n plt.plot(self.SIR.data.confirmed -self.SIR.data.deaths - self.SIR.data.recovered, '*-', label = 'data' )\n plt.legend(fontsize=15)\n plt.xlabel('day', fontsize= 18)\n plt.ylabel('active cases', fontsize= 18)\n f.savefig(self.result_dir+self.model + self.country_region + \"ActiveCases.pdf\", bbox_inches='tight')\n plt.show()\n \n f = plt.figure('Total infected cases')\n uqPlot.quantilePlot(total_infections_posterior_samples, self.SIR.model.time + self.SIR.initT, qtPlot)\n plt.plot(self.SIR.data.confirmed, '*-', label = 'data' )\n plt.legend(fontsize=15)\n plt.xlabel('day', fontsize= 18)\n plt.ylabel('Cumulative infections', fontsize= 18)\n f.savefig(self.result_dir+self.model + self.country_region +\"totalCases.pdf\", bbox_inches='tight')\n plt.show()\n \n f = plt.figure('beta')\n uqPlot.quantilePlot(beta, self.SIR.time_node + self.SIR.initT, qtPlot)\n plt.legend(fontsize=15)\n plt.xlabel('day', fontsize= 18)\n plt.ylabel(r'$\\beta$', fontsize= 18)\n if self.country_region == 'Germany':\n plt.ylim([0.0, 0.8])\n if self.country_region == 'Uruguay':\n plt.ylim([0., 1.])\n if self.country_region == 'Saudi Arabia':\n plt.ylim([0.02, 0.18])\n if 
self.country_region == 'Italy':\n plt.ylim([0, 0.35])\n f.savefig(self.result_dir+self.model + self.country_region +\"beta.pdf\", bbox_inches='tight')\n plt.show()\n \n f = plt.figure('gamma')\n uqPlot.quantilePlot(gamma, self.SIR.time_node + self.SIR.initT, qtPlot)\n plt.legend(fontsize=15)\n plt.xlabel('day', fontsize= 18)\n plt.ylabel(r'$\\gamma$', fontsize= 18)\n if self.country_region == 'Germany':\n plt.ylim([0.0, 0.8])\n if self.country_region == 'Uruguay':\n plt.ylim([0., 0.55])\n if self.country_region == 'Saudi Arabia':\n plt.ylim([0, 0.14])\n if self.country_region == 'Italy':\n plt.ylim([0, 0.1])\n f.savefig(self.result_dir+self.model + self.country_region + \"gamma.pdf\", bbox_inches='tight')\n \n \n if self.model == 'hyperSIR':\n f = plt.figure('alpha')\n uqPlot.kde(alpha[:,0])\n plt.legend()\n plt.xlabel(r'$\\alpha$', fontsize= 18)\n plt.ylabel('pdf', fontsize= 18)\n f.savefig(self.result_dir+self.model + self.country_region + \"alpha.pdf\", bbox_inches='tight')\n plt.show() \n print('end')","sub_path":"postProcessing.py","file_name":"postProcessing.py","file_ext":"py","file_size_in_byte":9478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
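report() in the record above deliberately works with a scaled Bayes criterion plus a separate log-constant and only exponentiates at the end. A standalone illustration of why this matters numerically (the two values below are hypothetical):

import numpy as np

scaledBC, logscaledConst = 0.37, 812.4             # hypothetical outputs of modelBayesCriterion
log_marginal = np.log(scaledBC) + logscaledConst   # safe: stays in log space
# np.exp(log_marginal) would overflow float64 here (exp(~709) is the limit),
# so reporting the log value itself is the robust option
print(log_marginal)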
+{"seq_id":"184222644","text":"import datetime\n\n__author__ = 'matvei'\n\nfrom bookshop import db\nfrom models.books.models import Author, Book, BookInLibrary\nfrom models.files.models import File\nfrom models.messages.models import Message\nfrom models.news.models import News\nfrom models.users.models import Role, User, Follower\ndb.create_all()\n\nfrom models.books.models import Author, Book, BookInLibrary, Genre, Comment\nfrom models.news.models import News\nfrom models.users.models import User, Role\n\nannotation = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor ' \\\n 'incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud ' \\\n 'exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute ' \\\n 'irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla ' \\\n 'pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia '\n\nrole1 = Role('admin', 'god')\nrole2 = Role('editor', 'can add news books authors')\nrole3 = Role('user', 'slave')\n\ndb.session.add(role1)\ndb.session.add(role2)\ndb.session.add(role3)\ndb.session.commit()\n\nadmin = User('admin', 'admin@example.com', \"11111\", [role1, role2], 'Admin', 'Admin')\nguest = User('guest', 'guest@example.com', \"22222\", [role2, role3], 'User', 'User')\nguest1 = User('matvei', 'a@aa.aaa', \"1\", [role3], 'Matvei', 'Nazaruk')\n\ndb.session.add(admin)\ndb.session.add(guest)\ndb.session.add(guest1)\ndb.session.commit()\n\ncontent1 = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor ' \\\n 'incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud ' \\\n 'exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute ' \\\n 'irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla ' \\\n 'pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia ' \\\n 'deserunt mollit anim id est laborum.'\ncontent2 = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt' \\\n ' ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ' \\\n 'ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit' \\\n ' in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat' \\\n ' cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'\ncontent3 = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt' \\\n ' ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ' \\\n 'ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit' \\\n ' in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat' \\\n ' cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'\nnews1 = News(\"Lorem ipsum dolor sit amet\", content1, admin.id, main_img='http://www.whydev.org/wp-content/uploads/2014/09/Books.jpg')\nnews2 = News(\"Foo Bar Baz\", content2, admin.id, main_img='http://upload.wikimedia.org/wikipedia/commons/7/76/Urval_av_de_bocker_som_har_vunnit_Nordiska_radets_litteraturpris_under_de_50_ar_som_priset_funnits_(2).jpg')\nnews3 = News(\"news 3333333\", content3, guest.id, main_img='http://static.squarespace.com/static/5148b660e4b030ab54aef81c/t/5473b990e4b0f2adb15dc47c/1416870295741/?format=1000w')\n\ndb.session.add(news1)\ndb.session.add(news2)\ndb.session.add(news3)\ndb.session.commit()\n\nauthor1 = Author(first_name='Jack', last_name='London', country='England')\nauthor2 = Author(first_name='William', last_name='Shakespeare', country='England')\n\ndb.session.add(author2)\ndb.session.add(author1)\ndb.session.commit()\n\ngenre1 = Genre('Sci-fi')\ngenre2 = Genre('Fantasy')\ngenre3 = Genre('Drama')\ngenre4 = Genre('Comic')\n\ndb.session.add(genre1)\ndb.session.add(genre2)\ndb.session.add(genre3)\ndb.session.add(genre4)\ndb.session.commit()\n\nbook1 = Book('Martin Iden', author1.id, \"1900-10\", [genre1], annotation, 'http://www.images-booknode.com/book_cover/4220/full/martin-eden-4220412.jpg')\nbook2 = Book('Beliy Klyk', author1.id, \"1905-1\", [genre2], annotation, 'http://upload.wikimedia.org/wikipedia/commons/1/14/JackLondonwhitefang1.jpg')\nbook3 = Book('Gamlet', author2.id, \"1828-3\", [genre3, genre4], annotation, 'http://th05.deviantart.net/fs70/PRE/i/2012/053/8/a/hamlet_book_cover_complete_by_syrihn-d4qoci9.jpg')\nbook4 = Book('King Lear', author2.id, \"1834-4\", [genre1, genre2], annotation,'http://ecx.images-amazon.com/images/I/51Rg2yW-xXL.jpg')\n\ndb.session.add(book1)\ndb.session.add(book2)\ndb.session.add(book3)\ndb.session.add(book4)\ndb.session.commit()\n\nbook_comment = Comment(book3.id, guest.id, guest.username, 'OMG!!! WTF!!!')\n\ndb.session.add(book_comment)\ndb.session.commit()\n\nuser1_book1 = BookInLibrary(book1.id, admin.id)\nuser1_book2 = BookInLibrary(book3.id, admin.id)\ndb.session.add(user1_book1)\ndb.session.add(user1_book2)\ndb.session.commit()\n","sub_path":"out_date/apps/db_init_fill.py","file_name":"db_init_fill.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"367023336","text":"#!/usr/bin/env python\n#\n# Copyright 2009 Facebook\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport inspect, new\nimport tornado.web\n\ndef expose_get(url):\n def wrap(f):\n def wrapped_f(*args):\n f(*args)\n wrapped_f.method = 'get'\n wrapped_f.url = url\n return wrapped_f\n return wrap\n\ndef expose_post(url):\n def wrap(f):\n def wrapped_f(*args):\n f(*args)\n wrapped_f.method = 'post'\n wrapped_f.url = url\n return wrapped_f\n return wrap\n\ndef start_class(obj):\n handlers = []\n obj = obj()\n for name in dir(obj):\n if name[0:2] == '__' or not callable(getattr(obj, name)):\n continue\n func = getattr(obj, name)\n if func.__name__ != expose_post.__name__ and func.__name__ != expose_get.__name__:\n _name = \"EasyRequestHandler-\" + func.url\n methods_dict = { func.method : func }\n _class = new.classobj(_name,(tornado.web.EasyRequestHandler, ), methods_dict)\n handlers.append((func.url, _class))\n return handlers\n\ndef start_module(mod_name):\n handlers = []\n mod = __import__(mod_name)\n for name in dir(mod):\n obj = getattr(mod, name)\n if inspect.isclass(obj):\n if hasattr(obj, 'target'):\n obj.__bases__ = (tornado.web.RequestHandler,)\n handlers.append((obj.target, obj))\n if inspect.isfunction(obj):\n if obj.__name__ != expose_post.__name__ and obj.__name__ != expose_get.__name__ and obj.__name__ != 'start':\n _name = \"Handler-\" + obj.url\n methods_dict = { obj.method : obj }\n _class = new.classobj(_name,(tornado.web.RequestHandler, ), methods_dict)\n handlers.append((obj.url, _class))\n return handlers\n\ndef start(obj, settings={}, port=8888):\n handlers = []\n if inspect.isclass(obj):\n handlers = start_class(obj)\n else:\n handlers = start_module(obj)\n \n application = tornado.web.Application(handlers, **settings)\n http_server = tornado.httpserver.HTTPServer(application)\n http_server.listen(port)\n tornado.ioloop.IOLoop.instance().start()\n\n","sub_path":"tornado/contrib/easy_app.py","file_name":"easy_app.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"235851067","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Function\nfrom torch.autograd import Variable\n\ndef where(condition, x, y):\n return Variable(condition.float()) * x + Variable((condition != 1).float()) * y\n\nclass MaxPool2d(nn.Module):\n def __init__(self, kernel_size):\n super(MaxPool2d, self).__init__()\n self.kernel_size = kernel_size\n self.layer = nn.MaxPool2d(self.kernel_size)\n\n def forward(self, input_tensor):\n self.in_N, self.in_depth, self.in_h, self.in_w = input_tensor.size()\n return self.layer.forward(input_tensor)\n #\n def lrp(self, R, lrp_var=None,param=None):\n if lrp_var is None or lrp_var.lower() == 'none' or lrp_var.lower() == 'simple':\n return self._simple_lrp(R)\n elif lrp_var.lower() == 'alphabeta' or lrp_var.lower() == 'alpha':\n return self._alphabeta_lrp(R, param)\n\n def _simple_lrp(self, R):\n self.check_shape(R)\n\n hpool = wpool = self.layer.kernel_size\n hstride = wstride = self.layer.stride\n\n Rx = torch.zeros(self.input.size())\n for i in range(self.Hout):\n for j in range(self.Wout):\n # Z = torch.eq(self.output[:, :, i:i+1, j:j + 1], self.input[:, :, i * hstride:i * hstride + hpool, j * wstride:j * wstride + wpool])\n # Z = where(Z, torch.ones_like(Z.float()), torch.zeros_like(Z.float()))\n Z = self.input[:, :, i * hstride:i * hstride + hpool, j * wstride:j * wstride + wpool]\n Zs = (torch.sum(torch.sum(Z, dim=2, keepdim=True),dim=3, keepdim=True))\n Zs += 1e-12 * where(Zs >= 0, torch.ones_like(Zs), torch.ones_like(Zs) * -1)\n\n Rx[:, :, i * hstride:i * hstride + hpool, j * wstride:j * wstride + wpool] += torch.div(Z, Zs) * self.R[:, :, i:i + 1, j:j + 1]\n return Rx\n\n def _alphabeta_lrp(self,R,alpha):\n return self._simple_lrp(R)\n\n def check_shape(self, R):\n self.R = R\n R_shape = self.R.size()\n output_shape = self.output.size()\n if len(R_shape) != 4:\n self.R = torch.reshape(self.R, output_shape)\n N, NF, self.Hout, self.Wout = self.R.size()","sub_path":"modules/Maxpool.py","file_name":"Maxpool.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"271407793","text":"'''\nClass for refining a mesh from one region to another.\nCreated on April 4, 2018\n\n@author: Richard Christie\n'''\n\nfrom scaffoldmaker.annotation.annotationgroup import AnnotationGroup\nfrom scaffoldmaker.utils.octree import Octree\nfrom scaffoldmaker.utils.zinc_utils import *\nfrom opencmiss.zinc.element import Element, Elementbasis\nfrom opencmiss.zinc.field import Field\nfrom opencmiss.zinc.node import Node\nfrom opencmiss.zinc.result import RESULT_OK as ZINC_OK\n\nclass MeshRefinement:\n '''\n Class for refining a mesh from one region to another.\n '''\n\n def __init__(self, sourceRegion, targetRegion, sourceAnnotationGroups = []):\n '''\n Assumes targetRegion is empty.\n :param sourceAnnotationGroups: List of AnnotationGroup for source mesh in sourceRegion.\n A copy containing the refined elements is created by the MeshRefinement.\n '''\n self._sourceRegion = sourceRegion\n self._sourceFm = sourceRegion.getFieldmodule()\n self._sourceCache = self._sourceFm.createFieldcache()\n self._sourceCoordinates = getOrCreateCoordinateField(self._sourceFm)\n # get range of source coordinates for octree range\n self._sourceFm.beginChange()\n sourceNodes = self._sourceFm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)\n minimumsField = self._sourceFm.createFieldNodesetMinimum(self._sourceCoordinates, sourceNodes)\n result, minimums = minimumsField.evaluateReal(self._sourceCache, 3)\n assert result == ZINC_OK, 'MeshRefinement failed to get minimum coordinates'\n maximumsField = self._sourceFm.createFieldNodesetMaximum(self._sourceCoordinates, sourceNodes)\n result, maximums = maximumsField.evaluateReal(self._sourceCache, 3)\n assert result == ZINC_OK, 'MeshRefinement failed to get maximum coordinates'\n xrange = [ (maximums[i] - minimums[i]) for i in range(3) ]\n edgeTolerance = 0.5*(max(xrange))\n if edgeTolerance == 0.0:\n edgeTolerance = 1.0\n minimums = [ (minimums[i] - edgeTolerance) for i in range(3) ]\n maximums = [ (maximums[i] + edgeTolerance) for i in range(3) ]\n minimumsField = None\n maximumsField = None\n self._sourceFm.endChange()\n self._sourceMesh = self._sourceFm.findMeshByDimension(3)\n self._sourceElementiterator = self._sourceMesh.createElementiterator()\n self._octree = Octree(minimums, maximums)\n\n self._targetRegion = targetRegion\n self._targetFm = targetRegion.getFieldmodule()\n self._targetFm.beginChange()\n self._targetCache = self._targetFm.createFieldcache()\n self._targetCoordinates = getOrCreateCoordinateField(self._targetFm)\n\n self._targetNodes = self._targetFm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)\n self._nodetemplate = self._targetNodes.createNodetemplate()\n self._nodetemplate.defineField(self._targetCoordinates)\n\n self._targetMesh = self._targetFm.findMeshByDimension(3)\n self._targetBasis = self._targetFm.createElementbasis(3, Elementbasis.FUNCTION_TYPE_LINEAR_LAGRANGE)\n self._targetEft = self._targetMesh.createElementfieldtemplate(self._targetBasis)\n self._targetElementtemplate = self._targetMesh.createElementtemplate()\n self._targetElementtemplate.setElementShapeType(Element.SHAPE_TYPE_CUBE)\n result = self._targetElementtemplate.defineField(self._targetCoordinates, -1, self._targetEft)\n\n self._nodeIdentifier = 1\n self._elementIdentifier = 1\n\n self._annotationGroups = []\n self._sourceAndTargetMeshGroups = []\n for sourceAnnotationGroup in sourceAnnotationGroups:\n sourceMeshGroup = sourceAnnotationGroup.getMeshGroup(self._sourceMesh)\n targetAnnotationGroup = 
AnnotationGroup(self._targetRegion, \\\n sourceAnnotationGroup.getName(), sourceAnnotationGroup.getFMANumber(), sourceAnnotationGroup.getLyphID())\n targetMeshGroup = targetAnnotationGroup.getMeshGroup(self._targetMesh)\n self._annotationGroups.append(targetAnnotationGroup)\n self._sourceAndTargetMeshGroups.append( ( sourceMeshGroup, targetMeshGroup) )\n\n def __del__(self):\n self._targetFm.endChange()\n\n def getAnnotationGroups(self):\n return self._annotationGroups\n\n def refineElementCubeStandard3d(self, sourceElement, numberInXi1, numberInXi2, numberInXi3):\n meshGroups = []\n for sourceAndTargetMeshGroup in self._sourceAndTargetMeshGroups:\n if sourceAndTargetMeshGroup[0].containsElement(sourceElement):\n meshGroups.append(sourceAndTargetMeshGroup[1])\n # create nodes\n nids = []\n xi = [ 0.0, 0.0, 0.0 ]\n for k in range(numberInXi3 + 1):\n xi[2] = k/numberInXi3\n for j in range(numberInXi2 + 1):\n xi[1] = j/numberInXi2\n for i in range(numberInXi1 + 1):\n xi[0] = i/numberInXi1\n self._sourceCache.setMeshLocation(sourceElement, xi)\n result, x = self._sourceCoordinates.evaluateReal(self._sourceCache, 3)\n nodeId = self._octree.findObjectByCoordinates(x)\n if nodeId is None:\n node = self._targetNodes.createNode(self._nodeIdentifier, self._nodetemplate)\n self._targetCache.setNode(node)\n result = self._targetCoordinates.setNodeParameters(self._targetCache, -1, Node.VALUE_LABEL_VALUE, 1, x)\n nodeId = self._nodeIdentifier\n self._octree.addObjectAtCoordinates(x, nodeId)\n self._nodeIdentifier += 1\n nids.append(nodeId)\n # create elements\n for k in range(numberInXi3):\n ok = (numberInXi2 + 1)*(numberInXi1 + 1)\n for j in range(numberInXi2):\n oj = (numberInXi1 + 1)\n for i in range(numberInXi1):\n bni = k*ok + j*oj + i\n element = self._targetMesh.createElement(self._elementIdentifier, self._targetElementtemplate)\n enids = [ nids[bni ], nids[bni + 1], nids[bni + oj], nids[bni + oj + 1],\n nids[bni + ok], nids[bni + ok + 1], nids[bni + ok + oj], nids[bni + ok + oj + 1] ]\n result = element.setNodesByIdentifier(self._targetEft, enids)\n #if result != ZINC_OK:\n #print('Element', self._elementIdentifier, result, enids)\n self._elementIdentifier += 1\n\n for meshGroup in meshGroups:\n meshGroup.addElement(element)\n\n\n def refineAllElementsCubeStandard3d(self, numberInXi1, numberInXi2, numberInXi3):\n element = self._sourceElementiterator.next()\n while element.isValid():\n self.refineElementCubeStandard3d(element, numberInXi1, numberInXi2, numberInXi3)\n element = self._sourceElementiterator.next()\n","sub_path":"scaffoldmaker/utils/meshrefinement.py","file_name":"meshrefinement.py","file_ext":"py","file_size_in_byte":7060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"604799795","text":"# Copyright 2021 Curtin University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Author: James Diprose, Aniek Roelofs\n\nimport os\nfrom typing import List, Dict\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nimport jsonlines\nimport pandas as pd\nimport pendulum\nimport vcr\nfrom airflow.models.connection import Connection\nfrom airflow.utils.state import State\nfrom click.testing import CliRunner\n\nimport academic_observatory_workflows.workflows.oa_web_workflow\nfrom academic_observatory_workflows.config import schema_folder, test_fixtures_folder\nfrom academic_observatory_workflows.tests.test_zenodo import MockZenodo\nfrom academic_observatory_workflows.workflows.oa_web_workflow import (\n Description,\n OaWebWorkflow,\n OaWebRelease,\n clean_ror_id,\n clean_url,\n fetch_institution_logo,\n make_logo_url,\n val_empty,\n make_entity_stats,\n Entity,\n PublicationStats,\n EntityStats,\n EntityHistograms,\n Histogram,\n load_data,\n preprocess_index_df,\n preprocess_data_df,\n make_index_df,\n make_entities,\n save_entities,\n update_index_with_logos,\n update_df_with_percentages,\n make_index,\n save_json,\n load_data_glob,\n save_jsonl_gz,\n)\nfrom observatory.platform.bigquery import bq_find_schema\nfrom observatory.platform.files import load_jsonl\nfrom observatory.platform.gcs import gcs_upload_file\nfrom observatory.platform.observatory_config import Workflow\nfrom observatory.platform.observatory_environment import (\n ObservatoryEnvironment,\n ObservatoryTestCase,\n Table,\n bq_load_tables,\n make_dummy_dag,\n)\n\nacademic_observatory_workflows.workflows.oa_web_workflow.INCLUSION_THRESHOLD = {\"country\": 0, \"institution\": 0}\n\n\nclass TestFunctions(TestCase):\n def test_val_empty(self):\n # Empty list\n self.assertTrue(val_empty([]))\n\n # Non empty list\n self.assertFalse(val_empty([1, 2, 3]))\n\n # None\n self.assertTrue(val_empty(None))\n\n # Empty string\n self.assertTrue(val_empty(\"\"))\n\n # Non Empty string\n self.assertFalse(val_empty(\"hello\"))\n\n def test_clean_ror_id(self):\n actual = clean_ror_id(\"https://ror.org/02n415q13\")\n expected = \"02n415q13\"\n self.assertEqual(actual, expected)\n\n def test_clean_url(self):\n url = \"https://www.auckland.ac.nz/en.html\"\n expected = \"https://www.auckland.ac.nz/\"\n actual = clean_url(url)\n self.assertEqual(expected, actual)\n\n def test_make_logo_url(self):\n expected = \"logos/country/s/1234.jpg\"\n actual = make_logo_url(entity_type=\"country\", entity_id=\"1234\", size=\"s\", fmt=\"jpg\")\n self.assertEqual(expected, actual)\n\n @patch(\"academic_observatory_workflows.workflows.oa_web_workflow.make_logo_url\")\n def test_get_institution_logo(self, mock_make_url):\n mock_make_url.return_value = \"logo_path\"\n mock_clearbit_ref = \"academic_observatory_workflows.workflows.oa_web_workflow.clearbit_download_logo\"\n\n def download_logo(company_url, file_path, size, fmt):\n if not os.path.isdir(os.path.dirname(file_path)):\n 
os.makedirs(os.path.dirname(file_path))\n with open(file_path, \"w\") as f:\n f.write(\"foo\")\n\n ror_id, url, size, width, fmt, build_path = \"ror_id\", \"url.com\", \"size\", 10, \"fmt\", \"build_path\"\n with CliRunner().isolated_filesystem():\n # Test when logo file does not exist yet and logo download fails\n with patch(mock_clearbit_ref) as mock_clearbit_download:\n actual_ror_id, actual_logo_path = fetch_institution_logo(ror_id, url, size, width, fmt, build_path)\n self.assertEqual(ror_id, actual_ror_id)\n self.assertEqual(\"unknown.svg\", actual_logo_path)\n mock_clearbit_download.assert_called_once_with(\n company_url=url,\n file_path=\"build_path/images/logos/institution/size/ror_id.fmt\",\n size=width,\n fmt=fmt,\n )\n mock_make_url.assert_not_called()\n\n mock_make_url.reset_mock()\n\n # Test when logo file does not exist yet and logo is downloaded successfully\n with patch(mock_clearbit_ref, wraps=download_logo) as mock_clearbit_download:\n actual_ror_id, actual_logo_path = fetch_institution_logo(ror_id, url, size, width, fmt, build_path)\n self.assertEqual(ror_id, actual_ror_id)\n self.assertEqual(\"logo_path\", actual_logo_path)\n mock_clearbit_download.assert_called_once_with(\n company_url=url,\n file_path=\"build_path/images/logos/institution/size/ror_id.fmt\",\n size=width,\n fmt=fmt,\n )\n mock_make_url.assert_called_once_with(entity_type=\"institution\", entity_id=ror_id, size=size, fmt=fmt)\n\n mock_make_url.reset_mock()\n\n # Test when logo file already exists\n with patch(mock_clearbit_ref, wraps=download_logo) as mock_clearbit_download:\n actual_ror_id, actual_logo_path = fetch_institution_logo(ror_id, url, size, width, fmt, build_path)\n self.assertEqual(ror_id, actual_ror_id)\n self.assertEqual(\"logo_path\", actual_logo_path)\n mock_clearbit_download.assert_not_called()\n mock_make_url.assert_called_once_with(entity_type=\"institution\", entity_id=ror_id, size=size, fmt=fmt)\n\n def test_make_entity_stats(self):\n \"\"\"Test make_entity_stats\"\"\"\n\n # Input figures for multiple entities\n p_outputs_open = [100, 50, 30]\n n_outputs = [10, 100, 1000]\n n_outputs_open = [10, 100, 1000]\n entities = [\n Entity(\n \"\",\n \"\",\n Description(\"\", \"\"),\n stats=PublicationStats(\n p_outputs_open=p_outputs_open_, n_outputs=n_outputs_, n_outputs_open=n_outputs_open_\n ),\n )\n for p_outputs_open_, n_outputs_, n_outputs_open_ in zip(p_outputs_open, n_outputs, n_outputs_open)\n ]\n stats = make_entity_stats(entities)\n expected_stats = EntityStats(\n 3,\n min=PublicationStats(p_outputs_open=30.0, n_outputs=10, n_outputs_open=10),\n max=PublicationStats(p_outputs_open=100.0, n_outputs=1000, n_outputs_open=1000),\n median=PublicationStats(p_outputs_open=50),\n histograms=EntityHistograms(\n p_outputs_open=Histogram(data=[2, 0, 1], bins=[30.0, 53.33333333333333, 76.66666666666666, 100.0]),\n n_outputs=Histogram(data=[1, 1, 1], bins=[1.0, 1.6666666666666665, 2.333333333333333, 3.0]),\n n_outputs_open=Histogram(data=[1, 1, 1], bins=[1.0, 1.6666666666666665, 2.333333333333333, 3.0]),\n ),\n )\n self.assertEqual(expected_stats, stats)\n\n\ndef load_index_and_data(entity_type: str, index: List[Dict], data: List[Dict]):\n df_index = pd.DataFrame(index)\n preprocess_index_df(entity_type, df_index)\n\n df_data = pd.DataFrame(data)\n preprocess_data_df(entity_type, df_data)\n\n df_index = make_index_df(entity_type, df_index, df_data)\n\n return df_index, df_data\n\n\nclass TestOaWebWorkflow(ObservatoryTestCase):\n maxDiff = None\n dt_fmt = \"YYYY-MM-DD\"\n\n def setUp(self) -> 
None:\n \"\"\"TestOaWebWorkflow checks that the workflow functions correctly, i.e. outputs the correct files, but doesn't\n check that the calculations are correct (data correctness is tested in TestOaWebRelease).\"\"\"\n\n # For Airflow unit tests\n self.project_id = os.getenv(\"TEST_GCP_PROJECT_ID\")\n self.data_location = os.getenv(\"TEST_GCP_DATA_LOCATION\")\n self.oa_web_fixtures = \"oa_web_workflow\"\n\n # For testing workflow functions\n self.dag_id = \"oa_web_workflow\"\n self.data_bucket_name = \"data-bucket-name\"\n self.conceptrecid = 1055172\n # self.release = OaWebRelease(dag_id=\"dag\", snapshot_date=pendulum.now(), data_bucket_name=)\n # self.workflow = OaWebWorkflow(dag_id=self.dag_id, input_project_id=self.project_id, output_project_id=self.project_id)\n repositories = [\n {\"id\": \"PubMed Central\", \"total_outputs\": 15, \"category\": \"Domain\", \"home_repo\": False},\n {\"id\": \"Europe PMC\", \"total_outputs\": 12, \"category\": \"Domain\", \"home_repo\": False},\n {\"id\": \"arXiv\", \"total_outputs\": 10, \"category\": \"Preprint\", \"home_repo\": False},\n ]\n self.country_index = [\n {\n \"id\": \"NZL\",\n \"name\": \"New Zealand\",\n \"wikipedia_url\": \"https://en.wikipedia.org/wiki/New_Zealand\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"alpha2\": \"NZ\",\n },\n ]\n # The n_ fields are strings because BigQuery exports integers as strings in JSON Lines exports\n self.country_data = [\n {\n \"id\": \"NZL\",\n \"year\": \"2020\",\n \"n_citations\": \"121\",\n \"n_outputs\": \"100\",\n \"n_outputs_open\": \"48\",\n \"n_outputs_publisher_open\": \"37\",\n \"n_outputs_publisher_open_only\": \"11\",\n \"n_outputs_both\": \"26\",\n \"n_outputs_other_platform_open\": \"37\",\n \"n_outputs_other_platform_open_only\": \"11\",\n \"n_outputs_closed\": \"52\",\n \"n_outputs_black\": \"90\",\n \"n_outputs_oa_journal\": \"19\",\n \"n_outputs_hybrid\": \"10\",\n \"n_outputs_no_guarantees\": \"8\",\n \"n_outputs_preprint\": \"10\",\n \"n_outputs_domain\": \"27\",\n \"n_outputs_institution\": \"0\",\n \"n_outputs_public\": \"0\",\n \"n_outputs_other_internet\": \"0\",\n \"repositories\": repositories,\n },\n {\n \"id\": \"NZL\",\n \"year\": 2021,\n \"n_citations\": \"233\",\n \"n_outputs\": \"100\",\n \"n_outputs_open\": \"45\",\n \"n_outputs_publisher_open\": \"37\",\n \"n_outputs_publisher_open_only\": \"14\",\n \"n_outputs_both\": \"23\",\n \"n_outputs_other_platform_open\": \"31\",\n \"n_outputs_other_platform_open_only\": \"8\",\n \"n_outputs_closed\": \"55\",\n \"n_outputs_black\": \"90\",\n \"n_outputs_oa_journal\": \"20\",\n \"n_outputs_hybrid\": \"9\",\n \"n_outputs_no_guarantees\": \"8\",\n \"n_outputs_preprint\": \"10\",\n \"n_outputs_domain\": \"27\",\n \"n_outputs_institution\": \"0\",\n \"n_outputs_public\": \"0\",\n \"n_outputs_other_internet\": \"0\",\n \"repositories\": repositories,\n },\n ]\n self.institution_index = [\n {\n \"id\": \"https://ror.org/02n415q13\",\n \"name\": \"Curtin University\",\n \"url\": \"https://curtin.edu.au/\",\n \"wikipedia_url\": \"https://en.wikipedia.org/wiki/Curtin_University\",\n \"country_code\": \"AUS\",\n \"country_name\": \"Australia\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"institution_type\": \"Education\",\n \"acronyms\": [],\n },\n ]\n self.institution_data = [\n {\n \"id\": \"https://ror.org/02n415q13\",\n \"year\": 2020,\n \"n_citations\": \"121\",\n \"n_outputs\": \"100\",\n \"n_outputs_open\": \"48\",\n \"n_outputs_publisher_open\": \"37\",\n 
\"n_outputs_publisher_open_only\": \"11\",\n \"n_outputs_both\": \"26\",\n \"n_outputs_other_platform_open\": \"37\",\n \"n_outputs_other_platform_open_only\": \"11\",\n \"n_outputs_closed\": \"52\",\n \"n_outputs_black\": \"90\",\n \"n_outputs_oa_journal\": \"19\",\n \"n_outputs_hybrid\": \"10\",\n \"n_outputs_no_guarantees\": \"8\",\n \"n_outputs_preprint\": \"10\",\n \"n_outputs_domain\": \"27\",\n \"n_outputs_institution\": \"0\",\n \"n_outputs_public\": \"0\",\n \"n_outputs_other_internet\": \"0\",\n \"repositories\": repositories,\n },\n {\n \"id\": \"https://ror.org/02n415q13\",\n \"year\": 2021,\n \"n_citations\": \"233\",\n \"n_outputs\": \"100\",\n \"n_outputs_open\": \"45\",\n \"n_outputs_publisher_open\": \"37\",\n \"n_outputs_publisher_open_only\": \"14\",\n \"n_outputs_both\": \"23\",\n \"n_outputs_other_platform_open\": \"31\",\n \"n_outputs_other_platform_open_only\": \"8\",\n \"n_outputs_closed\": \"55\",\n \"n_outputs_black\": \"90\",\n \"n_outputs_oa_journal\": \"20\",\n \"n_outputs_hybrid\": \"9\",\n \"n_outputs_no_guarantees\": \"8\",\n \"n_outputs_preprint\": \"10\",\n \"n_outputs_domain\": \"27\",\n \"n_outputs_institution\": \"0\",\n \"n_outputs_public\": \"0\",\n \"n_outputs_other_internet\": \"0\",\n \"repositories\": repositories,\n },\n ]\n self.entities = [\n (\"country\", self.country_index, self.country_data, [\"NZL\"]),\n (\"institution\", self.institution_index, self.institution_data, [\"02n415q13\"]),\n ]\n\n ####################################\n # Test workflow with Airflow\n ####################################\n\n def test_dag_structure(self):\n \"\"\"Test that the DAG has the correct structure.\"\"\"\n\n env = ObservatoryEnvironment(enable_api=False)\n with env.create():\n dag = OaWebWorkflow(\n dag_id=self.dag_id,\n cloud_workspace=env.cloud_workspace,\n data_bucket=self.data_bucket_name,\n conceptrecid=self.conceptrecid,\n ).make_dag()\n self.assert_dag_structure(\n {\n \"doi_sensor\": [\"check_dependencies\"],\n \"check_dependencies\": [\"query\"],\n \"query\": [\"download\"],\n \"download\": [\"make_draft_zenodo_version\"],\n \"make_draft_zenodo_version\": [\"download_assets\"],\n \"download_assets\": [\"preprocess_data\"],\n \"preprocess_data\": [\"build_indexes\"],\n \"build_indexes\": [\"download_logos\"],\n \"download_logos\": [\"download_wiki_descriptions\"],\n \"download_wiki_descriptions\": [\"build_datasets\"],\n \"build_datasets\": [\"publish_zenodo_version\"],\n \"publish_zenodo_version\": [\"upload_dataset\"],\n \"upload_dataset\": [\"repository_dispatch\"],\n \"repository_dispatch\": [\"cleanup\"],\n \"cleanup\": [],\n },\n dag,\n )\n\n def test_dag_load(self):\n \"\"\"Test that the DAG can be loaded from a DAG bag.\"\"\"\n\n # Test successful\n env = ObservatoryEnvironment(\n workflows=[\n Workflow(\n dag_id=self.dag_id,\n name=\"Open Access Website Workflow\",\n class_name=\"academic_observatory_workflows.workflows.oa_web_workflow.OaWebWorkflow\",\n cloud_workspace=self.fake_cloud_workspace,\n kwargs=dict(\n data_bucket=self.data_bucket_name,\n conceptrecid=self.conceptrecid,\n ),\n )\n ]\n )\n\n with env.create():\n self.assert_dag_load_from_config(self.dag_id)\n\n # Test required kwargs\n env = ObservatoryEnvironment(\n workflows=[\n Workflow(\n dag_id=self.dag_id,\n name=\"Open Access Website Workflow\",\n class_name=\"academic_observatory_workflows.workflows.oa_web_workflow.OaWebWorkflow\",\n cloud_workspace=self.fake_cloud_workspace,\n kwargs=dict(),\n )\n ]\n )\n\n with env.create():\n with self.assertRaises(AssertionError) as 
cm:\n self.assert_dag_load_from_config(self.dag_id)\n msg = cm.exception.args[0]\n self.assertTrue(\"missing 2 required keyword-only arguments\" in msg)\n self.assertTrue(\"data_bucket\" in msg)\n self.assertTrue(\"conceptrecid\" in msg)\n\n def setup_tables(\n self, dataset_id_all: str, dataset_id_settings: str, bucket_name: str, snapshot_date: pendulum.DateTime\n ):\n ror = load_jsonl(test_fixtures_folder(\"doi\", \"ror.jsonl\"))\n country = load_jsonl(test_fixtures_folder(self.oa_web_fixtures, \"country.jsonl.gz\"))\n institution = load_jsonl(test_fixtures_folder(self.oa_web_fixtures, \"institution.jsonl.gz\"))\n settings_country = load_jsonl(test_fixtures_folder(\"doi\", \"country.jsonl\"))\n\n oa_web_schema_path = test_fixtures_folder(self.oa_web_fixtures, \"schema\")\n with CliRunner().isolated_filesystem() as t:\n tables = [\n Table(\n \"ror\",\n True,\n dataset_id_all,\n ror,\n bq_find_schema(\n path=os.path.join(schema_folder(), \"ror\"), table_name=\"ror\", release_date=snapshot_date\n ),\n ),\n Table(\n \"country\",\n True,\n dataset_id_all,\n country,\n bq_find_schema(path=oa_web_schema_path, table_name=\"country\"),\n ),\n Table(\n \"institution\",\n True,\n dataset_id_all,\n institution,\n bq_find_schema(path=oa_web_schema_path, table_name=\"institution\"),\n ),\n Table(\n \"country\",\n False,\n dataset_id_settings,\n settings_country,\n bq_find_schema(path=os.path.join(schema_folder(), \"doi\"), table_name=\"country\"),\n ),\n ]\n\n bq_load_tables(\n project_id=self.project_id,\n tables=tables,\n bucket_name=bucket_name,\n snapshot_date=snapshot_date,\n )\n\n @patch(\"academic_observatory_workflows.workflows.oa_web_workflow.Zenodo\")\n @patch(\"academic_observatory_workflows.workflows.oa_web_workflow.trigger_repository_dispatch\")\n def test_telescope(self, mock_trigger_repository_dispatch, mock_zenodo):\n \"\"\"Test the telescope end to end.\"\"\"\n\n mock_zenodo.return_value = MockZenodo()\n execution_date = pendulum.datetime(2021, 11, 14)\n snapshot_date = pendulum.datetime(2021, 11, 21)\n env = ObservatoryEnvironment(project_id=self.project_id, data_location=self.data_location, enable_api=False)\n bq_dataset_id = env.add_dataset(\"data\")\n bq_dataset_id_settings = env.add_dataset(\"settings\")\n data_bucket = env.add_bucket()\n github_token = \"github-token\"\n zenodo_token = \"zenodo-token\"\n\n with env.create() as t:\n # Run fake DOI workflow to test sensor\n dag = make_dummy_dag(\"doi\", execution_date)\n with env.create_dag_run(dag, execution_date):\n # Running all of a DAGs tasks sets the DAG to finished\n ti = env.run_task(\"dummy_task\")\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Setup dependencies\n # Upload fake data to BigQuery\n self.setup_tables(\n dataset_id_all=bq_dataset_id,\n dataset_id_settings=bq_dataset_id_settings,\n bucket_name=env.download_bucket,\n snapshot_date=snapshot_date,\n )\n\n # Upload fake cached zip files file to bucket\n for file_name in [\"images-base.zip\", \"images.zip\"]:\n file_path = test_fixtures_folder(\"oa_web_workflow\", file_name)\n gcs_upload_file(bucket_name=data_bucket, blob_name=file_name, file_path=file_path)\n\n # Setup workflow and connections\n workflow = OaWebWorkflow(\n dag_id=self.dag_id,\n cloud_workspace=env.cloud_workspace,\n data_bucket=data_bucket,\n conceptrecid=self.conceptrecid,\n bq_ror_dataset_id=bq_dataset_id,\n bq_agg_dataset_id=bq_dataset_id,\n bq_settings_dataset_id=bq_dataset_id_settings,\n )\n dag = workflow.make_dag()\n env.add_connection(Connection(conn_id=workflow.github_conn_id, 
uri=f\"http://:{github_token}@\"))\n env.add_connection(Connection(conn_id=workflow.zenodo_conn_id, uri=f\"http://:{zenodo_token}@\"))\n\n # Run workflow\n with env.create_dag_run(dag, execution_date) as dag_run:\n # Mocked and expected data\n release = OaWebRelease(\n dag_id=self.dag_id,\n run_id=dag_run.run_id,\n snapshot_date=snapshot_date,\n )\n\n # DOI Sensor\n ti = env.run_task(\"doi_sensor\")\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Check dependencies\n ti = env.run_task(workflow.check_dependencies.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Run query\n ti = env.run_task(workflow.query.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Download data\n ti = env.run_task(workflow.download.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n expected_file_names = [\n \"country-index.jsonl.gz\",\n \"institution-index.jsonl.gz\",\n \"country-data-000000000000.jsonl.gz\",\n \"institution-data-000000000000.jsonl.gz\",\n ]\n for file_name in expected_file_names:\n path = os.path.join(release.download_folder, file_name)\n self.assertTrue(os.path.isfile(path))\n\n # Make draft Zenodo version\n ti = env.run_task(workflow.make_draft_zenodo_version.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Download cached assets\n ti = env.run_task(workflow.download_assets.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n expected_file_names = [\n \"images.zip\",\n \"images-base.zip\",\n ]\n for file_name in expected_file_names:\n path = os.path.join(release.download_folder, file_name)\n self.assertTrue(os.path.isfile(path))\n\n # Preprocess data\n ti = env.run_task(workflow.preprocess_data.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n expected_file_names = [\n \"country-data.jsonl.gz\",\n \"institution-data.jsonl.gz\",\n ]\n for file_name in expected_file_names:\n path = os.path.join(release.transform_folder, \"intermediate\", file_name)\n self.assertTrue(os.path.isfile(path))\n\n # Build indexes\n ti = env.run_task(workflow.build_indexes.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n expected_file_names = [\n \"country-index.jsonl.gz\",\n \"institution-index.jsonl.gz\",\n ]\n for file_name in expected_file_names:\n path = os.path.join(release.transform_folder, \"intermediate\", file_name)\n self.assertTrue(os.path.isfile(path))\n\n # Download logos\n ti = env.run_task(workflow.download_logos.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Download wiki descriptions\n ti = env.run_task(workflow.download_wiki_descriptions.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Build datasets\n ti = env.run_task(workflow.build_datasets.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n build_folder = os.path.join(release.transform_folder, \"build\")\n expected_files = make_expected_build_files(build_folder)\n print(\"Checking expected transformed files\")\n for file in expected_files:\n print(f\"\\t{file}\")\n self.assertTrue(os.path.isfile(file))\n\n # Check that full dataset zip file exists\n archives = [\"data.zip\", \"images.zip\", \"coki-oa-dataset.zip\"]\n for file_name in archives:\n latest_file = os.path.join(release.transform_folder, \"out\", file_name)\n print(f\"\\t{latest_file}\")\n self.assertTrue(os.path.isfile(latest_file))\n\n # Publish Zenodo version\n ti = env.run_task(workflow.publish_zenodo_version.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Upload data to bucket\n ti = env.run_task(workflow.upload_dataset.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n blob_name = 
f\"{workflow.version}/data.zip\"\n self.assert_blob_exists(data_bucket, blob_name)\n blob_name = f\"{workflow.version}/images.zip\"\n self.assert_blob_exists(data_bucket, blob_name)\n\n # Trigger repository dispatch\n ti = env.run_task(workflow.repository_dispatch.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n mock_trigger_repository_dispatch.called_once_with(github_token, \"data-update/develop\")\n mock_trigger_repository_dispatch.called_once_with(github_token, \"data-update/staging\")\n mock_trigger_repository_dispatch.called_once_with(github_token, \"data-update/production\")\n\n # Test that all workflow data deleted\n ti = env.run_task(workflow.cleanup.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n self.assert_cleanup(release.workflow_folder)\n\n ####################################\n # Test workflow functions\n ####################################\n\n def save_mock_data(self, path: str, test_data):\n with jsonlines.open(path, mode=\"w\") as writer:\n writer.write_all(test_data)\n df = pd.DataFrame(test_data)\n return df\n\n def test_load_data_glob(self):\n with CliRunner().isolated_filesystem() as t:\n path = os.path.join(t, \"data-000000000000.jsonl.gz\")\n save_jsonl_gz(path, [{\"name\": \"Jim\"}, {\"name\": \"David\"}, {\"name\": \"Jane\"}])\n\n path = os.path.join(t, \"data-000000000001.jsonl.gz\")\n save_jsonl_gz(path, [{\"name\": \"Joe\"}, {\"name\": \"Blogs\"}, {\"name\": \"Daniels\"}])\n\n # Compare\n expected = [\n {\"name\": \"Jim\"},\n {\"name\": \"David\"},\n {\"name\": \"Jane\"},\n {\"name\": \"Joe\"},\n {\"name\": \"Blogs\"},\n {\"name\": \"Daniels\"},\n ]\n\n actual = load_data_glob(os.path.join(t, \"data-*.jsonl.gz\"))\n self.assertEqual(expected, actual)\n\n def test_load_data(self):\n entity_type = \"country\"\n with CliRunner().isolated_filesystem() as t:\n # Save Data\n path = os.path.join(t, f\"{entity_type}-index.jsonl\")\n df = self.save_mock_data(path, self.country_index)\n\n # Load csv\n actual_df = load_data(path)\n\n # Compare\n expected_countries = df.to_dict(\"records\")\n actual_countries = actual_df.to_dict(\"records\")\n self.assertEqual(expected_countries, actual_countries)\n\n def test_update_df_with_percentages(self):\n keys = [(\"hello\", \"n_outputs\"), (\"world\", \"n_outputs\")]\n df = pd.DataFrame([{\"n_hello\": 20, \"n_world\": 50, \"n_outputs\": 100}])\n update_df_with_percentages(df, keys)\n expected = {\"n_hello\": 20, \"n_world\": 50, \"n_outputs\": 100, \"p_hello\": 20, \"p_world\": 50}\n actual = df.to_dict(orient=\"records\")[0]\n self.assertEqual(expected, actual)\n\n def test_make_index_df(self):\n with CliRunner().isolated_filesystem() as t:\n # Country\n entity_type = \"country\"\n df_index, df_data = load_index_and_data(entity_type, self.country_index, self.country_data)\n\n expected = [\n {\n \"alpha2\": \"NZ\",\n \"entity_type\": \"country\",\n \"id\": \"NZL\",\n \"name\": \"New Zealand\",\n \"wikipedia_url\": \"https://en.wikipedia.org/wiki/New_Zealand\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"n_citations\": 354,\n \"n_outputs\": 200,\n \"n_outputs_open\": 93,\n \"n_outputs_publisher_open\": 74,\n \"n_outputs_publisher_open_only\": 25,\n \"n_outputs_both\": 49,\n \"n_outputs_other_platform_open\": 68,\n \"n_outputs_other_platform_open_only\": 19,\n \"n_outputs_closed\": 107,\n \"n_outputs_black\": 180.0,\n \"n_outputs_oa_journal\": 39,\n \"n_outputs_hybrid\": 19,\n \"n_outputs_no_guarantees\": 16,\n \"n_outputs_preprint\": 20,\n \"n_outputs_domain\": 54,\n 
\"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 46.5,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 12.5,\n \"p_outputs_both\": 24.5,\n \"p_outputs_other_platform_open\": 34.0,\n \"p_outputs_other_platform_open_only\": 9.5,\n \"p_outputs_closed\": 53.5,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 52.702702702702695,\n \"p_outputs_hybrid\": 25.675675675675674,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 29.411764705882355,\n \"p_outputs_domain\": 79.41176470588235,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n }\n ]\n print(\"Checking country records:\")\n actual = df_index.to_dict(\"records\")\n for e, a in zip(expected, actual):\n self.assertDictEqual(e, a)\n\n # Institution\n entity_type = \"institution\"\n df_index, df_data = load_index_and_data(entity_type, self.institution_index, self.institution_data)\n expected = [\n {\n \"entity_type\": \"institution\",\n \"id\": \"02n415q13\",\n \"name\": \"Curtin University\",\n \"url\": \"https://curtin.edu.au/\",\n \"wikipedia_url\": \"https://en.wikipedia.org/wiki/Curtin_University\",\n \"country_code\": \"AUS\",\n \"country_name\": \"Australia\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"institution_type\": \"Education\",\n \"n_citations\": 354,\n \"n_outputs\": 200,\n \"n_outputs_open\": 93,\n \"n_outputs_publisher_open\": 74,\n \"n_outputs_publisher_open_only\": 25,\n \"n_outputs_both\": 49,\n \"n_outputs_other_platform_open\": 68,\n \"n_outputs_other_platform_open_only\": 19,\n \"n_outputs_closed\": 107,\n \"n_outputs_black\": 180,\n \"n_outputs_oa_journal\": 39,\n \"n_outputs_hybrid\": 19,\n \"n_outputs_no_guarantees\": 16,\n \"n_outputs_preprint\": 20,\n \"n_outputs_domain\": 54,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 46.5,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 12.5,\n \"p_outputs_both\": 24.5,\n \"p_outputs_other_platform_open\": 34.0,\n \"p_outputs_other_platform_open_only\": 9.5,\n \"p_outputs_closed\": 53.5,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 52.702702702702695,\n \"p_outputs_hybrid\": 25.675675675675674,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 29.411764705882355,\n \"p_outputs_domain\": 79.41176470588235,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n \"acronyms\": [],\n }\n ]\n\n print(\"Checking institution records:\")\n actual = df_index.to_dict(\"records\")\n for e, a in zip(expected, actual):\n self.assertDictEqual(e, a)\n\n def test_update_index_with_logos(self):\n with CliRunner().isolated_filesystem() as t:\n sizes = [\"sm\", \"md\", \"lg\"]\n\n # Country table\n entity_type = \"country\"\n df_index, _ = load_index_and_data(entity_type, self.country_index, self.country_data)\n update_index_with_logos(t, entity_type, df_index)\n\n for i, row in df_index.iterrows():\n for size in sizes:\n # Check that logo key created\n key = f\"logo_{size}\"\n self.assertTrue(key in row)\n\n # Redirect to md size\n if size == \"lg\":\n size = \"md\"\n\n # Check that correct logo path exists\n item_id = row[\"id\"]\n expected_path = f\"logos/{entity_type}/{size}/{item_id}.svg\"\n actual_path = row[key]\n self.assertEqual(expected_path, actual_path)\n\n # Institution table\n entity_type = 
\"institution\"\n institution_index = self.institution_index + [\n {\n \"id\": \"https://ror.org/12345\",\n \"name\": \"Foo University\",\n \"country_name\": \"Australia\",\n \"country_code\": \"AUS\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"url\": None,\n \"wikipedia_url\": None,\n \"institution_type\": \"Education\",\n }\n ]\n institution_data = self.institution_data + [\n {\n \"id\": \"https://ror.org/12345\",\n \"year\": 2020,\n \"n_citations\": \"121\",\n \"n_outputs\": \"100\",\n \"n_outputs_open\": \"48\",\n \"n_outputs_publisher_open\": \"37\",\n \"n_outputs_publisher_open_only\": \"11\",\n \"n_outputs_both\": \"26\",\n \"n_outputs_other_platform_open\": \"37\",\n \"n_outputs_other_platform_open_only\": \"11\",\n \"n_outputs_closed\": \"52\",\n \"n_outputs_black\": \"90\",\n \"n_outputs_oa_journal\": \"19\",\n \"n_outputs_hybrid\": \"10\",\n \"n_outputs_no_guarantees\": \"8\",\n },\n ]\n\n # Create index\n df_index, _ = load_index_and_data(entity_type, institution_index, institution_data)\n sizes = [\"sm\", \"md\", \"lg\"]\n with vcr.use_cassette(test_fixtures_folder(\"oa_web_workflow\", \"test_make_logos.yaml\")):\n df_index = update_index_with_logos(t, entity_type, df_index)\n curtin_row = df_index[df_index[\"id\"] == \"02n415q13\"].iloc[0]\n foo_row = df_index[df_index[\"id\"] == \"12345\"].iloc[0]\n for size in sizes:\n # Check that logo was added to dataframe\n key = f\"logo_{size}\"\n self.assertTrue(key in curtin_row)\n self.assertTrue(key in foo_row)\n\n # Check that correct path created\n item_id = curtin_row[\"id\"]\n fmt = \"jpg\"\n if size == \"lg\":\n fmt = \"png\"\n expected_curtin_path = f\"logos/{entity_type}/{size}/{item_id}.{fmt}\"\n expected_foo_path = f\"unknown.svg\"\n self.assertEqual(expected_curtin_path, curtin_row[key])\n self.assertEqual(expected_foo_path, foo_row[key])\n\n # Check that downloaded logo exists\n full_path = os.path.join(t, \"images\", expected_curtin_path)\n self.assertTrue(os.path.isfile(full_path))\n\n def test_save_index_df(self):\n with CliRunner().isolated_filesystem() as t:\n for entity_type, index, data, entity_ids in self.entities:\n # Load index\n df_index = pd.DataFrame(index)\n preprocess_index_df(entity_type, df_index)\n\n # Load data\n df_data = pd.DataFrame(data)\n preprocess_data_df(entity_type, df_data)\n\n # Make index\n df_index = make_index_df(entity_type, df_index, df_data)\n update_index_with_logos(t, entity_type, df_index)\n\n # Make entities\n entities = make_entities(entity_type, df_index, df_data)\n\n # Save index from entities\n data_path = os.path.join(t, \"data\")\n os.makedirs(data_path, exist_ok=True)\n file_path = os.path.join(data_path, f\"{entity_type}.json\")\n data = make_index(entity_type, entities)\n save_json(file_path, data)\n self.assertTrue(os.path.isfile(file_path))\n\n def test_make_entities(self):\n with CliRunner().isolated_filesystem() as t:\n # Country\n entity_type = \"country\"\n\n # Load index\n df_index = pd.DataFrame(self.country_index)\n preprocess_index_df(entity_type, df_index)\n\n # Load data\n df_data = pd.DataFrame(self.country_data)\n preprocess_data_df(entity_type, df_data)\n\n # Make index and entities\n df_index = make_index_df(entity_type, df_index, df_data)\n entities = make_entities(entity_type, df_index, df_data)\n\n repositories = [\n {\"id\": \"PubMed Central\", \"total_outputs\": 30, \"category\": \"Domain\", \"home_repo\": False},\n {\"id\": \"Europe PMC\", \"total_outputs\": 24, \"category\": \"Domain\", \"home_repo\": False},\n 
{\"id\": \"arXiv\", \"total_outputs\": 20, \"category\": \"Preprint\", \"home_repo\": False},\n ]\n expected = [\n {\n \"id\": \"NZL\",\n \"name\": \"New Zealand\",\n \"entity_type\": entity_type,\n \"description\": {\n \"license\": Description.license,\n \"text\": None,\n \"url\": \"https://en.wikipedia.org/wiki/New_Zealand\",\n },\n \"wikipedia_url\": \"https://en.wikipedia.org/wiki/New_Zealand\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"end_year\": 2021,\n \"start_year\": 2020,\n \"stats\": {\n \"n_citations\": 354,\n \"n_outputs\": 200,\n \"n_outputs_open\": 93,\n \"n_outputs_publisher_open\": 74,\n \"n_outputs_publisher_open_only\": 25,\n \"n_outputs_both\": 49,\n \"n_outputs_other_platform_open\": 68,\n \"n_outputs_other_platform_open_only\": 19,\n \"n_outputs_closed\": 107,\n \"n_outputs_black\": 180,\n \"n_outputs_oa_journal\": 39,\n \"n_outputs_hybrid\": 19,\n \"n_outputs_no_guarantees\": 16,\n \"n_outputs_preprint\": 20,\n \"n_outputs_domain\": 54,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 46.5,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 12.5,\n \"p_outputs_both\": 24.5,\n \"p_outputs_other_platform_open\": 34.0,\n \"p_outputs_other_platform_open_only\": 9.5,\n \"p_outputs_closed\": 53.5,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 52.702702702702695,\n \"p_outputs_hybrid\": 25.675675675675674,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 29.411764705882355,\n \"p_outputs_domain\": 79.41176470588235,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n },\n \"years\": [\n {\n \"year\": 2020,\n \"date\": \"2020-12-31\",\n \"stats\": {\n \"n_citations\": 121,\n \"n_outputs\": 100,\n \"n_outputs_open\": 48,\n \"n_outputs_publisher_open\": 37,\n \"n_outputs_publisher_open_only\": 11,\n \"n_outputs_both\": 26,\n \"n_outputs_other_platform_open\": 37,\n \"n_outputs_other_platform_open_only\": 11,\n \"n_outputs_closed\": 52,\n \"n_outputs_black\": 90,\n \"n_outputs_oa_journal\": 19,\n \"n_outputs_hybrid\": 10,\n \"n_outputs_no_guarantees\": 8,\n \"n_outputs_preprint\": 10,\n \"n_outputs_domain\": 27,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 48.0,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 11.0,\n \"p_outputs_both\": 26.0,\n \"p_outputs_other_platform_open\": 37.0,\n \"p_outputs_other_platform_open_only\": 11.0,\n \"p_outputs_closed\": 52.0,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 51.35135135135135,\n \"p_outputs_hybrid\": 27.027027027027028,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 27.027027027027028,\n \"p_outputs_domain\": 72.97297297297297,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n },\n },\n {\n \"year\": 2021,\n \"date\": \"2021-12-31\",\n \"stats\": {\n \"n_citations\": 233,\n \"n_outputs\": 100,\n \"n_outputs_open\": 45,\n \"n_outputs_publisher_open\": 37,\n \"n_outputs_publisher_open_only\": 14,\n \"n_outputs_both\": 23,\n \"n_outputs_other_platform_open\": 31,\n \"n_outputs_other_platform_open_only\": 8,\n \"n_outputs_closed\": 55,\n \"n_outputs_black\": 90,\n \"n_outputs_oa_journal\": 20,\n \"n_outputs_hybrid\": 9,\n \"n_outputs_no_guarantees\": 8,\n \"n_outputs_preprint\": 10,\n \"n_outputs_domain\": 27,\n 
\"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 45.0,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 14.000000000000002,\n \"p_outputs_both\": 23.0,\n \"p_outputs_other_platform_open\": 31.0,\n \"p_outputs_other_platform_open_only\": 8.0,\n \"p_outputs_closed\": 55.00000000000001,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 54.054054054054056,\n \"p_outputs_hybrid\": 24.324324324324326,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 32.25806451612903,\n \"p_outputs_domain\": 87.09677419354838,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n },\n },\n ],\n \"repositories\": repositories,\n }\n ]\n\n for e_dict, a_entity in zip(expected, entities):\n a_dict = a_entity.to_dict()\n self.assertDictEqual(e_dict, a_dict)\n\n # Institution\n entity_type = \"institution\"\n\n # Load index\n df_index = pd.DataFrame(self.institution_index)\n preprocess_index_df(entity_type, df_index)\n\n # Load data\n df_data = pd.DataFrame(self.institution_data)\n preprocess_data_df(entity_type, df_data)\n\n # Make index and entities\n df_index = make_index_df(entity_type, df_index, df_data)\n entities = make_entities(entity_type, df_index, df_data)\n\n expected = [\n {\n \"id\": \"02n415q13\",\n \"name\": \"Curtin University\",\n \"country_code\": \"AUS\",\n \"country_name\": \"Australia\",\n \"description\": {\n \"license\": Description.license,\n \"text\": None,\n \"url\": \"https://en.wikipedia.org/wiki/Curtin_University\",\n },\n \"entity_type\": entity_type,\n \"url\": \"https://curtin.edu.au/\",\n \"wikipedia_url\": \"https://en.wikipedia.org/wiki/Curtin_University\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"institution_type\": \"Education\",\n \"end_year\": 2021,\n \"start_year\": 2020,\n \"stats\": {\n \"n_citations\": 354,\n \"n_outputs\": 200,\n \"n_outputs_open\": 93,\n \"n_outputs_publisher_open\": 74,\n \"n_outputs_publisher_open_only\": 25,\n \"n_outputs_both\": 49,\n \"n_outputs_other_platform_open\": 68,\n \"n_outputs_other_platform_open_only\": 19,\n \"n_outputs_closed\": 107,\n \"n_outputs_black\": 180,\n \"n_outputs_oa_journal\": 39,\n \"n_outputs_hybrid\": 19,\n \"n_outputs_no_guarantees\": 16,\n \"n_outputs_preprint\": 20,\n \"n_outputs_domain\": 54,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 46.5,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 12.5,\n \"p_outputs_both\": 24.5,\n \"p_outputs_other_platform_open\": 34.0,\n \"p_outputs_other_platform_open_only\": 9.5,\n \"p_outputs_closed\": 53.5,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 52.702702702702695,\n \"p_outputs_hybrid\": 25.675675675675674,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 29.411764705882355,\n \"p_outputs_domain\": 79.41176470588235,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n },\n \"years\": [\n {\n \"year\": 2020,\n \"date\": \"2020-12-31\",\n \"stats\": {\n \"n_citations\": 121,\n \"n_outputs\": 100,\n \"n_outputs_open\": 48,\n \"n_outputs_publisher_open\": 37,\n \"n_outputs_publisher_open_only\": 11,\n \"n_outputs_both\": 26,\n \"n_outputs_other_platform_open\": 37,\n \"n_outputs_other_platform_open_only\": 11,\n \"n_outputs_closed\": 52,\n \"n_outputs_black\": 90,\n \"n_outputs_oa_journal\": 
19,\n \"n_outputs_hybrid\": 10,\n \"n_outputs_no_guarantees\": 8,\n \"n_outputs_preprint\": 10,\n \"n_outputs_domain\": 27,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 48.0,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 11.0,\n \"p_outputs_both\": 26.0,\n \"p_outputs_other_platform_open\": 37.0,\n \"p_outputs_other_platform_open_only\": 11.0,\n \"p_outputs_closed\": 52.0,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 51.35135135135135,\n \"p_outputs_hybrid\": 27.027027027027028,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 27.027027027027028,\n \"p_outputs_domain\": 72.97297297297297,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n },\n },\n {\n \"year\": 2021,\n \"date\": \"2021-12-31\",\n \"stats\": {\n \"n_citations\": 233,\n \"n_outputs\": 100,\n \"n_outputs_open\": 45,\n \"n_outputs_publisher_open\": 37,\n \"n_outputs_publisher_open_only\": 14,\n \"n_outputs_both\": 23,\n \"n_outputs_other_platform_open\": 31,\n \"n_outputs_other_platform_open_only\": 8,\n \"n_outputs_closed\": 55,\n \"n_outputs_black\": 90,\n \"n_outputs_oa_journal\": 20,\n \"n_outputs_hybrid\": 9,\n \"n_outputs_no_guarantees\": 8,\n \"n_outputs_preprint\": 10,\n \"n_outputs_domain\": 27,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 45.0,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 14.000000000000002,\n \"p_outputs_both\": 23.0,\n \"p_outputs_other_platform_open\": 31.0,\n \"p_outputs_other_platform_open_only\": 8.0,\n \"p_outputs_closed\": 55.00000000000001,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 54.054054054054056,\n \"p_outputs_hybrid\": 24.324324324324326,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 32.25806451612903,\n \"p_outputs_domain\": 87.09677419354838,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n },\n },\n ],\n \"repositories\": repositories,\n }\n ]\n\n for e_dict, a_entity in zip(expected, entities):\n a_dict = a_entity.to_dict()\n self.assertDictEqual(e_dict, a_dict)\n\n def test_save_entities(self):\n with CliRunner().isolated_filesystem() as t:\n for entity_type, index, data, entity_ids in self.entities:\n # Read data\n df_index = pd.DataFrame(index)\n preprocess_index_df(entity_type, df_index)\n\n df_data = pd.DataFrame(data)\n preprocess_data_df(entity_type, df_data)\n\n # Save entities\n df_index = make_index_df(entity_type, df_index, df_data)\n entities = make_entities(entity_type, df_index, df_data)\n path = os.path.join(t, \"data\", entity_type)\n save_entities(path, entities)\n\n # Check that entity json files are saved\n for entity_id in entity_ids:\n file_path = os.path.join(path, f\"{entity_id}.json\")\n print(f\"Assert exists: {file_path}\")\n self.assertTrue(os.path.isfile(file_path))\n\n\ndef make_expected_build_files(base_path: str) -> List[str]:\n countries = [\"AUS\", \"NZL\"]\n institutions = [\"03b94tp07\", \"02n415q13\"] # Auckland, Curtin\n categories = [\"country\"] * len(countries) + [\"institution\"] * len(institutions)\n entity_ids = countries + institutions\n expected = []\n\n # Add base data files\n data_path = os.path.join(base_path, \"data\")\n file_names = [\"stats.json\", \"country.json\", \"institution.json\", \"index.json\"]\n for file_name in file_names:\n 
expected.append(os.path.join(data_path, file_name))\n\n # Add country and institution specific data files\n for entity_type, entity_id in zip(categories, entity_ids):\n path = os.path.join(data_path, entity_type, f\"{entity_id}.json\")\n expected.append(path)\n\n # Add logos\n for entity_type, entity_id in zip(categories, entity_ids):\n for size in [\"sm\", \"md\", \"lg\"]:\n if entity_type == \"country\" and size == \"lg\":\n continue\n\n file_type = \"svg\"\n if entity_type == \"institution\":\n file_type = \"jpg\"\n if size == \"lg\":\n file_type = \"png\"\n\n path = os.path.join(base_path, \"images\", \"logos\", entity_type, size, f\"{entity_id}.{file_type}\")\n expected.append(path)\n\n return expected\n","sub_path":"academic_observatory_workflows/workflows/tests/test_oa_web_workflow.py","file_name":"test_oa_web_workflow.py","file_ext":"py","file_size_in_byte":58361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"241424940","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.login_view, name='login_view'),\n path('ttt/', views.tesst, name='tesst'),\n path('register/', views.home_view, name='home_view'),\n path('/tes/', views.tes, name='tes'),\n path('/tes2/', views.tes2, name='tes2'),\n path('/register-courses/', views.to_regCourses, name='register-courses'),\n path('/registered-courses/', views.regCourses, name='registered-courses'),\n path('//register/', views.regcourseview, name='register'),\n]\n","sub_path":"Profiler/Profiler1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"149074370","text":"from access_tokens import tokens\nfrom django.http import Http404\nfrom django.conf import settings\n\n\ndef validate_token(function):\n def wrap(request, *args, **kwargs):\n token = kwargs.get('token')\n case_id = kwargs.get('case_id')\n validate = tokens.validate(\n token, scope=(), key=case_id,\n salt=settings.TOKEN_SALT, max_age=None\n )\n if validate:\n return function(request, *args, **kwargs)\n else:\n raise Http404\n return wrap\n","sub_path":"casereport/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"530244228","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 3 22:57:08 2020\n\n@author: viktor\n\"\"\"\n\nn=int(input(\"\"))\nx=1\nc=1\nprint(1)\nif n>1:\n while x text.txt\n# to get a list sorted by frequency:\n# python3 frequency.py < ru_pud-ud-test.conllu | sort -nr > text.txt\n# or it's also possible to use the for loop you advised, but I had\n# difficulty using your code as I kept warning 'list indices must be integers or slices, not tuple'\n# so I wrote the same with the format function\n\n\nfreq = []\n\nfor w in vocab:\n\tfreq.append((vocab[w], w))\n\n# freq.sort(reverse=True)\n\nfor i in freq:\n\tprint ('{}\\t{}'.format(i[0], i[1]))\n\n# now I have a unique word list arranged by frequency\n","sub_path":"2018-komp-ling/practicals/translit/frequency.py","file_name":"frequency.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"585673755","text":"import numpy as np\nimport sys\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nimport torch.utils.data as Data\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms as tf\nimport Model\n\n# parameters\nEPOCH = 1\nBATCH_SIZE = 256\nLEARNING_RATE = 0.001\n\ndef flipped_data(imgs, y):\n new_imgs = imgs.copy()\n new_y = y.copy()\n num_data = new_imgs.shape[0]\n for i in range(num_data):\n new_imgs[i,0,:,:] = new_imgs[i,0,:,::-1]\n return new_imgs, new_y\n\ndef parse_csv(label_path):\n raw_data_fp = open(label_path,'r')\n lines = raw_data_fp.readlines()[1:]\n num_data = len(lines)\n\n raw_imgs = np.empty(shape=(num_data,1,48*48), dtype=float)\n raw_y = np.zeros(shape=(num_data),dtype=np.int64)\n for i, line in enumerate(lines):\n nums = line.split(',')\n raw_y[i] = int(nums[0])\n raw_imgs[i,:,:] = np.array([float(num) for num in nums[1].split(' ')]) /255.0\n \n raw_imgs = raw_imgs.reshape((num_data,1,48,48))\n \n return raw_imgs, raw_y\n '''\ndef augment_data(r_imgs, r_y):\n #f_imgs, f_y = flipped_data(r_imgs, r_y)\n #imgs = np.concatenate((r_imgs, f_imgs), axis=0)\n imgs = torch.tensor(imgs).type(torch.FloatTensor)\n #y = np.concatenate((r_y, f_y), axis=0)\n y = torch.tensor(y).type(torch.LongTensor)\n transform = tf.Compose([\n tf.ToPILImage(),\n tf.ColorJitter(brightness=0.4, contrast=0.3, saturation=0.3, hue=0.3), \n tf.RandomHorizontalFlip(),\n tf.RandomRotation(30),\n tf.RandomResizedCrop(48,scale=(0.85,1)),\n tf.ToTensor()\n ])\n aug_imgs1 = imgs.clone()\n for i in range(imgs.size()[0]):\n aug_imgs1[i, :, :, :] = transform(aug_imgs1[i])\n aug_imgs2 = imgs.clone()\n for i in range(imgs.size()[0]):\n aug_imgs2[i, :, :, :] = transform(aug_imgs2[i])\n imgs = torch.cat((imgs,aug_imgs1, aug_imgs2), 0)\n y = torch.cat((y, y, y), 0)\n #imgs = torch.cat((imgs,aug_imgs1), 0)\n #y = torch.cat((y, y), 0)\n print(imgs.size(), y.size())\n return imgs, y\n '''\nclass TrainDataset(Dataset):\n def __init__(self, raw_imgs, raw_y):\n aug_imgs, aug_y = flipped_data(raw_imgs, raw_y)\n imgs = np.concatenate((raw_imgs, aug_imgs), axis=0)\n self.imgs = torch.tensor(imgs).type(torch.FloatTensor)\n y = np.concatenate((raw_y, aug_y), axis=0)\n self.y = torch.tensor(y).type(torch.LongTensor)\n self.transform = tf.Compose([\n tf.ToPILImage(),\n tf.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n tf.RandomRotation(30), \n tf.RandomResizedCrop(48,scale=(0.8,1)),\n tf.ToTensor()\n ])\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, idx):\n #return self.imgs[idx], self.y[idx]\n return self.transform(self.imgs[idx]).type(torch.FloatTensor), self.y[idx]\n\nif __name__ == \"__main__\":\n raw_imgs, raw_y = parse_csv(sys.argv[1])\n imgs_shape = raw_imgs.shape\n y_shape = raw_y.shape\n c = np.concatenate((raw_imgs.reshape(len(raw_imgs), -1), raw_y.reshape(len(raw_y),1)), axis=1)\n np.random.shuffle(c)\n raw_imgs = (c[:, :-1]).reshape(imgs_shape)\n raw_y = (c[:, -1]).reshape(y_shape)\n \n num_val_data = 0#raw_imgs.shape[0] // 12\n val_imgs = raw_imgs[:num_val_data,:,:]\n val_y = raw_y[:num_val_data]\n\n train_imgs = raw_imgs[num_val_data:,:,:,:]\n train_y = raw_y[num_val_data:]\n a = train_imgs.shape[0]\n\n training_set = TrainDataset(train_imgs, train_y)\n val_set = Data.TensorDataset(\n torch.tensor(val_imgs).type(torch.FloatTensor), \n torch.tensor(val_y).type(torch.LongTensor))\n train_loader = DataLoader(\n training_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)\n 
val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)\n \n # train\n device = torch.device('cuda')\n model = Model.MyCNN()\n try:\n model.load_state_dict(torch.load('model_params.pkl'))\n print('use exist parameters')\n except:\n print('new model, no exist parameters')\n pass\n model.to(device)\n optimizer = Adam(model.parameters(), lr=LEARNING_RATE)\n loss_func = nn.CrossEntropyLoss()\n\n print('start training...')\n model.train()\n\n high_val_acc = 0.67\n for epoch in range(EPOCH):\n train_loss, train_acc = [], []\n torch.cuda.empty_cache()\n for step, (img, target) in enumerate(train_loader):\n #print(img.size(), target.size()) \n img_cuda = img.to(device, dtype=torch.float)\n target_cuda = target.to(device)\n\n optimizer.zero_grad()\n output = model(img_cuda)\n #print(output.size(), target_cuda.size())\n loss = loss_func(output, target_cuda)\n loss.backward()\n optimizer.step()\n\n predict = torch.max(output, 1)[1]\n acc = np.mean((target_cuda == predict).cpu().numpy())\n train_acc.append(acc)\n train_loss.append(loss.item())\n acc = np.mean(train_acc)\n val_acc = 0\n if num_val_data > 0:\n model.eval()\n for _, (img, target) in enumerate(val_loader):\n img_cuda = img.to(device, dtype=torch.float)\n target_cuda = target.to(device)\n output = model(img_cuda)\n predict = torch.max(output, 1)[1]\n val_acc += np.sum((target_cuda == predict).cpu().numpy())\n val_acc /= val_set.__len__()\n if val_acc > high_val_acc:\n high_val_acc = val_acc\n torch.save(model.state_dict(), 'model_params.pkl')\n print('saved new parameters')\n model.train()\n if epoch % 10 == 0:\n torch.save(model.state_dict(), 'model_params.pkl')\n print('saved new parameters')\n print(\"Epoch: {}| Loss: {:.4f}| Acc: {:.4f}| Val Acc: {:.4f}\"\\\n .format(epoch + 1, np.mean(train_loss), acc, val_acc))\n \n model.eval()\n # save parameters\n # torch.save(model, 'model.pkl') # entire net\n torch.save(model.state_dict(), 'model_params.pkl') # parameters\n \n\n","sub_path":"hw3/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"17669339","text":"import glob\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport seaborn as sns\nimport subprocess\nimport sys\nimport textwrap\nsys.path.append(os.path.expandvars(r'%UserProfile%\\Desktop\\Development\\_libs'))\nfrom AgileExportProcessor.modules.agile_export_processor import AgileExportProcessor\nfrom DexcomMappings.modules.codes import Codes\nfrom DexcomMappings.modules.products import Product\nfrom PeriodicReports.periodic_reports import PeriodicReport\nfrom collections import OrderedDict\nfrom jinja2 import Environment\nfrom jinja2 import FileSystemLoader\n\nprod = Product()\ncodes = Codes()\n\n\ndef work(pr):\n # Process exports\n aep = AgileExportProcessor(glob.glob(os.path.join(pr.working_directory, 'Periodic Reports*.csv')))\n\n # Import Shipments Data\n g5_tx_1_2_sns = pd.read_csv(os.path.join(pr.working_directory, 'shipments/serial_number-lot_number_shipment_tracking.csv'),\n low_memory=False, usecols=['Serial Number'])['Serial Number'].unique()\n\n # Get problem_reports dataframe\n problem_reports = aep.get_problem_reports().copy()\n\n # Limit to G5 Transmitter complaints only\n problem_reports = problem_reports[problem_reports.product_line.isin(prod.tags_search('G5 & Transmitter'))]\n problem_reports['product_type'] = 'G5 Transmitter'\n\n # Limit to complaints only\n problem_reports = problem_reports[problem_reports.problem_code.isin(codes.complaint_classification_codes())].copy()\n\n # Extract transmitter serial number\n problem_reports['ex_transmitter_sn'] = AgileExportProcessor.determine_transmitter_serial_number(problem_reports, complaint_deep_search=True)\n\n # Determine if 1.2 TX\n problem_reports.loc[pd.notnull(problem_reports.ex_transmitter_sn), 'is_1.2_tx'] = problem_reports.ex_transmitter_sn.isin(g5_tx_1_2_sns)\n\n # Categories\n problem_reports.loc[(problem_reports.problem_code.isin([3008,4001,5011,5012,5013,5017,5018,5018,5019,5020,5021,5023,5024,5102,5104])), 'bucket'] = problem_reports.problem_code_and_summary.str.extract('.+? 
(.+)', expand=False)\n problem_reports.loc[(problem_reports.problem_code == 42), 'bucket'] = \"???'s / Hourglass\"\n problem_reports.loc[(problem_reports.problem_code.isin([25, 46])), 'bucket'] = \"Adhesive Issue\"\n problem_reports.loc[(problem_reports.problem_code.isin([52, 80, 81, 82, 83, 85])), 'bucket'] = \"Audio and Vibration\"\n problem_reports.loc[(problem_reports.problem_code.isin([28, 30, 41])), 'bucket'] = \"Battery Charge Issue\"\n problem_reports.loc[(problem_reports.problem_code.isin([49, 2015])), 'bucket'] = \"Buttons Issue\"\n problem_reports.loc[(problem_reports.problem_code.isin([1, 23, 65, 66, 67, 68, 112])), 'bucket'] = \"Calibration Issues\"\n problem_reports.loc[(problem_reports.problem_code.isin([6, 7, 115, 3005, 3006, 5010, 5015, 5016])), 'bucket'] = \"Connectivity Issues\"\n problem_reports.loc[(problem_reports.problem_code.isin([10, 47])), 'bucket'] = \"Difficulty Inserting Sensor/Transmitter\"\n problem_reports.loc[(problem_reports.problem_code.isin([31, 51, 53])), 'bucket'] = \"Display Issue\"\n problem_reports.loc[(problem_reports.problem_code.isin([37, 70, 114])), 'bucket'] = \"Error Icon\"\n problem_reports.loc[(problem_reports.problem_code.isin([71])), 'bucket'] = \"Failed Sensor after Calibration\"\n problem_reports.loc[(problem_reports.problem_code.isin([13])), 'bucket'] = \"Failed Sensor before Calibration\"\n problem_reports.loc[(problem_reports.problem_code.isin([40, 50])), 'bucket'] = \"Firmware-related\"\n problem_reports.loc[(problem_reports.problem_code.isin([12])), 'bucket'] = \"Inaccuracies\"\n problem_reports.loc[(problem_reports.problem_code.isin([89])), 'bucket'] = \"Low TX Battery\"\n problem_reports.loc[(problem_reports.problem_code.isin([113, 2009])), 'bucket'] = \"Pairing Issues\"\n problem_reports.loc[(problem_reports.problem_code.isin([24,32,45,88,94,95,96,105,106,108,3001,3003,3004,5000,5001,5002,5006,5008,5009,3500,4000,5014,5100,4002])), 'bucket'] = \"Other\"\n problem_reports.loc[(problem_reports.problem_code.isin([9, 17, 36, 48, 73, 76, 77, 91, 97, 109, 110, 2010, 2011, 2013, 2014])), 'bucket'] = \"Physical Damage\"\n problem_reports.loc[(problem_reports.problem_code.isin([20, 21, 22, 75, 1502])), 'bucket'] = \"Physiological\"\n problem_reports.loc[(problem_reports.problem_code.isin([29, 111])), 'bucket'] = \"Receiver Won't Turn On/Ceases to Function\"\n problem_reports.loc[(problem_reports.problem_code.isin([90])), 'bucket'] = \"Transmitter Failed Icon\"\n\n # Generation\n problem_reports['generation'] = 'G5 Transmitter'\n\n # Generate dataframes\n def generate_dfs(start_date, end_date):\n ''' Generates three dataframes: one with full pivot, one without Low TX Battery, and one without Pairing Issues.'''\n tx_buckets = [\"Low TX Battery\", \"Pairing Issues\", \"Connectivity Issues\", \"Transmitter Failed Icon\", \"Physical Damage\"]\n\n problem_reports_pivot = problem_reports.copy()\n\n problem_reports_pivot.loc[~problem_reports_pivot.bucket.isin(tx_buckets), 'bucket'] = 'Other'\n date_of_issue_selector = (problem_reports_pivot.date_of_issue >= start_date) & (problem_reports_pivot.date_of_issue < end_date)\n \n # Get unknown counts\n all_unknown_tx_sn_count = len(problem_reports_pivot[date_of_issue_selector & pd.isnull(problem_reports.ex_transmitter_sn)])\n without_low_tx_battery_unknown_tx_sn_count = len(problem_reports_pivot[date_of_issue_selector & pd.isnull(problem_reports.ex_transmitter_sn) & (problem_reports.bucket != 'Low TX Battery')])\n without_pairing_issues_unknown_tx_sn_count = 
len(problem_reports_pivot[date_of_issue_selector & pd.isnull(problem_reports.ex_transmitter_sn) & (problem_reports.bucket != 'Pairing Issues')])\n \n # Generate full pivot\n problem_reports_pivot = problem_reports_pivot[date_of_issue_selector].set_index('date_of_issue').groupby(['bucket', 'is_1.2_tx']).size().unstack(level=1).fillna(0)\n problem_reports_pivot.columns.name = None\n problem_reports_pivot.columns = ['Not G5 TX 1.2 (Qty)', 'G5 TX 1.2 (Qty)']\n\n problem_reports_pivot = problem_reports_pivot.sort_values('G5 TX 1.2 (Qty)', ascending=False)\n \n fractions_df = problem_reports_pivot / problem_reports_pivot.sum(axis=0)\n fractions_df = fractions_df.rename(columns = {'G5 TX 1.2 (Qty)': 'G5 TX 1.2 (Rate)', 'Not G5 TX 1.2 (Qty)': 'Not G5 TX 1.2 (Rate)'})\n fractions_df['variance'] = fractions_df['G5 TX 1.2 (Rate)'] - fractions_df['Not G5 TX 1.2 (Rate)']\n\n all_pivot = pd.concat([problem_reports_pivot, fractions_df], axis=1)\n \n all_pivot = all_pivot.loc[tx_buckets + ['Other',]].fillna(0).astype('float')\n \n # Generate pivot without Low TX Battery\n without_low_tx_battery_pivot = all_pivot.copy()\n\n without_low_tx_battery_pivot = without_low_tx_battery_pivot.loc[without_low_tx_battery_pivot.index != 'Low TX Battery']\n\n without_low_tx_battery_pivot['Not G5 TX 1.2 (Rate)'] = without_low_tx_battery_pivot['Not G5 TX 1.2 (Qty)'] / without_low_tx_battery_pivot['Not G5 TX 1.2 (Qty)'].sum()\n without_low_tx_battery_pivot['G5 TX 1.2 (Rate)'] = without_low_tx_battery_pivot['G5 TX 1.2 (Qty)'] / without_low_tx_battery_pivot['G5 TX 1.2 (Qty)'].sum()\n without_low_tx_battery_pivot['variance'] = without_low_tx_battery_pivot['G5 TX 1.2 (Rate)'] - without_low_tx_battery_pivot['Not G5 TX 1.2 (Rate)']\n \n without_low_tx_battery_pivot = without_low_tx_battery_pivot.loc[[i for i in tx_buckets if i != 'Low TX Battery'] + ['Other',]].fillna(0).astype('float')\n \n # Generate pivot without Pairing Issues\n without_pairing_failed_pivot = all_pivot.copy()\n\n without_pairing_failed_pivot = without_pairing_failed_pivot.loc[without_pairing_failed_pivot.index != 'Pairing Issues']\n\n without_pairing_failed_pivot['Not G5 TX 1.2 (Rate)'] = without_pairing_failed_pivot['Not G5 TX 1.2 (Qty)'] / without_pairing_failed_pivot['Not G5 TX 1.2 (Qty)'].sum()\n without_pairing_failed_pivot['G5 TX 1.2 (Rate)'] = without_pairing_failed_pivot['G5 TX 1.2 (Qty)'] / without_pairing_failed_pivot['G5 TX 1.2 (Qty)'].sum()\n without_pairing_failed_pivot['variance'] = without_pairing_failed_pivot['G5 TX 1.2 (Rate)'] - without_pairing_failed_pivot['Not G5 TX 1.2 (Rate)']\n\n without_pairing_failed_pivot = without_pairing_failed_pivot.loc[[i for i in tx_buckets if i != 'Pairing Issues'] + ['Other',]].fillna(0).astype('float')\n\n return all_pivot, without_low_tx_battery_pivot, without_pairing_failed_pivot, all_unknown_tx_sn_count, without_low_tx_battery_unknown_tx_sn_count, without_pairing_issues_unknown_tx_sn_count\n\n def generate_visualizations(all_pivot, without_low_tx_battery_pivot, without_pairing_failed_pivot, date_range_prefix):\n '''Generate pie charts.'''\n plt.close('all')\n sns.set_context('paper')\n matplotlib.rc('font', **{'family': 'serif', 'weight': 'normal', 'size': 10})\n\n def generate_fig(df, category_prefix):\n fig, (ax1, ax2) = plt.subplots(figsize=(7.5, 3), nrows=1, ncols=2)\n\n ax1.set_title('Not G5 TX 1.2\\nTotal Complaints = {:,}'.format(df['Not G5 TX 1.2 (Qty)'].sum()))\n patches, labels, autopcts = ax1.pie(x = df['Not G5 TX 1.2 (Qty)'], labels = [textwrap.fill(i, 25) for i in df.index], \n colors = 
sns.color_palette('colorblind'), autopct = '%1.1f%%', counterclock = True, \n pctdistance = 0.86, labeldistance=1.05)\n for i in labels:\n i.set_fontsize('xx-small')\n for i in autopcts:\n i.set_fontsize('xx-small')\n ax1.axis('equal')\n\n ax2.set_title('G5 TX 1.2\\nTotal Complaints = {:,}'.format(df['G5 TX 1.2 (Qty)'].sum()))\n patches, labels, autopcts = ax2.pie(x = df['G5 TX 1.2 (Qty)'], labels = [textwrap.fill(i, 25) for i in df.index], \n colors = sns.color_palette('colorblind'), autopct = '%1.1f%%', counterclock = True, \n pctdistance = 0.86, labeldistance=1.05)\n for i in labels:\n i.set_fontsize('xx-small')\n for i in autopcts:\n i.set_fontsize('xx-small')\n ax2.axis('equal')\n\n fig.savefig(os.path.join(pr.working_directory, '{}_{}.pdf'.format(date_range_prefix, category_prefix)), dpi=150, bbox_inches='tight')\n\n generate_fig(all_pivot, 'full')\n generate_fig(without_low_tx_battery_pivot, 'without_low_tx_battery')\n generate_fig(without_pairing_failed_pivot, 'without_pairing_failed')\n\n # All Dates\n all_pivot_full, without_low_tx_battery_pivot_full, without_pairing_failed_pivot_full, all_unknown_tx_sn_count_full, without_low_tx_battery_unknown_tx_sn_count_full, without_pairing_issues_unknown_tx_sn_count_full = \\\n generate_dfs('2016-10-01', pd.to_datetime(pr.export_date.tz_localize(None)))\n generate_visualizations(all_pivot_full, without_low_tx_battery_pivot_full, without_pairing_failed_pivot_full, 'all_dates')\n\n # November 2016\n all_pivot_nov_2016, without_low_tx_battery_pivot_nov_2016, without_pairing_failed_pivot_nov_2016, all_unknown_tx_sn_count_nov_2016, \\\n without_low_tx_battery_unknown_tx_sn_count_nov_2016, without_pairing_issues_unknown_tx_sn_count_nov_2016 = \\\n generate_dfs('2016-11-01', '2016-12-01')\n generate_visualizations(all_pivot_nov_2016, without_low_tx_battery_pivot_nov_2016, without_pairing_failed_pivot_nov_2016, 'nov_2016')\n\n # December 2016\n all_pivot_dec_2016, without_low_tx_battery_pivot_dec_2016, without_pairing_failed_pivot_dec_2016, all_unknown_tx_sn_count_dec_2016, \\\n without_low_tx_battery_unknown_tx_sn_count_dec_2016, without_pairing_issues_unknown_tx_sn_count_dec_2016 = \\\n generate_dfs('2016-12-01', '2017-01-01')\n generate_visualizations(all_pivot_dec_2016, without_low_tx_battery_pivot_dec_2016, without_pairing_failed_pivot_dec_2016, 'dec_2016')\n\n # January 2017\n all_pivot_jan_2017, without_low_tx_battery_pivot_jan_2017, without_pairing_failed_pivot_jan_2017, all_unknown_tx_sn_count_jan_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_jan_2017, without_pairing_issues_unknown_tx_sn_count_jan_2017 = \\\n generate_dfs('2017-01-01', '2017-02-01')\n generate_visualizations(all_pivot_jan_2017, without_low_tx_battery_pivot_jan_2017, without_pairing_failed_pivot_jan_2017, 'jan_2017')\n\n # February 2017\n all_pivot_feb_2017, without_low_tx_battery_pivot_feb_2017, without_pairing_failed_pivot_feb_2017, all_unknown_tx_sn_count_feb_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_feb_2017, without_pairing_issues_unknown_tx_sn_count_feb_2017 = \\\n generate_dfs('2017-02-01', '2017-03-01')\n generate_visualizations(all_pivot_feb_2017, without_low_tx_battery_pivot_feb_2017, without_pairing_failed_pivot_feb_2017, 'feb_2017')\n\n # March 2017\n all_pivot_mar_2017, without_low_tx_battery_pivot_mar_2017, without_pairing_failed_pivot_mar_2017, all_unknown_tx_sn_count_mar_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_mar_2017, without_pairing_issues_unknown_tx_sn_count_mar_2017 = \\\n generate_dfs('2017-03-01', '2017-04-01')\n 
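# Added comment (not from the original): the monthly blocks in this function repeat the\n    # same generate_dfs/generate_visualizations pattern; a loop over (start, end, prefix) tuples\n    # such as ('2017-03-01', '2017-04-01', 'mar_2017') could replace the duplication.\n    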
generate_visualizations(all_pivot_mar_2017, without_low_tx_battery_pivot_mar_2017, without_pairing_failed_pivot_mar_2017, 'mar_2017')\n\n # April 2017\n all_pivot_apr_2017, without_low_tx_battery_pivot_apr_2017, without_pairing_failed_pivot_apr_2017, all_unknown_tx_sn_count_apr_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_apr_2017, without_pairing_issues_unknown_tx_sn_count_apr_2017 = \\\n generate_dfs('2017-04-01', '2017-05-01')\n generate_visualizations(all_pivot_apr_2017, without_low_tx_battery_pivot_apr_2017, without_pairing_failed_pivot_apr_2017, 'apr_2017')\n\n # May 2017\n all_pivot_may_2017, without_low_tx_battery_pivot_may_2017, without_pairing_failed_pivot_may_2017, all_unknown_tx_sn_count_may_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_may_2017, without_pairing_issues_unknown_tx_sn_count_may_2017 = \\\n generate_dfs('2017-05-01', '2017-06-01')\n generate_visualizations(all_pivot_may_2017, without_low_tx_battery_pivot_may_2017, without_pairing_failed_pivot_may_2017, 'may_2017')\n\n # June 2017\n all_pivot_jun_2017, without_low_tx_battery_pivot_jun_2017, without_pairing_failed_pivot_jun_2017, all_unknown_tx_sn_count_jun_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_jun_2017, without_pairing_issues_unknown_tx_sn_count_jun_2017 = \\\n generate_dfs('2017-06-01', '2017-07-01')\n generate_visualizations(all_pivot_jun_2017, without_low_tx_battery_pivot_jun_2017, without_pairing_failed_pivot_jun_2017, 'jun_2017')\n\n # Weekly trending of top 4 issues\n dates_to_plot = pd.date_range(start='2016-10-01', end=pd.to_datetime(pr.export_date.tz_localize(None)), freq='W-SUN')\n issues_to_plot = ['Low TX Battery', 'Pairing Issues', 'Connectivity Issues', 'Transmitter Failed Icon']\n\n vis_2_pivot = problem_reports[~pd.isnull(problem_reports.date_of_issue) & (problem_reports['is_1.2_tx'] == True)].set_index('date_of_issue').groupby([pd.TimeGrouper('W-SUN'), 'bucket']).size().\\\n reindex(issues_to_plot, level=1).unstack(level=1).reindex(dates_to_plot).fillna(0)\n\n vis_2_pivot.index = [(i + pd.Timedelta(days=-6)).strftime('%m/%d') + ' - ' + i.strftime('%m/%d') for i in vis_2_pivot.index]\n vis_2_pivot['Sum'] = vis_2_pivot.sum(axis=1)\n\n # Bar chart\n plt.close('all')\n\n sns.set_context('paper')\n sns.set_style('darkgrid')\n matplotlib.rc('font', **{'family': 'serif', 'weight': 'normal', 'size': 10})\n matplotlib.rc('xtick', **{'labelsize': 'x-small'})\n matplotlib.rc('ytick', **{'labelsize': 'x-small'})\n\n fig, ax = plt.subplots(figsize=(7.5, 5))\n\n ind = range(len(vis_2_pivot))\n\n bar_containers = []\n for row_idx, column in enumerate(vis_2_pivot.columns[0:-1]):\n bar_container = ax.bar(ind, vis_2_pivot[column], width=0.7, bottom=vis_2_pivot.iloc[:, 0:row_idx].sum(axis=1), label=vis_2_pivot.columns[row_idx])\n bar_containers.append(bar_container)\n\n plt.xticks(ind, vis_2_pivot.index, rotation=270)\n\n for row_idx, rect in enumerate(bar_containers[-1]):\n ax.text(rect.get_x() + rect.get_width()/2., vis_2_pivot.ix[row_idx, 'Sum']+3, vis_2_pivot.ix[row_idx, 'Sum'].astype('int'), ha='center', va='bottom', fontsize='x-small')\n\n ax.set_ylabel('Quantity of TX 1.2 Complaints')\n ax.set_xlabel('Week of Incident')\n\n ax.legend()\n\n fig.savefig(os.path.join(pr.working_directory, 'bar_chart.pdf'), dpi=150, bbox_inches='tight')\n\n # Generate PDF report\n env = Environment(loader=FileSystemLoader(pr.script_directory))\n template = env.get_template('report_template.tex.jinja')\n\n template_kwargs = {\n 'all_pivot_full': all_pivot_full,\n 'without_low_tx_battery_pivot_full': 
without_low_tx_battery_pivot_full,\n 'without_pairing_failed_pivot_full': without_pairing_failed_pivot_full,\n 'all_unknown_tx_sn_count_full': all_unknown_tx_sn_count_full,\n 'without_low_tx_battery_unknown_tx_sn_count_full': without_low_tx_battery_unknown_tx_sn_count_full,\n 'without_pairing_issues_unknown_tx_sn_count_full': without_pairing_issues_unknown_tx_sn_count_full,\n\n 'all_pivot_nov_2016': all_pivot_nov_2016,\n 'without_low_tx_battery_pivot_nov_2016': without_low_tx_battery_pivot_nov_2016,\n 'without_pairing_failed_pivot_nov_2016': without_pairing_failed_pivot_nov_2016,\n 'all_unknown_tx_sn_count_nov_2016': all_unknown_tx_sn_count_nov_2016,\n 'without_low_tx_battery_unknown_tx_sn_count_nov_2016': without_low_tx_battery_unknown_tx_sn_count_nov_2016,\n 'without_pairing_issues_unknown_tx_sn_count_nov_2016': without_pairing_issues_unknown_tx_sn_count_nov_2016,\n\n 'all_pivot_dec_2016': all_pivot_dec_2016,\n 'without_low_tx_battery_pivot_dec_2016': without_low_tx_battery_pivot_dec_2016,\n 'without_pairing_failed_pivot_dec_2016': without_pairing_failed_pivot_dec_2016,\n 'all_unknown_tx_sn_count_dec_2016': all_unknown_tx_sn_count_dec_2016,\n 'without_low_tx_battery_unknown_tx_sn_count_dec_2016': without_low_tx_battery_unknown_tx_sn_count_dec_2016,\n 'without_pairing_issues_unknown_tx_sn_count_dec_2016': without_pairing_issues_unknown_tx_sn_count_dec_2016,\n\n 'all_pivot_jan_2017': all_pivot_jan_2017,\n 'without_low_tx_battery_pivot_jan_2017': without_low_tx_battery_pivot_jan_2017,\n 'without_pairing_failed_pivot_jan_2017': without_pairing_failed_pivot_jan_2017,\n 'all_unknown_tx_sn_count_jan_2017': all_unknown_tx_sn_count_jan_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_jan_2017': without_low_tx_battery_unknown_tx_sn_count_jan_2017,\n 'without_pairing_issues_unknown_tx_sn_count_jan_2017': without_pairing_issues_unknown_tx_sn_count_jan_2017,\n\n 'all_pivot_feb_2017': all_pivot_feb_2017,\n 'without_low_tx_battery_pivot_feb_2017': without_low_tx_battery_pivot_feb_2017,\n 'without_pairing_failed_pivot_feb_2017': without_pairing_failed_pivot_feb_2017,\n 'all_unknown_tx_sn_count_feb_2017': all_unknown_tx_sn_count_feb_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_feb_2017': without_low_tx_battery_unknown_tx_sn_count_feb_2017,\n 'without_pairing_issues_unknown_tx_sn_count_feb_2017': without_pairing_issues_unknown_tx_sn_count_feb_2017,\n\n 'all_pivot_mar_2017': all_pivot_mar_2017,\n 'without_low_tx_battery_pivot_mar_2017': without_low_tx_battery_pivot_mar_2017,\n 'without_pairing_failed_pivot_mar_2017': without_pairing_failed_pivot_mar_2017,\n 'all_unknown_tx_sn_count_mar_2017': all_unknown_tx_sn_count_mar_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_mar_2017': without_low_tx_battery_unknown_tx_sn_count_mar_2017,\n 'without_pairing_issues_unknown_tx_sn_count_mar_2017': without_pairing_issues_unknown_tx_sn_count_mar_2017,\n \n 'all_pivot_apr_2017': all_pivot_apr_2017,\n 'without_low_tx_battery_pivot_apr_2017': without_low_tx_battery_pivot_apr_2017,\n 'without_pairing_failed_pivot_apr_2017': without_pairing_failed_pivot_apr_2017,\n 'all_unknown_tx_sn_count_apr_2017': all_unknown_tx_sn_count_apr_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_apr_2017': without_low_tx_battery_unknown_tx_sn_count_apr_2017,\n 'without_pairing_issues_unknown_tx_sn_count_apr_2017': without_pairing_issues_unknown_tx_sn_count_apr_2017,\n\n 'all_pivot_may_2017': all_pivot_may_2017,\n 'without_low_tx_battery_pivot_may_2017': without_low_tx_battery_pivot_may_2017,\n 
'without_pairing_failed_pivot_may_2017': without_pairing_failed_pivot_may_2017,\n 'all_unknown_tx_sn_count_may_2017': all_unknown_tx_sn_count_may_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_may_2017': without_low_tx_battery_unknown_tx_sn_count_may_2017,\n 'without_pairing_issues_unknown_tx_sn_count_may_2017': without_pairing_issues_unknown_tx_sn_count_may_2017,\n\n 'all_pivot_jun_2017': all_pivot_jun_2017,\n 'without_low_tx_battery_pivot_jun_2017': without_low_tx_battery_pivot_jun_2017,\n 'without_pairing_failed_pivot_jun_2017': without_pairing_failed_pivot_jun_2017,\n 'all_unknown_tx_sn_count_jun_2017': all_unknown_tx_sn_count_jun_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_jun_2017': without_low_tx_battery_unknown_tx_sn_count_jun_2017,\n 'without_pairing_issues_unknown_tx_sn_count_jun_2017': without_pairing_issues_unknown_tx_sn_count_jun_2017,\n\n 'incident_week_pivot_df': vis_2_pivot\n }\n\n with open(os.path.join(pr.working_directory, 'report.tex'), 'w') as f:\n f.write(template.render(**template_kwargs))\n\n # Run pdflatex\n subprocess.run(['pdflatex', os.path.join(pr.working_directory, 'report.tex'), '--output-directory', pr.working_directory], stdout=subprocess.DEVNULL, shell=True)\n subprocess.run(['pdflatex', os.path.join(pr.working_directory, 'report.tex'), '--output-directory', pr.working_directory], stdout=subprocess.DEVNULL, shell=True)\n\n # Export to Excel\n export_cols = OrderedDict()\n\n export_cols['psr_number'] = 'Complaint Number'\n export_cols['awareness_date'] = 'Awareness Date'\n export_cols['date_of_issue'] = 'Date of Issue'\n export_cols['problem_code_and_summary'] = 'Problem Code and Summary'\n export_cols['product_line_and_description'] = 'Product Line and Description'\n export_cols['serial_number'] = 'Serial Number (Free-text)'\n export_cols['ex_transmitter_sn'] = 'Transmitter Serial Number (Extracted)'\n export_cols['is_1.2_tx'] = 'Is Version 1.2 TX?'\n\n export_df = problem_reports.copy()\n export_df = export_df[list(export_cols.keys())].copy()\n export_df = export_df.rename(columns=export_cols)\n\n writer = pd.ExcelWriter(os.path.join(pr.working_directory, 'report.xlsx'), datetime_format='mm/dd/yyyy')\n export_df.to_excel(writer, index=False)\n writer.close()\n\n\ndef main():\n periodic_report = PeriodicReport(script_directory=os.path.dirname(os.path.abspath(__file__)))\n periodic_report.startup()\n\n searches = [\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/October 2016',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/November 2016',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/December 2016',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/January 2017',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/February 2017',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/March 2017',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/April 2017',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/May 2017',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/June 2017',\n ]\n periodic_report.run_agileplm_searches(searches)\n\n work(periodic_report)\n\n\nif __name__ == '__main__':\n main()","sub_path":"G5 Transmitter 1.2 Trending/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":26017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"119806161","text":"# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport uuid\n\nfrom oslo.config import cfg\n\nfrom st2common import log as logging\nfrom st2actions.runners.fabric_runner import BaseFabricRunner\nfrom st2actions.runners.fabric_runner import RUNNER_REMOTE_DIR\nfrom st2common.models.system.action import FabricRemoteScriptAction\n\n__all__ = [\n 'get_runner',\n 'RemoteScriptRunner'\n]\n\nLOG = logging.getLogger(__name__)\n\n\ndef get_runner():\n return RemoteScriptRunner(str(uuid.uuid4()))\n\n\nclass RemoteScriptRunner(BaseFabricRunner):\n def run(self, action_parameters):\n LOG.debug(' action_parameters = %s', action_parameters)\n\n remote_action = self._get_remote_action(action_parameters)\n\n LOG.debug('Will execute remote_action : %s.', str(remote_action))\n result = self._run(remote_action)\n LOG.debug('Executed remote_action : %s. Result is : %s.', remote_action, result)\n status = self._get_result_status(result, cfg.CONF.ssh_runner.allow_partial_failure)\n\n self._log_action_completion(logger=LOG, result=result, status=status)\n return (status, result, None)\n\n def _get_remote_action(self, action_parameters):\n # remote script actions without entry_point don't make sense, user probably wanted to use\n # \"run-remote\" action\n if not self.entry_point:\n msg = ('Action \"%s\" is missing entry_point attribute. Perhaps wanted to use '\n '\"run-remote\" runner?')\n raise Exception(msg % (self.action_name))\n\n script_local_path_abs = self.entry_point\n pos_args, named_args = self._get_script_args(action_parameters)\n named_args = self._transform_named_args(named_args)\n env_vars = self._get_env_vars()\n remote_dir = self.runner_parameters.get(RUNNER_REMOTE_DIR,\n cfg.CONF.ssh_runner.remote_dir)\n remote_dir = os.path.join(remote_dir, self.liveaction_id)\n return FabricRemoteScriptAction(self.action_name,\n str(self.liveaction_id),\n script_local_path_abs,\n self.libs_dir_path,\n named_args=named_args,\n positional_args=pos_args,\n env_vars=env_vars,\n on_behalf_user=self._on_behalf_user,\n user=self._username,\n password=self._password,\n private_key=self._private_key,\n remote_dir=remote_dir,\n hosts=self._hosts,\n parallel=self._parallel,\n sudo=self._sudo,\n timeout=self._timeout,\n cwd=self._cwd)\n","sub_path":"st2actions/st2actions/runners/remote_script_runner.py","file_name":"remote_script_runner.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"602638249","text":"import logging\nimport pandas as pd\nimport telegram\nimport numpy as np\nimport random\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup \nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\n\n\n# Reading Data\ndata = pd.read_csv(\"ig_scrapper/database/uni data/data_v1.01.csv\")\n\n# Temporal Data\nclass pt:\n ix = 0\n temp = pd.DataFrame()\n last = 0\n\n# Food Selection\nclass fd:\n\n postre = ('clasificacion', 'postre')\n desayuno = ('clasificacion', 'desayuno')\n gourmet = ('clasificacion', 'gourmet')\n chatarra = ('clasificacion', 'chatarra')\n combo = ('combo', True)\n pasapalo = ('clasificacion', 'pasapalo')\n cumple = ('Tortas',True)\n \n\n\n\n\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\ntoken = \"1203014270:AAEcmANbEwlLCtDqCW_xogdgUMU92xh9cxc\"\nbot = telegram.Bot(token=token)\n\ntry:\n chat_id = bot.get_updates()[-1].message.chat_id\nexcept IndexError:\n chat_id = 0\n\n####################################################################################################################\n\ndef mini_menu(update,context):\n keyboard = [\n \n [InlineKeyboardButton(\"🍳🥞Desayuno🥯🥪\", callback_data='desayuno')],\n [InlineKeyboardButton(\"🍝🍲Gourmet 🍱🍛\", callback_data='gourmet')],\n \n [InlineKeyboardButton(\"🍔🍟Chatarra🍕🌯\", callback_data='chatarra')],\n [InlineKeyboardButton(\"🧁🎂 Dulces 🍪🍩\", callback_data='postre')]\n \n ]\n \n return keyboard\n\ndef second_menu(update,context):\n keyboard = [\n \n [InlineKeyboardButton(\"🍔🍟Combos🍪🥤\", callback_data='combo')],\n [InlineKeyboardButton(\"🍤🥗Pasapalos 🧀🍾\", callback_data='pasapalo')],\n [InlineKeyboardButton(\"🎂🍮Cumpleaños🥧🥮\", callback_data='cumple')]\n \n ]\n \n return keyboard\n\ndef start(update, context):\n\n keyboard = mini_menu(update,context)\n reply_markup = InlineKeyboardMarkup(keyboard)\n\n update.message.reply_text('Hola '+'**'+str(update.message.chat.username)+'**'+''' Soy un Bot que trae ofertas de difrentes restaurantes en Valencia. 
\n    Si quieres Mas selecciona /menu\n    Mayor Informacion de este Bot /soporte''', reply_markup=reply_markup)\n    \ndef clasificacion_data(data, columna, seleccion):\n    \n    temp = data[data[columna]==seleccion]\n    ix = list(temp.index)\n    random.shuffle(ix)\n    return ix, temp\n\ndef next_fun( update, context, \n            nx, temp, last):\n    \n    \n    ruta = 'ig_scrapper/database/images/'\n    url_ig = 'https://www.instagram.com' + str(temp['user url'][nx])\n    \n    if nx == last:\n\n        keyboard = mini_menu(update,context)\n        reply_markup = InlineKeyboardMarkup(keyboard)\n        update.callback_query.message.reply_text('Ups😵, por ahora mi data es limitada😓', reply_markup=reply_markup)\n\n    else:\n        context.bot.send_photo(chat_id=update.effective_chat.id, photo = open( ruta + str(temp['img name'][nx]),'rb'))\n        \n        if (temp['ws'][nx] == 'False') or (temp['ws'][nx] is False):\n            \n            keyboard = [\n            \n                [InlineKeyboardButton(\"Instagram\", url=url_ig)],\n\n                [InlineKeyboardButton(\"⏹Menu\", callback_data='menu_'),\n                InlineKeyboardButton(\"Siguiente▶️\", callback_data='next_fun')]\n            \n            ]\n        else:\n            url_ws = 'https://api.whatsapp.com/send?phone=+58'+ str(temp['ws'][nx]) +'&text=Hola,%20Te%20encontre%20gracias%20Offer_Eat%20Telegram'\n            keyboard = [\n            \n                [InlineKeyboardButton(\"Whatsapp\", url=url_ws),\n                InlineKeyboardButton(\"Instagram\", url=url_ig)],\n\n                [InlineKeyboardButton(\"⏹Menu\", callback_data='menu_'),\n                InlineKeyboardButton(\"Siguiente▶️\", callback_data='next_fun')]\n            \n            ]\n        \n        reply_markup = InlineKeyboardMarkup(keyboard)\n\n        # Some items don't have content\n\n        if (temp['content'][nx] == 'False') or (temp['content'][nx] is False):\n            update.callback_query.message.reply_text('Cuenta: '+ str(temp['user name'][nx]), reply_markup=reply_markup)\n        else:\n            update.callback_query.message.reply_text('Cuenta: '+ str(temp['user name'][nx]) + '\\n' + temp['content'][nx], reply_markup=reply_markup)\n        \n        \n\ndef post_fun(update,context, data, fd):\n\n    ix, temp = clasificacion_data(data, fd[0], fd[1])\n    \n    \n    # Saving in the temporary class\n\n    pt.ix = iter(ix)\n    pt.temp = temp\n    pt.last = ix[-1]\n    \n\n    next_fun(update, context, \n            next(pt.ix), pt.temp, pt.last)\n    \n    \n    \n\n\ndef post(update, context):\n    \n    context.bot.send_photo(chat_id=update.effective_chat.id, photo = open( 'ig_scrapper/database/images/image0.jpg','rb'))\n    update.message.reply_text(data['content'][0])\n    \n\ndef soporte(update, context):\n    \n    update.message.reply_text('''Este Bot esta en Version Beta, la idea simplificar la busqueda de restaurantes en Valencia.\n    Desarrolado por Juan Vicente ventrone\n    si deseas contactar a mi creador: https://t.me/JVentrone''')\n    \ndef menu(update, context):\n    \n    k_1 = mini_menu(update,context)\n    k_2 = second_menu(update,context)\n\n    keyboard = k_1 + k_2\n    reply_markup = InlineKeyboardMarkup(keyboard)\n\n    try:\n        update.message.reply_text('Para Mayor Informacion /soporte', reply_markup=reply_markup)\n    except:\n        update.callback_query.message.reply_text('Para Mayor Informacion /soporte', reply_markup=reply_markup)\n\n\ndef progresivo(update,context):\n    \n    if pt.ix == 0: return menu(update, context)\n    \n    next_fun(update, context, \n            next(pt.ix), pt.temp, pt.last)\n\n# This is bad, but it's just a prototype; I will fix this chorizo later!\ndef desayuno(update, context): post_fun(update,context,data, fd.desayuno)\ndef gourmet(update, context): post_fun(update,context,data, fd.gourmet)\ndef chatarra(update, context): post_fun(update,context,data, fd.chatarra)\ndef postre(update, context): 
post_fun(update,context,data, fd.postre)\ndef combo(update, context): post_fun(update,context,data, fd.combo)\ndef pasapalo(update, context): post_fun(update,context,data, fd.pasapalo)\ndef cumple(update, context): post_fun(update,context,data, fd.cumple)\n\n\n\n\ndef main():\n    \n    \n    # Create the Updater and pass it your bot's token.\n    # Make sure to set use_context=True to use the new context based callbacks\n    # Post version 12 this will no longer be necessary\n    updater = Updater(\"1203014270:AAEcmANbEwlLCtDqCW_xogdgUMU92xh9cxc\", use_context=True)\n\n    # Get the dispatcher to register handlers\n    dp = updater.dispatcher\n\n    # on different commands - answer in Telegram\n    dp.add_handler(CommandHandler(\"start\", start))\n    dp.add_handler(CommandHandler(\"menu\", menu))\n    dp.add_handler(CommandHandler(\"soporte\", soporte))\n    \n    # Button callback handlers\n    dp.add_handler(CallbackQueryHandler(progresivo, pattern='^next_fun$'))\n    dp.add_handler(CallbackQueryHandler(desayuno, pattern='^desayuno$'))\n    dp.add_handler(CallbackQueryHandler(gourmet, pattern='^gourmet$'))\n    dp.add_handler(CallbackQueryHandler(postre, pattern='^postre$'))\n    dp.add_handler(CallbackQueryHandler(chatarra, pattern='^chatarra$'))\n    dp.add_handler(CallbackQueryHandler(combo, pattern='^combo$'))\n    dp.add_handler(CallbackQueryHandler(cumple, pattern='^cumple$'))\n    dp.add_handler(CallbackQueryHandler(pasapalo, pattern='^pasapalo$'))\n    dp.add_handler(CallbackQueryHandler(menu, pattern='^menu_$'))\n    \n    # Start the Bot\n    updater.start_polling()\n\n    # Run the bot until you press Ctrl-C or the process receives SIGINT,\n    # SIGTERM or SIGABRT. This should be used most of the time, since\n    # start_polling() is non-blocking and will stop the bot gracefully.\n    updater.idle()\n    \n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"_init_.py","file_name":"_init_.py","file_ext":"py","file_size_in_byte":8155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"115616703","text":"#!/usr/local/bin/python3\n\nimport copy\n\nneighbours = [(-1, -1, -1),\n (-1, 0, -1),\n (-1, 1, -1),\n (0, -1, -1),\n (0, 1, -1),\n (1, -1, -1),\n (1, 0, -1),\n (1, 1, -1),\n (-1, -1, 0),\n (-1, 0, 0),\n (-1, 1, 0),\n (0, -1, 0),\n (0, 1, 0),\n (1, -1, 0),\n (1, 0, 0),\n (1, 1, 0),\n (-1, -1, 1),\n (-1, 0, 1),\n (-1, 1, 1),\n (0, -1, 1),\n (0, 1, 1),\n (1, -1, 1),\n (1, 0, 1),\n (1, 1, 1),\n (0, 0, -1),\n (0, 0, 1)]\n \ndef prefill_grid(size):\n grid = []\n for z in range(0,size):\n grid.append([])\n for y in range(0,size):\n grid[z].append([])\n for x in range(0,size):\n grid[z][y].append(\".\")\n return grid\n \ndef load_grid(location, size):\n grid = prefill_grid(size)\n offset = int(size / 2)\n input = []\n with open(str(location), 'r') as file:\n for line in file:\n input.append(list(line.rstrip()))\n for y, l in enumerate(input):\n for x, c in enumerate(l):\n grid[offset][offset + y][offset + x] = c\n return grid\n \ndef process_node(x, y, z, grid, new_grid):\n current_value = grid[z][y][x]\n inactive = 0\n active = 0\n for node in neighbours:\n if -1 < z + node[2] < len(grid) and -1 < y + node[1] < len(grid[x]) and -1 < x + node[0] < len(grid[x][y]):\n node_value = grid[z + node[2]][y + node[1]][x + node[0]]\n if node_value == \".\":\n inactive += 1\n elif node_value == \"#\":\n active += 1\n if current_value == \"#\" and not 1 < active < 4:\n new_grid[z][y][x] = \".\"\n elif current_value == \".\" and active == 3:\n new_grid[z][y][x] = \"#\"\n return new_grid\n\ndef run_cycle(grid):\n new_grid = copy.deepcopy(grid)\n for z, layer in enumerate(grid):\n for y, row in enumerate(layer):\n for x, char in enumerate(row):\n new_grid = process_node(x, y, z, grid, new_grid)\n return new_grid\n\ndef process_grid(location):\n size = 26\n grid = load_grid(location, size)\n for _ in range(0,6):\n grid = run_cycle(grid)\n count = 0\n for layer in grid:\n for row in layer:\n count += row.count(\"#\")\n print(location + \" - Active Count: \" + str(count))\n\nprocess_grid(\"17-test.txt\")\nprocess_grid(\"17-input.txt\")\n\n","sub_path":"day17/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"540813447","text":"# Copyright (C) 2017 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"Test Mapping Issue mapping\"\"\"\n\nfrom ddt import data, ddt, unpack\nfrom ggrc.app import app # NOQA pylint: disable=unused-import\nfrom ggrc.models import all_models\nfrom integration.ggrc import TestCase\nfrom integration.ggrc.api_helper import Api\nfrom integration.ggrc.models import factories\nfrom integration.ggrc_basic_permissions.models \\\n import factories as rbac_factories\n\n\ndef _get_map_dict(destination, source):\n return {\n 'relationship': {\n \"context\": {\n \"id\": destination.context.id,\n \"type\": \"Context\"\n },\n \"source\": {\n \"id\": source.id,\n \"type\": source.type\n },\n \"destination\": {\n \"id\": destination.id,\n \"type\": destination.type\n }\n }\n }\n\n\n@ddt\nclass TestIssueMapping(TestCase):\n \"\"\"Test Issue mapping\"\"\"\n\n def setup_roles(self):\n \"\"\"Setup necessary roles needed by the tests\"\"\"\n query = all_models.Role.query\n self.roles = {\n 'creator': query.filter_by(name=\"Creator\").first(),\n 'auditor': query.filter_by(name=\"Auditor\").first(),\n 'program_editor': query.filter_by(name=\"ProgramEditor\").first()\n }\n\n def setup_users(self):\n \"\"\"Creates two creator users\"\"\"\n self.users = {}\n for user_name in ('auditor', 'auditlead'):\n user = factories.PersonFactory()\n rbac_factories.UserRoleFactory(\n role=self.roles['creator'],\n person=user)\n self.users[user_name] = user\n\n def setup_audits(self):\n \"\"\"Create an audit and an archived audit\"\"\"\n self.audits = {\n False: self.create_audit(archived=False),\n True: self.create_audit(archived=True)\n }\n\n def setup_snapshots_and_issue(self):\n \"\"\"Create snapshot & issue objects\"\"\"\n self.snapshots = {}\n self.issues = {}\n self.control = factories.ControlFactory()\n revision = all_models.Revision.query.filter(\n all_models.Revision.resource_type == self.control.type).first()\n for is_archived in (False, True):\n audit = self.audits[is_archived]\n # Create a snapshot\n self.snapshots[is_archived] = factories.SnapshotFactory(\n child_id=revision.resource_id,\n child_type=revision.resource_type,\n revision=revision,\n parent=audit,\n context=audit.context,\n )\n # Create an issue\n issue = factories.IssueFactory()\n self.issues[is_archived] = issue\n # Map issue to audit\n factories.RelationshipFactory(\n source=audit,\n destination=issue,\n context=audit.context\n )\n\n def create_audit(self, archived=False):\n \"\"\"Create an audit object and fix the it's context\"\"\"\n audit = factories.AuditFactory(\n contact=self.users['auditlead'],\n archived=archived\n )\n\n # Add auditor & program editor roles\n rbac_factories.UserRoleFactory(\n context=audit.context,\n role=self.roles['auditor'],\n person=self.users['auditor'])\n rbac_factories.UserRoleFactory(\n context=audit.program.context,\n role=self.roles['program_editor'],\n person=self.users['auditlead'])\n\n return audit\n\n def setUp(self):\n \"\"\"Prepare data needed to run the tests\"\"\"\n self.api = Api()\n self.setup_roles()\n self.setup_users()\n self.setup_audits()\n self.setup_snapshots_and_issue()\n\n @data(\n # user_name, is_archived\n ('auditor', True),\n ('auditlead', True),\n ('auditor', False),\n ('auditlead', False),\n )\n @unpack\n def test_mapping_to_issue(self, user_name, is_archived):\n \"\"\"Test mapping snapshots to issue\"\"\"\n user = self.users[user_name]\n payload = _get_map_dict(\n self.snapshots[is_archived],\n self.issues[is_archived])\n 
self.api.set_user(user)\n\n # Try to map to audit\n response = self.api.post(all_models.Relationship, payload)\n self.assertStatus(response, 201)\n\n rel_id = response.json['relationship']['id']\n relationship = all_models.Relationship.query.filter_by(id=rel_id).first()\n response = self.api.delete(relationship)\n self.assertStatus(response, 200)\n","sub_path":"test/integration/ggrc_basic_permissions/test_issue_mapping.py","file_name":"test_issue_mapping.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"318584721","text":"#import all imports and parameters from import_parameters.py\n\nfrom import_parameters import *\n\nparser = argparse.ArgumentParser(description = 'All arguments to be used in predicting')\nparser.add_argument ('--checkpoint', default = 'checkpoint.pth', help = 'Filename of checkpoint (default = checkpoint.pth)', metavar = '')\nparser.add_argument ('--arch', metavar='ARCH', default='vgg16', help='Chose between two options; densenet161 or vgg16 (default is vgg16)')\nparser.add_argument ('--device', default = 'GPU', help = 'Chose which device will be used to predict (default is GPU if available, otherwise CPU)')\nparser.add_argument ('--train_dir', default = 'flowers/train', help = 'Directory of training fotos, this needs to be mentioned', metavar = '')\nparser.add_argument ('--image_path', default = \"flowers/test/10/image_07090.jpg\", help = 'Image path of immage to be classified (default is flowers/test/10/image_07090.jpg)', metavar = '')\nparser.add_argument ('--hidden_units', default = 512, type=int, help = 'Number of units in the first hidden layer (default is 512)')\n\nargs = parser.parse_args()\n\n\nfilepath = args.checkpoint\narch = args.arch\nimage_path = args.image_path\nhidden_units = args.hidden_units\n\n\n#defining if model should be run on cpu or GPU\nif args.device == 'cpu':\n device = 'cpu'\n print('Device is set to cpu')\nelif args.device == 'GPU':\n if (torch.cuda.is_available()):\n device = 'cuda'\n print('GPU device is available and will be set to GPU')\n\n else:\n device = 'cpu'\n print ('Device could not be set to GPU, therefore device is cpu')\n\n\n\n\n\n\n\n\n\n\n\n\nprint('setting parameters')\n#Setting directories\ndata_dir = 'flowers'\ntrain_dir = args.train_dir\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'\n\n\nprint('define data transforms')\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.RandomRotation(random_rotation),\n transforms.RandomResizedCrop(random_resize),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(network_means, network_stds) ]),\n\n 'validate' : transforms.Compose([\n transforms.Resize(resize),\n transforms.CenterCrop(center_crop),\n transforms.ToTensor(),\n transforms.Normalize(network_means, network_stds) ]),\n\n 'test' : transforms.Compose([\n transforms.Resize(resize),\n transforms.CenterCrop(center_crop),\n transforms.ToTensor(),\n transforms.Normalize(network_means, network_stds) ])\n}\n\nprint('define what are the datasets')\n# Load the datasets with ImageFolder\ntrain_dir = args.train_dir\n\nimage_datasets = {\n 'train': datasets.ImageFolder(train_dir, transform= data_transforms['train']),\n 'validate': datasets.ImageFolder(valid_dir, transform = data_transforms['validate']),\n 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test'])\n}\n\n\n\n\nprint('define dataloader')\n# Define the dataloaders using the image datasets and the trainforms\ndataloaders = {\n 'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size = 64, shuffle = True),\n 'validate': torch.utils.data.DataLoader(image_datasets['validate'], batch_size = 32),\n 'test': torch.utils.data.DataLoader(image_datasets['test'], batch_size = 32)\n}\n\n\n\n\n\nprint('define model architecture')\n## define model architecture\n#Define model and classifier size dependant on chosen model\narch = args.arch\nmodel = models.__dict__[args.arch](pretrained=True)\nif arch == \"vgg16\":\n layers = [25088, hidden_units, 200, 102]\n\nelif arch == 
'densenet161':\n layers = [2208, hidden_units, 200, 102]\n\n# model = models.vgg16(pretrained=True)\n\n# don't compute gradients\nfor param in model.parameters():\n param.requires_grad = False\n\n\n\n\n#defining build_classifier\nprint('define build_classifier')\ndef build_classifier(layers):\n\n\n classifier = nn.Sequential(\n nn.Linear(layers[0], layers[1]),\n nn.ReLU(),\n nn.Dropout(0.5), #50 % probability\n nn.Linear(layers[1], layers[2]),\n torch.nn.ReLU(),\n torch.nn.Dropout(0.2), #20% probability\n nn.Linear(layers[2], layers[3]),\n nn.LogSoftmax(dim=1))\n\n return classifier\n\n\n\n\n\n\n\n\n\n\n\n#defining load_checkpoint\nprint('define load_checkpoint')\ndef load_checkpoint(filepath, arch):\n\n checkpoint = torch.load(filepath, map_location=lambda storage, loc: storage)\n model = models.vgg16(pretrained=True)\n\n # Freeze the feature parameters\n for params in model.parameters():\n params.requires_grad = False\n\n #create new classifier\n classifier = build_classifier(layers)\n model.classifier = classifier\n\n\n criterion = nn.NLLLoss()\n\n optimizer = optim.Adam(model.classifier.parameters(), lr = 0.001)\n\n\n model.class_to_idx = checkpoint['class_to_idx']\n\n model.load_state_dict(checkpoint['model_state_dict'])\n\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, criterion\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nprint('define validate_model')\ndef validate_model(model , criterion , dataloader ):\n model.eval()\n model.cuda()\n sum_loss = 0\n sum_accuracy = 0\n\n for data in iter(dataloader):\n inputs, labels = data\n\n inputs = inputs.float().cuda()\n labels = labels.long().cuda()\n\n inputs = Variable(inputs)\n labels = Variable(labels)\n\n output = model.forward(inputs)\n loss = criterion(output, labels)\n sum_loss += loss\n ps = torch.exp(output).data\n\n equality = labels.data == ps.max(1)[1]\n sum_accuracy += equality.type_as(torch.FloatTensor()).mean()\n\n loss_rate = sum_loss / len(dataloader)\n accuracy_rate = sum_accuracy / len(dataloader)\n\n return accuracy_rate, loss_rate\n\n\nmodel, criterion = load_checkpoint(filepath, model)\nmodel.to(device)\nvalidate_accuracy_rate, validate_loss_rate = validate_model(model , criterion , dataloaders['train'])\n\nprint('For the validation set, the accuracy rate is {:.3}'.format(validate_accuracy_rate))\nprint('For the validation set, the loss rate is {:.3}'.format(validate_loss_rate))\n","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"86833722","text":"'''\nThis will be main file which the co-ordinaters of the event will be using to test your\ncode. This file contains two functions:\n\n1. predict: You will be given an rgb image which you will use to predict the output \nwhich will be a string. For the prediction you can use/import code,models from other files or\nlibraries. More detailes given above the function defination.\n\n2. test: This will be used by the co-ordinators to test your code by giving sample \ninputs to the 'predict' function mentioned above. A sample test function is given for your\nreference but it is subject to minor changes during the evaluation. However, note that\nthere won't be any changes in the input format given to the predict function.\n\nMake sure all the necessary functions etc. you import are done from the same directory. And in \nthe final submission make sure you provide them also along with this script.\n'''\n\n\nimport tensorflow as tf \n# from tensorflow import keras\n# from keras import layers\nfrom tensorflow.keras import models\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import optimizers\n# from tesseract_predict import *\nimport os\nimport glob\nimport shutil\nimport sys\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport imutils\nfrom preprocessing import preprocess \n# from preprocessing1 import preprocess\nfrom keras.models import load_model\n\n'''\nfunction: predict\ninput: image - A numpy array which is an rgb image\noutput: answer - A string which is the full captcha\n\nSuggestion: Try to make your code understandable by including comments and organizing it. For \nthis we encourgae you to write essential function in other files and import them here so that \nthe final code is neat and not too big. 
Make sure you use the same input format and return \nsame output format.\n'''\ndef predict(image):\n model=models.Sequential()\n num_classes=26\n model.add(layers.Conv2D(32,(5,5),padding='valid',activation='relu',input_shape=(64,64,1)))\n model.add(layers.Conv2D(64,(5,5),padding='valid',activation='relu'))\n model.add(layers.MaxPooling2D((2,2)))\n model.add(layers.Conv2D(128,(3,3),activation='relu'))\n model.add(layers.Conv2D(256,(3,3),activation='relu'))\n model.add(layers.MaxPooling2D((2,2)))\n model.add(layers.Dropout(0.25))\n model.add(layers.Flatten())\n model.add(layers.Dense(512,activation='relu'))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256,activation='relu'))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(num_classes,activation='softmax'))\n model.load_weights('model_char5.h5')\n model2 = load_model('46_model.h5')\n class_mapping='0LACdEhHJKMn2PRTUWX3Y5bQ89'\n class_mapping2='0123?56?89A8C?EF?H?JKLM?0PQR5TU?WXY2Qbd???hn9??'\n \n count=0\n total=0\n images,images2=preprocess(image)\n answer=\"\"\n for i in range(len(images2)):\n image1=images[i]\n image2=images2[i]\n result = np.argmax(model.predict(image1))\n result_confidence=np.max(model.predict(image1))\n print(\"model1\",class_mapping[result],result_confidence)\n result2 = np.argmax(model2.predict(image2))\n result_confidence2=np.max(model2.predict(image2))\n print(\"model2\",class_mapping2[result2],result_confidence2)\n if class_mapping2[result2]=='?':\n answer+=(class_mapping[result])\n elif class_mapping[result]=='Q':\n answer+=class_mapping2[result2]\n else:\n if result_confidence2<=result_confidence-0.14:\n answer+=(class_mapping[result])\n else:\n answer+=(class_mapping2[result2])\n return answer \n'''\n Write your code for prediction here.\n '''\n # answer = 'xyzabc' # sample needs to be modified\n\n\n\n'''\nfunction: test\ninput: None\noutput: None\n\nThis is a sample test function which the co-ordinaors will use to test your code. This is\nsubject to change but the imput to predict function and the output expected from the predict\nfunction will not change. \nYou can use this to test your code before submission: Some details are given below:\nimage_paths : A list that will store the paths of all the images that will be tested.\ncorrect_answers: A list that holds the correct answers\nscore : holds the total score. Keep in mind that scoring is subject to change during testing.\n\nYou can play with these variables and test before final submission.\n'''\ndef test():\n '''\n We will be using a similar template to test your code\n '''\n image_paths = ['corthon.jpeg']\n correct_answers = ['AXCKP']\n score = 0\n\n for i,image_path in enumerate(image_paths):\n image = cv2.imread(image_path) # This input format wont change\n answer = predict(image) # a string is expected\n print(answer)\n if correct_answers[i] == answer:\n score += 10\n \n print('The final score of the participant is',score)\n\n\nif __name__ == \"__main__\":\n test()","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"93396443","text":"from .exceptions import *\n\nimport random\n\n# Complete with your own, just for fun :)\nLIST_OF_WORDS = [\n]\n\n\ndef _get_random_word(list_of_words):\n if not list_of_words:\n raise InvalidListOfWordsException\n return random.choice(list_of_words)\n\n\ndef _mask_word(word):\n if not word:\n raise InvalidWordException\n return '*'*(len(word))\n\n\ndef _uncover_word(answer_word, masked_word, character):\n if not answer_word or not masked_word or len(answer_word)!=len(masked_word):\n raise InvalidWordException\n if len(character)>1 or not character or type(character) != str:\n raise InvalidGuessedLetterException\n \n \n \n list_of_indices=[]\n count=0\n answer_word=answer_word.lower()\n character=character.lower()\n for char in answer_word:\n if character==char:\n list_of_indices.append(count)\n count+=1\n \n masked_word=list(masked_word)\n for index in list_of_indices:\n masked_word[index]=character\n \n return \"\".join(masked_word)\n \n\n\ndef guess_letter(game, letter):\n if game['answer_word']==game['masked_word'] or game['remaining_misses']==0: #checks to see if game already complete\n raise GameFinishedException\n \n letter=letter.lower()\n \n if letter in game['previous_guesses']:\n raise InvalidGuessedLetterException\n \n \n previously_masked=game['masked_word']\n game['masked_word']=_uncover_word(game['answer_word'],game['masked_word'],letter)\n game['previous_guesses'].append(letter)\n if previously_masked==game['masked_word']:\n game['remaining_misses'] -= 1\n \n if game['answer_word']==game['masked_word']:\n raise GameWonException\n if game['remaining_misses']==0:\n raise GameLostException\n \n return game\n\n\ndef start_new_game(list_of_words=None, number_of_guesses=5):\n if list_of_words is None:\n list_of_words = LIST_OF_WORDS\n\n word_to_guess = _get_random_word(list_of_words)\n masked_word = _mask_word(word_to_guess)\n game = {\n 'answer_word': word_to_guess,\n 'masked_word': masked_word,\n 'previous_guesses': [],\n 'remaining_misses': number_of_guesses,\n }\n\n return game\n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"571744065","text":"def binarystring(st,current=0):\n if current == len(st):\n print(st)\n return None\n if st[current] == '?':\n cp = st[:]\n cp[current] = '0'\n binarystring(cp, current+1)\n\n cp = st[:]\n cp[current] = '1'\n binarystring(cp, current+1)\n return None\n else:\n binarystring(st,current+1)\n\n\nbinarystring(list(\"1??0?101\"))","sub_path":"Problems/BinaryStringPattern.py","file_name":"BinaryStringPattern.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"637540731","text":"import numpy as np\nfrom sklearn.model_selection import KFold\nimport copy\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\nclass StackingModel_v3:\n def __init__(self, topLayer_model, base_model_list,\n n_fold=5, use_probas=True, average_probas=False, val_weight_average=False, val_set=[]):\n self.topLayer_model = topLayer_model\n self.base_model_list = base_model_list #存储M个输入的未训练模型\n self.n_flod = n_fold # 默认5折交叉\n self.use_probas=use_probas\n self.average_probas = average_probas\n self.val_weight_average = val_weight_average\n self.val_set = val_set\n self.weight_lst = []\n\n def fit(self, X_train, y_train):\n X_train, y_train = np.array(X_train), np.array(y_train)\n self.class_inter_dict = self.__build_class_inter_dict(y_train)\n print(self.class_inter_dict)\n\n self.had_train_models = [] # 存储训练好的(M * K)个模型\n for i, model in enumerate(self.base_model_list):\n train_pred = []\n KFold_models = []\n loss_lst = []\n for j, (tra_idx, val_idx) in enumerate(KFold(n_splits=self.n_flod).split(X_train)):\n X_tra, X_val = X_train[tra_idx], X_train[val_idx]\n y_tra, y_val = y_train[tra_idx], y_train[val_idx]\n model.fit(X_tra, y_tra)\n if self.val_set==[]:\n print('#直接用\"构建特征的���证集\"计算损失')\n loss_lst.append(self.__cal_loss(model, X_val, y_val)) #直接用构建特征的验证集计算损失\n else:\n print('#使用\"外部验证集\"计算损失')\n loss_lst.append(self.__cal_loss(model, self.val_set[0], self.val_set[1])) #使用外部验证集计算损失\n KFold_models.append(copy.deepcopy(model))\n if self.use_probas:\n train_pred += model.predict_proba(X_val).tolist()\n else:\n train_pred += [[e]for e in model.predict(X_val)]\n self.weight_lst.append(self.__cal_weight_lst(loss_lst))\n self.had_train_models.append(copy.deepcopy(KFold_models)) #存储训练好的K折模型,用于预测\n\n train_pred = np.array(train_pred)\n if i == 0:\n X_train_stack = train_pred\n else:\n if not self.average_probas:\n X_train_stack = np.c_[X_train_stack, train_pred]\n else:\n #将每个模型的预测,求平均\n X_train_stack += train_pred\n if i == len(self.base_model_list) - 1:\n X_train_stack = X_train_stack / len(self.base_model_list)\n\n # 顶层模型的训练\n self.topLayer_model.fit(X_train_stack, y_train)\n\n def predict(self, X_test):\n return self.__predict_tmp(X_test, out_probas=False)\n\n def predict_proba(self, X_test):\n return self.__predict_tmp(X_test, out_probas=True)\n\n def __predict_tmp(self, X_test, out_probas=False): # 测试集的数据是X_test_stack,而不是原来的X_test\n for i, KF_models in enumerate(self.had_train_models):\n test_pred = []\n for model in KF_models:\n if self.use_probas:\n test_pred.append(model.predict_proba(X_test).tolist())\n else:\n test_pred.append([[e] for e in model.predict(X_test)])\n if self.val_weight_average: #每折加权平均\n test_pred = self.__cal_weight_average(self.weight_lst[i], np.array(test_pred))\n else: #每折直接平均\n test_pred = np.mean(np.array(test_pred), axis=0)\n if i == 0:\n X_test_stack = test_pred\n else:\n if not self.average_probas:\n X_test_stack = np.c_[X_test_stack, test_pred]\n else:\n X_test_stack += test_pred\n if i == len(self.base_model_list) - 1:\n X_test_stack = X_test_stack / len(self.base_model_list)\n # 顶层模型预测\n if out_probas:\n return self.topLayer_model.predict_proba(X_test_stack)\n else:\n return self.topLayer_model.predict(X_test_stack)\n\n def __cal_weight_average(self, kw_lst, test_pred):\n test_weight_average = []\n for kw, test_single in zip(kw_lst, test_pred):\n test_weight_average.append(kw * test_single)\n return np.sum(test_weight_average, axis=0)\n\n def __cal_weight_lst(self, 
loss_lst):\n print('每一折的损失:', loss_lst)\n Sk_sum = 0\n for sj in loss_lst:\n Sk_sum += (1 / sj)\n weight_lst = []\n for sk in loss_lst:\n weight_lst.append((1 / sk) / Sk_sum)\n\n print('每一折对应的模型权重:', weight_lst)\n print('所有权值加起来=', sum(weight_lst))\n return weight_lst\n\n def __cal_loss(self, model, X_val, y_val):\n n_class = len(set(y_val))\n y_pred_proba = model.predict_proba(X_val)\n y_val_oneHot = self.__oneHot(y_val)\n\n #计算损失\n sk = 0\n for i_sample in range(len(y_val)):\n for i_class in range(n_class):\n sk += abs(y_pred_proba[i_sample,i_class] - y_val_oneHot[i_sample,i_class])\n return sk / n_class\n\n def __oneHot(self, y_val):\n inter_encode = np.array([self.class_inter_dict[e] for e in y_val])\n onehot_encoder = OneHotEncoder(sparse=False)\n y_val_ontHot = onehot_encoder.fit_transform(inter_encode.reshape(-1, 1))\n return np.array(y_val_ontHot)\n\n def __build_class_inter_dict(self, y_train):\n y_train_set = set(y_train)\n class_inter_dict = {}\n for i, e in enumerate(y_train_set):\n class_inter_dict[e] = i\n return class_inter_dict\n\n","sub_path":"machine_learning_model/stacking/ensemble_learning_v3.py","file_name":"ensemble_learning_v3.py","file_ext":"py","file_size_in_byte":6017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"553372965","text":"\"\"\"\nTests for leabratf.utils.py\n\"\"\"\nimport logging\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom leabratf import tfutils\n\ndef test_repeat():\n \"\"\"\n Test taken from github issue page:\n https://github.com/tensorflow/tensorflow/issues/8246\n \"\"\"\n def np_repeat(tensor, repeats):\n assert len(repeats) == tensor.ndim, \"dimension must match\"\n repeated = tensor\n for axis, repeat in enumerate(repeats):\n repeated = np.repeat(repeated, repeat, axis = axis)\n return repeated\n shape = [1,3,3,3,2]\n repeat = [1,2,2,3,1]\n tensor = np.random.randn(*shape)\n np_repeated_tensor = np_repeat(tensor, repeat)\n tf_tensor = tf.constant(tensor)\n g = tf.get_default_graph()\n tf_new = tfutils.repeat(tf_tensor, repeat)\n with tf.Session(graph=g) as sess:\n tf_repeated_tensor = tf_new.eval()\n assert np.allclose(np_repeated_tensor, tf_repeated_tensor)\n","sub_path":"leabratf/tests/test_tfutils.py","file_name":"test_tfutils.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"23451066","text":"#monte carlo simulation used to test the \n#monte carol is used for\n#the first task is\nimport random \nimport matplotlib.pyplot as plt\n\nlower_bust = 31.24\n#we want to have bust rate less than 31.24\nhigher_profit = 63.208\n#we want to have profit percent higher than or equal to 63.4\n\n\n\ndef rollDice():\n\t#notes we commmented the print functions since we used them initialy to debug the function\n\t#after we have emphasised that the function works well we can simply comment them\n\troll = random.randint(1,100)\n\n\tif roll <=50:\n\t\t#print(f\"{roll} roll is 100 you loss what are the odds! play again\")\n\t\treturn False\t\n\n\telif roll>50:\n\t\t#print(f\"{roll} roll was 51-100 you win pretty light flash\")\n\t\treturn True\n\t#now come an create simple bettor\n\n\ndef simple_bettor(funds,initial_wager,wager_count):\n\tvalue = funds\n\twager = initial_wager\n\t#now we ate going to plot wagers and values\n\tglobal broke_count\n\tglobal simple_profits\n\tglobal simple_bust\n\twx = []\n\tvy = []\n\n\n\t#the two above lists are used to store some data\n\n\tcurrentWager = 0\n\tstatus = None\n\twhile currentWager < wager_count :\n\t\t\n\t\twx.append(currentWager)\n\t\tvy.append(value)\n\n\t\tif rollDice():\n\t\t\tvalue += wager\n\t\t\tstatus = wager\n\t\t\t#if we roll the dice and we won then we add the wager to our fund\n\t\telse :\n\t\t\tvalue -= wager\n\t\t\tstatus = -wager\n\n\t\tcurrentWager +=1\n\t\t#we add wager so we increment the number of wagers we encountered\n\t\t#print(f\"funds: {value} and you get {status}\")\n\tif value< 0:\n\t\t#so we solve the debt issue\n\t\tsimple_bust += 1\n\t\tvalue = 'broke'\n\n\tplt.plot(wx,vy,'k')\n\tif value>startingFunds:\n\t\tsimple_profits += 1\n\t#black color\n\n\tprint(f\"funds: {value} and you get {status}\")\n\n\n\n#build double wager which has some strategy if he losses he is going to double the wager\n# if he wins he is going to go back with the same old wager\n\n\ndef doubler_bettor(funds,initial_wager,wager_count):\n#video 6\n\n\tvalue = funds\n\twager = initial_wager\n\t#now we ate going to plot wagers and values\n\n\twx = []\n\tvy = []\n\n\t#the two above lists are used to store some data\n\n\tcurrentWager = 1\n\tstatus = None\n\tglobal broke_count\n\tglobal doubler_busts\n\tglobal doubler_profits\n\n\tprevious_wager = 'win'\n\tprevious_wager_amount = initial_wager\n\n\twhile currentWager <= wager_count:\n\t\tif previous_wager=='win':\n\t\t\tprint(\"we win hte last wager. 
great\")\n\t\t\tif rollDice():\n\t\t\t\tvalue += wager\n\t\t\t\tprint (value)\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\t\t\telse :\n\t\t\t\tvalue -= wager\n\t\t\t\tprevious_wager = 'loss'\n\t\t\t\tprint(value)\n\t\t\t\tprevious_wager_amount = wager\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\t\t\tif value < 0:\n\t\t\t\t\tprint(f\"we went broke again after {currentWager} rolls\")\n\t\t\t\t\tdoubler_busts += 1\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tbreak\n\t\t\n\t\telif previous_wager=='loss':\n\t\t\tprint('we los the last one so we will be smart and double the wager')\n\t\t\t#v.9\n\t\t\t#we want to solve the debt issue until v.8 \n\t\t\t#so in the cass previous_wager was loss then we\n\t\t\t#make sure that the value we have left is larger than the wager\n\t\t\tif rollDice():\n\t\t\t\twager = previous_wager_amount*2\n\t\t\t\tprint(f\"we won with {wager}\")\n\t\t\t\tif value - wager < 0 :\n\t\t\t\t\twager = value\n\t\t\t\t\t#note we donot want to endup with debt so we make the last wager equals the value\n\t\t\t\tvalue += wager\n\t\t\t\tprint(value)\n\t\t\t\twager = initial_wager\n\t\t\t\tprevious_wager = 'win'\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\t\telse:\n\t\t\t\twager = previous_wager_amount*2\n\t\t\t\tprint (f\"we lost {wager}\")\n\t\t\t\tif value - wager <0:\n\t\t\t\t\twager = value\n\t\t\t\tvalue -= wager\n\t\t\t\tif value<=0:\n\t\t\t\t\tprint(f\"we went broke after {currentWager} rolls\")\n\t\t\t\t\tdoubler_busts += 1\n\t\t\t\t\t\n\t\t\t\t\tbreak\n\t\t\t\tprint(value)\n\t\t\t\tprevious_wager = 'loss'\n\n\t\t\t\tprevious_wager_amount = wager\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\tcurrentWager +=1\n\tprint (value)\n\tplt.plot(wx,vy,'c')\n\tif value> startingFunds:\n\t\tdoubler_profits +=1\n\t#cin color is faint blue\n\n\n\ndef multiple_bettor(funds, initial_wager,wager_count):\n\t#the goal of the alogrithm is to define the best percents to wagers with\n\tglobal multiple_busts\n\tglobal multiple_profit\n\n\tvalue = funds\n\twager = initial_wager\n\twx = []\n\tvy = []\n\tcurrentWager = 1\n\tprevious_wager = 'win'\n\n\tprevious_wager_amount = initial_wager\n\n\twhile currentWager <= wager_count:\n\t\tif previous_wager=='win':\n\t\t\t#print(\"we win hte last wager. 
great\")\n\t\t\tif rollDice():\n\t\t\t\tvalue += wager\n\t\t\t#\tprint (value)\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\t\t\telse :\n\t\t\t\tvalue -= wager\n\t\t\t\tprevious_wager = 'loss'\n\t\t\t#\tprint(value)\n\t\t\t\tprevious_wager_amount = wager\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\t\t\tif value < 0:\n\t\t\t#\t\tprint(f\"we went broke again after {currentWager} rolls\")\n\t\t\t\t\tmultiple_busts += 1\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tbreak\n\t\t\n\t\telif previous_wager=='loss':\n\t\t\t#print('we los the last one so we will be smart and double the wager')\n\t\t\t#v.9\n\t\t\t#we want to solve the debt issue until v.8 \n\t\t\t#so in the cass previous_wager was loss then we\n\t\t\t#make sure that the value we have left is larger than the wager\n\t\t\tif rollDice():\n\t\t\t\twager = previous_wager_amount*random_multiple\n\n\t\t\t#\tprint(f\"we won with {wager}\")\n\t\t\t\tif value - wager < 0 :\n\t\t\t\t\twager = value\n\t\t\t\t\t#note we donot want to endup with debt so we make the last wager equals the value\n\t\t\t\tvalue += wager\n\t\t\t#\tprint(value)\n\t\t\t\twager = initial_wager\n\t\t\t\tprevious_wager = 'win'\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\t\telse:\n\t\t\t\twager = previous_wager_amount*random_multiple\n\t\t\t#\tprint (f\"we lost {wager}\")\n\t\t\t\tif value - wager <0:\n\t\t\t\t\twager = value\n\t\t\t\tvalue -= wager\n\t\t\t\tif value<=0:\n\t\t\t#\t\tprint(f\"we went broke after {currentWager} rolls\")\n\t\t\t\t\tmultiple_busts += 1\n\t\t\t\t\t\n\t\t\t\t\tbreak\n\t\t\t#\tprint(value)\n\t\t\t\tprevious_wager = 'loss'\n\n\t\t\t\tprevious_wager_amount = wager\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\tcurrentWager +=1\n\t\n\t#print (value)\n\t#plt.plot(wx,vy,'c')\n\tif value> funds:\n\t\tmultiple_profit +=1\n\t#cin color is faint blue\n\n\n\n\n\n#commented at the start of vi.15\n# this is part of vi.14 dalmbert\n\n\n\ndef dAlemert (funds,initial_wager,wager_count):\n\t\tglobal da_busts\n\t\tglobal da_profits\n\n\t\tglobal Ret\n\t\tvalue = funds\n\t\twager = initial_wager\n\t\tcurrentWager = 1\n\t\tprevious_wager = 'win'\n\t\tprevious_wager_amount = initial_wager\n\t\t#hte amount of money we are wagering\n\t\twhile currentWager <= wager_count:\n\t\t\tif previous_wager == 'win':\n\t\t\t\tif wager == initial_wager:\n\t\t\t\t\tpass\n\t\t\t\telse :\n\t\t\t\t\twager -= initial_wager\n\t\t\t\t\n\t\t\t\t#print(f\"current wager {wager} value {value}\")\n\t\t\t\t\n\t\t\t\tif rollDice():\n\t\t\t\t\tvalue += wager\n\t\t\t\t\tprevious_wager_amount = wager\n\t\t\t\t\t#print(f\"we won current value: {value}\")\n\t\t\t\telse :\n\t\t\t\t\tvalue -= wager\n\t\t\t\t\tprevious_wager = 'loss'\n\t\t\t\t\t#print(f\"we lost current value {value}\")\n\t\t\t\t\tprevious_wager_amount = wager\n\t\t\t\t\tif value <= 0:\n\t\t\t\t\t\tda_busts += 1\n\t\t\t\t\t\tbreak\n\t\t\telif previous_wager == 'loss':\n\t\t\t\twager = previous_wager_amount+initial_wager\n\t\t\t\tif (value - wager ) <= 0:\n\t\t\t\t\twager = 0\n\t\t\t\t#print(f\"we lost the last wager {wager} \")\n\t\t\t\t\t#we donont want ot enter the debt\n\t\t\t\tif rollDice():\n\t\t\t\t\tvalue += wager\n\t\t\t\t\tprevious_wager = 'win'\n\t\t\t\t\tprevious_wager_amount = wager\n\t\t\t\telse :\n\t\t\t\t\tvalue -= wager\n\t\t\t\t\tprevious_wager_amount = wager\n\n\t\t\t\t\tif value <= 0:\n\t\t\t\t\t\tda_busts +=1\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\t# we get in debt we donot want to play any more\n\t\t\n\t\t\tcurrentWager +=1\n\t\tif value > 
funds:\n\t\t\tda_profits += 1\n\t\tRet += value\n\n\n\n\n\n\n\nsamplesize = 100\nstartingFunds = 100000\n\nwhile True:\n\t\t\n\twagersize = random.uniform(1.0,1000.00)\n\t#the amount we are wagering\n\twagercount = random.uniform(10,100000)\n\t#the target number of wagers\n\n\twagercount = 100\n\n\n\tda_profits = 0\n\tda_busts = 0\n\tRet = 0\n\t#of the people who profit how much they make profit\n\t#out of the people who loss how much did they loss\n\t#out of the people who\n\n\tx = 0\n\twhile x< samplesize:\n\n\t\tdAlemert (startingFunds,wagersize,wagercount)\n\t\tx += 1\n\n\tROI = Ret - samplesize*startingFunds\n\tprint(f\"total invested is {samplesize*startingFunds}\")\n\tprint(f\"total return {Ret}\")\n\tprint(f\"the total return of investment is{ROI}\")\n\tprint(f\"bust rate is {da_busts/samplesize*100.00}\")\n\tprint(f\"profit rate is {da_profits/samplesize*100.00}\")\n\n\tprint(\"############################################\")\n\tprint(f\"number of people make money is {da_profits}\")\n\tprint(\"############################################\")\n\n\n","sub_path":"Monte-Carlo-Comparing-profit.py","file_name":"Monte-Carlo-Comparing-profit.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"410058926","text":"import pymel.core as pm\nfrom pymel.internal.plogging import pymelLogger\n\n#===============================================================================\n# GLOBAL FUNCTIONS\n#===============================================================================\n#PRINT\ndef PRINT(msg='', item='', type='info'):\n printMsg = '|| %s || %s' %(msg.ljust(65), item.ljust(20))\n \n if type == 'debug':\n pymelLogger.debug('\\t\\t\\t%s'%printMsg)\n elif type == 'info':\n pymelLogger.info('\\t\\t\\t%s'%printMsg)\n elif type == 'warning':\n pymelLogger.warning('\\t%s'%printMsg)\n elif type == 'error':\n pymelLogger.error('\\t%s'%printMsg)\n elif type == 'critical':\n pymelLogger.critical('\\t%s'%printMsg)\n else:\n pymelLogger.error('Cannot Print Message: Invalid Type')\n \n return","sub_path":"lib/utils/_archives/20141023/_utils_global.py","file_name":"_utils_global.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"617525486","text":"# bot.py\nimport os\nimport discord\nfrom discord.ext import commands\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\nimport dotenv\n\ndotenv.load_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nPREFIX = os.getenv('BOT_PREFIX')\n\nclient = discord.Client()\n\n#sets the prefix for the bot\nclient = commands.Bot(command_prefix=PREFIX)\n\n#Initializer function\n@client.event\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\"\"\"\nloads an extension, chose from the following in the cogs folder\nParameters:\n -extension: String which contains the name of the file excluding the extension type\n example: main\n\"\"\"\n@client.command(hidden = True)\nasync def load(ctx, extension):\n await ctx.send(f'Loading {extension}...')\n client.load_extension(f'cogs.{extension}')\n\n\"\"\"\nunloads an extension, chose from the following in the cogs folder\nParameters:\n -extension: String which contains the name of the file excluding the extension type\n example: main\n\"\"\"\n@client.command(hidden = True)\nasync def unload(ctx, extension):\n await ctx.send(f'Unloading {extension}...')\n client.unload_extension(f'cogs.{extension}')\n\n\"\"\"\nreloads an extension, chose from the following in the cogs folder\nParameters:\n -extension: String which contains the name of the file excluding the extension type\n example: main\n\"\"\"\n@client.command(hidden = True)\nasync def reload(ctx, extension):\n await ctx.send(f'Reloading {extension}...')\n client.unload_extension(f'cogs.{extension}')\n client.load_extension(f'cogs.{extension}')\n\n#Generic error handling for missing arguments\n@client.event\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You are missing arguments in the command, please try again')\n\n#loads all cogs in ./cogs\nfor filename in os.listdir('./cogs'):\n #only if the file ends in .py\n if filename.endswith('.py') and '__init__' not in filename:\n client.load_extension(f'cogs.{filename[:-3]}')\n\n\nclient.run(TOKEN)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"166773261","text":"import numpy as np\nfrom numpy import array\nimport random\nimport scipy\nfrom scipy import ndimage\n\nepochs = 100000\ninputLayerSize = 2\nhiddenLayerSizes = [3,6]\noutputLayerSize = 2 #in and out of circle\n\n#randomly generate starting weights. There are weights between every two layers.\nweights = []\nweights.append(np.random.uniform(low = -1.0, size=(inputLayerSize,hiddenLayerSizes[0])))\nfor i in range(len(hiddenLayerSizes)-1):\n weights.append(np.random.uniform(low = -1.0, size=(hiddenLayerSizes[i],hiddenLayerSizes[i+1])))\nweights.append(np.random.uniform(low = -1.0, size=(hiddenLayerSizes[-1],outputLayerSize)))\n\n#randomly generate starting biases. There is bias at every layer except input.\nbiases = []\nfor i in range(len(hiddenLayerSizes)):\n biases.append(np.random.uniform(low = -1.0, size=(1,hiddenLayerSizes[i])))\nbiases.append(np.random.uniform(low = -1.0, size=(1,outputLayerSize)))\n\n#lambda\nlearnrate = 0.02\n\ndef train(x,c):\n '''x is array of inputs and c is correct output value.\n train \n '''\n global weights, biases\n activations = [array([x])]\n input_sums = [array([x])]\n #Now run feedforward\n for weight,bias in zip(weights,biases): #because same length and zip is cool!\n input_sums.append(activations[-1].dot(weight)+bias)\n activations.append(sigmoid(input_sums[-1])) #this line does all the work\n result = np.argmax(activations[-1])\n if result != c: #if didn't choose the right node\n deltaSigs = []\n deltaSigs.append(np.copy(input_sums[-1])) #going backwards, E = Z-c\n deltaSigs[0][0][c] -= 1\n #Now adjust weights and biases using the delta signal\n for i in range(len(weights)):\n deltaSig = deltaSigs[-1].dot(weights[-i-1].T)\n deltaSig1 = deltaSig*sigmoid_prime(activations[-i-2])\n deltaSigs.append(deltaSig1) #creating delta signals\n for i in range(len(weights)):\n weights[i] -= learnrate * activations[i].T.dot(deltaSigs[-i-2]) #don't want to include the output activation, and using last set of delta signals\n #when deltaSig (which is like error for a certain node) is large, then weight goes down so it factors in less\n biases[i] -= deltaSigs[-i-2] * learnrate #is like weights, where activation is 1 :D\n \n\ndef sigmoid(x): #errors will always be between 0 and 1\n '''The activation function.'''\n return 1/(1+np.exp(-x))\n\ndef sigmoid_prime(y): #looks nice with output which we store :D <3 yaaay\n '''Derivative of the activation function. Used in backpropagation.'''\n return y*(1-y)\n\n#generate inputs\n\n\nfor i in range(epochs):\n x = np.random.uniform(low = 0, high = 1.0, size = 2) \n c = 1 if x[0]**2 + x[1]**2 < 1 else 0\n train(x,c)\n\nnum = 1000\ncorrect = 0\nfor i in range(num):\n x = np.random.uniform(low = 0, high = 1.0, size = 2) \n c = 1 if x[0]**2 + x[1]**2 < 1 else 0\n activations = [array([x])]\n for weight,bias in zip(weights,biases): #because same length and zip is cool! \n activations.append(sigmoid(np.dot(activations[-1],weight)+bias)) #this line does all the work\n result = np.argmax(activations[-1])\n #print(activations) \n if result == c:\n correct += 1\n\nprint (correct)\nprint (weights)\nprint (biases)\n","sub_path":"Circle-Kevin minus comments.py","file_name":"Circle-Kevin minus comments.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"529831152","text":"from setuptools import setup, find_packages\n# import os\n\nversion = '1.0.b2'\n\nsetup(name='readset.i18n',\n version=version,\n description=\"This package provides a Normalizer for Chinese character\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(\"CHANGES.rst\").read(),\n # Get more strings from\n # http://pypi.python.org/pypi?:action=list_classifiers\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Plone\",\n \"Framework :: Zope2\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Natural Language :: Chinese (Simplified)\",\n \"Natural Language :: Chinese (Traditional)\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Text Processing\",\n ],\n keywords='Zope Plone i18n i10n Pinyin',\n author='Jian Aijun',\n author_email='jianaijun@gmail.com',\n url='http://pypi.python.org/pypi/readset.i18n',\n license='GPL version 2',\n package_dir={'': 'src'},\n packages=find_packages('src'),\n namespace_packages=['readset'],\n include_package_data=True,\n zip_safe=False,\n test_suite=\"readset.i18n\",\n install_requires=[\n 'setuptools',\n 'zope.interface',\n 'zope.component',\n 'zope.publisher',\n 'plone.i18n',\n ],\n extras_require={\n 'test': [\n 'zope.component [zcml]',\n 'zope.configuration',\n 'zope.browserresource',\n 'plone.testing',\n 'zope.testing',\n ]\n },\n entry_points=\"\"\"\n # -*- Entry points: -*-\n\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"421917129","text":"from datetime import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Count\n\nfrom app.models import Email\n\n\nclass Command(BaseCommand):\n help = 'Deletes DR3 emails'\n\n def handle(self, *args, **options):\n emails = Email.objects\\\n .annotate(recipient__goal_count=Count('recipient__goal'))\\\n .filter(name='dr3')\\\n .filter(recipient__date_joined__gte=datetime(2016, 12, 28))\\\n .filter(recipient__date_joined__lte=datetime(2017, 1, 15))\\\n .filter(recipient__goal_count=0)\n\n print(emails.count())\n\n for email in emails:\n email.recipient.delete()\n","sub_path":"app/management/commands/deletedr3users.py","file_name":"deletedr3users.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"261399625","text":"from __future__ import print_function, division\nimport os\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nfrom scipy.io import loadmat\nimport logging\nimport copy\nfrom tqdm import tqdm\nfrom PIL import Image\nimport numpy as np\nimport fnmatch\nimport csv\n\ndata_path_from_home = '/Data/tokyoTimeMachine'\ndata_path = os.environ['HOME'] + data_path_from_home\n\nimage_transform = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n ]\n)\n\nclass TokyoDataSet(Dataset):\n\n def __init__(self, type='train', mode='db', root_dir = data_path, transforms=image_transform):\n\n self.logger = self.generate_logger(type)\n self.type = type\n self.logger.info('Loading Tokyo ' + ('Train' if type == 'train' else 'Val') + ' Matrix')\n mat_file = '/tokyoTM_train.mat' if type == 'train' else '/tokyoTM_val.mat'\n mat = loadmat(os.path.join(root_dir + mat_file))['dbStruct'][0][0]\n self.root_dir = root_dir\n self.image_dir = self.root_dir + '/images'\n adder = 0 if mode == 'db' else 3\n self.length = len(mat[adder + 1])\n self.data = [{} for _ in range(self.length)]\n self.transforms = transforms\n self.utm = [[mat[adder+2][0][i], mat[adder+2][1][i]] for i in range(self.length)]\n\n\n for idx in tqdm(range(self.length)):\n self.data[idx]['filename'] = mat[adder + 1][idx][0][0]\n self.data[idx]['utm_coordinate'] = (mat[adder + 2][0][idx], mat[adder + 2][1][idx])\n self.data[idx]['timestamp'] = mat[adder + 3][0][idx]\n self.data[idx]['image'] = os.path.join(self.image_dir, self.data[idx]['filename'])\n self.data[idx]['original_image'] = os.path.join(self.image_dir, self.data[idx]['filename'])\n\n if mode == 'query':\n self.data[idx]['pos'] = [-1] * 10\n self.data[idx]['neg'] = [-1] * 10\n\n self.logger.info('Done')\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n\n ret = copy.deepcopy(self.data[idx])\n ret['image'] = self.transforms(Image.open(self.data[idx]['image']))\n ret['original_image'] = Image.open(self.data[idx]['original_image'])\n return ret\n\n def generate_logger(self, type):\n logger_name = 'trainData' if type == 'train' else 'valData'\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('%(levelname)s: %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n def set(self, idx, key, val):\n self.data[idx][key] = val\n\n\nclass TokyoTrainDataSet(TokyoDataSet):\n\n def __init__(self, mode='db', GT_from_file=False):\n super().__init__(type='train', mode=mode)\n\n\nclass TokyoValDataSet(TokyoDataSet):\n\n def __init__(self, mode='db', GT_from_file=False):\n super().__init__(type='val', mode=mode)\n\n\nclass Tokyo247(Dataset):\n\n def __init__(self, root_dir=data_path, transform=image_transform):\n self.logger = self.generate_logger\n self.root_dir = root_dir\n self.image_dir = root_dir + '/247query_v3'\n self.matname = np.sort(fnmatch.filter(os.listdir(self.image_dir), '*.csv'))\n self.imname = np.sort(fnmatch.filter(os.listdir(self.image_dir), '*.jpg'))\n self.length = len(self.imname)\n self.data = np.array([{} for _ in range(self.length)])\n self.transforms = image_transform\n\n for idx in tqdm(range(self.length)):\n f = open(self.image_dir + '/' + self.matname[idx])\n mat = csv.reader(f, delimiter=',')\n mat = list(mat)[0]\n f.close()\n\n self.data[idx]['filename'] = mat[0]\n self.data[idx]['utm_coordinate'] = (mat[7], 
mat[8])\n self.data[idx]['image'] = os.path.join(self.image_dir, self.data[idx]['filename'])\n\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n\n ret = copy.deepcopy(self.data[idx])\n ret['image'] = self.transforms(Image.open(self.data[idx]['image']))\n return ret\n\n\n def generate_logger(self):\n logger_name = 'testData'\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('%(levelname)s: %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"40151393","text":"import Funzioni\r\nfrom Funzioni import *\r\nfrom Dati import *\r\n\r\n\r\n\r\nPulizia_File(\"Dati_Punti_Possibili.txt\")\r\n\r\nfinput = open(\"Dati_Punti.txt\", \"r\")\r\nfoutput = open(\"Dati_Punti_Possibili.txt\", \"w\")\r\n\r\ntry:\r\n for line in finput.read().split(\"\\n\"):\r\n Dati = list()\r\n triangoli = line.split(\"|;\")\r\n triangoli.pop()\r\n for triangolo in triangoli:\r\n lt = list()\r\n punti = triangolo.split(\"|\")\r\n for punto in punti:\r\n x,y = punto.split(\",\")\r\n lt.append([int(x),int(y)])\r\n Dati.append(lt)\r\n\r\n if Collisione_Braccio_Triangolo(Dati, Ostacolo1) or Collisione_Braccio_Triangolo(Dati, Ostacolo2):\r\n continue\r\n else:\r\n foutput.write(str(Dati)+\"\\n\")\r\nexcept Exception as e:\r\n pass\r\n \r\n \r\n \r\n\r\n \r\nfinput.close()\r\nfoutput.close()\r\n\r\n\r\n","sub_path":"libs/Calcoli_Punti_Possibili.py","file_name":"Calcoli_Punti_Possibili.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"646032200","text":"\r\nimport os\r\n\r\ndef fiboNum (Fn):\r\n\r\n if Fn == 0:\r\n return 0\r\n\r\n elif Fn == 1:\r\n return 1\r\n else:\r\n result = fiboNum(Fn-1) + fiboNum(Fn-2)\r\n return result\r\n\r\n# Ask how many numbers they want\r\nnumFibValues = int(input(\"How many Fibonacci values should be found? \"))\r\n\r\n\r\n\r\n# Loop while calling for each new number\r\n\r\ni = 1\r\nwhile i < numFibValues:\r\n fibValue = fiboNum(i)\r\n print(fibValue)\r\n\r\n i += 1\r\n\r\nprint(\"Task Completed\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"ReadingAndWritingFiles/FibonacciWithUser/FibonacciValuesWithUserInput.py","file_name":"FibonacciValuesWithUserInput.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"3362813","text":"\nimport pandas_datareader.data as web\nimport statsmodels.api as sm\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\nend='2016/9/30'\nn225 = web.DataReader(\"NIKKEI225\", 'fred',\"1949/5/16\",end).dropna()\nlnn225=np.log(n225.dropna())\nlnn225.columns=['Close']\ny=lnn225\nx=range(len(lnn225))\nx=sm.add_constant(x)\nmodel=sm.OLS(y,x)\nresults=model.fit()\n\nprint(results.summary())\n\n\n\n\n\n\ny=lnn225.loc['1986/12/1':'1993/10/31'].dropna()\nx=range(len(y))\nx=sm.add_constant(x)\nmodel=sm.OLS(y,x)\nresults=model.fit()\nprint(results.summary())\n\n\n\n\ny=lnn225.loc['1986/12/1':'1989/12/31'].dropna()\nx=range(len(y))\nx=sm.add_constant(x)\nmodel=sm.OLS(y,x)\nresults=model.fit()\nprint(results.summary())\n\n\nprint(\"return \",np.exp(y.Close).pct_change().mean()*250)\nprint(\"volatility \",y.Close.diff().std()*np.sqrt(250))\nprint(\"std of residual\",results.resid.std())\nplt.plot(y,label='Close',color='darkgray')\n\n\nresults.resid.hist(bins=10,color=\"lightyellow\")\nplt.xlabel('residual')\nplt.ylabel('frequency')\n\n\n\nplt.show()\n\n","sub_path":"6.4.15.py","file_name":"6.4.15.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"382878631","text":"\"\"\"\nTemplate testing suite for 2048\n\"\"\"\n\nimport poc_simpletest\n\n# Directions, DO NOT MODIFY\nUP = 1\nDOWN = 2\nLEFT = 3\nRIGHT = 4\n\ndef run_suite(game_class):\n \"\"\"\n Some informal testing code\n \"\"\"\n \n # create a TestSuite object\n suite = poc_simpletest.TestSuite() \n \n # create a game\n game = game_class(2,2)\n \n # add tests using suite.run_test(....) here\n\n # test the initial configuration of the board using the str method\n suite.run_test(str(game), str([]), \"Test #0: init\")\n \n # suite.run_test(game.reset(), str([]), \"Test #1: reset\")\n game.reset()\n suite.run_test(str(game), str([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]), \"Test #1: reset\")\n\n suite.run_test(game.get_grid_height(), 2 , \"Test #2: get_grid_height\")\n suite.run_test(game.get_grid_width(), 3 , \"Test #3: get_grid_width\")\n \n game.set_tile(0, 0, 5)\n game.set_tile(0, 1, 5)\n game.set_tile(1, 0, 5)\n game.set_tile(1, 1, 5)\n game.set_tile(2, 0, 5)\n game.set_tile(0, 2, 5)\n game.set_tile(2, 2, 5)\n game.set_tile(-1, 2, 5)\n #game.set_tile(, , 5)\n #game.move(UP)\n #game.move(DOWN)\n #game.move(LEFT)\n #game.move(RIGHT)\n \n # report number of tests and failures\n suite.report_results()\n","sub_path":"Coursera/2015_PrincipleOfComputing_Rice/Wk2_Proj2_Final14_Test_user40_bJHRdpmDGW_19.py","file_name":"Wk2_Proj2_Final14_Test_user40_bJHRdpmDGW_19.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"568820366","text":"import logging\nfrom typing import Union\n\nfrom aiogram.dispatcher.dispatcher import Dispatcher\nimport aiogram.utils.markdown as fmt\nfrom aiogram import types\n\nfrom tgbot.database import connect\nfrom .keyboards import main_keyboard_kb \nfrom main_menu.dbworker import add_user\n\nlogger = logging.getLogger(__name__)\n\n\ndef main_munu_register(dp: Dispatcher):\n dp.register_message_handler(start_cmd, commands=['start'])\n\n\nasync def start_cmd(message: Union[types.CallbackQuery, types.Message], **kwargs):\n if isinstance(message, types.Message): \n chat_id = message.chat.id\n elif isinstance(message, types.CallbackQuery): \n chat_id = message.message.chat.id\n message = message.message\n \n connection = await connect()\n await add_user(connection, chat_id)\n markup = await main_keyboard_kb(connection)\n\n await message.reply(\n reply=False,\n reply_markup=markup,\n disable_notification=True,\n text=fmt.hbold('Категории главного меню')\n )\n connection.close()\n","sub_path":"main_menu/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"243136706","text":"\"\"\"Pygame-based media controller for MPF (BCP) v1.0\"\"\"\n# media_controller.py\n# Mission Pinball Framework\n# Written by Brian Madden & Gabe Knuth\n# Released under the MIT License. (See license info at the end of this file.)\n\n# Documentation and more info at http://missionpinball.com/mpf\n\nimport logging\nimport os\nimport sys\nimport time\nfrom distutils.version import LooseVersion\nimport Queue\n\n\nimport pygame\n\nfrom mpf.media_controller.core import *\nfrom mpf.media_controller.core.bcp_server import BCPServer\nfrom mpf.system.config import Config, CaseInsensitiveDict\nfrom mpf.system.events import EventManager\nfrom mpf.system.timing import Timing\nfrom mpf.system.tasks import Task, DelayManager\nfrom mpf.system.player import Player\nimport mpf.system.bcp as bcp\nimport version\n\n\nclass MediaController(object):\n\n def __init__(self, options):\n self.options = options\n\n self.log = logging.getLogger(\"MediaController\")\n self.log.info(\"Media Controller Version %s\", version.__version__)\n self.log.debug(\"Backbox Control Protocol Version %s\",\n version.__bcp_version__)\n self.log.debug(\"Config File Version %s\",\n version.__config_version__)\n\n python_version = sys.version_info\n self.log.debug(\"Python version: %s.%s.%s\", python_version[0],\n python_version[1], python_version[2])\n self.log.debug(\"Platform: %s\", sys.platform)\n self.log.debug(\"Python executable location: %s\", sys.executable)\n self.log.debug(\"32-bit Python? %s\", sys.maxsize < 2**32)\n\n self.active_debugger = dict()\n\n self.config = dict()\n self.done = False # todo\n self.machine_path = None\n self.asset_managers = dict()\n self.num_assets_to_load = 0\n self.window = None\n self.window_manager = None\n self.pygame = False\n self.pygame_requested = False\n self.registered_pygame_handlers = dict()\n self.pygame_allowed_events = list()\n self.socket_thread = None\n self.receive_queue = Queue.Queue()\n self.sending_queue = Queue.Queue()\n self.crash_queue = Queue.Queue()\n self.modes = CaseInsensitiveDict()\n self.player_list = list()\n self.player = None\n self.HZ = 0\n self.next_tick_time = 0\n self.secs_per_tick = 0\n\n Task.create(self._check_crash_queue)\n\n self.bcp_commands = {'ball_start': self.bcp_ball_start,\n 'ball_end': self.bcp_ball_end,\n 'config': self.bcp_config,\n 'error': self.bcp_error,\n 'get': self.bcp_get,\n 'goodbye': self.bcp_goodbye,\n 'hello': self.bcp_hello,\n 'mode_start': self.bcp_mode_start,\n 'mode_stop': self.bcp_mode_stop,\n 'player_added': self.bcp_player_add,\n 'player_score': self.bcp_player_score,\n 'player_turn_start': self.bcp_player_turn_start,\n 'player_variable': self.bcp_player_variable,\n 'reset': self.reset,\n 'set': self.bcp_set,\n 'shot': self.bcp_shot,\n 'switch': self.bcp_switch,\n 'timer': self.bcp_timer,\n 'trigger': self.bcp_trigger,\n }\n\n # load the MPF config & machine defaults\n self.config = (\n Config.load_config_yaml(config=self.config,\n yaml_file=self.options['mcconfigfile']))\n\n # Find the machine_files location. If it starts with a forward or\n # backward slash, then we assume it's from the mpf root. 
Otherwise we\n        # assume it's relative to the machine_files folder specified in the\n        # mpf config file\n\n        if (options['machinepath'].startswith('/') or\n                options['machinepath'].startswith('\\\\')):\n            machine_path = options['machinepath']\n        else:\n            machine_path = os.path.join(self.config['media_controller']['paths']\n                                        ['machine_files'],\n                                        options['machinepath'])\n\n        self.machine_path = os.path.abspath(machine_path)\n\n        # Add the machine folder to our path so we can import modules from it\n        sys.path.append(self.machine_path)\n\n        self.log.info("Machine folder: %s", machine_path)\n\n        # Now find the config file location. As with the machine path, a\n        # leading slash specifies an absolute path\n\n        if (options['configfile'].startswith('/') or\n                options['configfile'].startswith('\\\\')):\n            config_file = options['configfile']\n        else:\n\n            if not options['configfile'].endswith('.yaml'):\n                options['configfile'] += '.yaml'\n\n            config_file = os.path.join(self.machine_path,\n                                       self.config['media_controller']['paths']\n                                       ['config'],\n                                       options['configfile'])\n\n        self.log.debug("Base machine config file: %s", config_file)\n\n        # Load the machine-specific config\n        self.config = Config.load_config_yaml(config=self.config,\n                                              yaml_file=config_file)\n\n        mediacontroller_config_spec = '''\n                    exit_on_disconnect: boolean|True\n                    port: int|5050\n                    '''\n\n        self.config['media_controller'] = (\n            Config.process_config(mediacontroller_config_spec,\n                                  self.config['media_controller']))\n\n        self.events = EventManager(self, setup_event_player=False)\n        self.timing = Timing(self)\n\n        # Load the media controller modules\n        self.config['media_controller']['modules'] = (\n            self.config['media_controller']['modules'].split(' '))\n        self.log.info("Loading Modules...")\n        for module in self.config['media_controller']['modules']:\n            self.log.debug("Loading module: %s", module)\n            module_parts = module.split('.')\n            exec('self.' + module_parts[0] + '=' + module + '(self)')\n\n        # todo there's probably a more pythonic way to do this, and I know\n        # exec() is supposedly unsafe, but meh, if you have access to put\n        # malicious files in the system folder then you have access to this\n        # code too.\n\n        self.start_socket_thread()\n\n        self.events.post("init_phase_1")\n        self.events.post("init_phase_2")\n        self.events.post("init_phase_3")\n        self.events.post("init_phase_4")\n        self.events.post("init_phase_5")\n\n        self.reset()\n\n    def _check_crash_queue(self):\n        try:\n            crash = self.crash_queue.get(block=False)\n        except Queue.Empty:\n            yield 1000\n        else:\n            self.log.critical("MPF Shutting down due to child thread crash")\n            self.log.critical("Crash details: %s", crash)\n            self.done = True\n\n    def reset(self, **kwargs):\n        """Processes an incoming BCP 'reset' command."""\n        self.player = None\n        self.player_list = list()\n\n        self.events.post('mc_reset_phase_1')\n        self.events.post('mc_reset_phase_2')\n        self.events.post('mc_reset_phase_3')\n\n    def get_window(self):\n        """ Returns a reference to the onscreen display window.\n\n        This method will set up a window if one doesn't exist yet. This method\n        exists because there are several different modules and plugins which\n        may want to use a window, but we don't know which combinations might\n        be used, so we centralize the creation and management of an onscreen\n        window here.\n        """\n\n        if not self.window:\n            self.window_manager = window.WindowManager(self)\n            self.window = self.window_manager.window\n\n        return self.window\n\n    def request_pygame(self):\n        """Called by a module to let the system know it would like to use\n        Pygame. We centralize the requests instead of letting each module do\n        their own pygame.init() so we get it in one place and can get everything\n        initialized in the right order.\n\n        Returns: True or False, depending on whether pygame is available or not.\n        """\n\n        if pygame and not self.pygame_requested:\n            self.events.add_handler('init_phase_3', self._pygame_init)\n            self.pygame_requested = True\n            return True\n\n        else:\n            return False\n\n    def _pygame_init(self):\n        # performs the actual pygame initialization\n\n        if not pygame:\n            self.log.critical("Pygame is needed but not available. Please "\n                              "install Pygame and try again.")\n            raise Exception("Pygame is needed but not available. Please install"\n                            " Pygame and try again.")\n\n        if not self.pygame:\n            self.log.debug("Initializing Pygame, version %s",\n                           pygame.version.ver)\n\n            pygame.init()\n            self.pygame = True\n\n            self.events.add_handler('timer_tick', self.get_pygame_events,\n                                    priority=1000)\n\n            self.events.post('pygame_initialized')\n\n    def register_pygame_handler(self, event, handler):\n        """Registers a method to be a handler for a certain type of Pygame\n        event.\n\n        Args:\n            event: A string of the Pygame event name you're registering this\n                handler for.\n            handler: A method that will be called when this Pygame event is\n                posted.\n        """\n        if event not in self.registered_pygame_handlers:\n            self.registered_pygame_handlers[event] = set()\n\n        self.registered_pygame_handlers[event].add(handler)\n        self.pygame_allowed_events.append(event)\n\n        self.log.debug("Adding Window event handler. Event:%s, Handler:%s",\n                       event, handler)\n\n        pygame.event.set_allowed(self.pygame_allowed_events)\n\n    def get_pygame_events(self):\n        """Gets (and dispatches) Pygame events. Automatically called every\n        machine loop via the timer_tick event.\n        """\n        for event in pygame.event.get():\n            if event.type in self.registered_pygame_handlers:\n                for handler in self.registered_pygame_handlers[event.type]:\n\n                    if (event.type == pygame.KEYDOWN or\n                            event.type == pygame.KEYUP):\n                        handler(event.key, event.mod)\n                    else:\n                        handler()\n\n    def _process_command(self, bcp_command, **kwargs):\n        self.log.debug("Processing command: %s %s", bcp_command, kwargs)\n\n\n        # Can't use try/except KeyError here because there could be a KeyError\n        # in the callback which we don't want to swallow.\n        if bcp_command in self.bcp_commands:\n            self.bcp_commands[bcp_command](**kwargs)\n        else:\n            self.log.warning("Received invalid BCP command: %s", bcp_command)\n            self.send('error', message='invalid command', command=bcp_command)\n\n\n    def send(self, bcp_command, callback=None, **kwargs):\n        """Sends a BCP command to the connected pinball controller.\n\n        Args:\n            bcp_command: String of the BCP command name.\n            callback: Optional callback method that will be called when the\n                command is sent.\n            **kwargs: Optional additional kwargs will be added to the BCP\n                command string.\n\n        """\n        self.sending_queue.put(bcp.encode_command_string(bcp_command,\n                                                         **kwargs))\n        if callback:\n            callback()\n\n    def send_dmd_frame(self, data):\n        """Sends a DMD frame to the BCP client.\n\n        Args:\n            data: A 4096-length raw byte string.\n        """\n\n        dmd_string = 'dmd_frame?' + data\n        self.sending_queue.put(dmd_string)\n\n    def _timer_init(self):\n        self.HZ = 30\n        self.next_tick_time = time.time()\n        self.secs_per_tick = 1.0 / self.HZ\n\n    def timer_tick(self):\n        """Called by the platform each machine tick based on self.HZ"""\n        self.timing.timer_tick() # notifies the timing module\n        self.events.post('timer_tick') # sends the timer_tick system event\n        Task.timer_tick() # notifies tasks\n        DelayManager.timer_tick()\n\n    def run(self):\n        """Main run loop."""\n        self._timer_init()\n\n        self.log.info("Starting the run loop at %sHz", self.HZ)\n\n        start_time = time.time()\n        loops = 0\n\n        secs_per_tick = self.secs_per_tick\n\n        self.next_tick_time = time.time()\n\n        try:\n            while self.done is False:\n                time.sleep(0.001)\n\n                self.get_from_queue()\n\n                if self.next_tick_time <= time.time(): # todo change this\n                    self.timer_tick()\n                    self.next_tick_time += secs_per_tick\n                    loops += 1\n\n            self._do_shutdown()\n            self.log.info("Target loop rate: %s Hz", self.HZ)\n            self.log.info("Actual loop rate: %s Hz",\n                          loops / (time.time() - start_time))\n\n        except KeyboardInterrupt:\n            self.shutdown()\n\n    def shutdown(self):\n        """Shuts down and exits the media controller.\n\n        This method will also send the BCP 'goodbye' command to any connected\n        clients.\n        """\n        self.socket_thread.stop()\n\n    def _do_shutdown(self):\n        if self.pygame:\n            pygame.quit()\n\n    def socket_thread_stopped(self):\n        """Notifies the media controller that the socket thread has stopped."""\n        self.done = True\n\n    def start_socket_thread(self):\n        """Starts the BCPServer socket thread."""\n        self.socket_thread = BCPServer(self, self.receive_queue,\n                                       self.sending_queue)\n        self.socket_thread.daemon = True\n        self.socket_thread.start()\n\n    def get_from_queue(self):\n        """Gets and processes all queued up incoming BCP commands."""\n        while not self.receive_queue.empty():\n            cmd, kwargs = bcp.decode_command_string(\n                self.receive_queue.get(False))\n            self._process_command(cmd, **kwargs)\n\n    def bcp_hello(self, **kwargs):\n        """Processes an incoming BCP 'hello' 
command.\"\"\"\n try:\n if LooseVersion(kwargs['version']) == (\n LooseVersion(version.__bcp_version__)):\n self.send('hello', version=version.__bcp_version__)\n else:\n self.send('hello', version='unknown protocol version')\n except:\n self.log.warning(\"Received invalid 'version' parameter with \"\n \"'hello'\")\n\n def bcp_goodbye(self, **kwargs):\n \"\"\"Processes an incoming BCP 'goodbye' command.\"\"\"\n if self.config['media_controller']['exit_on_disconnect']:\n self.socket_thread.sending_thread.stop()\n sys.exit()\n\n def bcp_mode_start(self, name=None, priority=0, **kwargs):\n \"\"\"Processes an incoming BCP 'mode_start' command.\"\"\"\n if not name:\n return\n #todo raise error\n\n if name == 'game':\n self._game_start()\n\n if name in self.modes:\n self.modes[name].start(priority=priority)\n\n def bcp_mode_stop(self, name, **kwargs):\n \"\"\"Processes an incoming BCP 'mode_stop' command.\"\"\"\n if not name:\n return\n #todo raise error\n\n if name == 'game':\n self._game_end()\n\n if name in self.modes:\n self.modes[name].stop()\n\n def bcp_error(self, **kwargs):\n \"\"\"Processes an incoming BCP 'error' command.\"\"\"\n self.log.warning('Received error command from client')\n\n def bcp_ball_start(self, **kwargs):\n \"\"\"Processes an incoming BCP 'ball_start' command.\"\"\"\n kwargs['player'] = kwargs.pop('player_num')\n\n self.events.post('ball_started', **kwargs)\n\n def bcp_ball_end(self, **kwargs):\n \"\"\"Processes an incoming BCP 'ball_end' command.\"\"\"\n self.events.post('ball_ended', **kwargs)\n\n def _game_start(self, **kargs):\n \"\"\"Processes an incoming BCP 'game_start' command.\"\"\"\n self.player = None\n self.player_list = list()\n self.num_players = 0\n self.events.post('game_started', **kargs)\n\n def _game_end(self, **kwargs):\n \"\"\"Processes an incoming BCP 'game_end' command.\"\"\"\n self.player = None\n self.events.post('game_ended', **kwargs)\n\n def bcp_player_add(self, player_num, **kwargs):\n \"\"\"Processes an incoming BCP 'player_add' command.\"\"\"\n\n if player_num > len(self.player_list):\n new_player = Player(self, self.player_list)\n\n self.events.post('player_add_success', num=player_num)\n\n def bcp_player_variable(self, name, value, prev_value, change, player_num,\n **kwargs):\n \"\"\"Processes an incoming BCP 'player_variable' command.\"\"\"\n\n try:\n self.player_list[int(player_num)-1][name] = value\n except (IndexError, KeyError):\n pass\n\n def bcp_player_score(self, value, prev_value, change, player_num,\n **kwargs):\n \"\"\"Processes an incoming BCP 'player_score' command.\"\"\"\n\n try:\n self.player_list[int(player_num)-1]['score'] = int(value)\n except (IndexError, KeyError):\n pass\n\n def bcp_player_turn_start(self, player_num, **kwargs):\n \"\"\"Processes an incoming BCP 'player_turn_start' command.\"\"\"\n\n self.log.debug(\"bcp_player_turn_start\")\n\n if ((self.player and self.player.number != player_num) or\n not self.player):\n\n try:\n self.player = self.player_list[int(player_num)-1]\n except IndexError:\n self.log.error('Received player turn start for player %s, but '\n 'only %s player(s) exist',\n player_num, len(self.player_list))\n\n def bcp_trigger(self, name, **kwargs):\n \"\"\"Processes an incoming BCP 'trigger' command.\"\"\"\n self.events.post(name, **kwargs)\n\n def bcp_switch(self, name, state, **kwargs):\n \"\"\"Processes an incoming BCP 'switch' command.\"\"\"\n if int(state):\n self.events.post('switch_' + name + '_active')\n else:\n self.events.post('switch_' + name + '_inactive')\n\n def bcp_get(self, 
**kwargs):\n \"\"\"Processes an incoming BCP 'get' command.\n\n Note that this media controller doesn't implement the 'get' command at\n this time, but it's included here for completeness since the 'get'\n command is part of the BCP 1.0 specification so we don't want to return\n an error if we receive an incoming 'get' command.\n\n \"\"\"\n pass\n\n def bcp_set(self, **kwargs):\n \"\"\"Processes an incoming BCP 'set' command.\n\n Note that this media controller doesn't implement the 'set' command at\n this time, but it's included here for completeness since the 'set'\n command is part of the BCP 1.0 specification so we don't want to return\n an error if we receive an incoming 'set' command.\n\n \"\"\"\n pass\n\n def bcp_shot(self, name, profile, state):\n \"\"\"The MPF media controller uses triggers instead of shots for its\n display events, so we don't need to pay attention here.\"\"\"\n pass\n\n def bcp_config(self, **kwargs):\n \"\"\"Processes an incoming BCP 'config' command.\"\"\"\n for k, v in kwargs.iteritems():\n if k.startswith('volume_'):\n self.bcp_set_volume(track=k.split('volume_')[1], value=v)\n\n def bcp_timer(self, name, action, **kwargs):\n \"\"\"Processes an incoming BCP 'timer' command.\"\"\"\n pass\n\n def bcp_set_volume(self, track, value):\n \"\"\"Sets the volume based on an incoming BCP 'config' command.\n\n Args:\n track: String name of the track the volume will set.\n value: Float between 0 and 1 which represents the volume level to\n set.\n\n Note: At this time only the master volume can be set with this method.\n\n \"\"\"\n if track == 'master':\n self.sound.set_volume(value)\n\n #if track in self.sound.tracks:\n #self.sound.tracks[track]\n\n # todo add per-track volume support to sound system\n\n def get_debug_status(self, debug_path):\n\n if self.options['loglevel'] > 10 or self.options['consoleloglevel'] > 10:\n return True\n\n class_, module = debug_path.split('|')\n\n try:\n if module in self.active_debugger[class_]:\n return True\n else:\n return False\n except KeyError:\n return False\n\n\n# The MIT License (MIT)\n\n# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n","sub_path":"mpf/media_controller/core/media_controller.py","file_name":"media_controller.py","file_ext":"py","file_size_in_byte":22412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"271711424","text":"#!/usr/bin/python\n\nimport cv2\nimport numpy as np\n\ndef process_image(image):\n \n blue=[150,0,0]\n green=[0,200,0]\n white=[220,220,220]\n\n thickness = 2\n line_type = cv2.CV_AA\n\n eye1=(266,266)\n eye2=(330,266)\n eye_radius=20\n\n face_center=(290, 280)\n face_size=(80, 110)\n face_angle = 15;\n\n theText = \"A face and two eyes\"\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_scale = 1\n font_thickness = 1\n font_start=(100, 100)\n\n text_size,base_line = cv2.getTextSize(theText, font, font_scale, font_thickness)\n \n border=(20,20)\n\n p1 = (font_start[0]-border[0],font_start[1]-text_size[1]-border[1])\n p2 = (font_start[0]+text_size[0]+border[0],font_start[1]+border[1])\n\n font_middle=(font_start[0]+text_size[0]/2,font_start[1])\n \n \n\n # Start drawing\n #Eye\n cv2.circle(image, eye1, eye_radius, green, thickness, line_type);\n cv2.circle(image, eye2, eye_radius, green, thickness, line_type);\n #Face\n cv2.ellipse(image, face_center, face_size, face_angle, 0, 360, green, thickness, line_type);\n \n cv2.line(image, font_middle, face_center, blue, thickness, line_type);\n #Rectangle\n cv2.rectangle(image, p1, p2, blue, -1, line_type);\n\n\n #Text\n cv2.putText(image, theText, font_start, font, font_scale, white, thickness, line_type);\n \n\n\ndef display_graphics(image):\n cv2.imshow(\"Image\",image)\n\ndef main():\n \n cv2.namedWindow(\"Image\")\n\n image=cv2.imread(\"lena.jpg\")\n \n process_image(image)\n display_graphics(image)\n\n cv2.imwrite(\"lena_m.jpg\",image)\n \n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n\n\n\n\n\n\n\nif __name__==\"__main__\":\n \n main() \n \n","sub_path":"opencv_computer_vision_application_programming/2/2-5-Drawing-Shape/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"424379765","text":"\n\nfrom xai.brain.wordbase.verbs._harangue import _HARANGUE\n\n#calss header\nclass _HARANGUES(_HARANGUE, ):\n\tdef __init__(self,): \n\t\t_HARANGUE.__init__(self)\n\t\tself.name = \"HARANGUES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"harangue\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_harangues.py","file_name":"_harangues.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"576311017","text":"# -*- coding: utf-8 -*-\nfrom flask import session, flash, redirect, url_for\nfrom flask.ext.login import login_user\nfrom apps import db\nfrom apps.models import User\n\n\ndef OAuth2RegisterToUser(user_data, type):\n \n user = User.query.filter_by(id=user_data.get('id')).first()\n\n\n if user is None:\n \n if type == 'FACEBOOK':\n user = User(\n id=user_data['id'],\n name=user_data['name'],\n picture=\"http://graph.facebook.com/%s/picture\" % user_data['id'],\n gender=user_data['gender']\n )\n db.session.add(user)\n db.session.commit()\n \n #\n # @users\n #\n users = User.query.filter_by(id=user.id)\n\n if users.count() > 1:\n return 409\n\n user = users.first()\n #\n\n if user:\n if login_user(user):\n return 200\n else:\n return 500\n\n\ndef OAuthSessionPop():\n OAUTH_PROVIDER = ['oauth_token']\n for provider in OAUTH_PROVIDER:\n session.pop(provider, None)\n\n\ndef OAuthRegisterAndLoginRedirect(register_result):\n if register_result == 200:\n flash(u\"로그인에 성공하였습니다.\", \"success\")\n return redirect(url_for('main'))\n elif register_result == 409:\n flash(u\"중복된 사용자 이메일입니다.\", \"warning\")\n return redirect(url_for('login'))\n elif register_result == 500:\n flash(u\"사용자 등록에 실패하였습니다. 다시 시도하여주시기 바랍니다.\", \"error\")\n return redirect(url_for('login'))","sub_path":"core/OAuthManagement.py","file_name":"OAuthManagement.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"67851077","text":"#python3.7\r\n#Filename:爬取火车票信息(已知时间和始终站).py\r\n\r\n#后续:文本文件的返回页面设置\r\n#优化:异常处理,搜索时间的设置(当日往后一个月)\r\n\r\nimport requests,openpyxl\r\nimport os,json,re\r\nimport station_name_code\r\n\r\npath = \"C:\\\\Users\\\\15394\\\\Desktop\\\\\"\r\nprint(\"保存在\",path)\r\n\r\ndef getDatas(year,month,date,_from,_to):\r\n headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.96 Safari/537.36\"}\r\n params = [{\r\n \"leftTicketDTO.train_date\":str(year)+\"-\"+str(month)+\"-\"+str(date),\r\n \"leftTicketDTO.from_station\":_from,\r\n \"leftTicketDTO.to_station\":_to,\r\n \"purpose_codes\":\"ADULT\"\r\n }]\r\n url = \"https://kyfw.12306.cn/otn/leftTicket/query\"\r\n for i in params:\r\n res = requests.get(url,params=i)\r\n res = json.loads(res.text)\r\n datas = res[\"data\"][\"result\"]\r\n #print(datas) #页面的所有火车票信息(字符串列表)\r\n return datas\r\n\r\n#对单条信息删选\r\ndef _re(data):\r\n message = data.split(\"|\") #经过拆分可看出“预定”都是在第二个(��引为1)\r\n #print(message)\r\n return message #单条火车信息组成的列表\r\n\r\n#对信息输出\r\ndef prt(mes):\r\n a = mes[3] #车次\r\n b1 = code(mes[4]) #始站(更改为汉字)\r\n b2 = code(mes[5]) #终站(更改为汉字)\r\n b3 = code(mes[6]) #出发站\r\n b4 = code(mes[7]) #到达站\r\n time1 = mes[8] #出发时间\r\n time2 = mes[9] #到达时间\r\n time3 = mes[10] #历时\r\n time4 = mes[13] #日期\r\n c1 = mes[32] #商务座(特等座)\r\n c2 = mes[31] #一等座\r\n c3 = mes[30] #二等座\r\n c4 = mes[21] #高级软卧\r\n c5 = mes[23] #软卧一等卧\r\n c6 = mes[33] #动卧\r\n c7 = mes[28] #硬卧二等卧\r\n c8 = mes[24] #软座\r\n c9 = mes[29] #硬座\r\n c10 = mes[26] #无座\r\n l = [a,b1,b2,b3,b4,time1,time2,time3,time4,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10]\r\n for i in range(len(l)):\r\n if l[i]==\"\":\r\n l[i] = \"无\"\r\n dic = {\"车次\":l[0],\"始站\":l[1],\"终站\":l[2],\"出发站\":l[3],\"到达站\":l[4],\r\n \"出发时间\":l[5],\"到达时间\":l[6],\"历时\":l[7],\"日期\":l[8],\r\n \"商务座\":l[9],\"一等座\":l[10],\"二等座\":l[11],\"高级软卧\":l[12],\"软卧一等卧\":l[13],\r\n \"动卧\":l[14],\"硬卧二等卧\":l[15],\"软座\":l[16],\"硬座\":l[17],\"无座\":l[18]}\r\n list0 = [\"车次:\",l[0],\"始站:\",l[1],\"终站:\",l[2],\"出发站:\",l[3],\"到达站:\",l[4],\r\n \"出发时间:\",l[5],\"到达时间:\",l[6],\"历时:\",l[7],\"日期:\",l[8],\r\n \"商务座:\",l[9],\"一等座:\",l[10],\"二等座:\",l[11],\"高级软卧:\",l[12],\"软卧一等卧:\",l[13],\r\n \"动卧:\",l[14],\"硬卧二等卧:\",l[15],\"软座:\",l[16],\"硬座:\",l[17],\"无座:\",l[18]]\r\n return list0,dic\r\n\r\n#文本返回保存\r\ndef txt_format(l): #文本格式优化\r\n for i in range(1,len(l),2):\r\n l[i] = \"{0:{1}<5}\\t\".format(l[i],chr(12288))\r\n l = \"\".join(l)\r\n return l\r\n\r\ndef _file(m): #保存为txt文件或直接输出\r\n with open(path+\"火车票.txt\",\"a\") as f:\r\n f.write(m)\r\n\r\ndef _excel(l,i): #保存为excel文件\r\n while os.path.exists(path+\"火车票.xlsx\") == False:\r\n wb = openpyxl.Workbook()\r\n sheet = wb.active\r\n sheet.title = \"火车票信息\"\r\n wb.save(path+\"火车票.xlsx\")\r\n wb = openpyxl.load_workbook(path+\"火车票.xlsx\")\r\n #sheet = wb[\"火车票信息:{} 至 {}\".format(_from,_to)]\r\n sheet = wb[\"火车票信息\"]\r\n if sheet[\"A1\"].value == None:\r\n sheet[\"A1\"] = l[0].replace(\":\",\"\")\r\n sheet[\"B1\"] = l[2].replace(\":\",\"\")\r\n sheet[\"C1\"] = l[4].replace(\":\",\"\")\r\n sheet[\"D1\"] = l[6].replace(\":\",\"\")\r\n sheet[\"E1\"] = l[8].replace(\":\",\"\")\r\n sheet[\"F1\"] = l[10].replace(\":\",\"\")\r\n sheet[\"G1\"] = l[12].replace(\":\",\"\")\r\n sheet[\"H1\"] = l[14].replace(\":\",\"\")\r\n sheet[\"I1\"] = l[16].replace(\":\",\"\")\r\n sheet[\"J1\"] = l[18].replace(\":\",\"\")\r\n sheet[\"K1\"] = l[20].replace(\":\",\"\")\r\n sheet[\"L1\"] = l[22].replace(\":\",\"\")\r\n sheet[\"M1\"] = l[24].replace(\":\",\"\")\r\n sheet[\"N1\"] = 
l[26].replace(\":\",\"\")\r\n sheet[\"O1\"] = l[28].replace(\":\",\"\")\r\n sheet[\"P1\"] = l[30].replace(\":\",\"\")\r\n sheet[\"Q1\"] = l[32].replace(\":\",\"\")\r\n sheet[\"R1\"] = l[34].replace(\":\",\"\")\r\n sheet[\"S1\"] = l[36].replace(\":\",\"\")\r\n i = str(i)\r\n sheet[\"A\"+i] = l[1]\r\n sheet[\"B\"+i] = l[3]\r\n sheet[\"C\"+i] = l[5]\r\n sheet[\"D\"+i] = l[7]\r\n sheet[\"E\"+i] = l[9]\r\n sheet[\"F\"+i] = l[11]\r\n sheet[\"G\"+i] = l[13]\r\n sheet[\"H\"+i] = l[15]\r\n sheet[\"I\"+i] = l[17]\r\n sheet[\"J\"+i] = l[19]\r\n sheet[\"K\"+i] = l[21]\r\n sheet[\"L\"+i] = l[23]\r\n sheet[\"M\"+i] = l[25]\r\n sheet[\"N\"+i] = l[27]\r\n sheet[\"O\"+i] = l[29]\r\n sheet[\"P\"+i] = l[31]\r\n sheet[\"Q\"+i] = l[33]\r\n sheet[\"R\"+i] = l[35]\r\n sheet[\"S\"+i] = l[37]\r\n wb.save(path+\"火车票.xlsx\")\r\n\r\n\r\n#运行顺序\r\ndef go_txt(year,month,date,_from,_to): #保存为txt\r\n datas = getDatas(year,month,date,code(_from),code(_to))\r\n for data in datas:\r\n mes = _re(data)\r\n l = prt(mes)[0]\r\n m = txt_format(l)\r\n print(m,\"\\n\") #返回到屏幕\r\n _file(m) #保存为txt\r\n\r\ndef go_excel(year,month,date,_from,_to): #保存为excel\r\n datas = getDatas(year,month,date,code(_from),code(_to))\r\n i = 2\r\n for data in datas:\r\n mes = _re(data)\r\n l = prt(mes)[0]\r\n _excel(l,i) #保存为excel\r\n i += 2\r\n\r\n#汉字编码转换\r\ndef code(name):\r\n name_code = station_name_code.dic()[0]\r\n code_name = station_name_code.dic()[1]\r\n if name in name_code:\r\n return name_code[name]\r\n elif name in code_name:\r\n return code_name[name]\r\n else:\r\n print(\"站点不存在\")\r\n\r\ndef main():\r\n year=input(\"输入年份(如:2019):\")\r\n month=input(\"输入月份(如:01):\")\r\n date=input(\"输入日期(如:01):\")\r\n _from=input(\"输入出发站(如:北京):\")\r\n _to=input(\"输入到达站(如:北京):\")\r\n choice=int(input(\"返回至屏幕和txt文件输入“0”,返回至excel文件输入“1”:\"))\r\n if choice == 0:\r\n try:\r\n go_txt(year,month,date,_from,_to) #txt文件和(或)返回到屏幕\r\n except:\r\n print(\"未找到\")\r\n else:\r\n try:\r\n go_excel(year,month,date,_from,_to) #excel表格\r\n except:\r\n print(\"未找到\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n print(\"Done\")\r\n","sub_path":"可选日期和始终站/爬取火车票信息(json).py","file_name":"爬取火车票信息(json).py","file_ext":"py","file_size_in_byte":6760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"132805908","text":"# 340. Longest Substring with At Most K Distinct Characters\n# Example 1:\n\n# Input: s = \"eceba\", k = 2\n# Output: 3\n# Explanation: T is \"ece\" which its length is 3.\n# Example 2:\n\n# Input: s = \"aa\", k = 1\n# Output: 2\n# Explanation: T is \"aa\" which its length is 2.\n\nfrom collections import defaultdict\n\n\n# using a dynamic sliding window that expands and contracts\n# update a min length to return each iteration\n\ndef lengthOfLongestSubstringKDistinct(s, k):\n l, r, length, map = 0, 0, 0, defaultdict(int)\n\n for r in range(len(s)):\n # always put the new value in the map. Expand\n map[s[r]] += 1\n\n # contract\n while len(map) > k:\n\n map[s[l]] -= 1\n # if when we contract and remove values if the mapping value is 0 remove it\n if map[s[l]] == 0:\n del map[s[l]]\n l += 1\n\n length = max(length, r-l + 1)\n return length\n\n\nprint(lengthOfLongestSubstringKDistinct(\"eceba\", 2)) # 3\nprint(lengthOfLongestSubstringKDistinct(\"aa\", 1)) # 2\n","sub_path":"SlidingWindow/LongestSubstringWithAtMostKDistinctCharacters.py","file_name":"LongestSubstringWithAtMostKDistinctCharacters.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"63602230","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.util import CLIError\n\nfrom azure.cli.core.commands import CliCommandType\n\nfrom ._client_factory import (cf_cdn, cf_custom_domain, cf_endpoints, cf_profiles, cf_origins, cf_resource_usage,\n cf_edge_nodes, cf_waf_policy, cf_waf_rule_set)\n\n\ndef _not_found(message):\n def _inner_not_found(ex):\n from azure.mgmt.cdn.models import ErrorResponseException\n if isinstance(ex, ErrorResponseException) \\\n and ex.response is not None \\\n and ex.response.status_code == 404:\n raise CLIError(message)\n raise ex\n return _inner_not_found\n\n\n_not_found_msg = \"{}(s) not found. Please verify the resource(s), group or it's parent resources \" \\\n \"exist.\"\n\n\n# pylint: disable=too-many-statements\ndef load_command_table(self, _):\n profile_not_found_msg = _not_found_msg.format('Profile')\n endpoint_not_found_msg = _not_found_msg.format('Endpoint')\n cd_not_found_msg = _not_found_msg.format('Custom Domain')\n origin_not_found_msg = _not_found_msg.format('Origin')\n waf_policy_not_found_msg = _not_found_msg.format('WAF Policy')\n\n cdn_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn#CdnManagementClient.{}',\n client_factory=cf_cdn\n )\n\n cdn_endpoints_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#EndpointsOperations.{}',\n client_factory=cf_endpoints,\n exception_handler=_not_found(endpoint_not_found_msg)\n )\n\n cdn_profiles_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#ProfilesOperations.{}',\n client_factory=cf_profiles,\n exception_handler=_not_found(profile_not_found_msg)\n )\n\n cdn_domain_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#CustomDomainsOperations.{}',\n client_factory=cf_custom_domain,\n exception_handler=_not_found(cd_not_found_msg)\n )\n\n cdn_origin_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#OriginsOperations.{}',\n client_factory=cf_origins,\n exception_handler=_not_found(origin_not_found_msg)\n )\n\n cdn_edge_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#EdgeNodesOperations.{}',\n client_factory=cf_edge_nodes\n )\n\n cdn_usage_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#ResourceUsageOperations.{}',\n client_factory=cf_resource_usage\n )\n\n cdn_waf_policy_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#PoliciesOperations.{}',\n client_factory=cf_waf_policy,\n exception_handler=_not_found(waf_policy_not_found_msg)\n )\n\n with self.command_group('cdn', cdn_sdk) as g:\n g.command('name-exists', 'check_name_availability')\n\n with self.command_group('cdn', cdn_usage_sdk) as g:\n g.command('usage', 'list')\n\n with self.command_group('cdn endpoint', cdn_endpoints_sdk) as g:\n for name in ['start', 'stop', 'delete']:\n g.command(name, name, supports_no_wait=True)\n g.show_command('show', 'get')\n g.command('list', 'list_by_profile')\n g.command('load', 'load_content', supports_no_wait=True)\n g.command('purge', 'purge_content', supports_no_wait=True)\n g.command('validate-custom-domain', 'validate_custom_domain')\n g.custom_command('create', 'create_endpoint', client_factory=cf_cdn,\n 
doc_string_source='azure.mgmt.cdn.models#Endpoint',\n supports_no_wait=True)\n g.generic_update_command('update', setter_name='update', setter_arg_name='endpoint_update_properties',\n custom_func_name='update_endpoint',\n doc_string_source='azure.mgmt.cdn.models#EndpointUpdateParameters',\n supports_no_wait=True)\n\n with self.command_group('cdn endpoint waf policy', cdn_endpoints_sdk, is_preview=True) as g:\n g.custom_show_command('show', 'show_endpoint_waf_policy_link', client_factory=cf_endpoints)\n g.custom_command('set', 'set_endpoint_waf_policy_link', client_factory=cf_endpoints)\n g.custom_command('remove', 'remove_endpoint_waf_policy_link', client_factory=cf_endpoints, confirmation=True)\n\n with self.command_group('cdn endpoint rule', cdn_endpoints_sdk, is_preview=True) as g:\n g.show_command('show', 'get')\n g.custom_command('add', 'add_rule', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n g.custom_command('remove', 'remove_rule', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n\n with self.command_group('cdn endpoint rule condition', cdn_endpoints_sdk, is_preview=True) as g:\n g.show_command('show', 'get')\n g.custom_command('add', 'add_condition', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n g.custom_command('remove', 'remove_condition', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n\n with self.command_group('cdn endpoint rule action', cdn_endpoints_sdk, is_preview=True) as g:\n g.show_command('show', 'get')\n g.custom_command('add', 'add_action', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n g.custom_command('remove', 'remove_action', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n\n with self.command_group('cdn profile', cdn_profiles_sdk) as g:\n g.show_command('show', 'get')\n g.command('usage', 'list_resource_usage')\n g.command('delete', 'delete')\n g.custom_command('list', 'list_profiles', client_factory=cf_cdn)\n g.custom_command('create', 'create_profile', client_factory=cf_cdn)\n g.generic_update_command('update', setter_name='update', custom_func_name='update_profile',\n doc_string_source='azure.mgmt.cdn.models#ProfileUpdateParameters')\n\n with self.command_group('cdn custom-domain', cdn_domain_sdk) as g:\n g.show_command('show', 'get')\n g.command('delete', 'delete')\n g.command('list', 'list_by_endpoint')\n g.custom_command('create', 'create_custom_domain', client_factory=cf_cdn)\n g.custom_command('enable-https', 'enable_custom_https', client_factory=cf_cdn)\n g.command('disable-https', 'disable_custom_https')\n\n with self.command_group('cdn origin', cdn_origin_sdk) as g:\n g.show_command('show', 'get')\n g.command('list', 'list_by_endpoint')\n\n with self.command_group('cdn edge-node', cdn_edge_sdk) as g:\n g.command('list', 'list')\n\n with self.command_group('cdn waf policy', cdn_waf_policy_sdk, is_preview=True) as g:\n g.show_command('show', 'get')\n g.command('list', 'list')\n g.custom_command('set', 'set_waf_policy', client_factory=cf_waf_policy)\n g.command('delete', 'delete', confirmation=True)\n\n with self.command_group('cdn waf policy managed-rule-set', cdn_waf_policy_sdk, is_preview=True) as g:\n g.custom_command('add', 'add_waf_policy_managed_rule_set', client_factory=cf_waf_policy)\n g.custom_command('remove',\n 'remove_waf_policy_managed_rule_set',\n client_factory=cf_waf_policy,\n confirmation=True)\n g.custom_command('list', 
'list_waf_policy_managed_rule_sets', client_factory=cf_waf_policy)\n g.custom_show_command('show', 'show_waf_policy_managed_rule_set', client_factory=cf_waf_policy)\n g.custom_command('list-available', 'list_waf_managed_rule_set', client_factory=cf_waf_rule_set)\n\n with self.command_group('cdn waf policy managed-rule-set rule-group-override',\n cdn_waf_policy_sdk,\n is_preview=True) as g:\n g.custom_command('set', 'set_waf_managed_rule_group_override', client_factory=cf_waf_policy)\n g.custom_command('delete',\n 'delete_waf_managed_rule_group_override',\n client_factory=cf_waf_policy,\n confirmation=True)\n g.custom_command('list', 'list_waf_policy_managed_rule_group_overrides', client_factory=cf_waf_policy)\n g.custom_show_command('show', 'show_waf_managed_rule_group_override', client_factory=cf_waf_policy)\n g.custom_command('list-available', 'list_waf_managed_rule_groups', client_factory=cf_waf_rule_set)\n\n with self.command_group('cdn waf policy custom-rule', cdn_waf_policy_sdk, is_preview=True) as g:\n g.custom_command('set', 'set_waf_custom_rule', client_factory=cf_waf_policy)\n g.custom_command('delete', 'delete_waf_custom_rule', client_factory=cf_waf_policy, confirmation=True)\n g.custom_command('list', 'list_waf_custom_rules', client_factory=cf_waf_policy)\n g.custom_show_command('show', 'show_waf_custom_rule', client_factory=cf_waf_policy)\n\n with self.command_group('cdn waf policy rate-limit-rule', cdn_waf_policy_sdk, is_preview=True) as g:\n g.custom_command('set', 'set_waf_rate_limit_rule', client_factory=cf_waf_policy)\n g.custom_command('delete', 'delete_waf_rate_limit_rule', client_factory=cf_waf_policy, confirmation=True)\n g.custom_command('list', 'list_waf_rate_limit_rules', client_factory=cf_waf_policy)\n g.custom_show_command('show', 'show_waf_rate_limit_rule', client_factory=cf_waf_policy)\n","sub_path":"src/azure-cli/azure/cli/command_modules/cdn/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":9943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"145961594","text":"import cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread('imageresized_0.jpg')\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\nsift = cv2.xfeatures2d.SIFT_create()\r\ndetector = sift.detect(gray, None)\r\n\r\nkpts, des = sift.compute(gray, detector)\r\n# kpts,des=descriptor.compute(gray,kpts)\r\nim_with_keypoints = cv2.drawKeypoints(gray, kpts, np.array([]), color=255, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n\r\ncv2.imshow(\"Keypoints\", im_with_keypoints)\r\ncv2.waitKey()","sub_path":"odd_pys/keypoints/kp.py","file_name":"kp.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"72398077","text":"import os, glob, cv2, time\nfrom options.test_options import TestOptions\nfrom data import create_dataset\nfrom models import create_model\nfrom util.visualizer import save_images\nfrom util import html\nimport numpy as np\nfrom scipy.signal import convolve2d\n\n\ndef MSE(pic1, pic2):\n return np.sum(np.square(pic1 - pic2)) / (pic1.shape[0] * pic1.shape[1])\n\n\ndef matlab_style_gauss2D(shape=(3, 3), sigma=0.5):\n \"\"\"\n 2D gaussian mask - should give the same result as MATLAB's\n fspecial('gaussian',[shape],[sigma])\n \"\"\"\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h\n\n\ndef filter2(x, kernel, mode='same'):\n return convolve2d(x, np.rot90(kernel, 2), mode=mode)\n\n\ndef compute_ssim(im1, im2, k1=0.01, k2=0.03, win_size=11, L=255):\n if not im1.shape == im2.shape:\n raise ValueError(\"Input Imagees must have the same dimensions\")\n if len(im1.shape) > 2:\n raise ValueError(\"Please input the images with 1 channel\")\n\n M, N = im1.shape\n C1 = (k1 * L) ** 2\n C2 = (k2 * L) ** 2\n window = matlab_style_gauss2D(shape=(win_size, win_size), sigma=1.5)\n window = window / np.sum(np.sum(window))\n\n if im1.dtype == np.uint8:\n im1 = np.double(im1)\n if im2.dtype == np.uint8:\n im2 = np.double(im2)\n\n mu1 = filter2(im1, window, 'valid')\n mu2 = filter2(im2, window, 'valid')\n mu1_sq = mu1 * mu1\n mu2_sq = mu2 * mu2\n mu1_mu2 = mu1 * mu2\n sigma1_sq = filter2(im1 * im1, window, 'valid') - mu1_sq\n sigma2_sq = filter2(im2 * im2, window, 'valid') - mu2_sq\n sigmal2 = filter2(im1 * im2, window, 'valid') - mu1_mu2\n\n ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigmal2 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))\n\n return np.mean(np.mean(ssim_map))\n\n\nif __name__ == '__main__':\n opt = TestOptions().parse() # get test options\n # opt.epoch = 200\n # hard-code some parameters for test\n opt.num_threads = 0 # test code only supports num_threads = 1\n # opt.batch_size = 1 # test code only supports batch_size = 1\n opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.\n opt.no_flip = True # no flip; comment this line if results on flipped images are needed.\n opt.load_size = opt.crop_size\n opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.\n opt.dataset_mode = 'unaligned' + ('_single_dir' if opt.single_dir else '')\n dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options\n model = create_model(opt) # create a model given opt.model and other options\n model.setup(opt) # regular setup: load and print networks; create schedulers\n # create a website\n web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch)) # define the website directory\n webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))\n # test with eval mode. This only affects layers like batchnorm and dropout.\n # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. 
You can experiment it with and without eval() mode.\n # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.\n starttime = time.time()\n lasttime = starttime\n\n if opt.eval:\n model.eval()\n starttime = time.time()\n for i, data in enumerate(dataset):\n if i >= opt.num_test: # only apply our model to opt.num_test images.\n break\n model.set_input(data) # unpack data from data loader\n model.test() # run inference\n visuals = model.get_current_visuals() # get image results\n img_path = model.get_image_paths() # get image paths\n if i % 1 == 0: # save images to an HTML file\n print('processing (%04d)-th image... %s' % (len(img_path) + (i) * opt.batch_size, ''), 'cost',\n time.time() - lasttime, 'seconds')\n lasttime = time.time()\n save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)\n webpage.save() # save the HTML\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
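A quick sanity check for the metric helpers defined at the top of predict.py — a minimal sketch, with a random image invented purely for illustration: identical inputs should give MSE 0 and SSIM 1.

    import numpy as np
    im = (np.random.rand(64, 64) * 255).astype(np.uint8)
    print(MSE(im.astype(float), im.astype(float)))   # identical inputs -> 0.0
    print(compute_ssim(im, im))                      # identical inputs -> 1.0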
+{"seq_id":"27190800","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 27 14:32:38 2018\n\n@author: jmezi\n\"\"\"\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom matplotlib.pyplot import plot, show, figure\n\nlayer_number = int(input(\"How many hidden layers would you like? \"))\nnode_number = []\nfor i in range(layer_number):\n node_number.append(int(input(\"How many nodes would you like for layer \"+str(i+1)+\"? \")))\nlearning_rate = float(input(\"What would you like the learning rate to be? \"))\n\nclass Classifier():\n def __init__(self,layer_number,node_number,learning_rate):\n self.learning_rate = learning_rate\n self.layer_number = layer_number\n self.node_number = node_number\n self.layers = []\n for i in range(layer_number+1):\n self.layers.append(Layer(node_number[i] + 1,node_number[i+1]))\n \n def fit(self, train_data, train_target):\n wrong = 1\n accuracy = 0\n k = 0\n l = 0\n repeats = 0\n accuracy2 = 0\n old_rate = self.learning_rate\n accuracy_data = []\n while k < 1000 and accuracy < 0.95:\n wrong = 0\n for i in range(len(train_target)):\n correct_guess = []\n for j in range(len(set(train_target))):\n if j == train_target[i]:\n correct_guess.append(1)\n else:\n correct_guess.append(0)\n guess = self.generate_guess(train_data[i])\n guess_index = list(guess[-1]).index(max(list(guess[-1])))\n if guess_index != train_target[i]:\n wrong += 1\n guess.append(np.asarray(correct_guess))\n self.back_propagate(guess)\n k += 1\n accuracy = (len(train_target) - wrong) / len(train_target)\n accuracy_data.append(accuracy)\n if abs(accuracy2 - accuracy) < 1e-4:\n repeats += 1\n else:\n repeats = 0\n accuracy2 = accuracy\n if repeats > 150 and accuracy < 0.75:\n old_rate = self.learning_rate\n self.learning_rate *= (1-accuracy)*10\n l = k + 5\n repeats = 0\n if k > l and self.learning_rate > old_rate and repeats < 5:\n self.learning_rate /= 2\n print(k)\n return accuracy_data\n \n def predict(self,test_data):\n prediction_array = []\n for i in range(len(test_data)):\n guess = self.generate_guess(test_data[i])\n prediction_array.append(list(guess[-1]).index(max(list(guess[-1]))))\n return np.asarray(prediction_array)\n \n def generate_guess(self,data_input):\n guess = []\n guess.append(data_input)\n for i in range(self.layer_number+1):\n guess[-1] = np.append(guess[-1],-1)\n guess.append(self.layers[i].node_output(guess[-1]))\n return guess\n\n def back_propagate(self,guess):\n error = self.generate_error(guess)\n self.update_weights(error,guess)\n \n def generate_error(self,guess):\n error = []\n guess[0] = np.delete(guess[0],-1)\n for i in range(len(guess) - 2):\n if i == 0:\n error.append(guess[-2]*(1-guess[-2])*(guess[-2] - guess[-1]))\n else:\n guess[-(i+2)] = np.delete(guess[-(i+2)],-1)\n error.append(guess[-(i+2)]*(1-guess[-(i+2)])*np.sum(np.dot(self.layers[-i].weights.T,error[-1])))\n error = np.flip(error)\n return error\n \n def update_weights(self,error,guess):\n for i in range(len(self.layers)):\n self.layers[i].update_layer_weights(error[i],guess[i],self.learning_rate)\n \nclass Layer():\n def __init__(self,input_number,output_number):\n self.weights = np.random.randint(-50,50, size=(output_number,input_number))/100\n \n def node_output(self,node_input):\n return 1/(1 + np.exp(- np.matmul(self.weights,node_input)))\n \n def update_layer_weights(self,error,guess,learning_rate):\n for i in range(len(self.weights)):\n self.weights[i] = self.weights[i] - learning_rate * error[i] * np.append(guess,-1)\n\niris = 
datasets.load_iris()\ntrain_data, test_data, train_target, test_target = train_test_split(iris.data, iris.target, test_size = 0.3)\ntrain_data = np.asarray(train_data)\ntrain_target = np.asarray(train_target)\n\n# standardise each feature column using the training-set statistics\nfor i in range(train_data.shape[1]):\n    mean, std = np.mean(train_data[:, i]), np.std(train_data[:, i])\n    train_data[:, i] = (train_data[:, i] - mean)/std\n    test_data[:, i] = (test_data[:, i] - mean)/std\nnode_number.append(len(set(train_target)))\nnode_number.insert(0,len(train_data[0]))\nclassifier = Classifier(layer_number,node_number,learning_rate)\naccuracy_data = classifier.fit(train_data,train_target)\nprediction = classifier.predict(test_data)\n\nright = 0\ni = 0\nwhile i < len(prediction):\n    if prediction[i] == test_target[i]:\n        right += 1\n        \n    i += 1\n\npercent_right = right / len(test_target)\nprint(percent_right)\n\nfigure(1)\nplot(accuracy_data)\nshow()\n\nlayer_number = int(input(\"How many hidden layers would you like? \"))\nnode_number = []\nfor i in range(layer_number):\n    node_number.append(int(input(\"How many nodes would you like for layer \"+str(i+1)+\"? \")))\nlearning_rate = float(input(\"What would you like the learning rate to be? \"))\n\nwine = datasets.load_wine()\ntrain_data, test_data, train_target, test_target = train_test_split(wine.data, wine.target, test_size = 0.3)\ntrain_data = np.asarray(train_data)\ntrain_target = np.asarray(train_target)\n\n# standardise each feature column using the training-set statistics\nfor i in range(train_data.shape[1]):\n    mean, std = np.mean(train_data[:, i]), np.std(train_data[:, i])\n    train_data[:, i] = (train_data[:, i] - mean)/std\n    test_data[:, i] = (test_data[:, i] - mean)/std\nnode_number.append(len(set(train_target)))\nnode_number.insert(0,len(train_data[0]))\nclassifier = Classifier(layer_number,node_number,learning_rate)\naccuracy_data = classifier.fit(train_data,train_target)\nprediction = classifier.predict(test_data)\n\nright = 0\ni = 0\nwhile i < len(prediction):\n    if prediction[i] == test_target[i]:\n        right += 1\n        \n    i += 1\n\npercent_right = right / len(test_target)\nprint(percent_right)\n\nfigure(1)\nplot(accuracy_data)\nshow()\n","sub_path":"week06/prove7.py","file_name":"prove7.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
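After the column-wise standardisation above, every training feature should end up with mean roughly 0 and standard deviation roughly 1 — a quick check, purely for illustration:

    print(np.round(train_data.mean(axis=0), 3))  # ~0 for each feature
    print(np.round(train_data.std(axis=0), 3))   # ~1 for each feature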
+{"seq_id":"241417889","text":"from __future__ import print_function, division\nfrom saliency_map_net import SaliencyMapNet, SaliencyUNet, L_cut, Saliency_simple, Saliency_noskip, Saliency_encoder\n\nfrom data_loader import ChexRays, RSNA_loader\n\n\nimport torch\nfrom torch.optim import lr_scheduler\nimport numpy as np\nfrom torchvision import transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\nfrom apex import amp\nfrom sklearn.metrics import roc_auc_score\n\nfrom knoedl import setup_experiment\nfrom knoedl.utils import dynamic_import\n\n\ndef main():\n # amp_handle = amp.init(enabled=True)\n params = setup_experiment(exp_type='training')\n # get base_dir from params file\n base_dir = params['base_dir']\n # note params file has to be given in training/ edit_configuration top right drop down menu\n\n # init logging\n from knoedl.log.tb_log import TbLogger as knoedl_TbLogger\n\n # read param file containing training parameters\n running_type = params['running_type']\n num_classes = params['model_params']['num_classes']\n batch_size = params['train']['batch_size']\n epochs = params['train']['epochs']\n patience = params['train']['patience']\n\n data_transforms = {\n 'train': transforms.Compose([\n transforms.ToPILImage(),\n transforms.RandomHorizontalFlip(),\n transforms.RandomAffine(degrees=5, translate=(0.05, 0.05), scale=(0.9, 1.1)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n\n 'val': transforms.Compose([\n transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n }\n\n #### Data Location ####\n if running_type == 'dev':\n data_dir = 'data/hymenoptera_data'\n root_dir = '/media/data1/max_m/thesis/chexray_dev/'\n dev_folder = 'images/'\n dev_csv_path = os.path.join(root_dir, 'Dev_data.csv')\n log_dir = root_dir\n\n #### Datasets and Dataloaders Generation ####\n image_datasets = {x: ChexRays(csv_dir=root_dir + '{}.csv'.format(x),\n root_dir=root_dir,\n folder=dev_folder,\n transform=data_transforms[x]) # changed to none / data_transforms[x]\n for x in ['train', 'val']}\n elif running_type == 'normal':\n server = params['server']\n # if server == '95':\n # root_dir = '/media/data2/data/ChestXray14/small/data'\n # folder = 'images'\n # elif server =='99':\n # root_dir = '/media/data2/data/ChestXray-NIHCC'\n # folder = 'images/'\n\n csv_dir = '/media/data1/max_m/thesis/RSNA/csv_files/K_fold/unique_pids'\n root_dir = '/media/data2/data/rsna-pneumonia-detection-challenge'\n folder = 'train_images_med_png'\n log_dir = '/media/data1/max_m/logs'\n split_names = {'train': params['split_names'][0], 'val': params['split_names'][1]}\n\n image_datasets = {x: RSNA_loader(csv_dir=os.path.join(csv_dir, split_names[x]),\n root_dir=root_dir,\n folder=folder,\n num_classes=num_classes,\n transform=data_transforms[x]) # changed to none / data_transforms[x]\n for x in ['train', 'val']}\n\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=batch_size,\n shuffle=True,\n num_workers=4) for x in ['train', 'val']}\n\n # check if datasets contain the same classes\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\n\n class_names = {x: image_datasets[x].classes for x in ['train', 'val']}\n assert class_names['train'] == class_names[\n 'val'], 'validation set does not contain the same classes as training set.' 
\\\n 'validation classes = {}, training classes = {}' \\\n .format(class_names['train'], class_names['val'])\n\n\n #### device assignment ####\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n #### model instantiation, based on architecture mode ####\n model_mode = params['model_params']['model_mode']\n if model_mode == 'saliency':\n # from saliency_map_net import SaliencyMapNet\n model = SaliencyMapNet(num_classes=num_classes,\n gr=32,\n resnet_backbone='ResNet50',\n dense_config='normal')\n elif model_mode == 'unet':\n model = SaliencyUNet(num_classes=num_classes,\n resnet_backbone='ResNet50')\n elif model_mode == 'cut':\n model = L_cut(num_classes=num_classes,\n resnet_backbone='ResNet50')\n elif 'simple' in model_mode:\n if '1' in model_mode:\n model = Saliency_simple(num_classes=num_classes,\n resnet_backbone='ResNet50', mode='mode1')\n elif '2' in model_mode:\n model = Saliency_simple(num_classes=num_classes,\n resnet_backbone='ResNet50', mode='mode2')\n else:\n model = Saliency_simple(num_classes=num_classes,\n resnet_backbone='ResNet50')\n elif model_mode == 'noskip':\n model = Saliency_noskip(num_classes=num_classes,\n resnet_backbone='ResNet50')\n elif model_mode == 'encoder':\n model = Saliency_encoder(num_classes=num_classes,\n resnet_backbone='ResNet50')\n model = model.to(device)\n\n #### training ####\n # when training on RSNA use softmax and NLLLoss\n loss_type = dynamic_import(['torch.nn'], params['train']['loss_type'], 'loss')\n loss = loss_type(**params['train']['loss_params'])\n\n optim_type = dynamic_import(['torch.optim'], params['train']['optim_type'], 'optimizer')\n optim = optim_type(model.parameters(), **params['train']['optim_params'])\n\n\n # APEX init\n model, optim = amp.initialize(model, optim, opt_level='O1')\n step_size = params['train']['lr_decay']\n gamma = params['train']['gamma']\n exp_lr_scheduler = lr_scheduler.StepLR(optim, step_size=step_size, gamma=gamma)\n\n\n #### Initialize model savers and tensorboard logging.####\n # Note that knoedl automatically creates logs for all .py files, and the console log and knoedl version\n knoedl_tb_logger = knoedl_TbLogger(base_dir, count_steps=True)\n\n #create a models directory in the base_dir that is created by knoedl\n # models_dir is used to save the best models, and a model from each epoch\n models_dir = os.path.join(base_dir, 'models/')\n os.makedirs(models_dir, exist_ok=True)\n\n #### load model params from previous training ####\n load_pretrained = params['load_pretrained']\n best_dir = params['best_dir']\n best_from = os.path.join(log_dir, best_dir)\n if load_pretrained:\n checkpoint = torch.load(best_from)\n model.load_state_dict(checkpoint['model_state_dict'], strict=False)\n # optim.load_state_dict(checkpoint['optimizer_state_dict'])\n\n ## freeze weights:\n # for child in model.pretrained_resnet.children():\n # for param in child.parameters():\n # param.requires_grad = False\n\n # model.eval()\n # - or -\n model.train()\n\n # todo : debugging training\n result = train_model(model,\n loss,\n optim,\n scheduler=exp_lr_scheduler,\n patience=patience,\n device=device,\n dataloaders=dataloaders,\n class_names=class_names,\n dataset_sizes=dataset_sizes,\n root_dir=root_dir,\n epochs=epochs,\n knoedl_tb_logger=knoedl_tb_logger,\n log_dir=models_dir,\n params=params)\n\n # visualize_model(model, dataloaders, device, class_names, num_images=4)\n\n return result\n\n\ndef show(tensor, title=None, save_location=None):\n inp = tensor.numpy().transpose((1, 2, 0))\n mean = 
np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n\n if save_location is not None:\n save_loc = os.path.join('/media/data1/max_m/thesis/saved_figures/', save_location) #uncomment if image is to be saved\n plt.savefig(save_loc)\n else:\n plt.pause(0.001) # comment out if image is to be saved\n\n\n#todo: transform list of labels into one hot encoding, add this to ChexRay as a class method\ndef convert_labels_to_tensor(class_names, sample):\n labels_dict = {label: idx for idx, label in enumerate(class_names['train'])}\n predictions = sample['annotations']['Finding Labels']\n # convert sample labels to list of labels\n\n idx = []\n batch_size = sample['image'].shape[0]\n num_classes = len(class_names['train'])\n one_hot = torch.zeros((batch_size, num_classes))\n for batch in range(batch_size):\n idx = []\n if isinstance(predictions[batch], str):\n predictions[batch] = [predictions[batch]]\n for i, label in enumerate(predictions[batch]):\n idx.append(labels_dict[label])\n\n one_hot[batch, idx] = 1\n return one_hot\n\n\n## TOdo: start a training with basic parameters, little data augmentation etc. % done\n# define training function\ndef train_model(model,\n criterion,\n optimizer,\n scheduler,\n patience,\n device,\n dataloaders,\n class_names,\n dataset_sizes,\n root_dir,\n knoedl_tb_logger,\n epochs,\n log_dir,\n params,\n ):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_roc_auc = 0.0\n\n\n with knoedl_tb_logger:\n for epoch in range(0, epochs):\n print('Epoch {}/{}'.format(epoch, epochs - 1))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n # update learning_rate scheduler:\n scheduler.step()\n\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n counter = 0\n\n if phase == 'val':\n labels_epoch = []\n outs_epoch = []\n\n # running_roc_auc_score = 0.0\n # running_add = 0.0\n ## use a array to save the outputs\n\n # Iterate over data.\n if phase == 'val':\n print('Wait !')\n for i, sample in enumerate(dataloaders[phase]):\n inputs = sample['image'].to(device)\n\n # formatting labels, where multiple labels are present, create a list\n # for roc auc score required in one hot labels\n labels = sample['annotations'].squeeze()\n #convert labels to int encoding if loss function is CrossEntropyLoss\n if len(labels.shape) == 2:\n labels_int = labels.max(1)[1]\n else:\n labels_int = labels.max()\n\n\n labels = labels.to(device)\n labels_int = labels_int.to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n _, outputs = model(inputs)\n #change labels passed to loss if BCELoss or NLLLoss\n if params['train']['loss_type'] == 'NLLLoss':\n labels = labels.type(dtype=torch.long)\n if labels_int.dim() != 0:\n loss = criterion(outputs, labels_int) # assumes that labels are given as one hot encoded\n else:\n labels_int = labels_int.unsqueeze(0).type(dtype=torch.long)\n loss = criterion(outputs, labels_int)\n else:\n loss = criterion(outputs, labels)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n\n optimizer.step()\n\n if phase == 
'val':\n                        # append outputs and labels of batch to overall cached outputs and labels of epoch\n                        with torch.no_grad():\n                            outs_epoch.append(outputs.cpu().numpy())\n                            # if BCELoss, labels are one hot encoded\n                            labels_epoch.append(labels.cpu().numpy())\n\n                    # logging...\n                    if i % 100 == 0:\n                        if phase =='val':\n                            print('current i is: {}/{} \\n'\n                                  ' current loss is {} \\n'.format(i,\n                                                                  len(dataloaders[phase].dataset)/\n                                                                  dataloaders[phase].batch_size,\n                                                                  loss.item(),))\n                        else:\n                            print('current i is: {}/{} \\n'\n                                  ' current loss is {}'.format(i,\n                                                               len(dataloaders[phase].dataset)/\n                                                               dataloaders[phase].batch_size,\n                                                               loss.item()))\n                    running_loss += loss.item() * inputs.size(0)\n\n                #calculate loss\n                epoch_loss = running_loss / dataset_sizes[phase]\n\n                # write TensorBoard output...\n                if phase == 'val':\n                    # save predicted classes for statistics\n                    outs_epoch = np.concatenate(outs_epoch, axis=0)\n                    labels_epoch = np.concatenate(labels_epoch, axis=0)\n\n                    roc_auc = roc_auc_score(y_true=labels_epoch, y_score=outs_epoch, average=\"macro\")\n\n                    if model.pooling.beta.shape[0] == 3:\n                        current_lr = optimizer.param_groups[0]['lr']\n                        res_list = [model.pooling.beta[0], model.pooling.beta[1], model.pooling.beta[2], current_lr, epoch, epoch_loss, roc_auc]\n                        tb_tags = ['beta0', 'beta1', 'beta2', 'lr', 'epoch', 'val_epoch_loss', 'roc_auc']\n                        knoedl_tb_logger.add_scalars(tb_tags, res_list, step=epoch)\n                        print('{} Loss: {:.4f} roc_auc: {:.4f}'.format(\n                            phase, epoch_loss, roc_auc))\n                    else:\n                        current_lr = optimizer.param_groups[0]['lr']\n                        res_list = [model.pooling.beta, current_lr, epoch, epoch_loss, roc_auc]\n                        tb_tags = ['beta0', 'lr', 'epoch', 'val_epoch_loss', 'roc_auc']\n                        knoedl_tb_logger.add_scalars(tb_tags, res_list, step=epoch)\n                        print('{} Loss: {:.4f} roc_auc: {:.4f}'.format(\n                            phase, epoch_loss, roc_auc))\n\n                elif phase == 'train':\n                    res_list = [epoch, epoch_loss]\n                    tb_tags = ['epoch', 'train_epoch_loss']\n                    knoedl_tb_logger.add_scalars(tb_tags, res_list, step=epoch)\n                    print('{} Loss: {:.4f}'.format(\n                        phase, epoch_loss))\n\n\n                if phase == 'val':  # best-model bookkeeping uses roc_auc, which only exists after a validation pass\n                    if roc_auc > best_roc_auc:\n                        best_roc_auc = roc_auc\n                        best_model_wts = copy.deepcopy(model.state_dict())\n\n                        # save model weights to file\n                        save_at = os.path.join(log_dir, 'checkpoint{}_{}.pth.tar'.format(epoch, phase))\n                        # torch.save(model.state_dict(), save_at)\n\n                        # save more checkpoints\n                        torch.save({\n                            'epoch': epoch,\n                            'model_state_dict': model.state_dict(),\n                            'optimizer_state_dict': optimizer.state_dict(),\n                            'loss': loss\n                        }, save_at)\n\n                        counter = 0\n                    else:\n                        counter += 1\n\n                if counter >= patience:\n                    break\n\n        time_elapsed = time.time() - since\n        print('Training complete in {:.0f}m {:.0f}s'.format(\n            time_elapsed // 60, time_elapsed % 60))\n        print('Best roc_auc: {:4f}'.format(best_roc_auc))\n\n        # load best model weights\n        model.load_state_dict(best_model_wts)\n        save_best = os.path.join(log_dir, 'best.pth.tar')\n\n        # save the best model\n        torch.save({\n            'epoch': epoch,\n            'model_state_dict': model.state_dict(),\n            'optimizer_state_dict': optimizer.state_dict(),\n            'loss': loss\n        }, save_best)\n\n    return model\n\nif __name__ == '__main__':\n    main()\n\nprint('done')\n\n\n","sub_path":"classification_training.py","file_name":"classification_training.py","file_ext":"py","file_size_in_byte":18072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"410368410","text":"#! /usr/bin/env python3\n\n# Problem 49 - Prime permutations\n#\n# The arithmetic sequence, 1487, 4817, 8147, in which each of the terms\n# increases by 3330, is unusual in two ways: (i) each of the three terms\n# are prime, and, (ii) each of the 4-digit numbers are permutations of one\n# another.\n#\n# There are no arithmetic sequences made up of three 1-, 2-, or 3-digit\n# primes, exhibiting this property, but there is one other 4-digit increasing\n# sequence.\n#\n# What 12-digit number do you form by concatenating the three terms in this\n# sequence?\n\nimport unittest\n\nfrom util import *\n\ndef longestPrimePermSeq(digits):\n allPrimePerms = primePerms(digits)\n maxSeqLen, maxSeqs = 0, []\n for p, ps in allPrimePerms.items():\n index = seqToDict(ps)\n a = ps[0]\n for b in ps[1:]:\n d = b - a\n it = a\n seq = [it]\n while it + d in index:\n seq.append(it + d)\n it = ps[index[it + d]]\n if len(seq) == maxSeqLen:\n maxSeqs.append(seq)\n if len(seq) > maxSeqLen:\n maxSeqLen = len(seq)\n maxSeqs = [seq]\n return maxSeqs\n\ndef primePerms(digits):\n limit = int('9' * digits)\n primeList = [p for p in primes(limit) if numDigits(p) == digits]\n primeSet = set(primeList)\n\n allPerms = {}\n for i, p in enumerate(primeList):\n allPerms[p] = set()\n for perm in perms(intToSeq(p)):\n q = seqToInt(perm)\n if q in primeSet and q >= p:\n allPerms[p].add(q)\n allPerms[p] = sorted(allPerms[p])\n return allPerms\n\ndef seqToDict(seq):\n index = {}\n for i, n in enumerate(seq):\n index[n] = i\n return index\n\nclass Test(unittest.TestCase):\n def test_problem049(self):\n seqs = sorted([seqToInt(seq) for seq in longestPrimePermSeq(4)])\n self.assertEqual(seqs, [148748178147, 296962999629])\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"problem049.py","file_name":"problem049.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"4479483","text":"#\n# Console.py\n# Advanced console capabilites on Windows\n#\n# Jonatan H Sundqvist\n# July 30 2014\n#\n\n# TODO | - Look for portable solutions, dependency checks (cf. curses)\n#\t\t - Decide on API argument scheme (X, Y, tuple, complex, either or, etc)\n#\t\t \t-- Decorator for overloads (?)\n#\t\t - Proper Unicode handling, encoding queries\n#\t\t - Saving output\n#\t\t - Sort methods in a logical order (High-level API, Low-level API, internal methods, auxiliary internal methods, etc.)\n#\t\t - Position printing (printAt, fill, etc.)\n\n# SPEC | - \n#\t\t -\n\n\nimport tkinter as tk\n\nfrom ctypes import *\nfrom itertools import cycle, takewhile\n\nfrom sys import stdout\nfrom time import sleep\nfrom random import choice\nfrom collections import namedtuple\n\nif __name__ == '__main__':\n\tfrom WinTypes import *\nelse:\n\tfrom SwiftUtils.WinTypes import *\t# (?) # TODO: Fix import error (different behaviour when including this module from another script) (✓)\n# from constants import * \t# (?)\n\n\nclass Colours:\n\tBLACK \t= 0x0\n\tBLUE \t= 0x1\n\tGREEN \t= 0x2\n\tCYAN \t= 0x3\n\tBLOOD \t= 0x4\n\tPURPLE \t= 0x5\n\tGOLD \t= 0x6\n\tBONE \t= 0x7\n\tGREY \t= 0x8\n\tOCEAN \t= 0x9\n\tGRASS \t= 0xA\n\tLAGOON \t= 0xB\n\tRED \t= 0xC\n\tPINK \t= 0xD\n\tYELLOW \t= 0xE\n\tWHITE \t= 0xF\n\n\nclass Console():\n\n\t'''\n\tWraps the console API for Windows,\n\tproviding a simple interface for \n\tadvanced text-based interaction.\n\n\t'''\n\n\tdef __init__(self):\n\n\t\t''' '''\n\n\t\t# Acquire handle\n\t\twindll.Kernel32.GetStdHandle.restype = c_ulong\n\t\tself.HANDLE = c_ulong(0XFFFFFFF5)\n\t\tself.hStdout = windll.Kernel32.GetStdHandle(self.HANDLE)\n\n\t\t# Initialize\n\t\tself.title('Labyrinthian')\n\n\t\t# Initialize colours\n\t\tself.bg = 0x00 # Highest bits indicate bg?\n\t\tself.fg = 0x00 # Lowest bits indicate fg?\n\n\t\t# Initialize buffer attributes\n\t\t# TODO: Cursor object (eg. pos, visible, etc.) (?)\n\t\tself.size \t= None # Buffer size (in characters)\n\t\tself.pos \t= 0, 0 # Cursor position (character offset from top left corner)\n\n\t\tself.updateBufferInfo()\n\n\n\tdef colour(self, bg=None, fg=None):\n\n\t\t''' Returns or sets foreground and background colour '''\n\n\t\tif (bg is None) and (fg is None):\n\t\t\treturn (self.bg << 4) + (self.fg)\n\t\t\n\t\tif bg is not None:\n\t\t\tassert isinstance(bg, int) and (0x0 <= bg <= 0xF)\n\t\t\tself.bg = bg\n\t\t\n\t\tif fg is not None:\n\t\t\tassert isinstance(fg, int) and (0x0 <= fg <= 0xF)\n\t\t\tself.fg = fg\n\n\t\twindll.Kernel32.SetConsoleTextAttribute(self.hStdout, (self.bg << 4) + (self.fg))\n\n\n\tdef cursor(self, x=None, y=None):\n\n\t\t''' Sets or retrieves the cursor position '''\n\n\t\tself.updateBufferInfo()\n\t\tstdout.flush()\n\t\t#print('X: %d\\n%sY: %d' % (self.pos[0], ' ' * (self.pos[0]), self.pos[1]))\n\t\t\n\t\tif x is None and y is None:\n\t\t\t# TODO: Make sure self.pos is up to date (cf. print)\n\t\t\treturn self.pos\n\n\t\tif x is not None:\n\t\t\tself.pos = x, self.pos[1]\n\n\t\tif y is not None:\n\t\t\tself.pos = self.pos[0], y\n\n\t\twindll.Kernel32.SetConsoleCursorPosition(self.hStdout, COORD(*self.pos))\n\n\n\tdef charAt(self, X, Y, char=None, bg=None, fg=None):\n\n\t\t''' Sets or retrieves the character at the specified position '''\n\n\t\t# TODO: Retrieve colour data as well (cf. 
CHAR_INFO)\n\t\tif char is None:\n\t\t\treturn 0 # Char at X,Y\n\t\telse:\n\t\t\traise NotImplementedError # Set char at X, Y\n\n\n\tdef view(self, section, contents=None, bg=None, fg=None):\n\n\t\t''' Sets or retrieves a rectangular section of the console buffer '''\n\n\t\traise NotImplementedError\n\n\n\tdef updateBufferInfo(self):\n\n\t\t''' '''\n\n\t\tinfo = BUFFERINFO()\n\t\twindll.Kernel32.GetConsoleScreenBufferInfo(self.hStdout, byref(info)) # TODO: Make sure this is correct\n\t\t\n\t\tself.pos \t= (info.dwCursorPosition.X, info.dwCursorPosition.Y)\n\t\tassert self.pos == (info.dwCursorPosition.X, info.dwCursorPosition.Y)\n\t\tself.size \t= info.dwSize.X, info.dwSize.Y\n\n\n\tdef pullEvent(self):\n\n\t\t''' '''\n\n\t\traise NotImplementedError\n\n\t\tnumEvents = DWORD(0)\n\t\twindll.Kernel32.GetNumberOfConsoleInputEvents(self.hStdout, byref(numEvents))\n\t\trecord = INPUT_RECORD()\n\t\tlength = DWORD(1)\n\t\twindll.Kernel32.GetConsoleScreenBufferInfo(self.hStdout)\n\n\n\tdef title(self, title=None):\n\n\t\t''' Returns or sets title '''\n\n\t\tif title is None:\n\t\t\treturn self._title\n\t\telse:\n\t\t\tself._title = title\t# stored as _title so the attribute does not shadow this method\n\t\t\twindll.Kernel32.SetConsoleTitleW(title)\n\n\n\tdef moveCursor(self, x, y):\n\n\t\t''' Moves the cursor relative to its current position '''\n\n\t\tself.cursor(x+self.pos[0], y+self.pos[1])\n\n\n\tdef putTokens(self, *tokens):\n\n\t\t''' '''\n\n\t\tfor token in tokens:\n\t\t\tif isinstance(token, str):\n\t\t\t\tprint(token, end=' ')\n\t\t\telse:\n\t\t\t\tstdout.flush() # Have to flush the buffer for the colour change to take effect. Printing a newline also works.\n\t\t\t\tself.colour(fg=token)\n\n\n\tdef colourPrint(self, string):\n\t\ttokens = [word if not hasattr(Colours, word) else getattr(Colours, word) for word in string.split()]\n\t\tself.putTokens(*tokens)\n\n\n\tdef parseMarkup(self, markup):\n\n\t\t''' '''\n\n\t\t# TODO: Parse markup\n\t\t# TODO: Escapes for syntactic characters\n\t\t# TODO: Default formatting for plain text\n\t\t# TODO: Debugging, error handling\n\t\t# TODO: Optimise, extract setup code (eg. definitions)\n\t\t# TODO: Use regex or library (?)\n\t\t# NOTE: Nested tags are currently not supported\n\t\t\n\t\tToken = namedtuple('Token', 'fg bg text')\n\t\ttokens = []\n\n\t\t# Default values for attributes\n\t\tdefaults = {\n\t\t\t'fg': 'WHITE',\n\t\t\t'bg': 'BLACK'\n\t\t}\n\n\t\tdef colour(prop, frmt):\n\t\t\t''' '''\n\t\t\t# TODO: Find a more general name (eg. parseAttributes)\n\t\t\t# TODO: Allow customisation via kwargs (?)\n\t\t\tif prop not in frmt:\n\t\t\t\treturn defaults[prop]\n\t\t\telse:\n\t\t\t\t# TODO: Use colour aliases when printing tokens (?)\n\t\t\t\t# TODO: More attributes (...)\n\t\t\t\t# This sub-parser only consumes upper-case letters (since it's trying to extract a Colour constant)\n\t\t\t\t#return getattr(Colours, ''.join(takewhile(lambda c: c.isupper(), frmt[frmt.index(prop)+3:])))\n\t\t\t\t# This generalised sub-parser extracts ANY value token and leaves the interpretation to the caller\n\t\t\t\t# NOTE: Assumes the delimiter is a space. 
Easily customised.\n\t\t\t\treturn ''.join(takewhile(lambda c: c not in ' >', frmt[frmt.index(prop)+3:]))\n\n\t\t\n\t\twhile len(markup) > 0:\n\t\t\tif markup.startswith('<'):\n\t\t\t\tbegin \t= markup.index('<') # Should always be 0 within this branch\n\t\t\t\tend \t= markup.index('>') # Last index of formatting tag\n\t\t\t\tfrmt \t= markup[begin+1:end]\n\n\t\t\t\tclose \t= end + 1 + markup[end+1:].index('>') # Skip formatting tag when looking for closing tag (unnecessary optimization (?))\n\t\t\t\ttext \t= markup[end+1:close]\t\t\t\t\t# Extract text between formatting tag and end tag\n\n\t\t\t\tmarkup = markup[close+len('>'):] # Increment the pointer (so to speak)\n\n\t\t\t\t# TODO: Use takeWhile or regex (?)\n\t\t\t\t#fg = Colours.WHITE if 'fg=' not in frmt else getattr(Colours, frmt[]) # TODO: Allow hex colours too (?)\n\t\t\t\t#bg = Colours.BLACK if 'bg=' not in frmt else getattr(Colours, frmt[frmt.index('bg=')+3:(frmt[frmt.index('bg=')+3:].index())])\n\t\t\t\tfg = colour('fg', frmt)\n\t\t\t\tbg = colour('bg', frmt)\n\t\t\t\ttokens.append(Token(fg, bg, text))\n\t\t\telse:\n\t\t\t\t# Token does not have tags\n\t\t\t\tend = markup.index('<') if '<' in markup else len(markup)\n\t\t\t\ttokens.append(Token(defaults['fg'], defaults['bg'], markup[:end]))\n\t\t\t\tmarkup = markup[end:]\n\n\t\treturn tokens\n\t\t#return 'Hello there>This is white text. IMPORTANT!>'\n\n\n\tdef printMarkup(self, markup):\n\t\t\n\t\t''' '''\n\t\t# NOTE: Currently incompatible with customised markup\n\t\tfor token in self.parseMarkup(markup):\n\t\t\tself.putColoured(char=token.text, fg=getattr(Colours, token.fg), bg=getattr(Colours,token.bg))\n\n\t\t# TODO: Reset formatting afterwards (?)\n\n\n\tdef putColoured(self, char, fg=None, bg=None):\n\t\t''' Prints a coloured string '''\n\t\t# TODO: Rename char argument\n\t\tstdout.flush()\n\t\tself.colour(bg=bg, fg=fg)\n\t\tprint(char, end='')\n\t\tstdout.flush()\n\n\n\ndef main():\n\n\tconsole = Console()\n\n\tconsole.colourPrint('RED ERROR! WHITE ! 
Two minutes to self destruction.')\n\tprint()\n\tconsole.colourPrint('Evacuate GREEN premises WHITE immediately!')\n\t\n\t#print(('#'*20+'\\n')*20)\n\t#x = console.cursor(6,5)\n\t#print('█')\n\t#x = console.cursor(6,6)\n\t#print('█')\n\n\tprint()\n\n\tmaze = [\n\t\t'███████████████████████████████',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ █ █ █',\n\t\t'█ █ █ █',\n\t\t'█ █ █ █',\n\t\t'█ █ █',\n\t\t'█ █████████ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ ████████ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'███████████████████████████████'\n\t]\n\n\tblocks = {\n\t '█': Colours.GREY,\n\t ' ': Colours.GREEN\n\t}\n\n\tfor line in maze:\n\t\tfor tile in line:\n\t\t\tcolour = blocks[tile]\n\t\t\tconsole.putColoured(tile, fg=colour, bg=colour)\n\t\tprint()\n\n\tprint()\n\n\tconsole.colour(bg=Colours.GREEN, fg=Colours.WHITE)\n\n\tdef left(steps):\n\t\treturn [(-1, 0) for X in range(steps)]\n\n\tdef right(steps):\n\t\treturn [(1, 0) for X in range(steps)]\n\n\tdef up(steps):\n\t\treturn [(0, -1) for X in range(steps)]\n\n\tdef down(steps):\n\t\treturn [(0, 1) for X in range(steps)]\n\n\tconsole.cursor(3, 5)\n\n\n\t#==============================================================================================================\n\t# Negotiating the maze\n\t#==============================================================================================================\n\t# NOTE: Printing affects cursor position\n\t# TODO: Console should take that into account\n\tfor X, Y in down(6) + right(8) + up(7) + right(6) + down(4) + right(8) + down(6) + left(6) + down(3) + left(8):\n\t\tbreak\n\t\tconsole.cursor(X+console.pos[0], Y+console.pos[1])\n\t\tprint('O')\n\t\tsleep(1/24)\n\t\tconsole.cursor(console.pos[0], console.pos[1])\n\t\tprint(' ')\n\n\n\t#==============================================================================================================\n\t# Rotating bar\n\t#==============================================================================================================\n\tfor f in range(10):\n\t\tbreak\n\t\tconsole.cursor(5,5)\n\t\tprint('|/-\\\\|/-\\\\'[f%8])\n\t\tconsole.cursor(7,12)\n\t\tprint('|/-\\\\|/-\\\\'[f%8])\n\t\tconsole.cursor(6,14)\n\t\tprint('|/-\\\\|/-\\\\'[f%8])\n\t\tsleep(1/8)\n\n\n\t#==============================================================================================================\n\t# Animating coloured squares\n\t#==============================================================================================================\n\tfor f, p, c in zip(range(100), cycle([(5,5), (6,5), (6,6), (5,6)]), cycle([Colours.YELLOW, Colours.PURPLE, Colours.GOLD, Colours.BLOOD])):\n\t\tbreak\n\t\tconsole.cursor(*p)\n\t\tconsole.putColoured(' ', bg=c)\n\t\t\n\t\tconsole.cursor(34, 2)\n\t\tconsole.putColoured('Frame: %d' % f, bg=Colours.BLACK)\n\t\tconsole.cursor(34, 4)\n\t\tconsole.putColoured('X: ', bg=Colours.BLACK, fg=Colours.BLOOD)\n\t\tconsole.putColoured(p[0], bg=Colours.BLACK, fg=Colours.WHITE)\n\t\tconsole.putColoured(', Y: ', bg=Colours.BLACK, fg=Colours.OCEAN)\n\t\tconsole.putColoured(p[1], bg=Colours.BLACK, fg=Colours.WHITE)\n\n\t\tsleep(1/5)\n\n\t\tconsole.cursor(*p)\n\t\tconsole.putColoured(' ', bg=Colours.GREEN)\n\n\tconsole.cursor(0,20)\n\n\n\t#==============================================================================================================\n\t# Markup test\n\t#==============================================================================================================\n\tconsole.printMarkup('Hello there! 
>This is white text. IMPORTANT!>')\n\n\n\t#==============================================================================================================\n\t# EVENTS\n\t#==============================================================================================================\n\tapp = tk.Tk()\n\tapp.bind('<Left>', \tlambda e: [console.moveCursor(-1, 0), console.putColoured(' ', bg=Colours.GRASS)])\n\tapp.bind('<Right>', lambda e: [console.moveCursor(1, 0), console.putColoured(' ', bg=Colours.GRASS)])\n\tapp.bind('<Up>', \tlambda e: [console.moveCursor(0, -1), console.putColoured(' ', bg=Colours.GRASS)])\n\tapp.bind('<Down>', \tlambda e: [console.moveCursor(0, 1), console.putColoured(' ', bg=Colours.GRASS)])\n\tapp.bind('<space>', lambda e: console.putColoured(' ', bg=choice([Colours.RED, Colours.GOLD, Colours.LAGOON])))\n\tapp.mainloop()\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Console.py","file_name":"Console.py","file_ext":"py","file_size_in_byte":12500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"512362606","text":"import os\nimport json\n\nDATASET_PATH = './2013_camera_specs'\n\n\ndef load_json(index):\n source, specification_num = index.split(\"//\")\n specification = specification_num + \".json\"\n with open(os.path.join(DATASET_PATH, source, specification)) as specification_file:\n return json.load(specification_file)\n\n\ndef load_page_title(index):\n return load_json(index).get('')\n\n\ndef load_model(index):\n model = load_json(index).get('model')\n if model:\n if isinstance(model, list):\n concat_model = model[0]\n for i in range(1, len(model)):\n concat_model = concat_model + ' ' + model[i]\n return concat_model\n else:\n return model\n else:\n return 'NO-MODEL'\n\n\n","sub_path":"Rule_Based_Nan/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"41005345","text":"import pandas as pd\nimport numpy as np\nimport sys, os\nimport string\nimport re\nimport unicodedata\nimport itertools\nimport random\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import NMF, LatentDirichletAllocation\nfrom sklearn.metrics import accuracy_score\n\neng_sw = np.load('Data/eng_sw.npy')\nfr_sw = np.load('Data/fr_sw.npy')\ndu_sw = np.load('Data/du_sw.npy')\n\nsw_dict = {'EN' : eng_sw, \n\t\t 'FR' : fr_sw, \n\t\t 'DU' : du_sw}\n\ndef remove_accents(input_str):\n nfkd_form = unicodedata.normalize('NFKD', input_str)\n return u\"\".join([c for c in nfkd_form if not unicodedata.combining(c)])\n\ndef preprocess(text, lang=None):\n text = remove_accents(text)\n text = ''.join(i for i in text if not i.isdigit())\n text = ''.join(i for i in text if (i.isalnum() | (i == ' ')))\n text = text.strip()\n words = re.split(r'\\W+', text)\n words = [word.lower() for word in words if word not in string.punctuation]\n words = [word for word in words if len(word) < 15]\n if(lang != None):\n \twords = [word for word in words if word not in sw_dict[lang]]\n return text\n\ndef get_classes(model, feature_names, n_top_words):\n classes = []\n for feature_idx, topic in enumerate(model.components_):\n fnames = [feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]\n if('van' in fnames):\n classes.append('DU')\n elif('the' in fnames):\n classes.append('EN')\n else:\n classes.append('FR')\n return classes\n\ndef print_top_words(model, feature_names, n_top_words):\n for topic_idx, topic in enumerate(model.components_):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]])\n print(message)\n print()\n\ndu = open('Data/Corpus/trainDU.txt', 'r').read().split('.')\nall_du = []\nfor sent in du:\n all_du.append(preprocess(sent, lang='DU'))\nall_du = all_du[1:]\nall_du = [du for du in all_du if du != '']\n\nen = open('Data/Corpus/trainEN.txt', 'r').read().split('.')\nall_en = []\nfor sent in en:\n all_en.append(preprocess(sent, lang='EN'))\nall_en = [en for en in all_en if en != '']\n\nfr = open('Data/Corpus/trainFR.txt', 'r').read().split('.')\nall_fr = []\nfor sent in fr:\n all_fr.append(preprocess(sent, lang='FR'))\nall_fr = [fr for fr in all_fr if fr != '']\n\ntf = TfidfVectorizer()\nsparsem = tf.fit_transform(all_en+all_du+all_fr)\nnmf = NMF(n_components=3, random_state=1,\n alpha=.1, l1_ratio=.5).fit(sparsem)\n\nprint()\nprint()\ntf_features_names = tf.get_feature_names()\nprint_top_words(nmf, tf_features_names, 10)\nprint()\nprint()\n\ntest = open('Data/Test/all.txt', 'r').read().split('\\n')\nclasses = get_classes(nmf, tf.get_feature_names(), 10)\nclasses = input('Classes: ').split(',')\npreds = []\nlabels = np.load('Stats/labels.npy')\nstats = []\n\nfor sentence, label in zip(test, labels):\n try:\n sentence = preprocess(sentence)\n sparset = tf.transform([sentence]).todense()\n results = nmf.transform(sparset)[0]\n idx = results.argmax()\n preds.append(classes[idx])\n for i in range(3):\n stats.append([classes[idx], results[i], classes[i], sentence, label])\n except Exception as e:\n print(e)\n\ndf = pd.DataFrame(stats, columns = ['Prediction', 'Score', 'Language', 'Sentence', 'Label'])\ndf.to_pickle('Stats/nmf_stats.pkl')\n\nprint(accuracy_score(labels, 
preds))\n\n\n\n","sub_path":"Demo/nmf.py","file_name":"nmf.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"182349881","text":"# -*- coding: utf-8 -*-\nfrom odoo import http, fields, exceptions, _\nfrom odoo.http import request\nfrom enum import Enum\n\nimport datetime\nimport json\nimport traceback\nimport psycopg2\nimport logging\nimport time\n\n_logger = logging.getLogger(__name__)\n\n\nclass BadTimeException(Exception):\n pass\n\n\nclass F0Parse(Enum):\n hw_ver = 0\n serial_num = 1\n sw_ver = 2\n inputs = 3\n outputs = 4\n time_schedules = 5\n io_table_lines = 6\n alarm_lines = 7\n mode = 8\n max_cards_count = 9\n max_events_count = 10\n\n\nclass WebRfidController(http.Controller):\n def __init__(self, *args, **kwargs):\n self._post = None\n self._vending_hw_version = None\n self._webstacks_env = None\n self._webstack = None\n self._ws_db_update_dict = None\n self._time_format = '%m.%d.%y %H:%M:%S'\n super(WebRfidController, self).__init__(*args, **kwargs)\n\n def _log_cmd_error(self, description, command, error, status_code):\n command.write({\n 'status': 'Failure',\n 'error': error,\n 'ex_timestamp': fields.datetime.now(),\n 'response': json.dumps(self._post),\n })\n\n self._report_sys_ev(description, command.controller_id)\n return self._check_for_unsent_cmd(status_code)\n\n def _check_for_unsent_cmd(self, status_code, event=None):\n commands_env = request.env['hr.rfid.command'].sudo()\n\n processing_comm = commands_env.search([\n ('webstack_id', '=', self._webstack.id),\n ('status', '=', 'Process'),\n ])\n\n if len(processing_comm) > 0:\n processing_comm = processing_comm[-1]\n return self._retry_command(status_code, processing_comm, event)\n\n command = commands_env.search([\n ('webstack_id', '=', self._webstack.id),\n ('status', '=', 'Wait'),\n ])\n\n if len(command) == 0:\n return { 'status': status_code }\n\n command = command[-1]\n\n if event is not None:\n event.command_id = command\n return self._send_command(command, status_code)\n\n def _retry_command(self, status_code, cmd, event):\n if cmd.retries == 5:\n cmd.status = 'Failure'\n return self._check_for_unsent_cmd(status_code, event)\n\n cmd.retries = cmd.retries + 1\n\n if event is not None:\n event.command_id = cmd\n return self._send_command(cmd, status_code)\n\n def _parse_heartbeat(self):\n self._ws_db_update_dict['version'] = str(self._post['FW'])\n return self._check_for_unsent_cmd(200)\n\n def _parse_event(self):\n controller = request.env['hr.rfid.ctrl'].sudo().search([\n ('ctrl_id', '=', self._post['event']['id']),\n ('webstack_id', '=', self._webstack.id),\n ])\n\n if len(controller) == 0:\n ctrl_env = request.env['hr.rfid.ctrl'].with_user(1)\n cmd_env = request.env['hr.rfid.command'].with_user(1)\n\n # try:\n controller = ctrl_env.create({\n 'name': 'Controller',\n 'ctrl_id': self._post['event']['id'],\n 'webstack_id': self._webstack.id,\n })\n # except\n\n command = cmd_env.read_controller_information_cmd(controller)\n\n return self._send_command(command, 400)\n\n card_env = request.env['hr.rfid.card'].sudo()\n workcodes_env = request.env['hr.rfid.workcode'].sudo()\n card = card_env.search(['|',('active', '=', True), ('active', '=', False),\n ('number', '=', self._post['event']['card']) ])\n reader = None\n event_action = self._post['event']['event_n']\n\n if event_action == 30:\n cmd_env = request.env['hr.rfid.command'].sudo()\n self._report_sys_ev('Controller restarted', controller)\n cmd_env.synchronize_clock_cmd(controller)\n return self._check_for_unsent_cmd(200)\n\n reader_num = self._post['event']['reader']\n if reader_num == 0:\n reader_num = ((self._post['event']['event_n'] - 3) % 4) + 1\n 
else:\n reader_num = reader_num & 0x07\n for it in controller.reader_ids:\n if it.number == reader_num:\n reader = it\n break\n\n if reader is None:\n self._report_sys_ev('Could not find a reader with that id', controller)\n return self._check_for_unsent_cmd(200)\n\n door = reader.door_id\n\n ev_env = request.env['hr.rfid.event.user'].sudo()\n\n if len(card) == 0:\n if event_action == 64 and controller.hw_version != self._vending_hw_version:\n cmd_env = request.env['hr.rfid.command'].sudo()\n cmd = {\n 'webstack_id': controller.webstack_id.id,\n 'controller_id': controller.id,\n 'cmd': 'DB',\n 'status': 'Process',\n 'ex_timestamp': fields.Datetime.now(),\n 'cmd_data': '40%02X00' % (4 + 4*(reader.number - 1)),\n }\n cmd = cmd_env.create(cmd)\n cmd_js = {\n 'status': 200,\n 'cmd': {\n 'id': cmd.controller_id.ctrl_id,\n 'c': cmd.cmd[:2],\n 'd': cmd.cmd_data,\n }\n }\n cmd.request = json.dumps(cmd_js)\n if self._post['event']['card'] == '0000000000':\n self._report_sys_ev('', controller)\n else:\n self._report_sys_ev(_('Could not find the card'), controller)\n return cmd_js\n elif event_action in [ 21, 22, 23, 24 ]:\n event_dict = {\n 'ctrl_addr': controller.ctrl_id,\n 'door_id': reader.door_id.id,\n 'reader_id': reader.id,\n 'event_time': self._get_ws_time_str(),\n 'event_action': '5', # Exit button\n }\n event = ev_env.create(event_dict)\n return self._check_for_unsent_cmd(200, event)\n\n if self._post['event']['card'] == '0000000000':\n self._report_sys_ev('', controller)\n else:\n self._report_sys_ev(_('Could not find the card'), controller)\n return self._check_for_unsent_cmd(200)\n\n # External db event, controller requests for permission to open or close door\n if event_action == 64 and controller.hw_version != self._vending_hw_version:\n ret = request.env['hr.rfid.access.group.door.rel'].sudo().search([\n ('access_group_id', 'in', card.get_owner().hr_rfid_access_group_ids.ids),\n ('door_id', '=', reader.door_id.id)\n ])\n return self._respond_to_ev_64(len(ret) > 0 and card.active is True,\n controller, reader, card)\n\n event_action = ((event_action - 3) % 4) + 1\n # Turnstile controller. 
If the 7th bit is not up, then there was no actual entry\n        if controller.hw_version == '9' \\\n                and (self._post['event']['reader'] & 64) == 0 \\\n                and event_action == 1:\n            event_action = 6\n\n
        # Relay controller\n        if controller.is_relay_ctrl() and event_action == 1 and controller.mode == 3:\n            dt = self._post['event']['dt']\n            if len(dt) == 24:\n                chunks = [ dt[0:6], dt[6:12], dt[12:18], dt[18:24] ]\n                _logger.debug('Chunks=%s', chunks)\n                door_number = 0\n                for i in range(len(chunks)):\n                    chunk = chunks[i]\n                    n1 = int(chunk[:2])\n                    n2 = int(chunk[2:4])\n                    n3 = int(chunk[4:])\n                    door_number |= n1*100 + n2*10 + n3\n                    if i != len(chunks)-1:\n                        door_number <<= 8\n                for _door in reader.door_ids:\n                    if _door.number == door_number:\n                        door = _door\n                        break\n\n
        event_dict = {\n            'ctrl_addr': controller.ctrl_id,\n            'door_id': door.id,\n            'reader_id': reader.id,\n            'card_id': card.id,\n            'event_time': self._get_ws_time_str(),\n            'event_action': str(event_action),\n        }\n\n
        if reader.mode == '03' and controller.hw_version != self._vending_hw_version:  # Card and workcode\n            wc = workcodes_env.search([\n                ('workcode', '=', self._post['event']['dt'])\n            ])\n            if len(wc) == 0:\n                event_dict['workcode'] = self._post['event']['dt']\n            else:\n                event_dict['workcode_id'] = wc.id\n\n        self._get_card_owner(event_dict, card)\n        event = ev_env.create(event_dict)\n\n        return self._check_for_unsent_cmd(200, event)\n\n
    def _parse_response(self):\n        command_env = request.env['hr.rfid.command'].with_user(1)\n        response = self._post['response']\n        controller = None\n\n        for ctrl in self._webstack.controllers:\n            if ctrl.ctrl_id == response['id']:\n                controller = ctrl\n                break\n\n        if controller is None:\n            self._report_sys_ev('Module sent us a response from a controller that does not exist')\n            return self._check_for_unsent_cmd(200)\n\n
        command = command_env.search([ ('webstack_id', '=', self._webstack.id),\n                                       ('controller_id', '=', controller.id),\n                                       ('status', '=', 'Process'),\n                                       ('cmd', '=', response['c']), ], limit=1)\n\n
        if len(command) == 0 and response['c'] == 'DB':\n            command = command_env.search([ ('webstack_id', '=', self._webstack.id),\n                                           ('controller_id', '=', controller.id),\n                                           ('status', '=', 'Process'),\n                                           ('cmd', '=', 'DB2'), ], limit=1)\n\n
        if len(command) == 0:\n            self._report_sys_ev('Controller sent us a response to a command we never sent')\n            return self._check_for_unsent_cmd(200)\n\n
        if response['e'] != 0:\n            command.write({\n                'status': 'Failure',\n                'error': str(response['e']),\n                'ex_timestamp': fields.datetime.now(),\n                'response': json.dumps(self._post),\n            })\n            return self._check_for_unsent_cmd(200)\n\n
        if response['c'] == 'F0':\n            self._parse_f0_response(command, controller)\n\n
        if response['c'] == 'F6':\n            data = response['d']\n            readers = [None, None, None, None]\n            for it in controller.reader_ids:\n                readers[it.number-1] = it\n            for i in range(4):\n                if readers[i] is not None:\n                    mode = str(data[i*6:i*6+2])\n                    readers[i].write({\n                        'mode': mode,\n                        'no_d6_cmd': True,\n                    })\n\n
        if response['c'] == 'F9':\n            controller.write({\n                'io_table': response['d']\n            })\n\n
        if response['c'] == 'FC':\n            apb_mode = int(response['d'], 16)  # payload arrives as a hex string\n            for door in controller.door_ids:\n                door.apb_mode = (door.number == 1 and (apb_mode & 1)) \\\n                                or (door.number == 2 and (apb_mode & 2))\n\n
        if response['c'] == 'B3':\n            data = response['d']\n\n            entrance = [ int(data[0:2], 16), int(data[2:4], 16) ]\n            exit = [ int(data[4:6], 16), int(data[6:8], 16) ]\n            usys = [ int(data[8:10], 16), int(data[10:12], 16) ]\n            uin = [ int(data[12:14], 16), int(data[14:16], 16) ]\n            temperature = int(data[16:20], 10)\n            
humidity = int(data[20:24], 10)\n Z1 = int(data[24:26], 16)\n Z2 = int(data[26:28], 16)\n Z3 = int(data[28:30], 16)\n Z4 = int(data[30:32], 16)\n\n TOS = int(data[32:34], 16) * 10000 \\\n + int(data[34:36], 16) * 1000 \\\n + int(data[36:38], 16) * 100 \\\n + int(data[38:40], 16) * 10 \\\n + int(data[40:42], 16)\n\n DT = [ int(data[42:44], 16), int(data[44:46], 16), int(data[46:48], 16) ]\n\n if temperature >= 1000:\n temperature -= 1000\n temperature *= -1\n temperature /= 10\n\n humidity /= 10\n\n sys_voltage = ((usys[0] & 0xF0) >> 4) * 1000\n sys_voltage += (usys[0] & 0x0F) * 100\n sys_voltage += ((usys[1] & 0xF0) >> 4) * 10\n sys_voltage += (usys[1] & 0x0F)\n sys_voltage = (sys_voltage * 8) / 500\n\n input_voltage = ((uin[0] & 0xF0) >> 4) * 1000\n input_voltage += (uin[0] & 0x0F) * 100\n input_voltage += ((uin[1] & 0xF0) >> 4) * 10\n input_voltage += (uin[1] & 0x0F)\n input_voltage = (input_voltage * 8) / 500\n\n controller.write({\n 'temperature': temperature,\n 'humidity': humidity,\n 'system_voltage': sys_voltage,\n 'input_voltage': input_voltage,\n })\n\n command.write({\n 'status': 'Success',\n 'ex_timestamp': fields.datetime.now(),\n 'response': json.dumps(self._post),\n })\n\n return self._check_for_unsent_cmd(200)\n\n def _parse_f0_cmd(self, data):\n def bytes_to_num(start, digits):\n digits = digits-1\n res = 0\n for j in range(digits+1):\n multiplier = 10 ** (digits-j)\n res = res + int(data[start:start+2], 16) * multiplier\n start = start + 2\n return res\n\n return {\n F0Parse.hw_ver: str(bytes_to_num(0, 2)),\n F0Parse.serial_num: str(bytes_to_num(4, 4)),\n F0Parse.sw_ver: str(bytes_to_num(12, 3)),\n F0Parse.inputs: bytes_to_num(18, 3),\n F0Parse.outputs: bytes_to_num(24, 3),\n F0Parse.time_schedules: bytes_to_num(32, 2),\n F0Parse.io_table_lines: bytes_to_num(36, 2),\n F0Parse.alarm_lines: bytes_to_num(40, 1),\n F0Parse.mode: int(data[42:44], 16),\n F0Parse.max_cards_count: bytes_to_num(44, 5),\n F0Parse.max_events_count: bytes_to_num(54, 5),\n }\n\n def _parse_f0_response(self, command, controller):\n ctrl_env = request.env['hr.rfid.ctrl'].with_user(1)\n response = self._post['response']\n data = response['d']\n ctrl_mode = int(data[42:44], 16)\n external_db = (ctrl_mode & 0x20) > 0\n relay_time_factor = '1' if ctrl_mode & 0x40 else '0'\n dual_person_mode = (ctrl_mode & 0x08) > 0\n ctrl_mode = ctrl_mode & 0x07\n\n f0_parse = self._parse_f0_cmd(data)\n\n hw_ver = f0_parse[F0Parse.hw_ver]\n\n if (ctrl_mode < 1 or ctrl_mode > 4):\n return self._log_cmd_error('F0 command failure, controller sent '\n 'us a wrong mode', command, '31', 200)\n\n readers_count = int(data[30:32], 16)\n\n mode_reader_relation = { 1: [1, 2], 2: [2, 4], 3: [4], 4: [4] }\n\n if not ctrl_env.hw_version_is_for_relay_ctrl(hw_ver) and \\\n readers_count not in mode_reader_relation[ctrl_mode]:\n return self._log_cmd_error('F0 sent us a wrong reader-controller '\n 'mode combination', command, '31', 200)\n\n reader_env = request.env['hr.rfid.reader'].with_user(1)\n door_env = request.env['hr.rfid.door'].with_user(1)\n\n sw_ver = f0_parse[F0Parse.sw_ver]\n inputs = f0_parse[F0Parse.inputs]\n outputs = f0_parse[F0Parse.outputs]\n time_schedules = f0_parse[F0Parse.time_schedules]\n io_table_lines = f0_parse[F0Parse.io_table_lines]\n alarm_lines = f0_parse[F0Parse.alarm_lines]\n max_cards_count = f0_parse[F0Parse.max_cards_count]\n max_events_count = f0_parse[F0Parse.max_events_count]\n serial_num = f0_parse[F0Parse.serial_num]\n\n old_ctrl = ctrl_env.search([\n ('serial_number', '=', serial_num)\n ], limit=1)\n\n 
ctrl_already_existed = False\n        if len(old_ctrl) > 0:\n            if old_ctrl.webstack_id == controller.webstack_id:\n                ctrl_already_existed = True\n            else:\n                old_ctrl.webstack_id = controller.webstack_id\n\n
        old_reader_count = len(controller.reader_ids)\n        old_door_count = len(controller.door_ids)\n        new_reader_count = 0\n        new_door_count = 0\n\n
        def create_door(name, number):\n            # Vending controllers have no doors, so this may return None\n            nonlocal old_door_count\n            nonlocal new_door_count\n\n            door_dict = {\n                'name': name,\n                'number': number,\n                'controller_id': controller.id,\n            }\n\n            if new_door_count < old_door_count:\n                new_door_count += 1\n                _door = controller.door_ids[new_door_count-1]\n                door_dict.pop('name')\n                _door.write(door_dict)\n                return _door\n\n            if hw_ver == self._vending_hw_version:\n                return None\n            return door_env.create(door_dict)\n\n
        def create_reader(name, number, reader_type, door_id=None):\n            create_dict = {\n                'name': name,\n                'number': number,\n                'reader_type': reader_type,\n                'controller_id': controller.id,\n            }\n\n            nonlocal old_reader_count\n            nonlocal new_reader_count\n\n            if door_id is not None:\n                create_dict['door_id'] = door_id\n\n            if new_reader_count < old_reader_count:\n                new_reader_count += 1\n                _reader = controller.reader_ids[new_reader_count-1]\n                create_dict.pop('name')\n                _reader.write(create_dict)\n                return _reader\n\n            return reader_env.create(create_dict)\n\n
        def add_door_to_reader(_reader, _door):\n            _reader.door_ids += _door\n\n        def gen_d_name(door_num, controller_id):\n            return 'Door ' + str(door_num) + ' of ctrl ' + str(controller_id)\n\n
        if controller.hw_version_is_for_relay_ctrl(hw_ver):\n            if ctrl_mode == 1 or ctrl_mode == 3:\n                reader = create_reader('R1', 1, '0')\n                for i in range(outputs):\n                    door = create_door(gen_d_name(i+1, controller.id), i+1)\n                    add_door_to_reader(reader, door)\n                for i in range(1, readers_count):\n                    create_reader('R' + str(i+1), i+1, '0')\n            elif ctrl_mode == 2:\n                if outputs > 16 and readers_count < 2:\n                    return self._log_cmd_error('F0 sent us too many outputs and not enough readers',\n                                               command, '31', 200)\n                reader = create_reader('R1', 1, '0')\n                for i in range(outputs):\n                    door = create_door(gen_d_name(i+1, controller.id), i+1)\n                    add_door_to_reader(reader, door)\n                if outputs > 16:\n                    reader = create_reader('R2', 2, '0')\n                    for i in range(outputs-16):\n                        door = create_door(gen_d_name(i+1, controller.id), i+1)\n                        add_door_to_reader(reader, door)\n                    for i in range(2, readers_count):\n                        create_reader('R' + str(i+1), i+1, '0')\n                else:\n                    for i in range(1, readers_count):\n                        create_reader('R' + str(i+1), i+1, '0')\n            else:\n                raise exceptions.ValidationError(_('Got controller mode=%d for hw_ver=%s???')\n                                                 % (ctrl_mode, hw_ver))\n
        else:\n            if ctrl_mode == 1 or ctrl_mode == 3:\n                last_door = create_door(gen_d_name(1, controller.id), 1)\n                last_door = last_door.id\n                create_reader('R1', 1, '0', last_door)\n                if readers_count > 1:\n                    create_reader('R2', 2, '1', last_door)\n            elif ctrl_mode == 2 and readers_count == 4:\n                last_door = create_door(gen_d_name(1, controller.id), 1)\n                last_door = last_door.id\n                create_reader('R1', 1, '0', last_door)\n                create_reader('R2', 2, '1', last_door)\n                last_door = create_door(gen_d_name(2, controller.id), 2)\n                last_door = last_door.id\n                create_reader('R3', 3, '0', last_door)\n                create_reader('R4', 4, '1', last_door)\n            else:  # (ctrl_mode == 2 and readers_count == 2) or ctrl_mode == 4\n                _logger.debug('hardware version %s', hw_ver)\n                last_door = create_door(gen_d_name(1, controller.id), 1)\n                last_door = last_door.id if last_door else None  # vending ctrls create no door\n                create_reader('R1', 1, '0', last_door)\n                last_door = create_door(gen_d_name(2, controller.id), 2)\n                last_door = last_door.id if last_door else None\n                create_reader('R2', 2, '0', last_door)\n\n
            if ctrl_mode == 3:\n                last_door = create_door(gen_d_name(2, controller.id), 2)\n                last_door = last_door.id\n                create_reader('R3', 3, '0', last_door)\n                last_door = create_door(gen_d_name(3, controller.id), 3)\n                last_door = last_door.id\n                create_reader('R4', 4, '0', last_door)\n            elif ctrl_mode == 4:\n                last_door = create_door(gen_d_name(3, controller.id), 3)\n                last_door = last_door.id\n                create_reader('R3', 3, '0', last_door)\n                last_door = create_door(gen_d_name(4, controller.id), 4)\n                last_door = last_door.id\n                create_reader('R4', 4, '0', last_door)\n\n
        if old_reader_count > new_reader_count:\n            controller.reader_ids[new_reader_count : old_reader_count].unlink()\n        if old_door_count > new_door_count:\n            controller.door_ids[new_door_count : old_door_count].unlink()\n\n
        if controller.serial_number is False:\n            controller.name = 'Controller ' + serial_num + ' ' + str(controller.ctrl_id)\n\n
        controller.write({\n            'hw_version': hw_ver,\n            'serial_number': serial_num,\n            'sw_version': sw_ver,\n            'inputs': inputs,\n            'outputs': outputs,\n            'readers': readers_count,\n            'time_schedules': time_schedules,\n            'io_table_lines': io_table_lines,\n            'alarm_lines': alarm_lines,\n            'mode': ctrl_mode,\n            'external_db': external_db,\n            'relay_time_factor': relay_time_factor,\n            'dual_person_mode': dual_person_mode,\n            'max_cards_count': max_cards_count,\n            'max_events_count': max_events_count,\n            'last_f0_read': fields.datetime.now(),\n        })\n\n
        cmd_env = request.env['hr.rfid.command'].sudo()\n        if not ctrl_already_existed:\n            cmd_env.synchronize_clock_cmd(controller)\n            cmd_env.delete_all_cards_cmd(controller)\n            cmd_env.delete_all_events_cmd(controller)\n            cmd_env.read_readers_mode_cmd(controller)\n            cmd_env.read_io_table_cmd(controller)\n\n            if not controller.is_relay_ctrl() and (ctrl_mode == 1 or ctrl_mode == 3):\n                cmd_env.read_anti_pass_back_mode_cmd(controller)\n\n
    def _report_sys_ev(self, description, controller=None):\n        sys_ev_env = request.env['hr.rfid.event.system'].sudo()\n\n        sys_ev = {\n            'webstack_id': self._webstack.id,\n            'error_description': description,\n            'input_js': json.dumps(self._post),\n        }\n\n        if 'event' in self._post:\n            try:\n                sys_ev['timestamp'] = self._get_ws_time_str()\n            except BadTimeException:\n                sys_ev['timestamp'] = str(fields.datetime.now())\n            sys_ev['event_action'] = str(self._post['event']['event_n'])\n        else:\n            sys_ev['timestamp'] = datetime.datetime.now()\n\n        if controller is not None:\n            sys_ev['controller_id'] = controller.id\n\n        sys_ev_env.create(sys_ev)\n\n
    def _respond_to_ev_64(self, open_door, controller, reader, card):\n        cmd_env = request.env['hr.rfid.command'].sudo()\n        ev_env = request.env['hr.rfid.event.user'].sudo()\n        open_door = 3 if open_door is True else 4\n        cmd = {\n            'webstack_id': controller.webstack_id.id,\n            'controller_id': controller.id,\n            'cmd': 'DB',\n            'status': 'Process',\n            'ex_timestamp': fields.Datetime.now(),\n        }\n        if not controller.is_relay_ctrl():\n            cmd['cmd_data'] = '40%02X00' % (open_door + 4*(reader.number - 1))\n        else:\n            data = 0\n            user_doors = card.get_owner().get_doors()\n            for door in reader.door_ids:\n                if door in user_doors:\n                    data |= 1 << (door.number - 1)\n            cmd['cmd_data'] = '4000' + request.env['hr.rfid.door'].create_rights_int_to_str(data)\n        event = {\n            'ctrl_addr': controller.ctrl_id,\n            'door_id': reader.door_id.id,\n            'reader_id': reader.id,\n            'card_id': card.id,\n            'event_time': 
self._get_ws_time_str(),\n            'event_action': '64',\n        }\n        self._get_card_owner(event, card)\n        cmd = cmd_env.create(cmd)\n        cmd_js = {\n            'status': 200,\n            'cmd': {\n                'id': cmd.controller_id.ctrl_id,\n                'c': cmd.cmd[:2],\n                'd': cmd.cmd_data,\n            }\n        }\n        cmd.request = json.dumps(cmd_js)\n        event['command_id'] = cmd.id\n        ev_env.create(event)\n        return cmd_js\n\n
    def _get_ws_time_str(self):\n        return self._get_ws_time().strftime('%Y-%m-%d %H:%M:%S')\n\n
    def _get_ws_time(self):\n        t = self._post['event']['date'] + ' ' + self._post['event']['time']\n        try:\n            ws_time = datetime.datetime.strptime(t, self._time_format)\n            ws_time -= self._get_tz_offset(self._webstack)\n        except ValueError:\n            raise BadTimeException\n        return ws_time\n\n
    @staticmethod\n    def _get_tz_offset(webstack):\n        tz_h = int(webstack.tz_offset[:3], 10)\n        tz_m = int(webstack.tz_offset[3:], 10)\n        return datetime.timedelta(hours=tz_h, minutes=tz_m)\n\n
    @staticmethod\n    def _get_card_owner(event_dict: dict, card):\n        if len(card.employee_id) == 0:\n            event_dict['contact_id'] = card.contact_id.id\n        else:\n            event_dict['employee_id'] = card.employee_id.id\n\n
    @staticmethod\n    def _send_command(command, status_code):\n        command.status = 'Process'\n\n        json_cmd = {\n            'status': status_code,\n            'cmd': {\n                'id': command.controller_id.ctrl_id,\n                'c': command.cmd[:2],\n                'd': command.cmd_data,\n            }\n        }\n\n
        if command.cmd == 'D1':\n            if not command.controller_id.is_relay_ctrl():\n                card_num = ''.join('0' + ch for ch in command.card_number)\n                pin_code = ''.join('0' + ch for ch in command.pin_code)\n                ts_code = str(command.ts_code)\n                rights_data = '{:02X}'.format(command.rights_data)\n                rights_mask = '{:02X}'.format(command.rights_mask)\n                json_cmd['cmd']['d'] = card_num + pin_code + ts_code + rights_data + rights_mask\n            else:\n                card_num = ''.join('0' + ch for ch in command.card_number)\n                rights_data = '%03d%03d%03d%03d' % (\n                    (command.rights_data >> (3*8)) & 0xFF,\n                    (command.rights_data >> (2*8)) & 0xFF,\n                    (command.rights_data >> (1*8)) & 0xFF,\n                    (command.rights_data >> (0*8)) & 0xFF,\n                )\n                if command.controller_id.mode == 3:\n                    rights_mask = '255255255255'\n                else:\n                    rights_mask = '%03d%03d%03d%03d' % (\n                        (command.rights_mask >> (3*8)) & 0xFF,\n                        (command.rights_mask >> (2*8)) & 0xFF,\n                        (command.rights_mask >> (1*8)) & 0xFF,\n                        (command.rights_mask >> (0*8)) & 0xFF,\n                    )\n                rights_data = ''.join('0' + ch for ch in rights_data)\n                rights_mask = ''.join('0' + ch for ch in rights_mask)\n                json_cmd['cmd']['d'] = card_num + rights_data + rights_mask\n\n
        if command.cmd == 'D7':\n            dt = datetime.datetime.now()\n            dt += WebRfidController._get_tz_offset(command.webstack_id)\n\n            json_cmd['cmd']['d'] = '{:02}{:02}{:02}{:02}{:02}{:02}{:02}'.format(\n                dt.second, dt.minute, dt.hour, dt.weekday() + 1, dt.day, dt.month, dt.year % 100\n            )\n\n        command.request = json.dumps(json_cmd)\n\n        return json_cmd\n\n
    @http.route(['/hr/rfid/event'], type='json', auth='none', methods=['POST'], csrf=False)\n    def post_event(self, **post):\n        _logger.debug('post=%s', post)\n        t0 = time.time()\n        if len(post) == 0:\n            # Controllers with no odoo functionality use the dd.mm.yy date format\n            self._time_format = '%d.%m.%y %H:%M:%S'\n            self._post = request.jsonrequest\n        else:\n            self._time_format = '%m.%d.%y %H:%M:%S'\n            self._post = post\n        _logger.debug('Received=%s', self._post)\n\n        if 'convertor' not in post:\n            return self._parse_raw_data()\n\n        self._vending_hw_version = '16'\n        self._webstacks_env = request.env['hr.rfid.webstack'].with_user(1)\n        self._webstack = 
self._webstacks_env.search(['|',('active', '=', True), ('active', '=', False),\n ('serial', '=', str(self._post['convertor'])) ])\n self._ws_db_update_dict = {\n 'last_ip': request.httprequest.environ['REMOTE_ADDR'],\n 'updated_at': fields.Datetime.now(),\n }\n try:\n if len(self._webstack) == 0:\n new_webstack = {\n 'name': 'Module ' + str(self._post['convertor']),\n 'serial': str(self._post['convertor']),\n 'key': self._post['key'],\n 'last_ip': request.httprequest.environ['REMOTE_ADDR'],\n 'updated_at': fields.Datetime.now(),\n 'available': 'a'\n }\n self._webstacks_env.create(new_webstack)\n return { 'status': 400 }\n\n if self._webstack.key != self._post['key']:\n self._report_sys_ev('Webstack key and key in json did not match')\n return { 'status': 400 }\n\n if not self._webstack.active:\n self._webstack.write(self._ws_db_update_dict)\n self._report_sys_ev('Webstack is not active')\n return { 'status': 400 }\n\n result = {\n 'status': 400\n }\n\n if 'heartbeat' in self._post:\n result = self._parse_heartbeat()\n elif 'event' in self._post:\n result = self._parse_event()\n elif 'response' in self._post:\n result = self._parse_response()\n\n self._webstack.write(self._ws_db_update_dict)\n t1 = time.time()\n _logger.debug('Took %2.03f time to form response=%s' % ((t1-t0), str(result)))\n print('ret=' + str(result))\n return result\n except (KeyError, exceptions.UserError, exceptions.AccessError, exceptions.AccessDenied,\n exceptions.MissingError, exceptions.ValidationError, exceptions.DeferredException,\n psycopg2.DataError, ValueError) as __:\n request.env['hr.rfid.event.system'].sudo().create([{\n 'webstack_id': self._webstack.id,\n 'timestamp': fields.Datetime.now(),\n 'error_description': traceback.format_exc(),\n 'input_js': json.dumps(self._post),\n }])\n _logger.debug('Caught an exception, returning status=500 and creating a system event')\n print('Caught an exception, returning status=500 and creating a system event')\n return { 'status': 500 }\n except BadTimeException:\n t = self._post['event']['date'] + ' ' + self._post['event']['time']\n ev_num = str(self._post['event']['event_n'])\n controller = self._webstack.controllers.filtered(lambda r: r.ctrl_id == self._post['event']['id'])\n sys_ev_dict = {\n 'webstack_id': self._webstack.id,\n 'controller_id': controller.id,\n 'timestamp': fields.Datetime.now(),\n 'event_action': ev_num,\n 'error_description': 'Controller sent us an invalid date or time: ' + t,\n 'input_js': json.dumps(self._post),\n }\n request.env['hr.rfid.event.system'].sudo().create(sys_ev_dict)\n _logger.debug('Caught a time error, returning status=200 and creating a system event')\n print('Caught a time error, returning status=200 and creating a system event')\n return { 'status': 200 }\n\n def _parse_raw_data(self):\n if 'serial' in self._post and 'security' in self._post and 'events' in self._post:\n return self._parse_barcode_device()\n\n return { 'status': 200 }\n\n def _parse_barcode_device(self):\n post = self._post\n ret = request.env['hr.rfid.raw.data'].create([{\n 'do_not_save': True,\n 'identification': post['serial'],\n 'security': post['security'],\n 'data': json.dumps(post),\n }])\n\n ret_data = ret.return_data\n\n if ret.do_not_save is True:\n ret.unlink()\n\n return json.loads(ret_data)\n","sub_path":"hr_rfid/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":34540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
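The 'B3' branch of _parse_response above unpacks system and input voltages from packed-BCD nibble pairs and then scales the result by 8/500. A standalone illustration of that arithmetic, runnable outside Odoo (the sample bytes are made up):

# Standalone sketch of the nibble-wise BCD decoding used in the 'B3'
# response handler above; not tied to Odoo.
def decode_bcd_voltage(byte_pair):
    """byte_pair: two ints, each holding two BCD digits in its nibbles."""
    hi, lo = byte_pair
    value = ((hi & 0xF0) >> 4) * 1000 + (hi & 0x0F) * 100
    value += ((lo & 0xF0) >> 4) * 10 + (lo & 0x0F)
    return (value * 8) / 500   # scaling applied by the controller code

# 0x12, 0x34 encodes the BCD digits 1,2,3,4 -> 1234 -> 19.744
print(decode_bcd_voltage((0x12, 0x34)))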
+{"seq_id":"25293708","text":"# Python https://docs.python.org/3.1/tutorial/datastructures.html\n\nname = \"Dan\" # string\nage = 24 # int\nfavorites = [\"Cycling\", \"Eatsies\", \"Smiling\"] # list of strings\n\n# object (dictionary = key : value)\nperson = {\n 'name' : name,\n 'age' : age,\n 'favorites' : favorites\n}\n\nprint(person)\n","sub_path":"basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
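basics.py above only builds the person dictionary; a short assumed continuation showing the usual ways to read such a dictionary back out:

# Follow-on to basics.py (illustrative, not part of the original file):
person = {'name': 'Dan', 'age': 24, 'favorites': ['Cycling', 'Eatsies', 'Smiling']}

print(person['name'])                 # direct key access -> 'Dan'
print(person.get('nickname', 'n/a'))  # .get() avoids KeyError for missing keys

for key, value in person.items():     # iterate key/value pairs
    print(f'{key}: {value}')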
+{"seq_id":"151498106","text":"#!/usr/bin/env python3\n#Autor:JoséFacundoBogado, dedicado a Clara(I❤U)\nimport requests\nimport json\nimport sys\nimport os\nimport time\n\ndef borrarPant():\n if os.name == \"posix\":\n os.system (\"clear\")\n elif os.name == \"ce\" or os.name == \"nt\" or os.name == \"dos\":\n os.system (\"cls\")\n\ndef menu():\n print('FlashTrading 0.1 (pre-alfa)')\n print('''¿Que desea hacer? :\n [0] Estado de Cuenta\n [1] Ver Portafolio\n [2] Consultar la cotización de un titulo\n [3] Ver operaciones\n [4] Comprar/Vender\n [5] Ver Paneles de cotizaciones\n [6] Serie historica\n [7] Calculadora de bonos\n [8] Acerca de...\n [9] Salir\n ''')\n n = input('(Ingrese el número):\\t')\n borrarPant()\n if n=='0':\n estado()\n elif n=='1':\n miportafolio()\n elif n=='2':\n consulta()\n elif n=='3':\n operaciones()\n elif n=='5':\n mostrarpanel()\n elif n=='8':\n acercade()\n elif n=='9':\n salir()\n else:\n borrarPant()\n print('\\n\\n\\tERROR: Ingrese el numero de la opción que desee')\n time.sleep(2)\n menu()\n\ndef estado():\n print('FlashTrading 0.1 (pre-alfa)')\n data = {\n 'Authorization': c,\n }\n r = requests.get(\"https://api.invertironline.com/api/estadocuenta\", headers=data)\n estado = json.loads(r.text)\n n=0\n print(\n '\\nEstado de cuenta nº: ', estado['cuentas'][0]['numero'],\n '\\n\\t\\t\\t\\t[Total en Pesos:$',estado['totalEnPesos'],']\\n',\n '\\nCuenta',estado['cuentas'][0]['tipo'],\n '\\t[Total:\\t$',estado['cuentas'][0]['total'],']\\n',\n '\\n\\t\\tActivos Valorizados\\t\\t\\t$',estado['cuentas'][0]['titulosValorizados'],\n '\\n\\t\\tComprometido\\t\\t\\t\\t$',estado['cuentas'][0]['comprometido'],\n '\\n\\t\\tDisponible para operar\\t\\t\\t$','{:.2f}'.format((estado['cuentas'][0]['saldos'][0]['saldo']+estado['cuentas'][0]['saldos'][1]['saldo']+estado['cuentas'][0]['saldos'][2]['saldo']+estado['cuentas'][0]['saldos'][3]['saldo'])-estado['cuentas'][0]['comprometido']),\n '\\n\\t\\tDisponible en cuenta\\t\\t\\t$','{:.2f}'.format(estado['cuentas'][0]['disponible']),\n '\\n\\t\\t\\tSaldo a acreditarse (Inmediato) $',estado['cuentas'][0]['saldos'][1]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (24hs)\\t$',estado['cuentas'][0]['saldos'][0]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (48hs)\\t$',estado['cuentas'][0]['saldos'][1]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (72hs)\\t$',estado['cuentas'][0]['saldos'][2]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (+72hs)\\t$',estado['cuentas'][0]['saldos'][3]['saldo'],\n '\\n\\nCuenta',estado['cuentas'][1]['tipo'],\n '\\t[Total:\\tU$S',estado['cuentas'][1]['total'],']\\n',\n '\\n\\t\\tActivos Valorizados\\t\\t\\tU$S',estado['cuentas'][1]['titulosValorizados'],\n '\\n\\t\\tComprometido\\t\\t\\t\\tU$S',estado['cuentas'][1]['comprometido'],\n '\\n\\t\\tDisponible para operar\\t\\t\\t$','{:.4f}'.format(estado['cuentas'][1]['saldos'][1]['saldo']+estado['cuentas'][1]['saldos'][2]['saldo']+estado['cuentas'][1]['saldos'][3]['saldo']-estado['cuentas'][1]['comprometido']),\n '\\n\\t\\tDisponible en cuenta\\t\\t\\tU$S',estado['cuentas'][1]['disponible'],\n '\\n\\t\\t\\tSaldo a acreditarse (Inmediato) U$S',estado['cuentas'][1]['saldos'][1]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (24hs)\\tU$S',estado['cuentas'][1]['saldos'][0]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (48hs)\\tU$S',estado['cuentas'][1]['saldos'][1]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (72hs)\\tU$S',estado['cuentas'][1]['saldos'][2]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (+72hs)\\tU$S',estado['cuentas'][1]['saldos'][3]['saldo'],'\\n',\n\n 
)\n #+estado['cuentas'][0]['saldos'][1]['saldo']+estado['cuentas'][0]['saldos'][2]['saldo']+estado['cuentas'][0]['saldos'][3]['saldo']-estado['cuentas'][0]['comprometido'],\n a=input('presione enter para ir al menu de opciones')\n borrarPant()\n menu()\n\ndef miportafolio():\n print('FlashTrading 0.1 (pre-alfa)')\n data = {\n 'Authorization': c,\n }\n r = requests.get(\"https://api.invertironline.com/api/portafolio\", headers=data)\n port = json.loads(r.text)\n print('Portafolio:')\n n=0\n while n< len(port['activos']):\n ls = port['activos'][n]['titulo']['descripcion']\n ls2 = port['activos'][n]['ultimoPrecio']\n ls3 = port['activos'][n]['cantidad']\n ls4 = port['activos'][n]['titulo']['moneda']\n ls5 = port['activos'][n]['gananciaPorcentaje']\n ls6 = port['activos'][n]['valorizado']\n ls7 = port['activos'][n]['titulo']['simbolo']\n ls8 = port['activos'][n]['variacionDiaria']\n ls9 = port['activos'][n]['gananciaDinero']\n print(ls,'\\n\\t\\tSimbolo:\\t\\t',ls7,'\\n\\t\\tNominales:\\t\\t',ls3,'\\n\\t\\tCotización:\\t\\t $',\n ls2,ls4,'\\n\\t\\tVariacion desde Compra:\\t',ls5,'%','\\n\\t\\tValorizado:\\t\\t $',ls6,ls4,\n '\\n\\t\\tVariación Diaria:\\t',ls8,'%','\\n\\t\\tGanancia Nominal:\\t $',ls9,ls4,'\\n')\n n+=1\n a=input('presione enter para ir al menu de opciones')\n borrarPant()\n menu()\n\ndef consulta():\n print('FlashTrading 0.1 (pre-alfa)\\n\\n')\n merc='bcba'\n simb=input('\\nIngrese el simbolo del titulo que desea consultar\\n(ejemplo: pamp, alua, cres):\\t')\n host='https://api.invertironline.com/api/'+merc+'/Titulos/'+simb+'/cotizacion'\n host2='https://api.invertironline.com/api/'+merc+'/Titulos/'+simb\n data = {\n 'Authorization': c,\n 'mercado':merc,\n 'simbolo':simb,\n 'model.simbolo':simb,\n 'model.mercado':merc\n }\n r = requests.get(host, headers=data)\n r2 = requests.get(host2, headers=data)\n borrarPant()\n cotizacion=json.loads(r.text)\n datos=json.loads(r2.text)\n print(datos['descripcion'],' - [',datos['simbolo'],']\\n',\n '\\núltimo precio: \\t$',cotizacion['ultimoPrecio'],'\\t\\tApertura: $',cotizacion['apertura'],'\\tMáximo: $',cotizacion['maximo'],'\\tMínimo: $',cotizacion['minimo'],\n '\\nCierre Anterior: ',cotizacion['cierreAnterior'],\n '\\nVariación:\\t', cotizacion['variacion'],'%',\n '\\n')\n print('-CAJA DE PUNTAS-\\n')\n print('\\tCOMPRA\\t\\tVENTA')\n print('Cantidad','Precio','\\tPrecio',' Cantidad')\n n=0\n while n< len(cotizacion['puntas']):\n print('{:^8}'.format(str(int(cotizacion['puntas'][n]['cantidadCompra']))),'$',cotizacion['puntas'][n]['precioCompra'],\n '\\t{:9}'.format('$'+str(cotizacion['puntas'][n]['precioVenta'])),'{:^8}'.format(str(int(cotizacion['puntas'][n]['cantidadVenta']))))\n n+=1\n k=input('presione enter para volver al menú')\n borrarPant()\n menu()\n\ndef mostrarpanel():\n hostpanel='https://api.invertironline.com/api/Cotizaciones/acciones/merval/argentina?panelCotizacion.instrumento=acciones&panelCotizacion.panel=merval&panelCotizacion.pais=argentina&api_key='+c\n body={\n 'Authorization':c,\n 'panelCotizacion.instrumento':'acciones',\n 'panelCotizacion.panel':'merval',\n 'panelCotizacion.pais':'argentina'\n }\n panel = requests.get(hostpanel, headers=body)\n merv=json.loads(panel.text)\n n=0\n borrarPant()\n print('''\n----------------------------------[[MERVAL]]-----------------------------------------------\n-------------------------------------------------------------------------------------------\n Último Variación -----Compra----Puntas----Venta----- Apertura Máximo Mínimo Cierre Cantidad Monto\nSimbolo Precio % Cantidad 
Precio - Precio Cantidad Anterior Operaciones Operado\n''')\n    while n< len(merv['titulos']):\n        print('{:7}'.format(merv['titulos'][n]['simbolo']),'{:9}'.format('$ '+str(merv['titulos'][n]['ultimoPrecio'])),'{:9}'.format(str(merv['titulos'][n]['variacionPorcentual'])),'{:^10}'.format(str(int(merv['titulos'][n]['puntas']['cantidadCompra']))),'{:>7}'.format('$'+str(merv['titulos'][n]['puntas']['precioCompra'])),'-','{:>7}'.format('$'+str(merv['titulos'][n]['puntas']['precioVenta'])),'{:^10}'.format(str(int(merv['titulos'][n]['puntas']['cantidadVenta']))),\n            '{:8}'.format('$ '+str(merv['titulos'][n]['apertura'])),'{:8}'.format('$ '+str(merv['titulos'][n]['maximo'])),'{:9}'.format('$ '+str(merv['titulos'][n]['minimo'])),\n            '{:9}'.format('$ '+str(merv['titulos'][n]['ultimoCierre'])),'{:12}'.format(str(int(merv['titulos'][n]['cantidadOperaciones']))),'{:9}'.format('$ '+str(merv['titulos'][n]['volumen']))\n            )\n        n+=1\n    w=input('presione enter para volver al menú')\n    menu()\n\n
def operaciones():\n    print('FlashTrading 0.1 (pre-alfa)\\n')\n    print('''Seleccione que tipo de operaciones desea:\n    [1] Todas\n    [2] Pendientes\n    [3] Terminadas\n    [4] Canceladas\n    ''')\n    tipo=input('Ingrese la opción: ')\n    if tipo=='1':\n        tipo='todas'\n    elif tipo=='2':\n        tipo=\"pendientes\"\n    elif tipo=='3':\n        tipo='terminadas'\n    elif tipo=='4':\n        tipo='canceladas'\n    else:\n        borrarPant()\n        print('Error. Ingrese un numero del 1 al 4')\n        time.sleep(2)\n        operaciones()\n    borrarPant()\n    #print(tipo)\n
    print('Ingrese la fecha desde la que desea consultar. (formato \"aaaa-mm-dd\", ejemplo: 2018-07-24)')\n    print('***Si deja vacio se tomara el último mes por defecto')\n    fechadesde=input()\n    print('\\nIngrese la fecha hasta la que desea consultar. (formato \"aaaa-mm-dd\", ejemplo: 2019-01-06)')\n    print('***Si deja vacio se tomara hoy por defecto')\n    fechahasta=input()\n
    data = {\n        'Authorization': c,\n        'filtro.numero':'',\n        'filtro.estado':tipo,\n        'filtro.fechaDesde':fechadesde,\n        'filtro.fechaHasta':fechahasta\n    }\n    url='https://api.invertironline.com/api/operaciones?filtro.estado='+tipo+'&filtro.fechaDesde='+fechadesde+'&filtro.fechaHasta='+fechahasta\n    r = requests.get(url, headers=data)\n    operetas = json.loads(r.text)\n    n=0\n
    print('''\n    | Nº de |Fecha de |   Tipo   | Estado   | Símbolo | Cantidad | Precio | Fecha   | Monto   | Precio  |\n    |trans. | orden   |          |          |         | /Monto   | orden  | Operada | Operado | operado |\n    ---------------------------------------------------------------------------------------------------------------''')\n    while n 0.05:\r\n\t\tmaster_list.append('|')\r\n\tprev_ts = timestamp\r\n\r\n# for item in master_list:\r\n# \tif item == '|':\r\n# \t\tprint()\r\n# \telse:\r\n# \t\tprint(hex(item), end=' ')","sub_path":"packet_parse/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
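Most of packet_parse/temp.py was lost in the stripped span above; only the gap-marker tail survives. A hedged reconstruction of the apparent idea, splitting a byte stream into bursts wherever consecutive timestamps differ by more than 50 ms (the record layout and all names other than master_list, prev_ts and the 0.05 threshold are assumptions):

# Hypothetical reconstruction of packet_parse/temp.py's surviving logic:
# insert a '|' marker into the byte stream at every burst boundary.
records = [(0.000, 0xAA), (0.010, 0xBB), (0.200, 0xCC)]  # (timestamp, byte)

master_list = []
prev_ts = None
for timestamp, byte in records:
    if prev_ts is not None and timestamp - prev_ts > 0.05:
        master_list.append('|')   # gap longer than 50 ms -> new burst
    master_list.append(byte)
    prev_ts = timestamp

for item in master_list:
    print() if item == '|' else print(hex(item), end=' ')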
+{"seq_id":"449049339","text":"from collections import deque\nimport gym\n\n\nclass DQN:\n \"\"\"\n Adapted from https://gist.github.com/yashpatel5400/049fe6f4372b16bab5d3dab36854f262#file-mountaincar-py\n https://towardsdatascience.com/reinforcement-learning-w-keras-openai-dqns-1eed3a5338c\n\n Implements a simple fully connected model with dropout to learn the behaviour from observations.\n Can solve the following environments:\n + MountainCar-v0\n \"\"\"\n\n def __init__(self,\n env=\"MountainCar-v0\",\n gamma=0.95,\n epsilon=1.0,\n epsilon_min=0.01,\n epsilon_decay=0.995,\n learning_rate=0.01,\n replay_buffer_size=2000,\n tau=0.05):\n\n self.env = gym.make(env)\n\n print(\"Action space: {}\".format(self.env.action_space))\n print(\"Observation/State space: {}\".format(self.env.observation_space))\n\n # instead of always learning from the most recent trial, we random sample from the replay buffer. This helps\n # with convergence when using NN function approximators, since it assures that our samples (trials) are\n # independently distributed. Also, this enables us to learn while considering the best action to take,\n # independently of the most recent trial.\n self.replay_buffer = deque(maxlen=replay_buffer_size)\n\n self.gamma = gamma # future reward discount factor (< 1)\n self.epsilon = epsilon # probability of exploring (taking random action)\n\n # we want to explore more in the beginning, so init with high epsilon and let epsilon decay over time\n self.epsilon_decay = epsilon_decay\n\n self.epsilon_min = epsilon_min # the minimum exploration probability we want to maintain\n self.learning_rate = learning_rate\n\n self.tau = tau # the factor by how much slower we update the weights of the target network\n\n # this is the model used to do the actual predictions for the action\n self.model = None\n\n # deepmind's trick for better convergence in complex environments: https://arxiv.org/abs/1312.5602\n # this is the final model will use after training, which learns slower the training model.\n # since we train on each time step, we also change the goal on each timestep. This means that the gradients\n # change a lot, which makes it hard to converge.\n self.target_model = None\n\n","sub_path":"algos/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
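The DQN class above leaves self.model and self.target_model set to None. A sketch of how they are typically realized for the approach its docstring cites (the layer sizes and the soft-update helper below are assumptions, not the author's code):

# Hypothetical companion to the DQN class above (Keras API, TF 2.x).
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam

def create_model(state_dim, n_actions, learning_rate=0.01):
    # e.g. state_dim = env.observation_space.shape[0], n_actions = env.action_space.n
    model = Sequential([
        Dense(24, activation='relu', input_shape=(state_dim,)),
        Dense(48, activation='relu'),
        Dense(n_actions, activation='linear'),   # one Q-value per action
    ])
    model.compile(loss='mse', optimizer=Adam(learning_rate=learning_rate))
    return model

def soft_update(model, target_model, tau=0.05):
    # target <- tau * online + (1 - tau) * target: the "deepmind trick" the
    # class docstring refers to, letting the target net trail the online net.
    weights = model.get_weights()
    target_weights = target_model.get_weights()
    target_model.set_weights([tau * w + (1 - tau) * tw
                              for w, tw in zip(weights, target_weights)])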
+{"seq_id":"221685562","text":"#!/usr/bin/env python3\n\n# File: utils.py\n\n\"\"\"\n\"utils.py\" is a utility providing functionality for usage and\nmaintenance of the Bolinas Rod and Boat Club membership records.\nMost commands deal with a csv file named \"./Data/memlist.csv\" so for\nthese it is the default input file.\nLabels and Envelopes (along with the '-P <params>' option) have\nbeen deprecated but the code is left in place in case anyone ever\nwishes to revive them. Current usage replaces them with emails and\nletters (which can be prepared using the 'prepare_mailing' command.)\nConsult the README file for further info.\n\n
Usage:\n  ./utils.py [ ? | --help | --version]\n  ./utils.py ck_data [-O -d -i <infile> -A <applicant_spot> -S <sponsors_spot> -X <fees_spot> -C <contacts_spot> -o <outfile>]\n  ./utils.py show [-O -i <infile> -A <applicant_spot> -S <sponsors_spot> -o <outfile>]\n  ./utils.py names_only [-O -w <width> -i <infile> -o <outfile>]\n  ./utils.py report [-O -i <infile> -A <applicant_spot> -S <sponsors_spot> -o <outfile>]\n  ./utils.py stati [-O -D -M -B --mode <mode> -i <infile> -A <applicant_spot> -S <sponsors_spot> -o <outfile>]\n  ./utils.py zeros [-O -i <infile> -o <outfile>]\n  ./utils.py extra_charges [-O -w <width> -f <format> -i <infile> -o <outfile> -j <json>]\n  ./utils.py payables [-O -T -w <width> -i <infile> -o <outfile>]\n  ./utils.py show_mailing_categories [-O -T -w <width> -o <outfile>]\n  ./utils.py prepare_mailing --which <type> [-O --oo -p <printer> -i <infile> -j <json> --dir <dir4letters> --cc <cc> --bcc <bcc> ATTACHMENTS...]\n  ./utils.py thank [-t <2thank> -O -p <printer> -i <infile> -j <json> --dir <dir4letters> -o <outfile> -e <error_file>]\n  ./utils.py display_emails [-O] -j <json> [-o <outfile>]\n  ./utils.py send_emails [-O --mta <mta> --emailer <emailer>] -j <json>\n  ./utils.py print_letters --dir <dir4letters> [-O --separator <separator> -o <outfile>]\n  ./utils.py emailing [-O -i <infile> -F <function>] --subject <subject> -c <content> [ATTACHMENTS...]\n  ./utils.py restore_fees [-O -i <infile> -X <fees_spot> -o <outfile> -e <error_file>]\n  ./utils.py fee_intake_totals [-O -i <infile> -o <outfile> -e <error_file>]\n  ./utils.py (labels | envelopes) [-O -i <infile> -P <params> -o <outfile> -x <value>]\n  ./utils.py wip [-O -o 2check]\n  ./utils.py new_db -F <function> [-O -i <infile> -o <outfile> -e <error_file>]\n\n
Options:\n  -h --help  Print this docstring. Best piped through pager.\n  --version  Print version.\n  -A <applicant_spot>  Applicant data file.\n  --bcc <bcc>  Comma separated listing of bcc recipients.\n  --cc <cc>  Comma separated listing of cc recipients.\n  -c <content>  The name of a file containing the body of an email.\n  -C <contacts_spot>  Contacts data file.\n  -d  Include details: fee inconsistency for ck_data.\n  --dir <dir4letters>  The directory to be created and/or read\n        containing letters for batch printing.\n  -e <error_file>  Specify name of a file to which an error report\n        can be written.  [default: stdout]\n  --emailer <emailer>  Use bash (via smtp or mutt) or python to send\n        emails.  [default: python]\n  -f <format>  Specify output format of 'extra_charges' command.\n        Possible choices are:\n            'table' listing of names /w fees tabulated (=> 2 columns.)\n            'listing' same format as Data/extra_fees.txt\n            'listings' side by side lists (best use landscape mode.)\n        [default: table]\n  -F <function>  Name of function to apply. (new_db command)\n  -i <infile>  Specify file used as input. Usually defaults to\n        the MEMBERSHIP_SPoT attribute of the Club class.\n  -D  include demographic data     } These pertain\n  -M  include meeting dates        } to applicant\n  -B  include backers/sponsors     } reports.\n  -j <json>  Specify a json formatted file (whether for input or output\n        depends on context.)\n  --mode <mode>  In stati command signals stati to show:\n        If not specified, all stati are reported.\n        | --mode applicants:\n            only applicants are reported\n        | --mode <stati>: only report stati listed.\n  --mta <mta>  Specify mail transfer agent to use. Choices are:\n            clubg  club's gmail account  [default: clubg]\n            akg    my gmail account\n            easy   my easydns account\n  -O  Show Options/commands/arguments. Used for debugging.\n  -o <outfile>  Specify destination. Choices are stdout, printer, or\n        the name of a file. [default: stdout]\n  --oo  Owing_Only: Only consider members with dues/fees outstanding.\n        (Sets owing_only attribute of instance of Club.)\n  -P <params>  This option will probably be redacted since old\n        methods of mailing are no longer used.\n        Defaults are A5160 for labels & E000 for envelopes.\n  -p <printer>  Deals with printer variability; ensures correct\n        alignment of text when printing letters. [default: X6505_e1]\n  -s <stati>  Report only the stati listed (separated by\n        member.SEPARATOR.)\n  -S <sponsors_spot>  Specify file from which to retrieve sponsors.\n  --separator <separator>  A string. [default: \f]\n  --subject <subject>  The subject line of an email.\n  -t <2thank>  A csv file in same format as memlist.csv showing\n        recent payments. Input for thank_cmd.\n        [default: Info/2thank.csv]\n  -T  Present data in columns (a Table) rather than a long list.\n        Used with the 'payables' and 'show_mailing_categories'\n        commands. May not have much effect without setting -w\n        to a high number.\n  -w <width>  Maximum number of characters per line in output.\n        [default: 95]\n  --which <type>  Specifies type/subject of mailing.\n  -x <value>  Used by commands not in use. (Expect redaction)\n  -X <fees_spot>  Extra Fees data file.\n\n
Commands:\n    When run without a command, suggests ways of getting help.\n    ck_data: Checks all the club's databases for consistency.\n        Assumes (user must assert) a fresh export of the gmail\n        contacts list.  Options:\n        | -d  Include fee inconsistencies (which are expected\n            when some have paid.)\n    show: Returns membership demographics, a copy of which can then\n        be sent to the web master for display on the web site.\n    names_only: Returns a listing of members and applicants, names\n        only, without any demographics. If -w is 0, it's a single\n        column; otherwise output is tabular.\n    report: Prepares a 'Membership Report'.\n    stati: Returns a listing of stati (entries in the 'status' field.)\n        --mode if set can be 'applicants' (Applicants only will be\n        shown) or a member.SEPARATOR separated set of stati\n        (indicating which stati to show.)\n        May also include any combination of -D, -M, -B to\n        include address/demographics, meeting dates &/or sponsors\n        for applicants.\n    usps: Creates a csv file containing names and addresses of\n        members without an email address who therefore receive Club\n        minutes by post. Also includes any one with a 'be' or an 's'\n        status (... a mechanism for sending a copy to the secretary.)\n    extra_charges: Reports on members paying extra charges (for\n        kayak storage, mooring &/or dock usage.)\n        | -f <format>  specify listing, listings or table format.\n        | -w <width>  specify max # of chars per line in output.\n        | -j <json>  create a json file. (This was, but is no longer,\n            required by the restore_fees_cmd.)\n    payables: Reports on non zero money fields.\n        | -T  Present as a table rather than a listing.\n        | -w <width>  Maximum number of characters per line if -T.\n    show_mailing_categories: Sends a list of possible entries for the\n        '--which' parameter required by the prepare_mailing command.\n        (See the 'content_types' dict in content.py.)\n    prepare_mailing: Demands a <--which> argument to specify the\n        content and the custom function(s) to be used.  Try the\n        'show_mailing_categories' command for a list of choices.\n        The command line arguments may end with zero or more names\n        of files which are to be added as attachments to the emails.\n        Other parameters have defaults set.\n        '--oo'  Send request for fee payment only to those with an\n            outstanding balance.  This is relevant only to mailings\n            relating to dues and fees.  Without this option mailings go\n            to all members (including those with credit or 0 balance.)\n        '-p <printer>' specifies printer to be used for letters.\n        '-i <infile>' membership data csv file.\n        '-j <json>' where to dump prepared emails.\n        '--dir <dir4letters>' where to file letters.\n    thank: Reads the file specified by -t <2thank>, applies payments\n        specified therein to the -i <infile> and prepares thank you\n        letter/email acknowledging receipt of payment and showing\n        current balance(s.)  See prepare_mailing command for further\n        details.\n    display_emails: Provides an opportunity to proof read the emails.\n    send_emails: Sends out the emails found in the -j <json> file.\n        Each mta has its own security requirements and each emailer\n        has its own way of implementing them.  Check the\n        Notes/emailREADME for details.  Note that not all\n        combinations of mta and emailer are working but the following\n        does: \"--mta clubg --emailer python\".  The msmtp configuration\n        is kept within the ./Notes directory (./Notes/msmtprc.)\n    print_letters: Sends the files contained in the directory\n        specified by the --dir parameter.  Deprecated in favour of\n        simply using the lpr utility: $ lpr ./Data/MailDir/*\n    restore_fees: Use this command to populate each member's record\n        with what they will owe for the next club year.  Respects any\n        existing credits.  Best done after all dues and fees have been\n        paid.  (Will abort if any dues or fees are still outstanding.)\n        Results are either placed into a file specified by the '-o'\n        option (if provided) or placed into a file named as a\n        concatenation of \"new_\" and the input file.  One can then\n        manually check the new file and rename it if all is well.\n    emailing: Initially developed to allow sending of attachments.\n        Since attachments are now possible using the send_emails\n        command (at least with emailer python) this command will\n        most likely be redacted.\n    fee_intake_totals: Input file should be a 'receipts' file with a\n        specific format.  It defaults to 'Data/receipts-YYYY.txt'\n        where YYYY is the current year.  Output yields subtotals and\n        the grand total which can be copy/pasted into the 'receipts'\n        file.\n    labels: print labels.       | default: -P A5160  | Both\n    envelopes: print envelopes. |  default: -P E000  | redacted.\n    wip: \"work in progress\"  Used for development/testing.\n\"\"\"\n\n
import os\nimport shutil\nimport csv\nimport codecs\nimport sys\nimport time\nimport random\nimport json\nimport subprocess\nfrom docopt import docopt\nimport sys_globals as glbs\nimport member\nimport helpers\nimport content\nimport data\nimport Pymail.send\nimport Bashmail.send\nfrom rbc import Club\n\n\nTEXT = \".txt\"   # } Used by \nCSV = \".csv\"    # } command.\n\nTEMP_FILE = \"2print.temp\" # see