max_caption:\n break\n words=words.split()\n words=words[1:-1]\n return ' '.join(words)\n\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nprint([*test_encodedfeatures][0])\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\nimage_number=100\ninput_image=[*test_encodedfeatures][image_number]\n\nimage=test_encodedfeatures[input_image].reshape((1,2048))\nprint(image_number)\npred_img=images[image_number]\nprint(input_image)\n\n\n#caption prediction\nprint(greedy_search(image))\nimg=mpimg.imread('/content/gdrive/My Drive/Machine Learning/Flickr_Data/Flickr_Data/Images/'+input_image)\nimgplot = plt.imshow(img)\nk=3\n# beam_search(input_image, k)\n\ndef prediction_using_beamsearch(input_image,bcount):\n \n words=[[[index_from_word['startseq']],0.0]]\n for i in range(34):\n store_beam_predictions=[]\n for each_word in words:\n print([each_word[0]])\n caption_sequence=keras.preprocessing.sequence.pad_sequences([each_word[0]],maxlen=max_caption,padding='post')\n print(caption_sequence)\n predicted_probs=training_m.predict([input_image,caption_sequence], verbose=0)\n print(predicted_probs)\n predicted_words=np.argsort(predicted_probs[0])[-bcount:]\n print(predicted_words)\n for j in predicted_words:\n succ_word=each_word[0][:]\n succ_prob=each_word[1]\n succ_word.append(j)\n succ_prob=succ_prob+predicted_probs[0][j]\n store_beam_predictions.append([succ_word,succ_prob])\n words=store_beam_predictions\n print(words)\n words = sorted(words,key=lambda x: x[1])\n words=words[-bcount:]\n print(words)\n print(\"--------------------------------------\")\n print()\n words=words[-1][0]\n print(words)\n caption=[]\n for k in words:\n if word_from_index[k]!='endseq':\n caption.append(word_from_index[k])\n else:\n break\n return ' '.join(caption[1:])\n\n\n\nprint(prediction_using_beamsearch(image,3))\n\ndef prediction_using_beamsearch_for_bleu(input_image,bcount):\n \n words=[[[index_from_word['startseq']],0.0]]\n for i in range(34):\n 
store_beam_predictions=[]\n for each_word in words:\n #print([each_word[0]])\n caption_sequence=keras.preprocessing.sequence.pad_sequences([each_word[0]],maxlen=max_caption,padding='post')\n #print(caption_sequence)\n predicted_probs=training_m.predict([input_image,caption_sequence], verbose=0)\n #print(predicted_probs)\n predicted_words=np.argsort(predicted_probs[0])[-bcount:]\n #print(predicted_words)\n for j in predicted_words:\n succ_word=each_word[0][:]\n succ_prob=each_word[1]\n succ_word.append(j)\n succ_prob=succ_prob+predicted_probs[0][j]\n store_beam_predictions.append([succ_word,succ_prob])\n words=store_beam_predictions\n #print(words)\n words = sorted(words,key=lambda x: x[1])\n words=words[-bcount:]\n #print(words)\n #print(\"--------------------------------------\")\n #print()\n words=words[-1][0]\n #print(words)\n caption=[]\n for k in words:\n if word_from_index[k]!='endseq':\n caption.append(word_from_index[k])\n else:\n break\n return ' '.join(caption[1:])\n\n\n\n#BLEU SCORE IMPLEMENTATION\nfrom nltk.translate.bleu_score import sentence_bleu\n\ndef blue_score_evaluation(imageid_nd_caption_cleaned, test_encodedfeatures, image_number, image):\n inputimage=[*test_encodedfeatures][image_number]\n inputimage=inputimage.split('.')\n inputimage = inputimage[0]\n actualCaptions = list()\n predictedCaption = prediction_using_beamsearch_for_bleu(image,3)\n predictedCaption = predictedCaption.split(' ')\n\n for caption in imageid_nd_caption_cleaned[inputimage]:\n tempList = caption.split(' ')\n actualCaptions.append(tempList)\n predictionscore = sentence_bleu(actualCaptions,predictedCaptions)\n return predictionscore\n\nprint(\"BLEU score of the predicted caption: \"+str(blue_score_evaluation(imageid_nd_caption_cleaned, test_encodedfeatures, image_number, image)))","sub_path":"Image Captioning 
/Image_Captioning_LSTM.py","file_name":"Image_Captioning_LSTM.py","file_ext":"py","file_size_in_byte":17918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"265514870","text":"from sys import stdin, stdout\n\ndef spread(r, c):\n if(r >= rCount or c >= cCount or r < 0 or c < 0 or blocks[r][c] == 0):\n return 0\n out = blocks[r][c];\n blocks[r][c] = 0;\n out += spread(r-1,c-1)\n out += spread(r-1,c)\n out += spread(r-1,c+1)\n out += spread(r,c-1)\n out += spread(r,c+1)\n out += spread(r+1,c-1)\n out += spread(r+1,c)\n out += spread(r+1,c+1)\n return out\n\ntestCases = int(raw_input())\nfor case in range(testCases):\n param = [int(x) for x in stdin.readline().rstrip().split()]\n rCount = param[0]\n cCount = param[1]\n if(rCount != 0 and cCount != 0):\n start = [int(x) for x in stdin.readline().rstrip().split()]\n blocks = []\n for r in range(rCount):\n blocks.append([int(x) for x in stdin.readline().rstrip().split()])\n stdout.write(str(spread(start[0], start[1])))\n stdout.write(\"\\n\")\n else:\n stdout.write(\"0\\n\")\n","sub_path":"acm/Unorganizerd Older Contests/cam.py","file_name":"cam.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"52159728","text":"from os import environ\n\nSESSION_CONFIG_DEFAULTS = {\n 'real_world_currency_per_point': 0.01,\n 'participation_fee': 0.00,\n 'doc': \"\",\n 'participation_fee':0,\n}\n\nSESSION_CONFIGS = [\n {\n 'name':'treatment_1',\n 'display_name':'Treatment 1 - Groups of 2',\n 'num_demo_participants': 2,\n 'app_sequence': ['type_1_p1','type_1_p2','payment_info'],\n },\n {\n 'name':'treatment_2',\n 'display_name':'Treatment 2 - Groups of 4',\n 'num_demo_participants': 4,\n 'app_sequence': ['type_2_p1','type_2_p2','payment_info'],\n },\n]\n\nLANGUAGE_CODE = 'en'\n\nREAL_WORLD_CURRENCY_CODE = 'USD'\n\nUSE_POINTS = False\n\nROOMS = []\n\nAUTH_LEVEL = environ.get('OTREE_AUTH_LEVEL')\n\nADMIN_USERNAME = 'admin'\nADMIN_PASSWORD = ''\n\nDEBUG = (environ.get('OTREE_PRODUCTION') in {None, '', '0'})\n\nDEMO_PAGE_INTRO_HTML = \"\"\"\nThe treatments are included on this page.\n\"\"\"\n\nOTREE_AUTH_LEVEL = \"STUDY\"\n\nSECRET_KEY = '' # Do Not Share\n\nINSTALLED_APPS = ['otree']\n","sub_path":".ipynb_checkpoints/settings-checkpoint.py","file_name":"settings-checkpoint.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"52368762","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*- \nimport sys\nimport os\nimport glob\nimport datetime\nimport shutil\nimport smtplib\n\nfrom db import get_products, check_product, get_param\nfrom dba import get_products_dest, check_product_dest, check_nc_dest, add_nc_dest,\\\n check_mapserver_dest, add_mapserver_file_dest, check_geotiff_dest, add_geotiff_dest\n\nfrom utilities import delete_file, get_image_bounds, get_image_stats, create_mapfile, \\\n get_email_disclaimer, get_email_header\n\nimport ConfigParser\n\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEText import MIMEText\n\nconfig = ConfigParser.ConfigParser()\ndirname = os.path.abspath(os.path.dirname(__file__))\nconfigFilePath = os.path.join(dirname, '../.config.ini')\nconfig.read(configFilePath)\n\n\ntry:\n\n dir_source = config.get('folders', 'source')\n dir_destiny = config.get('folders', 'destiny')\n dir_final = config.get('folders', 'final')\n\nexcept Exception as e:\n pass\n\n\ndir_pss = config.get('folders', 'pss')\n\n\ndef clean_products(date_ini, product, sensor):\n\n product_date = datetime.datetime.strptime(date_ini, '%Y-%m-%d')\n\n l1a_file_list = get_products(date_ini, 'L1A', product, sensor)\n\n if l1a_file_list:\n\n dirname, basename = os.path.split(l1a_file_list[0]['path'])\n\n prods = ['afai', 'chlor_a', 'nflh', 'K_490', 'par', 'cdom_index', 'Rrs_555']\n\n for p in prods:\n rid = check_product(product_date.strftime(\"%Y\"), product_date.strftime(\"%j\"),\n sensor, 'L3', p + '-' + sensor)\n\n print(\"\\033[94mFOUND\\033[0m: Product found [%s] for [%s] with date [%s|%s]\" %\n (p, sensor, product_date.strftime(\"%Y-%m-%d\"), product_date.strftime(\"%j\")))\n\n dir_files = []\n dir_files.extend(glob.glob(dirname.replace('L1A', 'L2') + '/*_L2_' + p + '.tif'))\n\n if dir_files:\n\n for f in dir_files:\n if os.path.isfile(f):\n print(\"\\033[93mWARNING\\033[0m: File deleted [%s]\" % f)\n delete_file(f)\n\n dir_files = []\n 
dir_files.extend(glob.glob(dirname + '/*.GEO'))\n dir_files.extend(glob.glob(dirname + '/*.L1A_LAC'))\n dir_files.extend(glob.glob(dirname + '/*.anc'))\n\n dir_files.extend(glob.glob(dirname.replace('L1A', 'L2') + '/*_L2_OC.nc'))\n\n dir_files.extend(glob.glob(dirname.replace('L1A', 'L1b') + '/*.L1B_QKM'))\n dir_files.extend(glob.glob(dirname.replace('L1A', 'L1b') + '/*.L1B_LAC'))\n dir_files.extend(glob.glob(dirname.replace('L1A', 'L1b') + '/*.L1B_HKM'))\n\n if dir_files:\n\n for f in dir_files:\n if os.path.isfile(f):\n print(\"\\033[93mWARNING\\033[0m: File deleted [%s]\" % f)\n delete_file(f)\n\n\ndef destiny_filepath(path_source, dir_source, dir_destiny):\n\n dirname, basename = os.path.split(path_source)\n dir_destiny = dirname.replace(dir_source, dir_destiny)\n\n filepath_destiny = os.path.join(dir_destiny, basename)\n\n if not os.path.isdir(dir_destiny):\n\n print(\"\\033[94mINFO\\033[0m: Attempting to create directory: %s\" % (dir_destiny))\n os.system('echo %s|sudo mkdir -p %s' % (dir_pss, dir_destiny))\n\n else:\n print(\"\\033[93mWARNING\\033[0m: Directory exists [%s]\" % (dir_destiny))\n\n return filepath_destiny\n\n\ndef add_to_final_db(product_date, prod_type, sensor, filename_rst, filename_map, relief=False, overwrite=False):\n\n dirname_in, basename = os.path.split(filename_rst)\n dirname_in = os.path.abspath(os.path.join(dirname_in, '../../../../'))\n\n filename_rst_dest = filename_rst.replace(dirname_in, dir_final)\n filename_rst_dest_cp = filename_rst.replace(dirname_in, dir_destiny)\n\n filename_rel_dest = None\n if relief:\n filename_rel_dest = relief.replace(dirname_in, dir_final)\n filename_rel_dest_cp = relief.replace(dirname_in, dir_destiny)\n\n rid = check_nc_dest(os.path.basename(filename_rst_dest))\n if not rid:\n rid = add_nc_dest(product_date.strftime('%Y'), product_date.strftime('%W'),\n product_date.strftime('%j'), prod_type + '-' + sensor, sensor, 'L3', prod_type,\n os.path.basename(filename_rst_dest), filename_rst_dest, 
product_date.strftime('%Y-%m-%d'))\n\n x_min, y_min, x_max, y_max = get_image_bounds(filename_rst)\n minv, maxv, meanv, std_dev, bit_depth, \\\n projection, pix_res, no_data = get_image_stats(filename_rst)\n\n if os.path.isfile(filename_map):\n delete_file(filename_map)\n\n mapserver_url = get_param('mapserver_url')\n\n filename_map_dest = filename_map.replace(dirname_in, dir_final)\n filename_map_dest_cp = filename_map.replace(dirname_in, dir_destiny)\n\n create_mapfile(mapserver_url, filename_map, filename_rel_dest, x_min, y_min, x_max, y_max,\n filename_mapfile_dest=filename_map_dest)\n\n if overwrite:\n command = 'rm ' + filename_map_dest_cp\n os.system('echo %s|sudo -S %s' % (dir_pss, command))\n\n copy_to_destiny(filename_map, filename_map_dest_cp)\n\n mrid = check_mapserver_dest(rid)\n if not mrid:\n mrid = add_mapserver_file_dest(rid, filename_map_dest, mapserver_url + \"map=\" + filename_map_dest + \"&\",\n x_min, y_min, x_max, y_max)\n print(\"\\033[94mINFO\\033[0m: Record added [%s] [%s]\" % (mrid, filename_map_dest))\n else:\n print(\"\\033[93mWARNING\\033[0m: Record exists on Database [%s] [%s]\" % (mrid, filename_map_dest))\n\n grid = check_geotiff_dest(rid, os.path.basename(filename_rst_dest))\n if not grid:\n grid= add_geotiff_dest(rid, os.path.basename(filename_rst_dest), filename_rst_dest, 'GTiff',\n projection, bit_depth, pix_res, minv, maxv, meanv, std_dev, no_data,\n x_min, y_min, x_max, y_max)\n print(\"\\033[94mINFO\\033[0m: Record added [%s] [%s]\" % (grid, filename_map_dest))\n else:\n print(\"\\033[93mWARNING\\033[0m: Record exists on Database [%s] [%s]\" % (grid, filename_rst_dest))\n\n file_permissions(filename_rst)\n file_permissions(filename_map)\n file_permissions(relief)\n\n\ndef sync_rgb(product_date, prod_type, sensor, row):\n\n if os.path.isfile(row['path']):\n\n filepath_destiny = destiny_filepath(row['path'], dir_source, dir_destiny)\n\n dirname, basename = os.path.split(filepath_destiny)\n\n if not os.path.exists(dirname):\n 
os.makedirs(dirname)\n\n if not os.path.isfile(filepath_destiny):\n shutil.copyfile(row['path'], filepath_destiny)\n\n mapfile_source = os.path.splitext(row['path'])[0] + '.map'\n\n jp2_source = os.path.splitext(row['path'])[0] + '.jp2'\n jp2_destiny = os.path.splitext(filepath_destiny)[0] + '.jp2'\n\n if os.path.isfile(jp2_source) and not os.path.isfile(jp2_destiny):\n shutil.copyfile(jp2_source, jp2_destiny)\n\n if os.path.isfile(filepath_destiny) and os.path.isfile(jp2_destiny):\n add_to_final_db(product_date, prod_type, sensor, filepath_destiny, mapfile_source)\n\n\ndef file_permissions(filename_in):\n try:\n if os.path.isfile(filename_in):\n os.system('echo %s|sudo chmod 765 %s' % (dir_pss, filename_in))\n except OSError as e:\n print(\"\\033[91mERROR\\033[0m: %s\" % e)\n\n\ndef copy_to_destiny(filename_source, filename_destiny):\n if os.path.isfile(filename_source) and not os.path.isfile(filename_destiny):\n\n os.system('echo %s|sudo -S %s' % (dir_pss, \"cp %s %s\" % (filename_source, filename_destiny)))\n\n if os.path.isfile(filename_destiny):\n print(\"\\033[92mPROCESSED\\033[0m: File copied [%s]\" % (filename_destiny))\n file_permissions(filename_destiny)\n else:\n print(\"\\033[93mWARNING\\033[0m: Unable to copy [%s]\" % (filename_destiny))\n\n elif not os.path.isfile(filename_source):\n print(\"\\033[93mWARNING\\033[0m: Source file doesn't exists [%s]\" % (filename_source))\n return False\n\n elif os.path.isfile(filename_destiny):\n print(\"\\033[93mWARNING\\033[0m: File exists on destiny [%s]\" % (filename_destiny))\n return False\n\n return None\n\n\ndef sync_relief(product_date, prod_type, sensor, row, overwrite=False):\n\n if os.path.isfile(row['path']):\n\n dirname_in, basename = os.path.split(row['path'])\n dirname_in = os.path.abspath(os.path.join(dirname_in, '../../../../'))\n\n filepath_destiny = row['path'].replace(dirname_in, dir_destiny)\n\n dirname_out, basename = os.path.split(filepath_destiny)\n\n if not os.path.exists(dirname_out):\n 
os.system('echo %s|sudo mkdir -p %s' % (dir_pss, dirname_out))\n if os.path.isdir(dirname_out):\n print(\"\\033[92mPROCESSED:\\033[0m Directory created [%s]\" % dirname_out)\n\n copy_to_destiny(row['path'], filepath_destiny)\n\n if os.path.isfile(filepath_destiny):\n command = 'chmod 777 ' + filepath_destiny\n os.system('echo %s|sudo -S %s' % (dir_pss, command))\n\n mapfile_source = os.path.splitext(row['path'])[0] + '.map'\n\n png_source = os.path.splitext(row['path'])[0] + '.png'\n png_destiny = os.path.splitext(filepath_destiny)[0] + '.png'\n kmz_source = os.path.splitext(row['path'])[0] + '.kmz'\n kmz_destiny = os.path.splitext(filepath_destiny)[0] + '.kmz'\n\n relief_source = os.path.splitext(row['path'])[0] + '_relief.tif'\n relief_destiny = os.path.splitext(filepath_destiny)[0] + '_relief.tif'\n\n copy_to_destiny(relief_source, relief_destiny)\n\n copy_to_destiny(png_source, png_destiny)\n copy_to_destiny(kmz_source, kmz_destiny)\n\n\n if os.path.isfile(relief_destiny):\n os.system('echo %s|sudo chmod 777 %s' % (dir_pss, relief_destiny))\n\n if os.path.isfile(filepath_destiny) and os.path.isfile(relief_destiny):\n add_to_final_db(product_date, prod_type, sensor, row['path'],\n mapfile_source, relief=relief_source, overwrite=overwrite)\n\n else:\n\n print(\"\\033[91mERROR\\033[0m: File not found [%s]\" % row['path'])\n\n\ndef sync_tar(product_date, product, sensor, dirname):\n\n prefix = sensor.title() + \"_\" + product_date.strftime('%Y%j') + \"_D01.L2_Mapped\"\n\n filename_source = os.path.join(dirname.replace('L1A', 'L2'), prefix + \"_OC-%s.tar.gz\" % (sensor[0].upper() + sensor[1:]))\n filename_destiny = destiny_filepath(filename_source, dir_source, dir_destiny)\n\n if os.path.isfile(filename_source) and not os.path.isfile(filename_destiny):\n os.system('echo %s|sudo cp %s %s' % (dir_pss, filename_source, filename_destiny))\n\n\ndef send_email(contenido='', fecha='', asunto=''):\n\n fromaddr = \"monitoreo.marino@gmail.com\"\n toaddr = 
\"jvaldezch@gmail.com\"\n\n msg = MIMEMultipart()\n msg['From'] = \"Monitoreo Marino <%s>\" % (fromaddr)\n msg['To'] = get_param('email_contacts')\n msg['Subject'] = asunto\n\n msg.attach(MIMEText(contenido, 'html'))\n\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login(fromaddr, \"GCSim4r1820\")\n text = msg.as_string()\n server.sendmail(fromaddr, toaddr, text)\n server.quit()\n\n\ndef sync_products(date_ini, product, sensor, prod_name=None, overwrite=False):\n\n if not os.path.isdir(dir_destiny):\n\n print(\"\\033[91mERROR\\033[0m: Directory is not mounted [%s]\" % dir_destiny)\n\n body_content = \"\"\"No se pudo sincronizar el \"\"\"\n body_content += \"\"\"producto [%s] para sensor [%s]
\"\"\" % (product, sensor)\n body_content += \"\"\"para la fecha [%s] ya que la unidad
\"\"\" % (date_ini)\n body_content += \"\"\"[%s] no esta diposnible.
\"\"\" % (dir_destiny)\n\n content = \"\"\"\"\"\"\n content += get_email_header()\n content += \"\" + body_content + \"
\"\n content += get_email_disclaimer()\n content += \"\"\"\"\"\"\n\n send_email(contenido=content, fecha=date_ini,\n asunto='[Alerta] La unidad de red no esta disponible [%s] %s' % (dir_destiny, date_ini))\n\n sys.exit()\n\n product_date = datetime.datetime.strptime(date_ini, '%Y-%m-%d')\n\n prods = ['afai', 'chlor_a', 'nflh', 'K_490', 'par', 'cdom_index', 'Rrs_55']\n\n for p in prods:\n if prod_name and prod_name != p:\n continue\n\n row = check_product(product_date.strftime(\"%Y\"), product_date.strftime(\"%j\"),\n sensor, 'L3', p + '-' + sensor)\n if row:\n print(\"\\033[94mFOUND LOCALLY\\033[0m: [%s]\" % row['path'])\n sync_relief(product_date, p, sensor, row, overwrite)\n\n #rgb_rid = check_product(product_date.strftime(\"%Y\"), product_date.strftime(\"%j\"),\\\n # sensor, 'L3', 'rgb-' + sensor)\n\n #ergb_rid = check_product(product_date.strftime(\"%Y\"), product_date.strftime(\"%j\"),\\\n # sensor, 'L3', 'ergb-' + sensor)\n\n #if rgb_rid:\n # sync_rgb(product_date, 'rgb', sensor, rgb_rid)\n\n #if ergb_rid:\n # sync_rgb(product_date, 'ergb', sensor, ergb_rid)\n\n row = check_product(product_date.strftime(\"%Y\"), product_date.strftime(\"%j\"),\n sensor, 'L3', 'dsc-' + sensor)\n\n if row:\n sync_relief(product_date, 'dsc', sensor, row, overwrite)\n dirname, basename = os.path.split(row[\"path\"])\n\n #sync_tar(product_date, product, sensor, dirname)","sub_path":"pysimar/afai/sync_clean.py","file_name":"sync_clean.py","file_ext":"py","file_size_in_byte":13496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"526321239","text":"from ScoutSpyder.crawlers.base_crawler import *\nfrom ScoutSpyder.utils.helper import *\nfrom ScoutSpyder.utils.logging import initialise_logging\nfrom urllib.parse import urlparse\nimport re\n\nLOGGER = initialise_logging(__name__)\n\nclass PolyswarmBlogCrawler(BaseCrawler):\n crawler_id = 'io.polyswarm.blog'\n requests_per_sec = 1\n start_url = [\n 'https://blog.polyswarm.io/'\n ]\n robots_url = 'https://blog.polyswarm.io/robots.txt'\n\n def __init__(self, downloaded_doc=None):\n super().__init__(downloaded_doc)\n self.blacklist_regex = [\n 'http[s]?://(.*)#comments-listing',\n 'http[s]?://(.*)#tab-[0-9]',\n 'http[s]?://(.*)#'\n ]\n \n def extract_content(self):\n forbidden_paths = [\n '/',\n '/all',\n '/tag/.*',\n '/topic/.*',\n '/author/.*',\n '/page/.*'\n ]\n url_path = urlparse(self.url).path\n for path in forbidden_paths:\n if re.fullmatch(path, url_path):\n return\n if self.text:\n self.has_content = True","sub_path":"Backend/ScoutSpyder/crawlers/io_polyswarm_blog.py","file_name":"io_polyswarm_blog.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"237123043","text":"#!usr/bin/python3\n\nimport glc\n# import includes.opc as opc\nimport time, sys, argparse, colorsys\nimport numpy as np\nimport numpy.matlib as npm\nimport math, curses\n\ndef mandness(argv):\n #\n # Set up the parser\n #\n parser = argparse.ArgumentParser(prog=\"Mandness.py\",\n description='Mandness random stuff with Gaussians. A nice example: Mandness.py -f 24 -n 3 -s 0.5 -ss 60 -m 1.5 -ms 120',\n epilog='Glinsterlichten Project, 2016, @kellertuer')\n parser.add_argument('-c','--colorChange', default=0.1, type=float,\n metavar='s1', help='sigma for the initial color change')\n parser.add_argument('-f','--framerate', default=24, type=int, metavar='F',\n help='framerate used in the animation ')\n parser.add_argument('-m','--midPointChange', default=0.1, type=float,\n metavar='s2', help='sigma for the initial mid point change')\n parser.add_argument('-ms','--midPointSmooth', default=48, type=int,\n metavar='ms', help='change mid point sigma only every ms-th frame')\n parser.add_argument('-n','--numGauss', default=6, type=int,\n metavar='n', help='number of Gaussians in the image')\n parser.add_argument('-s','--sigmaChange', default=0.1, type=float,\n metavar='s3', help='sigma for the initial sigma change')\n parser.add_argument('-ss','--sigmaSmooth', default=48, type=int,\n metavar='ss', help='change sigma change only every ss-th frame')\n parser.add_argument('-x', default=8, type=int, metavar='X',\n help='number of horizontal board pixels')\n parser.add_argument('-y', default=16, type=int, metavar='Y',\n help='number of vertical board pixels')\n args = parser.parse_args(argv)\n gParam = [ {'r' : np.random.randint(0,256),\n 'g' : np.random.randint(0,256),\n 'b' : np.random.randint(0,256),\n 'x' : args.x*np.random.random(),\n 'y' : args.y*np.random.random(),\n 'w' : args.x/4.0,\n 'h' : args.y/4.0\n } for i in range(args.numGauss) ]\n # Iinit Library\n c = glc.InitOPC()\n y,x = np.meshgrid(\n np.linspace(0, args.y-1, args.y)-args.y/2,\n 
np.linspace(0, args.x-1, args.x)-args.x/2)\n cnt=0;\n cx= np.zeros(args.numGauss)\n cy= np.zeros(args.numGauss)\n ch= np.zeros(args.numGauss)\n cw= np.zeros(args.numGauss)\n while True:\n try:\n img = np.zeros( (args.x,args.y,3) )\n for i in range(args.numGauss):\n v = np.zeros(x.shape)\n for xb in {-3,-2,-1,0,1,2,3}:\n for yb in {-3,-2,-1,0,1,2,3}:\n xt = x-gParam[i]['x']-xb*args.x\n yt = y-gParam[i]['y']-yb*args.y\n v += np.exp(\n -xt*xt/(2.0*gParam[i]['w']*gParam[i]['w'])\n - yt*yt/(2.0*gParam[i]['h']*gParam[i]['h'])\n )\n rgb = [gParam[i]['r'],gParam[i]['g'],gParam[i]['b']]\n img += np.reshape(v,(args.x,args.y,1))*np.reshape(rgb,(1,1,3))\n # change Color\n for color in {'r','g','b'}:\n gParam[i][color] = np.min( (255, np.max( (0,\n gParam[i][color]+args.colorChange*np.random.randn()) ) ))\n # Change Pos\n if np.fmod(cnt,args.midPointSmooth) == 0:\n cx[i] = args.midPointChange*np.random.randn()\n cy[i] = args.midPointChange*np.random.randn()\n gParam[i]['x'] = np.fmod( gParam[i]['x']+ cx[i]/args.midPointSmooth, 2.0*args.x )\n gParam[i]['y'] = np.fmod( gParam[i]['y']+ cy[i]/args.midPointSmooth, 2.0*args.y )\n # change Width\n if np.fmod(cnt,args.sigmaSmooth) == 0:\n ch[i] = args.sigmaChange*np.random.randn()\n cw[i] = args.sigmaChange*np.random.randn()\n gParam[i]['w'] = np.fabs(gParam[i]['w']+cw[i]/args.sigmaSmooth)\n if (gParam[i]['w']) > args.x: \n gParam[i]['w'] -= (gParam[i]['w'] - args.x)\n cw[i] = -cw[i]\n gParam[i]['h'] = np.fabs(gParam[i]['h']+ch[i]/args.sigmaSmooth)\n if (gParam[i]['h']) > args.y:\n gParam[i]['h'] -= (gParam[i]['h'] - args.y)\n ch[i] = -ch[i]\n # normalize\n img = np.round(img*255.0/np.amax(img))\n c.put_pixels(glc.image2pixels(img))\n # wait\n time.sleep(1.0/args.framerate)\n cnt += 1\n except KeyboardInterrupt:\n print(str(cnt/args.framerate)+' sec.')\n break\n # after while: destruct\n glc.destOPC(c,img)\n # curses.endwin()\n\nif __name__ == \"__main__\":\n 
mandness(sys.argv[1:])\n","sub_path":"src/Mandness.py","file_name":"Mandness.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"578385161","text":"import gensim\n\n\nclass Word2Vec:\n def __init__(self, dicpath):\n self.model = gensim.models.Word2Vec.load(dicpath)\n\n def get_most_similar(self, plus_list, minus_list):\n exp_str = ''\n plus = []\n try:\n for p in plus_list:\n if p in self.model.wv.vocab:\n plus.append(p)\n minus = []\n for m in minus_list:\n if m in self.model.wv.vocab:\n minus.append(m)\n if not minus:\n if not plus:\n exp_str = ''\n result = [('辞書に存在する単語がありません。', 0.00000000)]\n else:\n exp_str = self.__exp_str(plus, '+')\n result = self.model.most_similar(positive=plus)\n else:\n if not plus:\n exp_str = self.__exp_str(minus, '-')\n result = self.model.most_similar(negative=minus)\n else:\n exp_str = self.__exp_str(plus, '+') + '-' + self.__exp_str(minus, '-')\n result = self.model.most_similar(\n positive=plus, negative=minus)\n except BaseException:\n exp_str = ''\n result = [('処理中にエラーが発生しました。', 0.00000000)]\n return result, exp_str\n \n def __exp_str(self, p_list, dmtr):\n st = ''\n for i, p in enumerate(p_list):\n if i == 0:\n st = p\n else:\n st = st + str(dmtr) + p\n return st\n","sub_path":"etcdemo/etcdemo/functions/modules/word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"197007130","text":"# views module to be used __init__.py\n\nfrom app import app\nfrom flask import render_template, flash, redirect\nfrom .forms import LoginForm\n\n# create the route mappings where i will get the same output of index()\n@app.route('/') # localhost:5000/\n@app.route('/index') # localhost:5000/index\n\n# when directed to index of web page, i should see index() should executed\ndef index():\n user = {'nickname': 'Chantelle'} # fake user\n posts = [ # fake array of posts\n {\n 'author': {'nickname': 'John'},\n 'body': 'Beautiful day in Portland!'\n },\n {\n 'author': {'nickname': 'Susan'},\n 'body': 'The Avengers movie was so cool!'\n }\n ]\n # jinja2 is a template engine that substitutes {{ }} with the arguments in render_template()\n return render_template('index.html',title='Home',user=user, posts=posts)\n\n@app.route('/login', methods=['GET', 'POST'])\n\ndef login():\n form = LoginForm()\n if form.validate_on_submit(): # form processing by running all validators attached to fields. if correct, return true\n # flash is a quick way to show a message on the next page presented to the user\n flash('Login requested for OpenID=\"%s\", remember_me=%s' % (form.openid.data, str(form.remember_me.data)))\n return redirect('/index')\n return render_template('login.html', title='Sign In',form=form,providers=app.config['OPENID_PROVIDERS'])","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"417095332","text":"t = int(input())\nwhile(t>0):\n n = int(input())\n results =list((input().split()))\n arr = [int(i) for i in results]\n lol = set(arr)\n rep = sum(arr)-sum(lol)\n miss = int((n*(n+1))/2-sum(lol))\n print(rep,miss)\n t-=1\n \n\n","sub_path":"Arrays/MissingandRepeating.py","file_name":"MissingandRepeating.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"31298045","text":"\"\"\"\nraw_config defines which jobs may be run, and how they can be invoked via\nSlack.\n\nIt is a dict with one key per namespace, each of which maps to a dict with keys:\n\n * \"jobs\": a dict mapping a job_type to a further dict with keys:\n * \"run_args_template\": template of bash command to be run\n * \"report_stdout\": optional, whether the stdout of the command is\n reported to Slack\n * \"slack\": a list of dicts with keys:\n * \"command\": Slack command which starts the given job\n * \"help: help text to display to the user\n * \"type\": one of \"schedule_job\", \"cancel_job\", \"schedule_suppression\",\n \"cancel_suppression\"\n * \"job_type\": the type of the job to be scheduled/supppressed,\n corresponding to an entry in the \"jobs\" dict\n * \"delay_seconds\": optional, the delay between the command being issued\n and the job being run\n * \"fabfile\": optional, the URL of a fabfile which is required to run\n commands in the namespace\n\"\"\"\n\n\nimport re\nfrom operator import itemgetter\n\n# fmt: off\nraw_config = {\n \"test\": {\n \"jobs\": {\n \"read_poem\": {\n \"run_args_template\": \"cat poem\",\n \"report_stdout\": True,\n },\n },\n \"slack\": [\n {\n \"command\": \"read poem\",\n \"help\": \"read a poem\",\n \"type\": \"schedule_job\",\n \"job_type\": \"read_poem\",\n \"delay_seconds\": 1,\n },\n ],\n },\n \"fdaaa\": {\n \"fabfile\": \"https://raw.githubusercontent.com/ebmdatalab/clinicaltrials-act-tracker/master/fabfile.py\",\n \"jobs\": {\n \"deploy\": {\n \"run_args_template\": \"fab update:live\",\n },\n },\n \"slack\": [\n {\n \"command\": \"deploy\",\n \"help\": \"copy staging data to live site\",\n \"type\": \"schedule_job\",\n \"job_type\": \"deploy\",\n },\n ],\n },\n \"op\": {\n \"fabfile\": \"https://raw.githubusercontent.com/ebmdatalab/openprescribing/master/fabfile.py\",\n \"jobs\": {\n \"deploy\": {\n \"run_args_template\": \"fab deploy:production\"\n },\n \"deploy_staging\": {\n 
\"run_args_template\": \"fab deploy:staging,branch={branch_name}\"\n },\n \"cache_clear\": {\n \"run_args_template\": \"fab clear_cloudflare\"\n },\n \"ncso_import\": {\n \"run_args_template\": \"fab call_management_command:fetch_and_import_ncso_concessions,production\"\n },\n \"ncso_report\": {\n \"run_args_template\": \"fab --hide=running,stdout,status call_management_command:summarise_ncso_concessions,production\",\n \"report_stdout\": True,\n },\n \"ncso_reconcile\": {\n \"run_args_template\": \"fab --hide=running,stdout,status call_management_command:reconcile_ncso_concession,production,{concession_id},{vmpp_id}\",\n \"report_stdout\": True,\n },\n \"ncso_send_alerts\": {\n \"run_args_template\": \"fab --hide=running,stdout,status call_management_command:send_ncso_concessions_alerts,production\",\n \"report_stdout\": True,\n },\n },\n \"slack\": [{\n \"command\": \"deploy\",\n \"help\": \"deploy to production after a 60s delay\",\n \"type\": \"schedule_job\",\n \"job_type\": \"deploy\",\n \"delay_seconds\": 60,\n }, {\n \"command\": \"deploy now\",\n \"help\": \"deploy to production immediately\",\n \"type\": \"schedule_job\",\n \"job_type\": \"deploy\",\n }, {\n \"command\": \"deploy cancel\",\n \"help\": \"cancel any pending production deploy\",\n \"type\": \"cancel_job\",\n \"job_type\": \"deploy\",\n }, {\n \"command\": \"deploy suppress from [start_at] to [end_at]\",\n \"help\": \"suppress production deploys between these times today (times in UTC)\",\n \"type\": \"schedule_suppression\",\n \"job_type\": \"deploy\",\n }, {\n \"command\": \"deploy suppress cancel\",\n \"help\": \"cancel suppression of production deploys\",\n \"type\": \"cancel_suppression\",\n \"job_type\": \"deploy\",\n }, {\n \"command\": \"deploy staging [branch_name]\",\n \"help\": \"deploy branch [branch_name] to staging immediately\",\n \"type\": \"schedule_job\",\n \"job_type\": \"deploy_staging\",\n }, {\n \"command\": \"cache clear\",\n \"help\": \"clear Cloudflare cache\",\n 
\"type\": \"schedule_job\",\n \"job_type\": \"cache_clear\",\n }, {\n \"command\": \"ncso import\",\n \"help\": \"import NCSO concessions\",\n \"type\": \"schedule_job\",\n \"job_type\": \"ncso_import\",\n }, {\n \"command\": \"ncso report\",\n \"help\": \"show NCSO concession summary\",\n \"type\": \"schedule_job\",\n \"job_type\": \"ncso_report\",\n }, {\n \"command\": \"ncso reconcile concession [concession_id] against vmpp [vmpp_id]\",\n \"help\": \"show NCSO concession summary\",\n \"type\": \"schedule_job\",\n \"job_type\": \"ncso_reconcile\",\n }, {\n \"command\": \"ncso send alerts\",\n \"help\": \"send alerts for NCSO concessions\",\n \"type\": \"schedule_job\",\n \"job_type\": \"ncso_send_alerts\",\n \"job_type\": \"ncso_send_alerts\",\n }],\n }\n}\n# fmt: on\n\n\ndef build_config(raw_config):\n \"\"\"Convert raw_config into something that's easier to work with.\n\n See test_job_configs for an example.\n \"\"\"\n\n config = {\"jobs\": {}, \"slack\": [], \"help\": {}, \"fabfiles\": {}}\n\n for namespace in raw_config:\n helps = []\n\n for job_type, job_config in raw_config[namespace][\"jobs\"].items():\n namespaced_job_type = \"{}_{}\".format(namespace, job_type)\n validate_job_config(namespaced_job_type, job_config)\n config[\"jobs\"][namespaced_job_type] = job_config\n\n for slack_config in raw_config[namespace][\"slack\"]:\n command = \"{} {}\".format(namespace, slack_config[\"command\"])\n slack_config[\"command\"] = command\n slack_config[\"job_type\"] = \"{}_{}\".format(\n namespace, slack_config[\"job_type\"]\n )\n slack_config[\"regex\"] = build_regex_from_command(command)\n slack_config[\"template_params\"] = get_template_params(command)\n\n validate_slack_config(slack_config)\n config[\"slack\"].append(slack_config)\n\n helps.append([command, slack_config[\"help\"]])\n\n config[\"help\"][namespace] = sorted(helps)\n\n if \"fabfile\" in raw_config[namespace]:\n config[\"fabfiles\"][namespace] = raw_config[namespace][\"fabfile\"]\n\n 
config[\"slack\"] = sorted(config[\"slack\"], key=itemgetter(\"command\"))\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"job_type\"] not in config[\"jobs\"]:\n msg = \"Slack command {} references unknown job type {}\".format(\n slack_config[\"command\"], slack_config[\"job_type\"]\n )\n raise RuntimeError(msg)\n\n return config\n\n\ndef build_regex_from_command(command):\n \"\"\"Convert Slack command into regex that matches command.\n\n >>> build_pattern_from_command(\"say [greeting] to [name]\")\n re.compile(\"^say (.+?) to (.+?)$\")\n \"\"\"\n\n pattern = \"^\" + re.sub(r\"\\[\\w+\\]\", r\"(.+?)\", command) + \"$\"\n return re.compile(pattern)\n\n\ndef get_template_params(command):\n \"\"\"Extract parameters from Slack command.\n\n >>> get_template_params(\"say [greeting] to [name]\")\n [\"greeting\", \"name\"]\n \"\"\"\n\n return re.findall(r\"\\[(\\w+)\\]\", command)\n\n\ndef validate_job_config(job_type, job_config):\n \"\"\"Validate that job_config contains expected keys.\"\"\"\n\n required_keys = {\"run_args_template\"}\n optional_keys = {\"report_stdout\"}\n allowed_keys = required_keys | optional_keys\n\n if required_keys - job_config.keys():\n msg = \"Job {} is missing keys {}\".format(\n job_type, required_keys - job_config.keys()\n )\n raise RuntimeError(msg)\n\n if job_config.keys() - allowed_keys:\n msg = \"Job {} has extra keys {}\".format(\n job_type, job_config.keys() - allowed_keys\n )\n raise RuntimeError(msg)\n\n\ndef validate_slack_config(slack_config):\n \"\"\"Validate that slack_config contains expected keys.\"\"\"\n\n required_keys = {\"command\", \"help\", \"type\", \"job_type\", \"regex\", \"template_params\"}\n optional_keys = {\"delay_seconds\"}\n allowed_keys = required_keys | optional_keys\n\n command = slack_config[\"command\"]\n\n if required_keys - slack_config.keys():\n msg = \"Slack command `{}` is missing keys {}\".format(\n command, required_keys - slack_config.keys()\n )\n raise RuntimeError(msg)\n\n if 
slack_config.keys() - allowed_keys:\n msg = \"Slack command `{}` has extra keys {}\".format(\n command, slack_config.keys() - allowed_keys\n )\n raise RuntimeError(msg)\n\n\nconfig = build_config(raw_config)\n","sub_path":"ebmbot/job_configs.py","file_name":"job_configs.py","file_ext":"py","file_size_in_byte":9230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"303782517","text":"from django.urls import path\nfrom .import views # 경로에 따른 view함수 설정\n\n# 어플리케이션 name space\napp_name = \"musicians\"\n\n# 장고 내부적으로 정해진 이름\nurlpatterns = [\n # u - v - t 순서 고정.\n # path name을 무엇으로 지었는지, 어떤 view함수를 실행하는지 pattern 고정하기\n # 하나의 경로에 하나의 view함수!\n path('', views.index, name=\"index\"),\n path('create/', views.create, name=\"create\"),\n # django가 지정한 경로 작성 방식\n # <데이터 타입 : 변수 명>\n # 실제 사용자가 작성하는 url\n # '3/'\n path('/', views.detail, name=\"detail\"),\n path('/update/', views.update, name=\"update\"),\n path('/delete/', views.delete, name=\"delete\"),\n\n\n]","sub_path":"django_review/review/musicians/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"475329492","text":"import model\nimport sqlite3\nfrom flask import Flask, render_template, g, request\napp = Flask(__name__)\n\n@app.before_request\ndef before_request():\n g.db = sqlite3.connect(model.DATABASE)\n\n@app.teardown_request\ndef teardown_request(exception):\n if hasattr(g, 'db'):\n g.db.close()\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n schools = ['latitude', 'longitude', 'state']\n if request.method == \"POST\":\n z = request.form[\"schools\"]\n please = model.get_locations(z)\n else:\n please = model.get_locations()\n return render_template(\"index.html\", schools=schools, please=please)\n\n@app.route('/school', methods=['GET', 'POST'])\ndef school_page():\n years = model.years\n comparisons = model.comparisons\n schools = model.schools_list\n school_selected = 'Air Force'\n year_selected = '2015'\n comparison_selected = 'Wins_pct'\n if request.method == \"POST\":\n school_selected = request.form[\"schools\"]\n year_selected = request.form[\"year\"]\n comparison_selected = request.form[\"compare\"]\n result = model.search_by_school(school_selected,year_selected,comparison_selected)\n url = model.get_pic(school_selected)\n else:\n result = model.search_by_school()\n url = model.get_pic()\n return render_template(\"school.html\", schools=schools, comparisons=comparisons, years=years, result=result, x = school_selected, y = year_selected, z = comparison_selected, url=url)\n\n@app.route('/state', methods=['GET', 'POST'])\ndef state_page():\n states = model.states\n comparisons = model.comparisons\n years = model.years\n state_selected = 'Hawaii'\n year_selected = '2015'\n comparison_selected = 'Wins_pct'\n if request.method == \"POST\":\n state_selected = request.form[\"state\"]\n year_selected = request.form[\"year\"]\n comparison_selected = request.form[\"compare\"]\n result = model.search_by_state1(state_selected, year_selected, comparison_selected)\n result2 = model.search_by_state2(state_selected, year_selected, 
comparison_selected)\n result3 = model.search_by_state3(state_selected, year_selected, comparison_selected)\n graph = model.graph_states(year_selected, comparison_selected, state_selected)\n else:\n result = model.search_by_state1()\n result2 = model.search_by_state2()\n result3 = model.search_by_state3()\n graph = model.graph_states()\n return render_template(\"state.html\", states=states, comparisons=comparisons, years=years, result=result, result2=result2, x = state_selected, y = year_selected, z = comparison_selected, result3=result3, graph=graph)\n\n\n\n@app.route('/overview', methods=['GET', 'POST'])\ndef overview_page():\n comparisons = model.comparisons\n years = model.years\n year_selected = \"2015\"\n comparison_selected = \"Wins_pct\"\n if request.method == \"POST\":\n year_selected = request.form[\"year\"]\n comparison_selected = request.form[\"compare\"]\n result = model.overview_search(year_selected,comparison_selected)\n result1 = model.overview_search1(year_selected,comparison_selected)\n basketball = model.graph_basketball(year_selected, comparison_selected)\n # football = model.graph_football(year_selected, comparison_selected)\n # bar = model.create_plot()\n else:\n result = model.overview_search()\n result1 = model.overview_search1()\n basketball = model.graph_basketball()\n # football = model.graph_football()\n\n # bar = model.test_graph()\n # bar = model.create_plot()\n\n return render_template(\"overview.html\", comparisons=comparisons, years=years, result=result, result1=result1, y=year_selected, z=comparison_selected, basketball=basketball)\n\n\n\n\nif __name__ == '__main__':\n print('starting Flask app', app.name)\n app.run(debug=True)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # end\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"66905769","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 3 18:33:46 2019\n\n@author: abonna\n\"\"\"\n\nimport os\nimport csv\nimport credentials\nimport psycopg2\nfrom subprocess import call\n\n### Definicao das variaveis\nindir = '/home/ubuntu/dump/dados_banco_central/ranking_reclamacoes/'\noutdir = '/home/ubuntu/scripts/load-dados-banco-central/parsed/'\nfile = 'Bancos+e+financeiras+-+Reclamacoes+e+quantidades+de+clientes+por+instituicao+financeira.csv'\nnew_file = 'reclamacoes_quantidades_clientes_por_instituicao_financeira.csv'\nfolders = [f for f in os.listdir(indir) if os.path.isdir(indir+f)]\ntablename = 'dados_banco_central.reclamacoes_por_banco_stg'\n\nDATABASE, HOST, USER, PASSWORD = credentials.setDatabaseLogin()\n\n### funcao que cria data no formato banco de dados\ndef create_date(folder):\n if folder >= '2017':\n year = folder[:4]\n quarter = folder[-2:]\n month = str(int(quarter) * 3).zfill(2)\n date = year+'-'+month+'-01'\n elif folder < '2017' and folder >= '2016_07':\n year = folder[:4]\n month = folder[-2:]\n date = year+'-'+month+'-01'\n else:\n date = folder.replace('_','-')+'-01'\n \n return date\n\n### funcao que normaliza o nome dos bancos\ndef norm_banks(bankname):\n ### Limpeza\n name = bankname.upper().replace(' (CONGLOMERADO)','')\n name = name.replace('-','').replace(',','')\n name = name.replace('S.A.','').replace('S A','').replace('S/A','').replace('S.A','')\n name = name.replace('LTDA.','')\n name = name.replace('CRÉDITO FINANCIAMENTO E INVESTIMENTO','').replace('CREDITO FINANCIAMENTO E INVESTIMENTO','').replace('CREDITO FINANCIAMENTO E INVESTIMENTOS','')\n name = name.replace('SOCIEDADE DE','')\n name = name.replace('FINANCIADORA','')\n name = name.strip()\n name = ' '.join(name.split())\n name = name.replace('CRÉDITO FINANCIAMENTO E INVESTIMENTO','').replace('CREDITO FINANCIAMENTO E INVESTIMENTO','').replace('CREDITO FINANCIAMENTO E INVESTIMENTOS','').strip()\n \n ### padronizacao\n if name == 'BB':\n name 
= 'BANCO DO BRASIL'\n else:\n name = name.replace('INTERMEDIUM', 'INTER').replace('BANCO INTER','INTER')\n name = name.replace('PANAMERICANO', 'PAN').replace('BANCO PAN', 'PAN')\n name = name.replace('BONSUCESSO', 'BS2').replace('BANCO BS2', 'BS2').replace('GRUPO BS2 BS2','BS2')\n name = name.replace('CAIXA ECONÔMICA FEDERAL', 'CAIXA ECONOMICA FEDERAL')\n name = name.replace('SANTANDER BANESPA', 'SANTANDER')\n name = name.replace('HSBC BANK BRASIL BANCO MULTIPLO', 'HSBC')\n name = name.replace('BANCO DAYCOVAL','DAYCOVAL')\n name = name.replace('BANCO BMG','BMG')\n name = name.replace('BANCO CITIBANK','CITIBANK')\n name = name.replace('BANCO ORIGINAL','ORIGINAL')\n name = name.replace('PAGSEGURO INTERNET','PAGSEGURO')\n name = name.replace('BANCO BMC','BMC')\n name = name.replace('ABCBRASIL','ABC-BRASIL')\n name = name.replace('BANCO BGN','BGN')\n name = name.replace('BANCO TOPÁZIO','BANCO TOPAZIO')\n name = name.replace('NOVO BANCO CONTINENTAL BANCO MÚLTIPLO','NOVO BANCO CONTINENTAL BANCO MULTIPLO')\n name = name.replace('AGIPLAN FINANCEIRA','AGIPLAN')\n name = name.replace('BANIF INTERNACIONAL DO FUNCHAL (BRASIL) EM LIQUIDAÇÃO ORDINÁRIA','BANIF')\n name = name.replace('BANCO DO ESTADO DO PARÁ','BANCO DO ESTADO DO PARA')\n name = name.replace('BANCO DE TOKYO MITSUBISHI UFJ BRASIL','BANCO DE TOKYOMITSUBISHI UFJ BRASIL')\n name = name.replace('BANCO A J RENNER','BANCO RENNER').replace('BANCO A.J. 
RENNER','BANCO RENNER')\n name = name.replace('BANCO ABN AMRO','ABN AMRO')\n name = name.replace('BANCO BM&FBOVESPA DE SERVIÇOS DE LIQUIDAÇÃO E CUSTÓDIA','BANCO BM&FBOVESPA').replace('BANCO BM FBOVESPA DE SERVICOS DE LIQUIDACAO E CUSTODIA','BANCO BM&FBOVESPA')\n if name.endswith(' S'):\n name = name[:-2]\n \n return name\n\n### Iteracao sobre os diretorios e parser dos CSVs\nwith open(outdir+new_file,'w', newline=\"\\n\", encoding=\"utf-8\") as ofile:\n writer = csv.writer(ofile, delimiter=';')\n for folder in folders:\n date = create_date(folder)\n with open(indir+folder+'/'+file, 'r', encoding='latin-1') as ifile:\n reader = csv.reader(ifile, delimiter=';')\n header = next(reader, None) ### Pula o cabeçalho\n for row in reader:\n if len(row) == 16:\n del row[-1] ### Remove colunas nao utilizadas\n del row[:2] \n del row[2]\n row[2] = norm_banks(row[2])\n ### padrao numerico ENG-US\n row[3:] = [r.replace('.','').replace(',','.').replace(' ','') for r in row[3:]]\n row.insert(0,date)\n writer.writerow(row)\n### conecta no banco de dados\ndb_conn = psycopg2.connect(\"dbname='{}' user='{}' host='{}' password='{}'\".format(DATABASE, USER, HOST, PASSWORD))\ncursor = db_conn.cursor()\nprint('Connected to the database')\n### copy\nwith open(outdir+new_file, 'r') as ifile:\n SQL_STATEMENT = \"COPY %s FROM STDIN WITH CSV DELIMITER AS ';' NULL AS ''\"\n print(\"Executing Copy in \"+tablename)\n cursor.copy_expert(sql=SQL_STATEMENT % tablename, file=ifile)\n db_conn.commit()\ncursor.close()\ndb_conn.close()\n\n### VACUUM ANALYZE\ncall('psql -d torkcapital -c \"VACUUM VERBOSE ANALYZE '+tablename+'\";',shell=True)","sub_path":"load_reclamacoes_por_bancos.py","file_name":"load_reclamacoes_por_bancos.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"374808341","text":"import flatland as fl\r\nfrom flatkit.schema import ValuesFromTable, DictFromTable\r\n\r\nfrom flatland.validation import Validator, Converted, IsEmail\r\nfrom flatland.signals import validator_validated\r\nfrom flatland.schema.base import NotEmpty\r\n\r\n\r\nclass ListEmails(Validator):\r\n\r\n fail = None\r\n\r\n def validate(self, element, state):\r\n if element.properties.get(\"not_empty_error\"):\r\n self.fail = fl.validation.base.N_(element.properties[\"not_empty_error\"])\r\n else:\r\n self.fail = fl.validation.base.N_(u'One or more email addresses are not valid.')\r\n\r\n is_email_validator = IsEmail()\r\n if not element.optional and not element:\r\n return self.note_error(element, state, 'fail')\r\n for e in element:\r\n if e.value and not is_email_validator.validate(e, None):\r\n return self.note_error(element, state, 'fail')\r\n\r\n return True\r\n\r\nclass ListValue(Validator):\r\n\r\n fail = None\r\n\r\n def validate(self, element, state):\r\n if element.properties.get(\"not_empty_error\"):\r\n self.fail = fl.validation.base.N_(element.properties[\"not_empty_error\"])\r\n else:\r\n self.fail = fl.validation.base.N_(u'%(u)s is not a valid value for %(label)s.')\r\n\r\n for e in element.value:\r\n if e not in element.properties['valid_values']:\r\n return self.note_error(element, state, 'fail')\r\n\r\n return True\r\n\r\nclass EnumValue(Validator):\r\n\r\n fail = None\r\n\r\n def validate(self, element, state):\r\n if element.properties.get(\"not_empty_error\"):\r\n self.fail = fl.validation.base.N_(element.properties[\"not_empty_error\"])\r\n else:\r\n self.fail = fl.validation.base.N_(u'%(u)s is not a valid value for %(label)s.')\r\n if element.valid_values:\r\n if element.value not in element.valid_values:\r\n return self.note_error(element, state, 'fail')\r\n return True\r\n\r\nCommonString = fl.String.using(optional=True) \\\r\n .with_properties(widget='input')\r\n\r\nCommonEnum = fl.Enum.using(optional=True) \\\r\n 
.including_validators(EnumValue()) \\\r\n .with_properties(widget=\"select\")\r\nCommonEnum.value_labels = None\r\n\r\n# CommonBoolean has optional=False because booleans are\r\n# required to be True or False (None is not allowed)\r\nCommonBoolean = fl.Boolean.using(optional=True).with_properties(widget=\"checkbox\")\r\nCommonDict = fl.Dict.with_properties(widget=\"group\")\r\nCommonList = fl.List.using(optional=True)\r\nCommonInteger = fl.Integer.using(optional=True)\r\n_valid_date = Converted(incorrect=u\"%(label)s is not a valid date\")\r\nCommonDate = fl.Date.using(optional=True).including_validators(_valid_date) \\\r\n .with_properties(widget='date', attr={\"class\": \"picker\"}\r\n )\r\nCommonDateTime = fl.DateTime.using(optional=True).including_validators(_valid_date) \\\r\n .with_properties(widget='date', attr={\"class\": \"picker\"})\r\n\r\n\r\nclass SourceField(CommonEnum, object):\r\n valid_values = ValuesFromTable('sources', field=None)\r\n value_labels = DictFromTable('sources', value_field='short_name', key_field=None)\r\n\r\nclass ThematicCategoryField(CommonEnum, object):\r\n valid_values = ValuesFromTable('thematic_categories', field=None)\r\n value_labels = DictFromTable('thematic_categories',\r\n value_field='description', key_field=None)\r\n\r\nclass GeoScaleField(CommonEnum, object):\r\n valid_values = ValuesFromTable('geo_scales', field=None)\r\n value_labels = DictFromTable('geo_scales',\r\n value_field='code', key_field=None)\r\n\r\nclass GeoCoverageField(CommonEnum, object):\r\n valid_values = ValuesFromTable('geo_coverages', field=None)\r\n value_labels = DictFromTable('geo_coverages',\r\n value_field='code', key_field=None)\r\n\r\nclass TimelineField(CommonEnum, object):\r\n valid_values = ValuesFromTable('timelines', field=None)\r\n value_labels = DictFromTable('timelines',\r\n value_field='title', key_field=None)\r\n\r\nclass SteepCategoryField(CommonEnum, object):\r\n valid_values = ValuesFromTable('steep_categories', field=None)\r\n 
value_labels = DictFromTable('steep_categories', value_field='description',\r\n key_field=None)\r\n\r\nclass TrendField(CommonEnum, object):\r\n valid_values = ValuesFromTable('trends', field=None)\r\n value_labels = DictFromTable('trends', value_field='description',\r\n key_field=None)\r\n\r\nclass IndicatorField(CommonEnum, object):\r\n valid_values = ValuesFromTable('indicators', field=None)\r\n value_labels = DictFromTable('indicators', value_field='code',\r\n key_field=None)\r\n\r\nclass GMTField(CommonEnum, object):\r\n valid_values = ValuesFromTable('gmts', field=None)\r\n value_labels = DictFromTable('gmts', value_field='code',\r\n key_field=None)\r\n\r\n@validator_validated.connect\r\ndef validated(sender, element, result, **kwargs):\r\n if sender is NotEmpty:\r\n if not result:\r\n label = getattr(element, 'label', element.name)\r\n msg = element.properties.get(\"not_empty_error\",\r\n u\"%s is required\" % label)\r\n element.add_error(msg)\r\n\r\n","sub_path":"flis/trunk/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"173338267","text":"\n\nfrom xai.brain.wordbase.verbs._barrel import _BARREL\n\n#calss header\nclass _BARRELLED(_BARREL, ):\n\tdef __init__(self,): \n\t\t_BARREL.__init__(self)\n\t\tself.name = \"BARRELLED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"barrel\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_barrelled.py","file_name":"_barrelled.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"97846509","text":"import csv\nimport fileinput\nimport numpy\n\n\nclass SvParser(object):\n def __init__(self, delimiter):\n self.delimiter = delimiter\n\n def reader(self, file_location):\n for line_count, throw in enumerate(fileinput.input(file_location)):\n pass\n\n with open(file_location, \"rt\") as stream:\n sv = csv.reader(stream, delimiter=self.delimiter)\n\n elements = next(sv)\n parsed = {}\n for element in elements:\n parsed[element] = numpy.zeros(shape=line_count, dtype=\"float64\")\n\n for index, row in enumerate(sv):\n for count in range(len(row)):\n parsed[elements[count]][index] = row[count]\n\n return parsed\n\n def writer(self, file_location, data):\n raise NotImplementedError(\"Writing of Variable Separated Values is unsupported at this time\")\n","sub_path":"PyPWA/data/memory/sv.py","file_name":"sv.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"527657990","text":"import RPi.GPIO as GPIO\nimport Motor_Module as MOTOR\n\nFAN_IA = 23 #BCM 23, wPi. 4, Physical.16\nFAN_IB = 24 #BCM 24, wPi. 5, Physical.18\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(FAN_IA, GPIO.OUT)\nGPIO.setup(FAN_IB, GPIO.OUT)\n\n\nif __name__ == \"__main__\":\n try:\n while True:\n MOTOR.Left_2_Second(GPIO, FAN_IA, FAN_IB)\n MOTOR.Wait_2_Second(GPIO, FAN_IA, FAN_IB)\n MOTOR.Right_2_Second(GPIO, FAN_IA, FAN_IB)\n MOTOR.Wait_2_Second(GPIO, FAN_IA, FAN_IB)\n\n #Ctrl-C end\n except:\n GPIO.cleanup()\n print(\"end\")","sub_path":"Python/Motor2.py","file_name":"Motor2.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"348237446","text":"import logging\n\nimport sublime\n\n\nDEFAULT_LOG_LEVEL = logging.WARNING\nDEFAULT_LOG_LEVEL_NAME = logging.getLevelName(DEFAULT_LOG_LEVEL)\n\n\npl = logging.getLogger(__package__)\nhandler = logging.StreamHandler()\nformatter = logging.Formatter(fmt=\"[{name}] {levelname}: {message}\", style='{')\nhandler.setFormatter(formatter)\npl.addHandler(handler)\npl.setLevel(DEFAULT_LOG_LEVEL)\n\nl = logging.getLogger(__name__)\n\n\ndef _settings():\n return sublime.load_settings(\"PackageDev.sublime-settings\")\n\n\ndef plugin_loaded():\n def on_settings_reload():\n cur_log_level_name = logging.getLevelName(pl.getEffectiveLevel())\n new_log_level_name = _settings().get('log_level', DEFAULT_LOG_LEVEL_NAME).upper()\n log_level = getattr(logging, new_log_level_name, DEFAULT_LOG_LEVEL)\n\n if new_log_level_name != cur_log_level_name:\n l.warning(\"Changing log level from %r to %r\", cur_log_level_name, new_log_level_name)\n pl.setLevel(log_level)\n\n _settings().add_on_change(__name__, on_settings_reload)\n on_settings_reload() # trigger on inital settings load, too\n\n\ndef plugin_unloaded():\n _settings().clear_on_change(__name__)\n pl.removeHandler(handler)\n","sub_path":"_logging.py","file_name":"_logging.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"452382889","text":"from .context import get_context\nfrom .lib.filesize import naturalsize\nfrom .lib.image_info import getImageInfo\nfrom typing import Any, List, Optional, Tuple\nimport ctypes\nimport io\nimport itertools\nimport os\nimport re\nimport string\nimport sublime\nimport sublime_plugin\nimport time\n\ng_auto_completions = [] # type: List[sublime.CompletionItem]\nMAXIMUM_WAIT_TIME = 0.3\n\n\ndef get_setting(string, view: Optional[sublime.View] = None) -> Any:\n if view and view.settings().get(string):\n return view.settings().get(string)\n else:\n return sublime.load_settings(\"AutoFilePath.sublime-settings\").get(string)\n\n\ndef get_cur_scope_settings(view: sublime.View):\n selection = view.sel()[0].a\n current_scope_str = view.scope_name(selection)\n\n all_scopes_settings = get_setting(\"afp_scopes\", view)\n for scope_settings in all_scopes_settings:\n if re.search(scope_settings.get(\"scope\"), current_scope_str):\n return scope_settings\n\n\ndef apply_alias_replacements(entered_path, aliases) -> Optional[str]:\n project_root = sublime.active_window().folders()[0]\n replacers = [(\"\", project_root)]\n\n result_path = entered_path\n for alias in aliases:\n alias_regex = re.compile(alias[0])\n alias_target = alias[1]\n if not re.match(alias_regex, result_path):\n continue\n\n for replacer in replacers:\n alias_target = alias_target.replace(replacer[0], replacer[1])\n\n result_path = re.sub(alias_regex, alias_target, result_path)\n\n return result_path if result_path != entered_path else None\n\n\ndef apply_post_replacements(view, insertion_text: str) -> str:\n cur_scope_settings = get_cur_scope_settings(view)\n if cur_scope_settings:\n replace_on_insert_setting = cur_scope_settings.get(\"replace_on_insert\")\n if replace_on_insert_setting:\n for replace in replace_on_insert_setting:\n insertion_text = re.sub(replace[0], replace[1], insertion_text)\n return insertion_text\n\n\nclass AfpShowFilenames(sublime_plugin.TextCommand):\n def 
run(self, edit: sublime.Edit) -> None:\n FileNameComplete.is_active = True\n self.view.run_command(\"auto_complete\", {\"disable_auto_insert\": True, \"next_completion_if_showing\": False})\n\n\nclass AfpSettingsPanel(sublime_plugin.WindowCommand):\n def run(self) -> None:\n use_pr = \"✗ Stop using project root\" if get_setting(\"afp_use_project_root\") else \"✓ Use Project Root\"\n use_dim = (\n \"✗ Disable HTML Image Dimension insertion\"\n if get_setting(\"afp_insert_dimensions\")\n else \"✓ Auto-insert Image Dimensions in HTML\"\n )\n p_root = get_setting(\"afp_proj_root\")\n\n menu = [[use_pr, p_root], [use_dim, '
']]\n self.window.show_quick_panel(menu, self.on_done)\n\n def on_done(self, value) -> None:\n settings = sublime.load_settings(\"AutoFilePath.sublime-settings\")\n if value == 0:\n use_pr = settings.get(\"afp_use_project_root\")\n settings.set(\"afp_use_project_root\", not use_pr)\n if value == 1:\n use_dim = settings.get(\"afp_use_project_root\")\n settings.set(\"afp_use_project_root\", not use_dim)\n\n\n# Used to remove the / or \\ when autocompleting a Windows drive (eg. /C:/path)\nclass AfpDeletePrefixedSlash(sublime_plugin.TextCommand):\n def run(self, edit: sublime.Edit) -> None:\n selection = self.view.sel()[0].a\n length = 5 if (self.view.substr(sublime.Region(selection - 5, selection - 3)) == \"\\\\\\\\\") else 4\n reg = sublime.Region(selection - length, selection - 3)\n self.view.erase(edit, reg)\n\n\n# inserts width and height dimensions into img tags. HTML only\nclass InsertDimensionsCommand(sublime_plugin.TextCommand):\n this_dir = \"\"\n\n def insert_dimension(self, edit: sublime.Edit, dim: int, name: str, tag_scope: sublime.Region) -> None:\n view = self.view\n selection = view.sel()[0].a\n\n if name in view.substr(tag_scope):\n reg = view.find(\"(?<=\" + name + r'=)\\s*\"\\d{1,5}', tag_scope.a)\n view.replace(edit, reg, '\"' + str(dim))\n else:\n dimension = str(dim)\n view.insert(edit, selection + 1, \" \" + name + '=\"' + dimension + '\"')\n\n def insert_dimensions(self, edit: sublime.Edit, scope: sublime.Region, w: int, h: int) -> None:\n view = self.view\n\n if get_setting(\"afp_insert_width_first\", view):\n self.insert_dimension(edit, h, \"height\", scope)\n self.insert_dimension(edit, w, \"width\", scope)\n else:\n self.insert_dimension(edit, w, \"width\", scope)\n self.insert_dimension(edit, h, \"height\", scope)\n\n # determines if there is a template tag in a given region. 
supports HTML and template languages.\n def is_img_tag_in_region(self, region: sublime.Region) -> bool:\n view = self.view\n\n # handle template languages but template languages like slim may also contain HTML so\n # we do a check for that as well\n return view.substr(region).strip().startswith(\"img\") or \"
None:\n view = self.view\n view.run_command(\"commit_completion\")\n selection = view.sel()[0].a\n\n if not \"html\" in view.scope_name(selection):\n return\n scope = view.extract_scope(selection - 1)\n\n # if using a template language, the scope is set to the current line\n tag_scope = (\n view.line(selection) if get_setting(\"afp_template_languages\", view) else view.extract_scope(scope.a - 1)\n )\n\n path = view.substr(scope)\n if path.startswith((\"'\", '\"', \"(\")):\n path = path[1:-1]\n\n path = path[path.rfind(FileNameComplete.sep) :] if FileNameComplete.sep in path else path\n full_path = self.this_dir + path\n\n if self.is_img_tag_in_region(tag_scope) and path.endswith((\".png\", \".jpg\", \".jpeg\", \".gif\")):\n with open(full_path, \"rb\") as r:\n read_data = r.read() if path.endswith((\".jpg\", \".jpeg\")) else r.read(24)\n w, h = getImageInfo(read_data)\n\n self.insert_dimensions(edit, tag_scope, w, h)\n\n\n# When backspacing through a path, selects the previous path component\nclass ReloadAutoCompleteCommand(sublime_plugin.TextCommand):\n def run(self, edit: sublime.Edit) -> None:\n view = self.view\n view.run_command(\"hide_auto_complete\")\n view.run_command(\"left_delete\")\n selection = view.sel()[0].a\n\n scope = view.extract_scope(selection - 1)\n scope_text = view.substr(scope)\n slash_pos = scope_text[: selection - scope.a].rfind(FileNameComplete.sep)\n slash_pos += 1 if slash_pos < 0 else 0\n\n region = sublime.Region(scope.a + slash_pos + 1, selection)\n view.sel().add(region)\n\n\ndef enable_autocomplete() -> None:\n \"\"\"\n Used externally by other packages which want to autocomplete file paths\n \"\"\"\n FileNameComplete.is_forced = True\n\n\ndef disable_autocomplete() -> None:\n \"\"\"\n Used externally by other packages which want to autocomplete file paths\n \"\"\"\n FileNameComplete.is_forced = False\n\n\nclass FileNameComplete(sublime_plugin.ViewEventListener):\n is_forced = False\n is_active = False\n sep = \"/\"\n\n def 
__init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.showing_win_drives = False\n\n def on_activated(self) -> None:\n self.showing_win_drives = False\n self.sep = \"/\"\n self.is_active = False\n\n def on_query_context(self, key: str, operator: str, operand: str, match_all: bool) -> bool:\n view = self.view\n\n if key == \"afp_deleting_slash\": # for reloading autocomplete\n selection = view.sel()[0]\n valid = (\n self.at_path_end(view) and selection.empty() and view.substr(selection.a - 1) == self.sep\n )\n return valid == operand\n\n if key == \"afp_use_keybinding\":\n return get_setting(\"afp_use_keybinding\", view) == operand\n\n return False\n\n def on_query_completions(self, prefix: str, locations) -> Optional[Tuple[List[sublime.CompletionItem], int]]:\n view = self.view\n is_always_enabled = not self.get_setting(\"afp_use_keybinding\", view)\n\n if not (is_always_enabled or self.is_forced or self.is_active):\n return\n\n selection = view.sel()[0].a\n\n if \"string.regexp.js\" in view.scope_name(selection):\n return\n\n blacklist = self.get_setting(\"afp_blacklist_scopes\", view)\n valid_scopes = self.get_setting(\"afp_valid_scopes\", view)\n\n if not any(view.match_selector(selection, scope) for scope in valid_scopes):\n return\n\n if any(view.match_selector(selection, scope) for scope in blacklist):\n return\n\n self.view = view\n self.selection = selection\n\n self.start_time = time.time()\n self.add_completions()\n\n return g_auto_completions, sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS\n\n def on_modified_async(self) -> None:\n view = self.view\n selections = view.sel()\n\n if len(selections) != 1:\n return\n\n caret = selections[0].a\n prefix = view.substr(sublime.Region(caret - 4, caret))\n\n if self.showing_win_drives and re.match(r\"^/[a-zA-Z]:[/\\\\]\", prefix):\n self.showing_win_drives = False\n view.run_command(\"afp_delete_prefixed_slash\")\n\n def on_selection_modified_async(self) -> None:\n 
view = self.view\n\n if not view.window():\n return\n\n view_name = view.name()\n buffer_id = view.buffer_id()\n file_name = view.file_name()\n\n # Open autocomplete automatically if keybinding mode is used\n if not (self.is_forced or self.is_active):\n return\n\n selection = view.sel()\n\n # Fix sublime.py, line 641, in __getitem__ raise IndexError()\n if len(selection):\n selection = view.sel()[0]\n else:\n return\n\n # if selection.empty() and self.at_path_end(view):\n if selection.empty():\n scope_contents = view.substr(view.extract_scope(selection.a - 1))\n extracted_path = scope_contents.replace(\"\\r\\n\", \"\\n\").split(\"\\n\")[0]\n\n if \"\\\\\" in extracted_path and not \"/\" in extracted_path:\n self.sep = \"\\\\\"\n\n else:\n self.sep = \"/\"\n\n if (\n view.substr(selection.a - 1) == self.sep\n or len(view.extract_scope(selection.a)) < 3\n or not file_name\n ):\n view.run_command(\"auto_complete\", {\"disable_auto_insert\": True, \"next_completion_if_showing\": False})\n\n else:\n self.is_active = False\n\n def at_path_end(self, view: sublime.View) -> bool:\n selection = view.sel()[0]\n name = view.scope_name(selection.a)\n\n if selection.empty() and (\"string.end\" in name or \"string.quoted.end.js\" in name):\n return True\n\n if \".css\" in name and view.substr(selection.a) == \")\":\n return True\n\n return False\n\n def prepare_completion(self, view: sublime.View, this_dir: str, directory: str) -> sublime.CompletionItem:\n path = os.path.join(this_dir, directory)\n\n annotation = \"\"\n annotation_head = \"\"\n annotation_head_kind = sublime.KIND_ID_AMBIGUOUS\n details_head = \"\"\n details_parts = []\n\n if os.path.isdir(path):\n annotation = \"Dir\"\n annotation_head = \"📁\"\n annotation_head_kind = sublime.KIND_ID_MARKUP\n details_head = \"Directory\"\n elif os.path.isfile(path):\n annotation = \"File\"\n annotation_head = \"📄\"\n annotation_head_kind = sublime.KIND_ID_MARKUP\n details_head = \"File\"\n details_parts.append(\"Size: \" + 
naturalsize(os.stat(path).st_size))\n\n if path.endswith((\".gif\", \".jpeg\", \".jpg\", \".png\")):\n details_head = \"Image\"\n\n with io.open(path, \"rb\") as f:\n read_data = f.read() if path.endswith((\".jpeg\", \".jpg\")) else f.read(24)\n\n try:\n w, h = getImageInfo(read_data)\n details_parts.append(\"Height: \" + str(h))\n details_parts.append(\"Width: \" + str(w))\n except Exception:\n pass\n\n return sublime.CompletionItem(\n trigger=directory,\n annotation=annotation,\n completion=apply_post_replacements(view, directory),\n kind=(annotation_head_kind, annotation_head, details_head),\n details=\", \".join(details_parts),\n )\n\n def get_entered_path(self, view: sublime.View, selection: int) -> str:\n scope_contents = view.substr(view.extract_scope(selection - 1)).strip()\n cur_path = scope_contents.replace(\"\\r\\n\", \"\\n\").split(\"\\n\")[0]\n\n if cur_path.startswith((\"'\", '\"', \"(\")):\n cur_path = cur_path[1:-1]\n\n return cur_path\n\n def get_cur_path(self, view: sublime.View, selection: int) -> str:\n cur_path = self.get_entered_path(view, selection)\n return cur_path[: cur_path.rfind(self.sep) + 1] if self.sep in cur_path else \"\"\n\n def get_setting(self, key: str, view: Optional[sublime.View] = None) -> Any:\n if view and view.settings().get(key):\n return view.settings().get(key)\n\n else:\n return sublime.load_settings(\"AutoFilePath.sublime-settings\").get(key)\n\n def add_drives(self) -> None:\n if sublime.platform() != \"windows\":\n return\n\n drive_bitmask = ctypes.cdll.kernel32.GetLogicalDrives()\n drive_list = list(\n itertools.compress(string.ascii_uppercase, map(lambda x: ord(x) - ord(\"0\"), bin(drive_bitmask)[:1:-1]))\n )\n\n # Overrides default auto completion\n # https://github.com/BoundInCode/AutoFileName/issues/18\n for driver in drive_list:\n g_auto_completions.append(\n sublime.CompletionItem(\n trigger=f\"{driver}:{self.sep}\",\n annotation=\"Drive\",\n completion=f\"{driver}:{self.sep}\",\n 
kind=(sublime.KIND_ID_MARKUP, \"🖴\", \"Drive\"),\n details=\"\",\n )\n )\n\n if time.time() - self.start_time > MAXIMUM_WAIT_TIME:\n return\n\n def add_completions(self) -> None:\n g_auto_completions.clear()\n\n ctx = get_context(self.view)\n if not ctx[\"is_valid\"]:\n return\n\n scope_settings = get_cur_scope_settings(self.view)\n if scope_settings and scope_settings.get(\"prefixes\") and ctx[\"prefix\"]:\n if not ctx[\"prefix\"] in scope_settings.get(\"prefixes\"):\n return\n\n file_name = self.view.file_name()\n is_proj_rel = self.get_setting(\"afp_use_project_root\", self.view)\n\n this_dir = \"\"\n cur_path = os.path.expanduser(self.get_cur_path(self.view, self.selection)) # type:str\n\n if cur_path.startswith(\"\\\\\\\\\") and not cur_path.startswith(\"\\\\\\\\\\\\\") and sublime.platform() == \"windows\":\n self.showing_win_drives = True\n self.add_drives()\n return\n elif cur_path.startswith((\"/\", \"\\\\\")):\n if is_proj_rel and file_name:\n proot = self.get_setting(\"afp_proj_root\", self.view)\n if proot:\n if not file_name and not os.path.isabs(proot):\n proot = \"/\"\n cur_path = os.path.join(proot, cur_path[1:])\n for f in sublime.active_window().folders():\n if f in file_name:\n this_dir = os.path.join(f, cur_path.lstrip(\"/\\\\\"))\n elif not file_name:\n this_dir = cur_path\n else:\n this_dir = os.path.split(file_name)[0]\n this_dir = os.path.join(this_dir, cur_path)\n\n if scope_settings and scope_settings.get(\"aliases\"):\n entered_path = self.get_entered_path(self.view, self.selection)\n result_path = apply_alias_replacements(entered_path, scope_settings.get(\"aliases\"))\n if result_path:\n this_dir = re.sub(r\"[^/]+$\", \"\", result_path)\n\n try:\n if os.path.isabs(cur_path) and (not is_proj_rel or not this_dir):\n if sublime.platform() == \"windows\" and len(self.view.extract_scope(self.selection)) < 4:\n self.showing_win_drives = True\n self.add_drives()\n return\n\n if sublime.platform() != \"windows\":\n this_dir = cur_path\n\n 
self.showing_win_drives = False\n dir_files = os.listdir(this_dir)\n\n now = time.time()\n\n for directory in dir_files:\n if directory.startswith(\".\"):\n continue\n\n if not \".\" in directory:\n directory += self.sep\n\n g_auto_completions.append(self.prepare_completion(self.view, this_dir, directory))\n InsertDimensionsCommand.this_dir = this_dir\n\n if now - self.start_time > MAXIMUM_WAIT_TIME:\n return\n\n except OSError:\n pass\n","sub_path":"AutoFilePath.py","file_name":"AutoFilePath.py","file_ext":"py","file_size_in_byte":17484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"131077814","text":"\ndef main():\n guard_notes = {}\n\n # Read all lines and sort them so we know that every new shift is followed\n # by all of that shift's actions\n with open(\"input.txt\") as fh:\n lines = fh.readlines()\n lines.sort()\n\n # Read all log data into the following data structure\n # 'guard_id': { 'sleep_start': [...], 'awake_start': [...] }\n for line in lines:\n line = line.rstrip('\\n')\n time_info, action = line.split(']')\n date, minute = time_info.split(' ')\n date = date[1:]\n action = action[1:]\n\n if action.startswith('Guard'):\n guard_id = action.split(' ')[1][1:]\n if guard_id not in guard_notes:\n guard_notes[guard_id] = { 'sleep_start': [], 'awake_start': [] }\n else:\n if action.startswith('falls'):\n guard_notes[guard_id]['sleep_start'].append(minute)\n else:\n guard_notes[guard_id]['awake_start'].append(minute)\n\n # Pass over the guard_notes and read minutes/totals into the following data\n # structure\n # 'guard_id': {\n # 'minutes': { 1: ..., 2: ..., 3: ..., ... }\n # 'total': ... 
}\n guard_totals = {}\n for gid in guard_notes:\n if gid not in guard_totals:\n guard_totals[gid] = { 'minutes': {}, 'total': 0 }\n\n for s, a in zip(guard_notes[gid]['sleep_start'], guard_notes[gid]['awake_start']):\n for i in range(int(s.split(\":\")[1]), int(a.split(\":\")[1])):\n guard_totals[gid]['total'] += 1\n if i in guard_totals[gid]['minutes']:\n guard_totals[gid]['minutes'][i] += 1\n else:\n guard_totals[gid]['minutes'][i] = 1\n\n # Get guard id who slept the most\n tmax = 0\n for gid in guard_totals:\n if guard_totals[gid]['total'] > tmax:\n guard = gid\n tmax = guard_totals[gid]['total']\n\n # Get the minute they slept the most at\n mmax = 0\n for m in guard_totals[guard]['minutes']:\n if guard_totals[guard]['minutes'][m] > mmax:\n mmax = guard_totals[guard]['minutes'][m]\n mout = m\n print(\"Guard id: {}, Minute: {}, Product: {}\".format(guard, mout, int(guard) * int(mout)))\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"2018/day4/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"573505839","text":"#\n# telephone.py\n#\n# Copyright (c) 2005 - 2007 Nokia Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport e32\nimport _telephone\n\n_phone=_telephone.Phone()\n_answering_supported=0\n\nif e32.s60_version_info>=(3,0):\n _answering_supported=1\n EStatusUnknown=_telephone.EStatusUnknown\n EStatusIdle=_telephone.EStatusIdle\n EStatusDialling=_telephone.EStatusDialling\n EStatusRinging=_telephone.EStatusRinging\n EStatusAnswering=_telephone.EStatusAnswering\n EStatusConnecting=_telephone.EStatusConnecting\n EStatusConnected=_telephone.EStatusConnected\n EStatusReconnectPending=_telephone.EStatusReconnectPending\n EStatusDisconnecting=_telephone.EStatusDisconnecting\n EStatusHold=_telephone.EStatusHold\n EStatusTransferring=_telephone.EStatusTransferring\n EStatusTransferAlerting=_telephone.EStatusTransferAlerting\n\n _phone_incoming=_telephone.Phone()\n _phone_answer=_telephone.Phone()\n\n _my_call_back=None \n\n_is_closed=1\n\ndef dial(number):\n global _is_closed\n if _is_closed:\n _phone.open()\n _is_closed=0\n _phone.set_number(number)\n _phone.dial()\ndef hang_up():\n try:\n _phone.hang_up()\n except:\n if _answering_supported:\n try:\n _phone_answer.hang_up()\n except:\n raise\n \nif _answering_supported:\n def call_state(cb):\n global _my_call_back\n _my_call_back=cb\n _phone_incoming.incoming_call(_telephone_call_back)\n def _answer(arg):\n # XXX state checking here?\n pass\n def incoming_call():\n 
_phone_answer.incoming_call(_answer)\n def answer():\n _phone_answer.answer()\n def cancel():\n _phone.cancel()\n _phone_incoming.cancel()\n _phone_answer.cancel()\n def _telephone_call_back(arg):\n global _my_call_back\n _phone_incoming.incoming_call(_telephone_call_back)\n _my_call_back(arg)\n","sub_path":"symbian/PythonForS60_1.9.6/module-repo/dev-modules/telephone/telephone.py","file_name":"telephone.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"39540596","text":"from __future__ import annotations\n\nfrom contextlib import ExitStack\nfrom typing import TYPE_CHECKING, List, Optional, Tuple\n\nimport torch\nfrom torchtext.data import Batch\nfrom tqdm import tqdm\n\nfrom diagnnose.activations.activation_writer import ActivationWriter\nfrom diagnnose.corpus.create_iterator import create_iterator\nfrom diagnnose.typedefs.activations import (\n ActivationNames,\n ActivationRanges,\n ActivationListDict,\n ActivationDict,\n BatchActivationTensorLists,\n BatchActivationTensors,\n SelectFunc,\n)\nfrom diagnnose.typedefs.corpus import Corpus\n\n# https://stackoverflow.com/a/39757388/3511979\nif TYPE_CHECKING:\n from diagnnose.models.lm import LanguageModel\n\n\nclass Extractor:\n \"\"\" Extracts all intermediate activations of a LM from a corpus.\n\n Only activations that are provided in activation_names will be\n stored in a pickle file. Each activation is written to its own file.\n\n Parameters\n ----------\n model : LanguageModel\n Language model that inherits from LanguageModel.\n corpus : Corpus\n Corpus containing sentences to be extracted.\n activations_dir : str, optional\n Directory to which activations will be written. 
Should always\n be provided unless `only_return_avg_eos` is set to True in\n `self.extract`.\n activation_names : List[tuple[int, str]]\n List of (layer, activation_name) tuples\n \"\"\"\n\n def __init__(\n self,\n model: LanguageModel,\n corpus: Corpus,\n activations_dir: Optional[str] = None,\n activation_names: Optional[ActivationNames] = None,\n ) -> None:\n self.model = model\n self.corpus = corpus\n\n self.activation_names: ActivationNames = activation_names or []\n\n if activations_dir is not None:\n self.activation_writer = ActivationWriter(activations_dir)\n\n # TODO: refactor\n def extract(\n self,\n batch_size: int = 1,\n cutoff: int = -1,\n dynamic_dumping: bool = True,\n selection_func: SelectFunc = lambda sen_id, pos, item: True,\n create_avg_eos: bool = False,\n only_return_avg_eos: bool = False,\n ) -> Optional[ActivationDict]:\n \"\"\" Extracts embeddings from a corpus.\n\n Uses contextlib.ExitStack to write to multiple files at once.\n File writing is done directly per sentence, to lessen RAM usage.\n\n Parameters\n ----------\n batch_size : int, optional\n Amount of sentences processed per forward step. Higher batch\n size increases extraction speed, but should be done\n accordingly to the amount of available RAM. Defaults to 1.\n cutoff : int, optional\n How many sentences of the corpus to extract activations for.\n Setting this parameter to -1 will extract the entire corpus,\n otherwise extraction is halted after extracting n sentences.\n dynamic_dumping : bool, optional\n Dump files dynamically, i.e. once per sentence, or dump\n all files at the end of extraction. Defaults to True.\n selection_func : Callable[[int, int, Example], bool]\n Function which determines if activations for a token should\n be extracted or not.\n create_avg_eos : bool, optional\n Toggle to save average end of sentence activations. 
If set\n to True other activations won't be dumped.\n only_return_avg_eos : bool, optional\n Toggle to not dump the avg eos activations.\n \"\"\"\n tot_extracted = n_sens = 0\n\n dump_activations = not create_avg_eos\n dump_avg_eos = create_avg_eos and not only_return_avg_eos\n if dump_activations or dump_avg_eos:\n assert hasattr(\n self, \"activation_writer\"\n ), \"Attempting to dump activations but no activation_dir has been provided\"\n\n all_activations: ActivationListDict = self._init_sen_activations()\n activation_ranges: ActivationRanges = {}\n iterator = create_iterator(\n self.corpus, batch_size=batch_size, device=self.model.device\n )\n\n tot_num = len(self.corpus) if cutoff == -1 else cutoff\n print(f\"\\nStarting extraction of {tot_num} sentences...\")\n\n with ExitStack() as stack:\n if dump_activations or dump_avg_eos:\n print(f\"Saving activations to `{self.activation_writer.activations_dir}`\")\n self.activation_writer.create_output_files(\n stack,\n self.activation_names,\n dump_activations=dump_activations,\n dump_avg_eos=dump_avg_eos,\n )\n\n if create_avg_eos:\n avg_eos_states = self._init_avg_eos_activations()\n\n for batch in tqdm(iterator, unit=\"batch\"):\n batch_activations, n_extracted = self._extract_sentence(\n batch, n_sens, selection_func\n )\n\n if dump_activations:\n for j in batch_activations.keys():\n if dynamic_dumping:\n self.activation_writer.dump_activations(\n batch_activations[j]\n )\n else:\n for name in all_activations.keys():\n all_activations[name].append(batch_activations[j][name])\n\n if create_avg_eos:\n self._update_avg_eos_activations(avg_eos_states, batch_activations)\n\n for j in batch_activations.keys():\n activation_ranges[n_sens + j] = (\n tot_extracted,\n tot_extracted + n_extracted[j],\n )\n tot_extracted += n_extracted[j]\n\n n_sens += batch.batch_size\n if 0 < cutoff <= n_sens:\n break\n\n # clear up RAM usage for final activation dump\n del self.model\n\n if create_avg_eos:\n 
self._normalize_avg_eos_activations(avg_eos_states, n_sens)\n if dump_avg_eos:\n self.activation_writer.dump_avg_eos(avg_eos_states)\n return avg_eos_states\n\n if dump_activations:\n self.activation_writer.dump_activation_ranges(activation_ranges)\n if not dynamic_dumping:\n concat_activations: ActivationDict = {}\n for name in all_activations.keys():\n concat_activations[name] = torch.cat(\n all_activations[name], dim=0\n )\n self.activation_writer.dump_activations(concat_activations)\n\n if dynamic_dumping and dump_activations:\n print(\"\\nConcatenating sequentially dumped pickle files into 1 array...\")\n self.activation_writer.concat_pickle_dumps()\n\n print(f\"\\nExtraction finished.\")\n print(\n f\"{n_sens} sentences have been extracted, yielding {tot_extracted} data points.\"\n )\n\n return None\n\n def _extract_sentence(\n self, batch: Batch, n_sens: int, selection_func: SelectFunc\n ) -> Tuple[BatchActivationTensors, List[int]]:\n \"\"\" Generates the embeddings of a sentence and writes to file.\n\n Parameters\n ----------\n batch : Batch\n Batch containing sentence and label information.\n n_sens : int\n Number of sentences extracted so far. 
Used for indexing the\n items in the batch.\n selection_func : SelectFunc\n Function that determines whether activations for a token\n should be extracted or not.\n\n Returns\n -------\n sen_activations : BatchTensorDict\n Dict mapping batch id's to activation names to tensors.\n n_extracted : List[int]\n Number of extracted activations, per batch item.\n \"\"\"\n batch_size = len(batch)\n n_extracted: List[int] = [0] * batch_size\n\n batch_tensor_list: BatchActivationTensorLists = self._init_batch_activations(\n batch_size\n )\n cur_activations: ActivationDict = self.model.init_hidden(batch_size)\n examples = self.corpus.examples[n_sens : n_sens + batch_size]\n\n sentence, sen_lens = batch.sen\n for i in range(sentence.size(1)):\n if self.model.use_char_embs:\n tokens = [e.sen[min(i, len(e.sen) - 1)] for e in examples]\n else:\n tokens = sentence[:, i]\n\n with torch.no_grad():\n _out, cur_activations = self.model(\n tokens, cur_activations, compute_out=False\n )\n\n # Check whether current activations match criterion defined in selection_func\n for j in range(batch_size):\n if i < sen_lens[j] and selection_func(n_sens + j, i, examples[j]):\n for layer, name in self.activation_names:\n cur_activation = cur_activations[layer, name][j]\n\n batch_tensor_list[j][(layer, name)].append(cur_activation)\n\n n_extracted[j] += 1\n\n batch_tensors: BatchActivationTensors = {}\n for j in range(batch_size):\n batch_tensors[j] = {}\n for a_name, tensor_list in batch_tensor_list[j].items():\n if len(tensor_list) > 0:\n batch_tensors[j][a_name] = torch.stack(tensor_list)\n else:\n del batch_tensors[j]\n break\n\n return batch_tensors, n_extracted\n\n def _init_batch_activations(self, batch_size: int) -> BatchActivationTensorLists:\n \"\"\" Initial dict of of activations for current batch. 
\"\"\"\n\n return {i: self._init_sen_activations() for i in range(batch_size)}\n\n def _init_sen_activations(self) -> ActivationListDict:\n \"\"\" Initial dict for each activation that is extracted. \"\"\"\n\n return {(layer, name): [] for (layer, name) in self.activation_names}\n\n def _init_avg_eos_activations(self) -> ActivationDict:\n init_avg_eos_activations: ActivationDict = self.model.create_zero_states()\n\n for layer in range(self.model.num_layers):\n if (layer, \"hx\") not in self.activation_names:\n self.activation_names.append((layer, \"hx\"))\n if (layer, \"cx\") not in self.activation_names:\n self.activation_names.append((layer, \"cx\"))\n\n return init_avg_eos_activations\n\n @staticmethod\n def _update_avg_eos_activations(\n prev_activations: ActivationDict, new_activations: BatchActivationTensors\n ) -> None:\n for j in new_activations.keys():\n for layer, name in prev_activations.keys():\n eos_activation = new_activations[j][layer, name][-1]\n prev_activations[layer, name] += eos_activation\n\n @staticmethod\n def _normalize_avg_eos_activations(\n avg_eos_activations: ActivationDict, n_sens: int\n ) -> None:\n if n_sens == 0:\n return\n for layer, name in avg_eos_activations.keys():\n avg_eos_activations[layer, name] = avg_eos_activations[layer, name] / n_sens\n","sub_path":"diagnnose/extractors/base_extractor.py","file_name":"base_extractor.py","file_ext":"py","file_size_in_byte":11323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"178777834","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport unittest\n\n\nclass NewVisitorTest(unittest.TestCase):\n \n def setUp(self):\n self.browser = webdriver.Firefox()\n\n def tearDown(self):\n self.browser.quit()\n\n def test_can_start_a_list_and_retrieve_it_later(self):\n #Guilherme ouviu falar de uma aplicação online interessante\n #para listas de tarefas. Logo, decide verificar sua homepage\n self.browser.get('http://localhost:8000')\n\n #Guilherme percebe que o titulo da página que se encontra e o cabeçalho mencionam \n #listas de tarefas (to-do)\n self.assertIn('To-Do', self.browser.title)\n header_text = self.browser.find_element_by_tag_name('h1').text\n self.assertIn('To-Do', header_text)\n\n #Guilherme é convidado a inserir um item de tarefa imediatamente\n inputbox = self.browser.find_element_by_id('id_new_item')\n self.assertEqual(\n inputbox.get_attribute('placeholder'),\n 'Enter a to-do item'\n )\n\n\n\n #Guilherme digita \"Comprar ingressos para o cinema\" em uma caixa de texto (Guilherme\n # costuma sair bastante com seus amigos e sua namorada Julia)\n \n inputbox.send_keys('1: Comprar ingressos para o cinema')\n \n\n\n #Quando Guilherme tecla enter, a página é atualizada, e agora a página lista \"1:Comprar \n # ingressos para o cinema\" como um item em uma lista de tarefas\n inputbox.send_keys(Keys.ENTER) \n time.sleep(1)\n\n table = self.browser.find_element_by_id('id_list_table')\n rows = table.find_elements_by_tag_name('tr') #(!!Elements#Element\n self.assertTrue(\n any(row.text == '1: Comprar ingressos para o cinema' for row in rows),\n \"New to-do item did not appear in table\"\n )\n\n\n #Ainda continua havendo uma caixa de texto convidando-o a acrescentar outro item \n #dentro da lista de tarefas. 
Guilherme insere \"encontrar-me com Julia\" - Guilherme é um\n #namorado bem atensioso\n self.fail('Finish the test!')\n\n #A pagina é atualizada novamente e agora mostra os dois itens em sua lista\n\n\n #Guilherme se pergunta se, após fechar o navegador, o site se lembrará de sua lista. Então, ele\n #nota que o site gerou um URL único para ele *há um pequeno texto explicativo para isso\n\n\n #Guilherme acessa esse URL. Sua lista de tarefas continua lá\n\n\n #Satisfeito, Guilherme retorna a jogar em seu video-game.\n\nif __name__ == '__main__':\n unittest.main(warnings='ignore')\n","sub_path":"functional_tests.py","file_name":"functional_tests.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"111184858","text":"#1- emitir un mensaje de acuerdo a lo que una persona ingresa como cantidad de años que viene usando insecticida en su \n#plantación. Si hace 10 o más años, debemos emitir el mensaje \"Por favor solicite revisión de suelos en su plantación\". \n#Si hace menos de 10 años, debemos emitir el mensaje \"Intentaremos ayudarte con un nuevo sistema de control de plagas, \n# y cuidaremos el suelo de tu plantación\".\n\"\"\"\nprint(\"┊\"*30)\nprint(\"┊\"*8,\"Bienvenido/a\",\"┊\"*8)\nprint(\"┊\"*30)\nprint(\"┊Por favor introducir el año ┊\\n┊en que comenzó a utilizar ┊\\n┊insecticida ┊\")\nprint(\"┊\"*30)\nvalor1 = int(input(\" año: \"))\nsuma = 2020 - valor1\nprint(\"┊\"*30)\nprint(\"┊La cantidad de años de uso ┊\\n┊de insecticida es de: \",suma,\" ┊\")\nprint(\"┊\"*30)\nif suma >= 10:\n print(\"┊Por favor solicite revisión ┊\\n┊de suelos en su plantación ┊\")\nelif suma <= 9:\n print(\"┊Intentaremos ayudarte con un┊\\n┊nuevo sistema de control de ┊\\n┊plagas, y cuidaremos el ┊\\n┊suelo de tu plantación ┊\")\nelse:\n print(\"Gracias por utilizar el programa\")\nprint(\"┊\"*30)\n\"\"\"\n\n#2 \n\"\"\"\ntamañoNormal= \"Pez en buenas condiciones\"\ntamañopordebajodeloNormal= \"Pez con problemas de nutrición\"\ntamañounpocoporencimadeloNormal= \"Pez con síntomas de organismo contaminado\"\ntamañosobredimensionado= \"Pez contaminado\"\nmedida = str(input(\"________Califique el tamaño del pez______\\n\\n(Introducir el numero que corresponda)\\n1- Normal\\n2-Pequeño\\n3- Extragrande\\n4- Sobredimencionado\\n\"))\nif medida == \"1\":\n print(tamañoNormal)\nelif medida == \"2\":\n print(tamañopordebajodeloNormal)\nelif medida == \"3\":\n print(tamañounpocoporencimadeloNormal)\nelif medida == \"4\":\n print(tamañosobredimensionado)\nelse:\n print(\"Opción ingresada inválida\")\n\"\"\"\n\n#3 Para el uso de fertilizantes es necesario medir cuánto abarca un determinado compuesto \n# en el suelo el cual debe existir en una cantidad de al menos 10% por 
hectárea, y no debe \n# existir vegetación del tipo MATORRAL. Escribir un programa que determine si es factible \n# la utilización de fertilizantes.\n\"\"\"\nprint(\"\\n\\nA continuación calcularemos si en su campo\\n es factible el uso de fertilizantes\\n\")\ncondicion1= str(input(\"¿En el terreno existe vegetación\\ntipo matorral?\\n\"))\nif condicion1 == \"si\":\n print(\"No debe utilizar fertilizante.\")\nelif condicion1 == \"no\":\n terreno = int(input(\"¿En cuántas hectáreas desea utilizar fertilizante?\\n: \"))\n condicion2 = terreno * 100 / 10\n if condicion2 >= 10:\n print(\"El compuestro por hectárea es de: \", condicion2,\"%\")\n print(\"Es factible el uso de ferttilizantes.\")\n elif condicion2 <= 9:\n print(\"El compuestro por hectárea es de: \", condicion2,\"%\")\n print(\"No es factible el uso de fertilizantes.\")\nelse:\n print(\"Opción ingresada inválida\")\n\"\"\"\n\n#4 Tenemos que decidir entre 2 recetas ecológicas. \n# Los ingredientes para cada tipo de receta aparecen a continuación.\n\"\"\"\nIngredientes comunes: Verduras y berenjena.\n\nIngredientes Receta 1: Lentejas y apio.\n\nIngredientes Receta 2: Morrón y Cebolla..\n\"\"\"\n#Escribir un programa que pregunte al usuario que tipo de receta desea, \n# y en función de su respuesta le muestre un menú con los ingredientes \n# disponibles para que elija. Solo se puede eligir 3 ingrediente (entre \n# la receta elegida y los comunes.) y el tipo de receta. 
Al final se debe \n# mostrar por pantalla la receta elegida y todos los ingredientes.\n\nprint(\"Bienvenido a tu menú ecológico.\")\nplato = str(input(\"¿qué plato desea hoy?\\n-sopa de verduras 🌱 -verduras al vapor 🌿\\n\"))\n\nif plato == \"sopa de verduras\":\n print(\"¡Exelente elección!🌱\")\n ing = int(input(\"Selecciona los ingredientes que desea añadir:\\n1- lentejas\\n2- apio\\n\"))\n listaIng = []\nelif plato == \"verduras al vapor\":\n print(\"¡Exelente elección!🌿\")\n\n#########################################################################################\n\n\"\"\"print(\"Bienvenido al programa de identificación de peces\")\n\ntamañoDelPez = int(input(\"Ingrese en cm, el tamaño del pez a estudiar: \"))\n\ntamañoNormal =250\ntamañoSdimensionado = 350\n\nif tamañoDelPez < tamañoNormal:\n print(\"Pez cn problemas de nutrición\")\nelif tamañoDelPez == tamañoNormal:\n print(\"Pez en buenas condiciones\")\nelif tamañoDelPez > tamañoNormal:\n print(\"Pez con síntomas de organismo contaminado\")\nelif tamañoDelPez > tamañoSdimensionado:\n print(\"Pez contaminado\")\nelse:\n print(\"Valor ingresado es inválido\")\"\"\"\n\n\"\"\"\nLa ciudad esta dividida en 2 secciones de recolección sección A y B de acuerdo al nombre de la barrio y el tipo del barrio (CÉNTRICO y NO CÉNTRICO)\n\nLa sección A esta formada por los barrios céntricos cuyo nombre comienza con una letra anterior a M y los barrios no céntricos con nombre posterior a la M, y la sección B el resto.\n\"\"\"\n\n#Debemos hacer un programa que dado el nombre del barrio y la ubicación, nos informe en que sección se encuentra.\n\n\"\"\"nombre_barrio = input(\"Ingrese el nombre de su barrio >>> Bº: \")\nubicacion = int(input(\"Si el barrio donde vive es CÉNTRICO ingrese 1 o 2 si es NO CÉNTRICO: \"))\nif ubicacion == 1:\n if nombre_barrio < \"M\":\n print(\"Sección A\")\n else:\n print(\"Sección B\")\nelif ubicacion == 2:\n if nombre_barrio >= \"M\":\n print(\"Sección A\")\n else:\n print(\"Sección B\")\nelse:\n 
print(\"Opción Incorrecta\")\"\"\"","sub_path":"desafíos.py","file_name":"desafíos.py","file_ext":"py","file_size_in_byte":5459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"12501194","text":"\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom generators.common import Generator\nimport os\nimport numpy as np\nfrom pycocotools.coco import COCO\nimport cv2\nimport json\n\n\nclass TinyGenerator(Generator):\n \"\"\"\n Generate data from the COCO dataset.\n See https://github.com/cocodataset/cocoapi/tree/master/PythonAPI for more information.\n \"\"\"\n\n def __init__(self, data_dir, set_name, **kwargs):\n \"\"\"\n Initialize a COCO data generator.\n\n Args\n data_dir: Path to where the COCO dataset is stored.\n set_name: Name of the set to parse.\n \"\"\"\n self.data_dir = data_dir\n self.set_name = set_name\n if set_name in ['train', 'test']:\n self.coco = COCO(os.path.join(data_dir, 'annotations', 'tiny_set_train_with_dense.json'))\n else:\n self.coco = COCO(os.path.join(data_dir, 'annotations', 'tiny_set_train_with_dense.json'))\n self.image_ids = self.coco.getImgIds()\n\n self.load_classes()\n\n super(TinyGenerator, self).__init__(**kwargs)\n\n def load_classes(self):\n \"\"\"\n Loads the class to label mapping (and inverse) for COCO.\n \"\"\"\n # load class names (name -> label)\n categories = self.coco.loadCats(self.coco.getCatIds())\n categories.sort(key=lambda x: x['id'])\n\n self.classes = {}\n self.coco_labels = {}\n self.coco_labels_inverse = {}\n for c in categories:\n self.coco_labels[len(self.classes)] = c['id']\n self.coco_labels_inverse[c['id']] = len(self.classes)\n 
self.classes[c['name']] = len(self.classes)\n\n # also load the reverse (label -> name)\n self.labels = {}\n for key, value in self.classes.items():\n self.labels[value] = key\n print('GOT labels: {}'.format(self.labels))\n\n def size(self):\n \"\"\" Size of the COCO dataset.\n \"\"\"\n return len(self.image_ids)\n\n def num_classes(self):\n \"\"\" Number of classes in the dataset. For TinyPerson is 2\n \"\"\"\n return 2\n\n def has_label(self, label):\n \"\"\" Return True if label is a known label.\n \"\"\"\n return label in self.labels\n\n def has_name(self, name):\n \"\"\" Returns True if name is a known class.\n \"\"\"\n return name in self.classes\n\n def name_to_label(self, name):\n \"\"\" Map name to label.\n \"\"\"\n return self.classes[name]\n\n def label_to_name(self, label):\n \"\"\" Map label to name.\n \"\"\"\n return self.labels[label]\n\n def coco_label_to_label(self, coco_label):\n \"\"\" Map COCO label to the label as used in the network.\n COCO has some gaps in the order of labels. 
The highest label is 90, but there are 80 classes.\n \"\"\"\n return self.coco_labels_inverse[coco_label]\n\n def coco_label_to_name(self, coco_label):\n \"\"\" Map COCO label to name.\n \"\"\"\n return self.label_to_name(self.coco_label_to_label(coco_label))\n\n def label_to_coco_label(self, label):\n \"\"\" Map label as used by the network to labels as used by COCO.\n \"\"\"\n return self.coco_labels[label]\n\n def image_aspect_ratio(self, image_index):\n \"\"\" Compute the aspect ratio for an image with image_index.\n \"\"\"\n image = self.coco.loadImgs(self.image_ids[image_index])[0]\n return float(image['width']) / float(image['height'])\n\n def load_image(self, image_index):\n \"\"\"\n Load an image at the image_index.\n \"\"\"\n # {'license': 2, 'file_name': '000000259765.jpg', 'coco_url': 'http://images.cocodataset.org/test2017/000000259765.jpg', 'height': 480, 'width': 640, 'date_captured': '2013-11-21 04:02:31', 'id': 259765}\n image_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n path = os.path.join(self.data_dir, self.set_name, image_info['file_name'])\n image = cv2.imread(path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image\n\n def load_annotations(self, image_index):\n \"\"\" Load annotations for an image_index.\n \"\"\"\n # get ground truth annotations\n annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)\n annotations = {'labels': np.empty((0,), dtype=np.float32), 'bboxes': np.empty((0, 4), dtype=np.float32)}\n\n # some images appear to miss annotations (like image with id 257034)\n if len(annotations_ids) == 0:\n return annotations\n\n # parse annotations\n coco_annotations = self.coco.loadAnns(annotations_ids)\n for idx, a in enumerate(coco_annotations):\n # some annotations have basically no width / height, skip them\n if a['bbox'][2] < 1 or a['bbox'][3] < 1:\n continue\n\n annotations['labels'] = np.concatenate(\n [annotations['labels'], [a['category_id'] - 1]], axis=0)\n 
annotations['bboxes'] = np.concatenate([annotations['bboxes'], [[\n a['bbox'][0],\n a['bbox'][1],\n a['bbox'][0] + a['bbox'][2],\n a['bbox'][1] + a['bbox'][3],\n ]]], axis=0)\n\n return annotations\n\n\nif __name__ == '__main__':\n from augmentor.misc import MiscEffect\n from augmentor.color import VisualEffect\n misc_effect = MiscEffect(multi_scale_prob=0.9,\n rotate_prob=0.05,\n flip_prob=0.8,\n crop_prob=0.5,\n translate_prob=0.7,\n border_value=(128, 128, 128))\n\n visual_effect = VisualEffect()\n train_generator = TinyGenerator(\n r'G:\\datasets\\tiny_set',\n 'train',\n phi=6,\n batch_size=1,\n misc_effect=misc_effect,\n visual_effect=visual_effect,\n )\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n anchors = train_generator.anchors\n for batch_inputs, batch_targets in train_generator:\n image = batch_inputs[0][0]\n image[..., 0] *= std[0]\n image[..., 1] *= std[1]\n image[..., 2] *= std[2]\n image[..., 0] += mean[0]\n image[..., 1] += mean[1]\n image[..., 2] += mean[2]\n image *= 255.\n\n regression = batch_targets[0][0]\n valid_ids = np.where(regression[:, -1] == 1)[0]\n boxes = anchors[valid_ids]\n deltas = regression[valid_ids]\n class_ids = np.argmax(batch_targets[1][0][valid_ids], axis=-1)\n mean_ = [0, 0, 0, 0]\n std_ = [0.2, 0.2, 0.2, 0.2]\n\n width = boxes[:, 2] - boxes[:, 0]\n height = boxes[:, 3] - boxes[:, 1]\n\n x1 = boxes[:, 0] + (deltas[:, 0] * std_[0] + mean_[0]) * width\n y1 = boxes[:, 1] + (deltas[:, 1] * std_[1] + mean_[1]) * height\n x2 = boxes[:, 2] + (deltas[:, 2] * std_[2] + mean_[2]) * width\n y2 = boxes[:, 3] + (deltas[:, 3] * std_[3] + mean_[3]) * height\n for x1_, y1_, x2_, y2_, class_id in zip(x1, y1, x2, y2, class_ids):\n if train_generator.has_label(class_id):\n x1_, y1_, x2_, y2_ = int(x1_), int(y1_), int(x2_), int(y2_)\n cv2.rectangle(image, (x1_, y1_), (x2_, y2_), (0, 255, 0), 2)\n class_name = train_generator.labels[class_id]\n label = class_name\n ret, baseline = cv2.getTextSize(label, 
cv2.FONT_HERSHEY_SIMPLEX, 0.3, 1)\n cv2.rectangle(image, (x1_, y2_ - ret[1] - baseline), (x1_ + ret[0], y2_), (255, 255, 255), -1)\n cv2.putText(image, label, (x1_, y2_ - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)\n cv2.imshow('image', image.astype(np.uint8)[..., ::-1])\n cv2.waitKey(0)\n # 36864, 46080, 48384, 48960, 49104\n # if first_valid_id < 36864:\n # stride = 8\n # elif 36864 <= first_valid_id < 46080:\n # stride = 16\n # elif 46080 <= first_valid_id < 48384:\n # stride = 32\n # elif 48384 <= first_valid_id < 48960:\n # stride = 64\n # else:\n # stride = 128\n pass\n\n","sub_path":"generators/tiny.py","file_name":"tiny.py","file_ext":"py","file_size_in_byte":8649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"372249869","text":"from Queue import PriorityQueue\nclass Solution(object):\n def getSkyline(self, buildings):\n \"\"\"\n :type buildings: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n \"\"\"\n we use heap to store the key points of the building.\n [negtiveHeight, pos]\n for each iteration, we go through all the key points(corner points)\n if the new key points height will be added to the heap.\n if the current largest(stored in heap) is different than the previous one, then it will be added to the result.\n \"\"\"\n res = [[-1, 0]]\n q = PriorityQueue()\n heights = []\n for building in buildings:\n heights.append([building[0], -building[2], building[1]])\n heights.append([building[1], 0, 0])\n heights.sort()\n hp = [[0, float(\"inf\")]]\n for height in heights:\n [x, y, z] = height\n while x >= hp[0][1]:\n heapq.heappop(hp)\n if y:\n heapq.heappush(hp, [y, z])\n if res[-1][-1] != -hp[0][0]:\n res.append([x, -hp[0][0]])\n return res[1:]\n \n \n","sub_path":"Heap/218. The Skyline Problem.py","file_name":"218. The Skyline Problem.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"155074565","text":"class Solution(object):\n def pancakeSort(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: List[int]\n 参数复杂:\n 反转方法一:反转i+1:A[0:i+1]=A[i::-1]\n 反转方法二:反转3:a[0:3] = list(reversed(a[0:3]))\n 算法利用选择排序的思想\n \"\"\"\n \n r=[]\n for i in range(len(A)-1,0,-1):\n m=float(\"-inf\")\n for j in range(0,i+1):\n if A[j]>m:\n m=A[j]\n m_i=j\n if m_i==0:\n r.append(i+1)\n A[0:i+1]=A[i::-1]\n elif m_i==i:\n pass\n else:\n r.append(m_i+1)\n A[0:m_i+1]=A[m_i::-1]\n r.append(i+1)\n A[0:i+1]=A[i::-1]\n \n return r\n","sub_path":"969. Pancake Sorting/Solution_选择排序_列表切片.py","file_name":"Solution_选择排序_列表切片.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"480760409","text":"from threading import Thread\r\nfrom time import sleep\r\nimport numpy as np\r\nfrom keras.models import load_model\r\nimport tensorflow as tf\r\n\r\n\r\nclass RecognizeThread:\r\n def __init__(self):\r\n self.newArrival = False\r\n self.stopped = False\r\n self.list_mouth_frame = []\r\n self.X = np.zeros(shape=(1, 70, 80, 100, 3), dtype=np.float32)\r\n self.recognizeDone = False\r\n self.y_predict = 0\r\n self.model = load_model('../resource/2017-12-01_model.h5')\r\n self.model._make_predict_function()\r\n self.graph = tf.get_default_graph()\r\n self.model_loaded = True\r\n print(\"------ LOAD MODEL COMPLETE ------\")\r\n\r\n def start(self):\r\n Thread(target=self.worker, args=()).start()\r\n return self\r\n\r\n def worker(self):\r\n while not self.stopped:\r\n if self.newArrival:\r\n frame_n_temp = min(len(self.list_mouth_frame), 70)\r\n self.X = np.zeros(shape=(1, 70, 80, 100, 3), dtype=np.float32)\r\n self.X[0, 0:frame_n_temp] = np.asarray(self.list_mouth_frame, dtype=np.float32)[0:frame_n_temp]\r\n\r\n with self.graph.as_default():\r\n y_predict_probabilities = self.model.predict(self.X, batch_size=None)[0]\r\n self.y_predict = y_predict_probabilities.argmax() + 1\r\n\r\n # print(\"Mouth Frame Valid Length: {}\".format(len(self.list_mouth_frame)))\r\n # print(\"Recognition Result: {}\".format(self.y_predict))\r\n self.recognizeDone = True\r\n self.newArrival = False\r\n\r\n sleep(0.005)\r\n\r\n def stop(self):\r\n self.stopped = True\r\n\r\n def setData(self, list_user_mouth_frame):\r\n self.list_mouth_frame = list_user_mouth_frame[:]\r\n self.newArrival = True\r\n\r\n def reset_recognize_state(self):\r\n self.recognizeDone = False\r\n\r\n def recognize_complete(self):\r\n return self.recognizeDone\r\n","sub_path":"src/ImageLipNetRecognizeThread.py","file_name":"ImageLipNetRecognizeThread.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"4519552","text":"\nimport sys\n\ninput = lambda: sys.stdin.readline().rstrip()\n\nd, h, w = map(int, input().split())\n\n# 대각선 길이 비를 구하고 그 비율로 곱해줘서 각 높이, 너비의 비율을 구한다.\nr = d / ((h ** 2 + w ** 2) ** 0.5)\nprint(int(h * r), int(w * r))\n","sub_path":"BaekJoon_Levels/Bronze_Ⅱ/Boj1297_TV크기/Boj1297_TV크기.py","file_name":"Boj1297_TV크기.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"150914952","text":"from django.conf.urls.defaults import patterns\nimport settings\n#from django.views.static import serve #Necessary @UnusedImport\n#from django.conf import settings\n\nurlpatterns = patterns('',\n \n (r'^$', 'realtime.views.home'),\n (r'^realtime$', 'realtime.views.realtime'),\n# (r'^tp', 'views.tp'),\n \n# (r'^importData', 'coleta.views.import_data'),\n# (r'^resumo', 'coleta.views.resumo'),\n# (r'^consulta', 'coleta.views.consulta'),\n \n \n (r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),\n #(r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),\n #(r'^articles/(\\d{4})/$', 'mysite.views.year_archive'),\n #(r'^articles/(\\d{4})/(\\d{2})/$', 'mysite.views.month_archive'),\n #(r'^articles/(\\d{4})/(\\d{2})/(\\d+)/$', 'mysite.views.article_detail'),\n)","sub_path":"SentimentalAnalysis/src/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"332173539","text":"# 非线性滤波\n# 相对于线性滤波来说,在去噪的效果可能要比线性的好得多\n# 非线性滤波:中值滤波、双边滤波\n\nimport cv2\n\n\ndef callback(x):\n pass\n\n\ndef app():\n cv2.namedWindow('medianBlur')\n cv2.createTrackbar('medianBlur', 'medianBlur', 3, 50, callback)\n\n cv2.namedWindow('bilateralFilter')\n cv2.createTrackbar('bilateralFilter', 'bilateralFilter', 3, 50, callback)\n\n image = cv2.imread(\"C:/Users/xiaozhuzhu98/Pictures/abPhoto/women.jpg\")\n while(True):\n m_size = cv2.getTrackbarPos('medianBlur', 'medianBlur')\n b_size = cv2.getTrackbarPos('bilateralFilter', 'bilateralFilter')\n\n # medianBlur 中值滤波函数\n # src:源图像\n # ksize:孔径线性大小,这个参数必须大于1的奇数\n m_image = cv2.medianBlur(image, m_size*2+1)\n\n # bilateralFilter 双边滤波函数\n # src: 源图像\n # d:表示过滤过程中每个像素点邻域的直径\n # sigmaColor:颜色空间滤波器值\n # sigmaSpace:坐标空间滤波值\n b_image = cv2.bilateralFilter(image, b_size, 25*2, 25/2)\n\n if cv2.waitKey(10) & 0xff == ord('q'):\n break\n\n cv2.imshow('medianBlur', m_image)\n cv2.imshow('bilateralFilter', b_image)\n\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n app()","sub_path":"part_3/opencv_imgpro_test_3.py","file_name":"opencv_imgpro_test_3.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"131838587","text":"\"\"\"Utility functions for tests.\"\"\"\n\nfrom __future__ import annotations\n\nimport base64\nimport os\nimport sys\nfrom asyncio import Future\nfrom datetime import datetime, timedelta, timezone\nfrom typing import TYPE_CHECKING\nfrom unittest.mock import Mock\n\nimport jwt\nimport mockaioredis\nfrom aiohttp import ClientResponse, web\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives.serialization import (\n Encoding,\n NoEncryption,\n PrivateFormat,\n PublicFormat,\n)\n\nfrom jwt_authorizer.app import create_app\nfrom jwt_authorizer.config import ALGORITHM\nfrom jwt_authorizer.verify import KeyClient\n\nif TYPE_CHECKING:\n from typing import Any, Dict, List, Optional\n\n\ndef number_to_base64(data: int) -> bytes:\n \"\"\"Convert an integer to base64-encoded bytes in big endian order.\n\n Parameters\n ----------\n data : `int`\n Arbitrarily large number\n\n Returns\n -------\n result : `bytes`\n The equivalent URL-safe base64-encoded string corresponding to the\n number in big endian order.\n \"\"\"\n bit_length = data.bit_length()\n byte_length = bit_length // 8 + 1\n data_as_bytes = data.to_bytes(byte_length, byteorder=\"big\", signed=False)\n return base64.urlsafe_b64encode(data_as_bytes)\n\n\nclass FakeKeyClient(KeyClient):\n \"\"\"Override KeyClient to not make HTTP requests.\n\n This returns minimal OpenID Connect and JWKS metadata for the two issuers\n used by the test suite.\n \"\"\"\n\n def __init__(self, keypair: RSAKeyPair) -> None:\n self.keypair = keypair\n\n async def get_url(self, url: str) -> ClientResponse:\n if url == \"https://test.example.com/.well-known/openid-configuration\":\n jwks_uri = \"https://test.example.com/.well-known/jwks.json\"\n return self._build_response_success({\"jwks_uri\": jwks_uri})\n elif url == \"https://test.example.com/.well-known/jwks.json\":\n return 
self._build_response_success(self._build_keys(\"some-kid\"))\n elif url == \"https://orig.example.com/.well-known/jwks.json\":\n return self._build_response_success(self._build_keys(\"orig-kid\"))\n else:\n return self._build_response_failure()\n\n def _build_keys(self, kid: str) -> Dict[str, Any]:\n \"\"\"Generate the JSON-encoded keys structure for a keypair.\"\"\"\n public_numbers = self.keypair.public_numbers()\n e = number_to_base64(public_numbers.e).decode()\n n = number_to_base64(public_numbers.n).decode()\n return {\"keys\": [{\"alg\": ALGORITHM, \"e\": e, \"n\": n, \"kid\": kid}]}\n\n def _build_response_failure(self) -> ClientResponse:\n \"\"\"Build a successful response.\"\"\"\n r = Mock(spec=ClientResponse)\n r.status = 404\n return r\n\n def _build_response_success(\n self, result: Dict[str, Any]\n ) -> ClientResponse:\n \"\"\"Build a successful response.\"\"\"\n r = Mock(spec=ClientResponse)\n if sys.version_info[0] == 3 and sys.version_info[1] < 8:\n future: Future[Dict[str, Any]] = Future()\n future.set_result(result)\n r.json.return_value = future\n else:\n r.json.return_value = result\n r.status = 200\n return r\n\n\nclass RSAKeyPair:\n \"\"\"An autogenerated public/private key pair.\"\"\"\n\n def __init__(self) -> None:\n self.private_key = rsa.generate_private_key(\n public_exponent=65537, key_size=2048, backend=default_backend()\n )\n\n def private_key_as_pem(self) -> bytes:\n return self.private_key.private_bytes(\n Encoding.PEM, PrivateFormat.PKCS8, NoEncryption()\n )\n\n def public_key_as_pem(self) -> bytes:\n return self.private_key.public_key().public_bytes(\n Encoding.PEM, PublicFormat.SubjectPublicKeyInfo,\n )\n\n def public_numbers(self) -> rsa.RSAPublicNumbers:\n return self.private_key.public_key().public_numbers()\n\n\nasync def create_test_app(\n keypair: Optional[RSAKeyPair] = None,\n session_secret: Optional[bytes] = None,\n **kwargs: Any,\n) -> web.Application:\n \"\"\"Configured aiohttp Application for testing.\"\"\"\n if not 
keypair:\n keypair = RSAKeyPair()\n if not session_secret:\n session_secret = os.urandom(16)\n\n kwargs[\"OAUTH2_JWT.KEY\"] = keypair.private_key_as_pem().decode()\n secret_b64 = base64.urlsafe_b64encode(session_secret).decode()\n kwargs[\"OAUTH2_STORE_SESSION.OAUTH2_PROXY_SECRET\"] = secret_b64\n kwargs[\"OAUTH2_STORE_SESSION.REDIS_URL\"] = \"dummy\"\n\n app = await create_app(\n redis_pool=await mockaioredis.create_redis_pool(\"\"),\n key_client=FakeKeyClient(keypair),\n FORCE_ENV_FOR_DYNACONF=\"testing\",\n **kwargs,\n )\n\n return app\n\n\ndef create_test_token(\n keypair: RSAKeyPair,\n groups: Optional[List[str]] = None,\n kid: str = \"some-kid\",\n **attributes: str,\n) -> str:\n \"\"\"Create a signed token using the configured test issuer.\n\n This will match the issuer and audience of the default JWT Authorizer\n issuer, so JWT Authorizer will not attempt to reissue it.\n\n Parameters\n ----------\n keypair : `RSAKeyPair`\n The key pair to use to sign the token.\n groups : List[`str`], optional\n Group memberships the generated token should have.\n kid : `str`\n The key ID to use.\n **attributes : `str`\n Other attributes to set or override in the token.\n\n Returns\n -------\n token : `str`\n The encoded token.\n \"\"\"\n payload = create_test_token_payload(groups, **attributes)\n return jwt.encode(\n payload,\n keypair.private_key_as_pem(),\n algorithm=ALGORITHM,\n headers={\"kid\": kid},\n ).decode()\n\n\ndef create_test_token_payload(\n groups: Optional[List[str]] = None, **attributes: str,\n) -> Dict[str, Any]:\n \"\"\"Create the contents of a token using the configured test issuer.\n\n This will match the issuer and audience of the default JWT Authorizer\n issuer, so JWT Authorizer will not attempt to reissue it.\n\n Parameters\n ----------\n groups : List[`str`], optional\n Group memberships the generated token should have.\n **attributes : `str`\n Other attributes to set or override in the token.\n\n Returns\n -------\n payload : Dict[`str`, Any]\n 
The contents of the token.\n \"\"\"\n exp = datetime.now(timezone.utc) + timedelta(days=24)\n payload: Dict[str, Any] = {\n \"aud\": \"https://example.com/\",\n \"email\": \"some-user@example.com\",\n \"exp\": int(exp.timestamp()),\n \"iss\": \"https://test.example.com/\",\n \"jti\": \"some-unique-id\",\n \"sub\": \"some-user\",\n \"uid\": \"some-user\",\n \"uidNumber\": \"1000\",\n }\n payload.update(attributes)\n if groups:\n payload[\"isMemberOf\"] = [{\"name\": g} for g in groups]\n return payload\n","sub_path":"tests/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"154395184","text":"\"\"\"django_blog URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\n\nfrom common import common_view\nfrom user.views import login\n\nurlpatterns = [\n path('', login.Login.as_view(), name='login'), # 只输入IP和端口时跳转到登录页\n path('login/', login.Login.as_view(), name='login'),\n path('logout/', login.logout, name='logout'),\n path('register/', login.Register.as_view(), name='register'),\n path('check_code/', common_view.getCheckCode, name='check_code'),\n path('valid_username/', login.valid_username, name='valid_username'),\n\n]\n","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"155565776","text":"__author__ = 'Tyraan'\nfrom tkinter import *\nimport sys\ndef quit1():\n print('quit !')\n quit()\nwi=Button(None,{'text':'Hello widgte world'},command=(lambda :print('Here lambda function') or quit1() ))\nwi.pack()\nwi.mainloop()\n\n\nwid=Button(None,text='Hello widget',command=sys.exit)\nwid.pack()\nwid.mainloop()\n\nwidg=Button(None,text='Event!',command=quit)\n\n","sub_path":"python/gui1.py","file_name":"gui1.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"567788429","text":"# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\n\nrequires = [\n 'aiofiles==0.3.2',\n 'aiohttp>=3.5.4',\n 'animeface',\n 'appdirs==1.4.3',\n 'attrs==17.4.0',\n 'Click>=7.0',\n 'colour==0.1.5',\n 'httptools==0.0.9',\n 'jinja2==2.10',\n 'MarkupSafe==1.0',\n 'numpy==1.13.3',\n 'opencv-python==3.4.3.18',\n 'python-magic==0.4.13',\n 'sanic==0.8.3',\n 'SQLAlchemy-Utils>=0.33.8',\n 'SQLAlchemy>=1.2.14',\n 'ujson==1.35',\n 'uvloop==0.8.1',\n 'websockets>=5.0.1',\n]\n\nconsole_scripts = [\n 'moeflow = moeflow.cmds.main:main',\n]\n\nsetup(\n name='MoeFlow',\n version='0.0.1',\n author='Iskandar Setiadi',\n author_email='iskandarsetiadi@gmail.com',\n url='https://github.com/freedomofkeima/MoeFlow',\n description='Anime characters recognition website, powered by TensorFlow',\n license='MIT',\n packages=find_packages(where='src'),\n package_dir={\n '': 'src'\n },\n install_requires=requires,\n # NOTE: for dependency_links\n # https://github.com/pypa/pip/issues/3610#issuecomment-356687173\n dependency_links=['http://github.com/nya3jp/python-animeface/tarball/master#egg=animeface-1.1.0'], # NOQA\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.5\",\n \"Environment :: Web Environment\"\n ],\n entry_points={'console_scripts': console_scripts},\n extras_require={\n 'tests': [\n 'pytest>=4.0.0', 'pytest-cov', 'pytest-sugar',\n 'pytest-asyncio>=0.9.0', 'pytest-flake8>=1.0.2',\n ],\n 'patchelf_wrapper': ['patchelf-wrapper==1.0.4', ],\n 'tensorflow': ['tensorflow==1.4.0', ],\n },\n zip_safe=False\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"514524088","text":"# -*- coding: utf8 -*-\n\nfrom ..scpi.dcpwr import Base, Measurement\n\n\nclass Z60(Base, Measurement):\n def __init__(self, *args, **kwargs):\n self.__dict__.setdefault('_instrument_id', '')\n\n # early define of _do_scpi_init\n self.__dict__.setdefault('_do_scpi_init', True)\n\n super(Base, self).__init__(*args, **kwargs)\n\n self._self_test_delay = 5\n\n self._output_count = 4\n self._driver_operation_cache = False\n\n self._output_spec = [\n {\n 'range': {\n 'P60V': (60.0, 14.0),\n 'P60V': (60.0, 14.0)\n },\n 'ovp_max': 60.0,\n 'voltage_max': 60.0,\n 'current_max': 14.0\n },\n {\n 'range': {\n 'P60V': (60.0, 14.0),\n 'P60V': (60.0, 14.0)\n },\n 'ovp_max': 60.0,\n 'voltage_max': 60.0,\n 'current_max': 14.0\n },\n {\n 'range': {\n 'P60V': (60.0, 14.0),\n 'P60V': (60.0, 14.0)\n },\n 'ovp_max': 60.0,\n 'voltage_max': 60.0,\n 'current_max': 14.0\n },\n {\n 'range': {\n 'P60V': (60.0, 14.0),\n 'P60V': (60.0, 14.0)\n },\n 'ovp_max': 60.0,\n 'voltage_max': 60.0,\n 'current_max': 14.0\n }\n ]\n\n self._identity_description = \"TDK-Lambda Z+ Series DC power supply driver\"\n self._identity_identifier = \"\"\n self._identity_revision = \"\"\n self._identity_vendor = \"\"\n self._identity_instrument_manufacturer = \"\"\n self._identity_instrument_model = \"\"\n self._identity_instrument_firmware_revision = \"\"\n self._identity_specification_major_version = 3\n self._identity_specification_minor_version = 0\n self._identity_supported_instrument_models = ['PSU']\n\n self._init_outputs()\n\n if self._interface: \n self._interface.term_char = '\\r\\n'\n\n def _initialize(self, resource=None, id_query=False, reset=False, **keywargs):\n \"\"\"Opens an I/O session to the instrument.\"\"\"\n\n super(Base, self)._initialize(resource, id_query, reset, **keywargs)\n\n if not self._do_scpi_init:\n return\n\n # interface clear - Z+ does not support interface clear\n # check ID\n if id_query and not self._driver_operation_simulate:\n id = 
self.identity.instrument_model\n id_check = self._instrument_id\n id_short = id[:len(id_check)]\n if id_short != id_check:\n raise Exception(\"Instrument ID mismatch, expecting %s, got %s\", id_check, id_short)\n\n # reset\n if reset:\n self.utility_reset()\n\n","sub_path":"ivi/tdklambda/z60.py","file_name":"z60.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"559382532","text":"from fnmatch import fnmatchcase\nfrom itertools import combinations\n\nimport numpy as np\n\nfrom ._data_obj import (\n NDVar, Case,\n ascategorial, asdataobject, assub, cellname, dataobj_repr,\n)\nfrom ._stats.stats import variability\nfrom ._utils.numpy_utils import FULL_SLICE\n\n\nclass Celltable(object):\n \"\"\"Divide y into cells defined by x.\n\n Parameters\n ----------\n y : data-object\n dependent measurement\n x : categorial\n Model (Factor or Interaction) for dividing y.\n match : categorial\n Factor on which cases are matched (i.e. subject for a repeated\n measures comparisons). If several data points with the same\n case fall into one cell of x, they are combined using\n match_func. If match is not None, Celltable.groups contains the\n {Xcell -> [match values of data points], ...} mapping corres-\n ponding to self.data\n sub : bool array\n Bool array of length N specifying which cases to include\n cat : None | sequence of cells of x\n Only retain data for these cells. 
Data will be sorted in the order\n of cells occuring in cat.\n ds : Dataset\n If a Dataset is specified, input items (y / x / match / sub) can\n be str instead of data-objects, in which case they will be\n retrieved from the Dataset.\n coercion : callable\n Function to convert the y parameter to to the dependent varaible\n (default: asdataobject).\n\n\n Examples\n --------\n Split a repeated-measure variable y into cells defined by the\n interaction of A and B::\n\n >>> c = Celltable(y, A % B, match=subject)\n\n\n Attributes\n ----------\n y : data-object\n ``y`` after evaluating input parameters.\n x : categorial\n ``x`` after evaluating input parameters.\n match : categorial | None\n ``match`` after evaluating input parameters.\n sub : bool array | None\n ``sub`` after evaluating input parameters.\n cells : list of (str | tuple)\n List of all cells in x.\n data : dict(cell -> data)\n Data (``y[index]``) in each cell.\n data_indexes : dict(cell -> index-array)\n For each cell, a boolean-array specifying the index for that cell in\n ``x``.\n\n **If ``match`` is specified**:\n\n within : dict(cell1, cell2 -> bool)\n Dictionary that specifies for each cell pair whether the corresponding\n comparison is a repeated-measures or an independent measures\n comparison (only available when the input argument ``match`` is\n specified.\n all_within : bool\n Whether all comparison are repeated-measures comparisons or not.\n groups : dict(cell -> group)\n A slice of the match argument describing the group members for each\n cell.\n\n \"\"\"\n def __init__(self, y, x=None, match=None, sub=None, cat=None, ds=None,\n coercion=asdataobject, dtype=None):\n self.sub = sub\n sub = assub(sub, ds)\n\n if x is None:\n if cat is not None:\n raise TypeError(f\"cat={cat!r}: cat is only a valid argument if x is provided\")\n y = coercion(y, sub, ds)\n else:\n x = ascategorial(x, sub, ds)\n if cat is not None:\n # reconstruct cat if some cells are provided as None\n is_none = [c is None for c 
in cat]\n if any(is_none):\n if len(cat) == len(x.cells):\n if all(is_none):\n cat = x.cells\n else:\n cells = [c for c in x.cells if c not in cat]\n cat = tuple(cells.pop(0) if c is None else c for c in cat)\n else:\n raise ValueError(\n f\"cat={cat!r}: categories can only be specified as \"\n f\"None if all cells in x are used, but there are more \"\n f\"than {len(cat)} cells: {x.cells}\")\n\n # make sure all categories are in data\n if not all(c in x.cells for c in cat):\n raise ValueError(\n f\"cat={cat!r} contains categories that are not in the \"\n f\"data: {', '.join(str(c) for c in cat if c not in x.cells)}\")\n\n # apply cat\n sort_idx = x.sort_index(order=cat)\n x = x[sort_idx]\n if sub is None:\n sub = sort_idx\n else:\n if sub.dtype.kind == 'b':\n sub = np.flatnonzero(sub)\n sub = sub[sort_idx]\n y = coercion(y, sub, ds, len(x))\n\n if match is not None:\n match = ascategorial(match, sub, ds, len(y))\n cell_model = match if x is None else x % match\n sort_idx = None\n if len(cell_model) > len(cell_model.cells):\n # need to aggregate\n y = y.aggregate(cell_model)\n match = match.aggregate(cell_model)\n if x is not None:\n x = x.aggregate(cell_model)\n if cat is not None:\n sort_idx = x.sort_index(order=cat)\n else:\n sort_idx = cell_model.sort_index()\n if x is not None and cat is not None:\n X_ = x[sort_idx]\n sort_X_idx = X_.sort_index(order=cat)\n sort_idx = sort_idx[sort_X_idx]\n\n if (sort_idx is not None) and (not np.all(np.diff(sort_idx) == 1)):\n y = y[sort_idx]\n match = match[sort_idx]\n if x is not None:\n x = x[sort_idx]\n\n if dtype is not None and y.x.dtype != dtype:\n y = y.astype(dtype)\n\n # save args\n self.y = y\n self.x = x\n self.cat = cat\n self.match = match\n self.coercion = coercion.__name__\n self.n_cases = len(y)\n\n # extract cell data\n self.data = {}\n self.data_indexes = {}\n if x is None:\n self.data[None] = y\n self.data_indexes[None] = FULL_SLICE\n self.cells = (None,)\n self.n_cells = 1\n self.all_within = match is not 
None\n return\n self.cells = cat if cat is not None else x.cells\n self.n_cells = len(self.cells)\n self.groups = {}\n for cell in x.cells:\n idx = x.index_opt(cell)\n self.data_indexes[cell] = idx\n self.data[cell] = y[idx]\n if match:\n self.groups[cell] = match[idx]\n\n # determine which comparisons are within subject comparisons\n if match:\n self.within = {}\n for cell1, cell2 in combinations(x.cells, 2):\n group1 = self.groups[cell1]\n if len(group1) == 0:\n continue\n group2 = self.groups[cell2]\n if len(group2) == 0:\n continue\n within = np.all(group1 == group2)\n self.within[cell1, cell2] = within\n self.within[cell2, cell1] = within\n self.any_within = any(self.within.values())\n self.all_within = all(self.within.values())\n else:\n self.any_within = False\n self.all_within = False\n\n def __repr__(self):\n args = [dataobj_repr(self.y), dataobj_repr(self.x)]\n if self.match is not None:\n args.append(\"match=%s\" % dataobj_repr(self.match))\n if self.sub is not None:\n args.append(\"sub=%s\" % dataobj_repr(self.sub))\n if self.coercion != 'asdataobject':\n args.append(\"coercion=%s\" % self.coercion)\n return \"Celltable(%s)\" % (', '.join(args))\n\n def __len__(self):\n return self.n_cells\n\n def cellname(self, cell, delim=' '):\n \"\"\"Produce a str label for a cell.\n\n Parameters\n ----------\n cell : tuple | str\n Cell.\n delim : str\n Interaction cells (represented as tuple of strings) are joined by\n ``delim``.\n \"\"\"\n return cellname(cell, delim=delim)\n\n def cellnames(self, delim=' '):\n \"\"\"Return a list of all cell names as strings.\n\n See Also\n --------\n .cellname : Produce a str label for a single cell.\n \"\"\"\n return [cellname(cell, delim) for cell in self.cells]\n\n def data_for_cell(self, cell):\n \"\"\"Retrieve data for a cell, allowing advanced cell combinations\n\n Parameters\n ----------\n cell : str | tuple of str\n Name fo the cell. See notes for special cell names. 
After a special\n cell is retrieved for the first time it is also add to\n ``self.data``.\n\n Notes\n -----\n Special cell names can be used to retrieve averages between different\n primary cells. The names should be composed so that a case sensitive\n version of fnmatch will find the source cells. For examples, if all\n cells are ``[('a', '1'), ('a', '2'), ('b', '1'), ('b', '2')]``,\n ``('a', '*')`` will retrieve the average of ``('a', '1')`` and\n ``('a', '2')``.\n \"\"\"\n if cell in self.data:\n return self.data[cell]\n\n # find cells matched by `cell`\n if isinstance(cell, str):\n cells = [c for c in self.cells if fnmatchcase(c, cell)]\n name = cell\n else:\n cells = [c for c in self.cells if\n all(fnmatchcase(c_, cp) for c_, cp in zip(c, cell))]\n name = '|'.join(cell)\n\n # check that all are repeated measures\n for cell1, cell2 in combinations(cells, 2):\n if not self.within[(cell1, cell2)]:\n err = (\"Combinatory cells can only be formed from repeated \"\n \"measures cells, %r and %r are not.\" % (cell1, cell2))\n raise ValueError(err)\n\n # combine data\n cell0 = cells[0]\n x = np.empty_like(self.data[cell0].x)\n for cell_ in cells:\n x += self.data[cell_].x\n x /= len(cells)\n out = NDVar(x, cell0.dims, {}, name)\n self.data[cell] = out\n return out\n\n def get_data(self, out=list):\n if out is dict:\n return self.data\n elif out is list:\n return [self.data[cell] for cell in self.cells]\n\n def get_statistic(self, func=np.mean):\n \"\"\"Return a list with ``a * func(data)`` for each data cell.\n\n Parameters\n ----------\n func : callable | str\n statistics function that is applied to the data. Can be string,\n such as '[x]sem' or '[x]ci', e.g. 
'2sem'.\n\n See also\n --------\n .get_statistic_dict : return statistics in a ``{cell: data}`` dict\n \"\"\"\n if isinstance(func, str):\n var_spec = func\n\n def func(y):\n return variability(y, None, None, var_spec, False)\n\n return [func(self.data[cell].x) for cell in self.cells]\n\n def get_statistic_dict(self, func=np.mean):\n \"\"\"Return a ``{cell: func(data)}`` dictionary.\n\n Parameters\n ----------\n func : callable | str\n statistics function that is applied to the data. Can be string,\n such as '[x]sem', '[x]std', or '[x]ci', e.g. '2sem'.\n\n See Also\n --------\n .get_statistic : statistic in a list\n \"\"\"\n return dict(zip(self.cells, self.get_statistic(func)))\n\n def variability(self, error='sem', pool=None):\n \"\"\"Variability measure\n\n Parameters\n ----------\n error : str\n Measure of variability. Examples:\n ``sem``: Standard error of the mean (default);\n ``2sem``: 2 standard error of the mean;\n ``ci``: 95% confidence interval;\n ``99%ci``: 99% confidence interval (default).\n pool : bool\n Pool the errors for the estimate of variability (default is True\n for complete within-subject designs, False otherwise).\n\n Notes\n -----\n Returns within-subject standard error for complete within-subject\n designs (see Loftus & Masson, 1994).\n \"\"\"\n match = self.match if self.all_within else None\n if pool is None:\n pool = self.all_within\n x = variability(self.y.x, self.x, match, error, pool)\n if isinstance(self.y, NDVar):\n dims = self.y.dims[1:]\n if not pool:\n dims = (Case,) + dims\n return NDVar(x, dims, self.y.info.copy(), error)\n else:\n return x\n","sub_path":"eelbrain/_celltable.py","file_name":"_celltable.py","file_ext":"py","file_size_in_byte":12725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"624712182","text":"# 【操作】为文本文件每一行的末尾增加行号\nwith open(r\"e:\\bb.txt\", \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n print(lines)\n lines = [line.rstrip() + \"#\" +\n str(index + 1) + \"\\n\" for index, line in enumerate(lines)] # 推导式生成列表\n print(lines)\n\nwith open(r\"e:\\bb.txt\", \"w\", encoding=\"utf-8\") as f:\n f.writelines(lines)\n","sub_path":"season2/IO/my06.py","file_name":"my06.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"640915017","text":"def solution(map):\n\n class Board(list):\n traversable_value = 0\n nontraversable_value = 1\n unvisited_value = None\n unreachable_value = None\n def __getitem__(self, tup):\n r, c = tup\n return super(self.__class__, self).__getitem__(r).__getitem__(c)\n def __setitem__(self, tup, val):\n r, c = tup\n super(self.__class__, self).__getitem__(r).__setitem__(c, val)\n\n from collections import deque\n\n class Cell(tuple):\n def __init__(self, minDistTo = None):\n self.minDistTo = minDistTo\n\n def getNeighbors(self):\n r, c = self\n yield self.__class__( (r-1, c) )\n yield self.__class__( (r+1, c) )\n yield self.__class__( (r, c-1) )\n yield self.__class__( (r, c+1) )\n\n def isInside(self, board):\n r, c = self\n num_rows, num_cols = len(board), len(list(board)[0])\n return 0 <= r < num_rows and 0 <= c < num_cols\n\n def isTraversableIn(self, board):\n return board[self] == board.__class__.traversable_value\n\n def isAWallIn(self, board):\n return board[self] == board.__class__.nontraversable_value\n\n def hasNotBeenVisitedIn(self, board):\n return board[self] == board.__class__.unvisited_value\n\n def isUnreachableFrom(self, other):\n if not isinstance(other, self.__class__):\n return False\n return other.minDistTo[self] == other.minDistTo.__class__.unreachable_value\n\n def genMinDistTo(self, m):\n\n if self.isAWallIn(board = m):\n return None\n\n h = len(m)\n w = len(list(m)[0])\n\n minDistTo = Board( [ [Board.unvisited_value]*w for _ in range(h) ] )\n\n minDistTo[self] = 1\n # print(Dequeue([self]))\n cells = deque([self]) #[] \n #cells.append(self)\n #print(cells)\n while cells: \n\n cell = cells.popleft()\n minDistToCell = minDistTo[cell]\n\n for neighbor in cell.getNeighbors():\n\n if neighbor.isInside(board = m) and \\\n neighbor.isTraversableIn(board = m) and \\\n neighbor.hasNotBeenVisitedIn(board = minDistTo):\n\n minDistToNeighbor = minDistToCell + 1\n minDistTo[neighbor] = minDistToNeighbor\n\n cells.append(neighbor) \n\n 
self.minDistTo = minDistTo\n\n def answer(m):\n\n num_rows = h = len(m) \n num_cols = w = len(m[0]) \n\n m = Board(m)\n\n bestConceivableResult = h + w - 1\n\n start = Cell( ( 0, 0) )\n end = Cell( (h-1, w-1) )\n\n start.genMinDistTo(m) \n\n if end.isUnreachableFrom(start):\n bestResult_soFar = 2**31 - 1\n else:\n bestResult_soFar = start.minDistTo[end]\n\n if bestResult_soFar == bestConceivableResult:\n return bestConceivableResult\n\n end.genMinDistTo(m)\n\n for r in range(num_rows):\n for c in range(num_cols):\n\n cell = Cell( (r, c) )\n\n if cell.isAWallIn(board = m):\n\n wall = cell\n\n potentiallyBetterResult = whatIfIRemovedThis(wall, m, start, end)\n\n bestResult_soFar = min(bestResult_soFar, potentiallyBetterResult)\n\n bestResult = bestResult_soFar\n\n return bestResult\n\n def whatIfIRemovedThis(wall, m, start, end):\n \n bestResult_soFar = 2**31 - 1\n\n for incoming in wall.getNeighbors():\n for outgoing in wall.getNeighbors():\n \n if incoming == outgoing:\n continue\n\n if not incoming.isInside(board = m) or not outgoing.isInside(board = m) or \\\n incoming.isAWallIn(board = m) or outgoing.isAWallIn(board = m) or \\\n incoming.isUnreachableFrom(start) or outgoing.isUnreachableFrom(end):\n continue\n\n minDistFromStartToIncoming = start.minDistTo[incoming]\n minDistFromOutgointToEnd = end.minDistTo[outgoing]\n\n potentiallyBetterResult = minDistFromStartToIncoming + 1 + minDistFromOutgointToEnd\n\n bestResult_soFar = min(bestResult_soFar, potentiallyBetterResult)\n\n bestResult = bestResult_soFar\n\n return bestResult\n return answer(map)\n\n\nprint(solution([[0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0]]))\nprint(solution([[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]))\nprint(solution([[0, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1], [0, 1, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0]]))\n\nprint(solution([[0, 0, 0, 0, 0, 0, 0], \n [1, 1, 1, 1, 1, 1, 
0], \n [1, 1, 0, 0, 0, 1, 0], \n [1, 1, 0, 1, 0, 1, 0], \n [1, 0, 0, 1, 0, 0, 0],\n [1, 0, 1, 1, 1, 1, 1], \n [1, 0, 0, 0, 0, 0, 0], \n [1, 1, 1, 1, 1, 1, 0]]))","sub_path":"rat.py","file_name":"rat.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"585370739","text":"from flask import Flask, request, render_template, jsonify\nfrom json import dumps\nimport requests\nimport json\nimport urllib.request as urllib\nimport os\nfrom function import *\nimport tensorflow as tf\nimport cv2\nimport time, datetime\n\nglobal graph\ngraph = tf.get_default_graph()\nmodel_path='model_150_50.h5'\nmodel = load_model(model_path)\n\n# Turn on and off transcribe\nglobal transcribe\ntranscribe = True\n\n# Change for backdoor\nglobal backdoor, backdoor_counter, backdoor_string1, backdoor_string2\nbackdoor = True\nbackdoor_counter = 0\nbackdoor_string1 = 'Hi, how are you feeling today?'\nbackdoor_string2 = 'Is there anything that you want to talk to me about?'\n# backdoor_string2 = 'Did something good happen today?'\n# model.compile(loss='binary_crossentropy',\n# optimizer='rmsprop',\n# metrics=['accuracy'])\nUPLOAD_FOLDER = 'C:/Users/A/PycharmProjects/aws-hackdays-insideout/upload_folder'\n\nALLOWED_EXTENSIONS = set(['3gp'])\n\napp = Flask(__name__)\n\n\n\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\n@app.route(\"/\", methods = ['POST', 'GET'])\ndef testServerRest():\n paranoidStatement = \"server up and running - lol Alvin is paranoid\"\n print(paranoidStatement)\n return(paranoidStatement)\n\n\n@app.route(\"/sendAudio/\", methods = ['POST', 'GET'])\ndef getAudio():\n print(\"starting send audio\")\n current_timestamp = time.mktime(datetime.datetime.today().timetuple())\n awsFileName = str(current_timestamp).split('.')[0]\n if request.method == 'POST':\n int_message = 1\n print(\"Data uploading\")\n print(request.headers)\n for v in request.values:\n print(v)\n # logdata = request.stream.readline()\n # while(logdata):\n # print \"uploading\"\n # print logdata\n # logdata = request.stream.readline()\n\n # print(request.files)\n file = request.files['uploadedfile']\n # print(file)\n outputFilePath = 'C:/Users/A/PycharmProjects/aws-hackdays-insideout/upload_folder/' + str(current_timestamp).split('.')[0] + '.wav'\n 
pngPath = 'C:/Users/A/PycharmProjects/aws-hackdays-insideout/upload_folder/' + str(current_timestamp).split('.')[0] + '.png'\n file.save('C:/Users/A/PycharmProjects/aws-hackdays-insideout/upload_folder/' + str(current_timestamp).split('.')[0] + '.3gp')\n audioPath = outputFilePath\n convertAudio3GP_wav(input_path='C:/Users/A/PycharmProjects/aws-hackdays-insideout/upload_folder/' + str(current_timestamp).split('.')[0] + '.3gp',\n output_path=outputFilePath)\n print(\"Uploading done\")\n\n plotstft(outputFilePath, plotpath=pngPath)\n # with graph.as_default():\n with graph.as_default():\n img = cv2.imread(pngPath)\n img = cv2.resize(img, (150, 150))\n img = np.reshape(img, [1, 150, 150, 3])\n classes = model.predict_classes(img)\n if classes[0][0] == 1:\n # Normal = prediction_value==0, classes[0][0]==1\n prediction_value = 0\n print('Prediction based on spectogram = NORMAL')\n else:\n # Depressed = prediction_value==1, classes[0][0]==0\n prediction_value = 1\n print('Prediction based on spectogram = DEPRESSED')\n\n try:\n #Amazon Transcribe\n # transcribeText = audioToText(audioPath, awsFileName)\n transcribeText = 'Ommiting transcribe text'\n print(transcribeText)\n\n #Amazon Comprehend\n mixed, negative, neutral, positive = sentimentAnalysis(transcribeText)\n print('Mixed Value: ' + str(mixed))\n print('Negative Value: ' + str(negative))\n print('Neutral Value: ' + str(neutral))\n print('Positive Value: ' + str(positive))\n\n #Sensibility test / module\n sensibility_test_score = sensibility_test(transcribeText,backdoor=True)\n\n\n #Personal NLP + analytics\n personal_NLP_analytics_score = additional_nlp_score(transcribeText)\n\n\n\n\n\n\n except:\n print('transcribe failed')\n # return Response(str(int_message), mimetype='text/plain')\n\n #Generate Final Result\n depression_value, confidence_value = generateFinalResult(sensibility_test_score, mixed, neutral, positive, negative,\n personal_NLP_analytics_score,prediction_value)\n\n\n insert_raw_results('test', 
sensibility_test_score, mixed, neutral, positive, negative,\n personal_NLP_analytics_score,\n prediction_value, depression_value, transcribeText, confidence_value)\n\n if backdoor:\n global backdoor_counter\n if backdoor_counter == 0:\n backdoor_counter += 1\n return backdoor_string1\n elif backdoor_counter == 1:\n return backdoor_string2\n else:\n\n return (prediction_value)\n\n\n\napp.run(host = '192.168.0.135')","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"275926634","text":"from django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name=\"index\"),\n path('newUser/', views.newUser, name=\"newUser\"),\n path('gamePage/delete//', views.delete, name=\"delete\"),\n path('gamePage/edit//', views.edit, name=\"edit\"),\n path('gameEntry/', views.gameEntry, name=\"gameEntry\"),\n]\n\nurlpatterns += [\n path('accounts/', include('django.contrib.auth.urls')),\n]\n","sub_path":"jodiProject/jodiApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"557404119","text":"import os\nfrom pelican import logger\nfrom pelican.contents import is_valid_content, Page\nfrom pelican.generators import Generator\nfrom PIL import Image\nfrom . import thumbnails\n\n\nclass ImageContent(object):\n def __init__(self, album, filename, settings):\n self.album = album\n self.filename = filename\n self.settings = settings\n\n @property\n def url(self):\n return self.album.url + '/' + self.filename\n\n def thumbnail(self, spec=''):\n return thumbnails.request_thumbnail(os.path.join(self.album.name, self.filename), spec, self.settings)\n\n\nclass AlbumContent(object):\n def __init__(self, name, settings):\n self.name = name\n self.settings = settings\n self._images = {}\n self.albums = []\n self.pages = []\n\n @property\n def url(self):\n return (self.settings['ALBUM_PATH'] + '/' + self.name) if self.name else self.settings['ALBUM_PATH']\n\n def add_image(self, filename):\n self._images[filename] = ImageContent(self, filename, self.settings)\n\n @property\n def images(self):\n return [self._images[image] for image in sorted(self._images.keys())]\n\n\nclass Album(Page):\n default_template = 'album'\n album = None\n\n\nclass AlbumGenerator(Generator):\n albums = None\n pages = None\n\n def find_albums(self, path=(), parent=None):\n album_path = os.path.join(self.path, self.settings['ALBUM_PATH'], *path)\n\n location = '/'.join(path)\n album = AlbumContent(location, self.settings)\n if parent:\n parent.albums.append(album)\n\n for filename in os.listdir(album_path):\n f = os.path.join(self.path, self.settings['ALBUM_PATH'], *path + (filename,))\n file_path = os.path.join(album_path, filename)\n\n if os.path.isdir(file_path):\n self.find_albums(path + (filename,), album)\n else:\n try:\n Image.open(file_path)\n album.add_image(filename)\n except IOError:\n try:\n page = self.readers.read_file(\n base_path=self.path,\n path=f,\n content_class=Album,\n context=self.context,\n )\n self.add_source_path(page)\n except Exception as 
e:\n logger.error('Could not process %s\\n%s', f, e,\n exc_info=self.settings.get('DEBUG', False))\n self._add_failed_source_path(f)\n continue\n\n if not is_valid_content(page, f):\n self._add_failed_source_path(f)\n continue\n\n page.album = album\n self.pages.append(page)\n album.pages.append(page)\n\n def generate_context(self):\n self.pages = []\n self.find_albums()\n self.context['albums'] = [p for p in sorted(self.pages, key=lambda p: p.metadata['title'])]\n\n def generate_output(self, writer):\n for page in self.pages:\n writer.write_file(\n page.save_as, self.get_template(page.template),\n self.context, page=page, album=page.album,\n relative_urls=self.settings['RELATIVE_URLS'],\n override_output=hasattr(page, 'override_save_as'))\n\n\ndef get_generators(pelican):\n return AlbumGenerator\n","sub_path":"pelican_albums/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"146079336","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models\nimport base64\nimport json\nimport os\nfrom odoo.exceptions import UserError, ValidationError\n\nclass PadPublishWizard(models.TransientModel):\n _name = 'padtool.pad.publish.wizard'\n\n @api.model\n def _default_directory(self):\n return self.env['padtool.directory'].search([('active', '=', True)])\n\n pad_id = fields.Many2one('padtool.pad', string=\"Pad to Publish\", required=True, ondelete='cascade')\n directory_ids = fields.Many2many('padtool.directory', string='Publish Directory', required=True,default=_default_directory)\n\n @api.model\n def default_get(self, fields):\n result = super(PadPublishWizard, self).default_get(fields)\n result.update({\n 'pad_id': self.env.context.get('active_id', False),\n })\n return result\n \n @api.multi\n def publish(self):\n if self.pad_id.content is False:\n raise UserError(\"Pad(%s) doesn't have content\" % self.pad_id.name)\n \n pad = self.pad_id;\n content = json.loads(pad.content) \n\n strParameter = 'PanelCenter = %f,%f\\n' % (content['dPanelCenterX'],content['dPanelCenterY'])\n strParameter += 'GolbalToleranceRegular = %s,%s\\n' % (pad['GolbalToleranceRegularX'],pad['GolbalToleranceRegularY'])\n strParameter += 'GolbalToleranceUnregular = %s,%s\\n' % (pad['GolbalToleranceUnregularX'],pad['GolbalToleranceUnregularY'])\n strParameter += 'GolbalToleranceRegularDynamic = %s,%s\\n' % (pad['GolbalToleranceRegularDynamicX'],pad['GolbalToleranceRegularDynamicY'])\n strParameter += 'GolbalIndentRegular = %s,%s\\n' % (pad['GolbalIndentRegularX'],pad['GolbalIndentRegularY'])\n strParameter += 'GolbalIndentUnregular = %s,%s\\n' % (pad['GolbalIndentUnregularX'],pad['GolbalIndentUnregularY'])\n strParameter += 'GlassToGlassMode = %s\\n' % int(pad['GlassToGlassMode'])\n strParameter += 'NeglectInspIfNoMarkResult = %s\\n' % pad['NeglectInspIfNoMarkResult']\n \n strParameter += 'BMMode = %d\\n' % int(pad['BMMode'])\n strParameter += 
'BMPeriodX0 = %s\\n' % pad['BMPeriodX0']\n strParameter += 'BMPeriodY0 = %s\\n' % pad['BMPeriodY0']\n strParameter += 'BMPeriodX1 = %s\\n' % pad['BMPeriodX1']\n strParameter += 'BMPeriodY1 = %s\\n' % pad['BMPeriodY1']\n\n region_id = 0\n strFrame = ''\n strRegion = ''\n\n strPad_Filterpos = ''\n Pad_Filterpos_Number = 0\n \n strPad_Filter = ''\n Pad_Filter_Number = 0\n \n strPad_Inspect = ''\n Pad_Inspect_Number = 0\n \n innerFrame = None\n outrtFrame = None \n \n strMainMark = ''\n strSubMark = ''\n mainMarkWidth = 0\n subMarkWidth = 0\n mainMarkHeight = 0\n subMarkHeight = 0\n mainMarkStartx = 0\n subMarkStartx = 0 \n mainMarkNumber = 0\n subMarkNumber = 0\n\n for obj in content['objs']:\n if obj['padType'] == 'frame':\n if innerFrame == None:\n innerFrame = obj\n else:\n if innerFrame['points'][0]['x'] > obj['points'][0]['x'] and innerFrame['points'][1]['x'] < obj['points'][1]['x']:\n outrtFrame = obj\n else:\n outrtFrame = innerFrame\n innerFrame = obj\n \n elif obj['padType'] == 'uninspectZone' and len(obj['points'])==2:\n strPad_Filterpos += 'Pad.Filterpos%d.BottomLeft = %f,%f\\n' % (Pad_Filterpos_Number,obj['points'][0]['ux']-content['dPanelCenterX'],obj['points'][0]['uy']-content['dPanelCenterY'])\n strPad_Filterpos += 'Pad.Filterpos%d.BottomRight = %f,%f\\n' % (Pad_Filterpos_Number,obj['points'][1]['ux']-content['dPanelCenterX'],obj['points'][0]['uy']-content['dPanelCenterY'])\n strPad_Filterpos += 'Pad.Filterpos%d.TopLeft = %f,%f\\n' % (Pad_Filterpos_Number,obj['points'][0]['ux']-content['dPanelCenterX'],obj['points'][1]['uy']-content['dPanelCenterY'])\n strPad_Filterpos += 'Pad.Filterpos%d.TopRight = %f,%f\\n' % (Pad_Filterpos_Number,obj['points'][1]['ux']-content['dPanelCenterX'],obj['points'][1]['uy']-content['dPanelCenterY'])\n \n Pad_Filterpos_Number += 1\n elif obj['padType'] == 'uninspectZone' and len(obj['points'])>2:\n strPad_Filter += 'Pad.Filter'+str(Pad_Filter_Number)+' = '\n for p in obj['points']:\n strPad_Filter += 
str(p['ux']-content['dPanelCenterX'])+','+str(p['uy']-content['dPanelCenterY'])+';'\n strPad_Filter += '\\n' \n Pad_Filter_Number += 1\n elif obj['padType'] == 'inspectZone' and len(obj['points'])>1:\n if len(obj['points']) == 2:\n obj['points'].append(obj['points'][1])\n obj['points'].append({'ux':obj['points'][2]['ux'],'uy':obj['points'][0]['uy']})\n obj['points'][1]={'ux':obj['points'][0]['ux'],'uy':obj['points'][2]['uy']}\n \n strPad_Inspect += 'Pad.Inspect'+str(Pad_Inspect_Number)+' = '\n for p in obj['points']:\n strPad_Inspect += str(p['ux']-content['dPanelCenterX'])+','+str(p['uy']-content['dPanelCenterY'])+';'\n strPad_Inspect += '\\n' \n strPad_Inspect += 'Pad.Inspect%d.Period = %f,%f\\n' % (Pad_Inspect_Number,obj['periodX'],obj['periodY'])\n strPad_Inspect += 'Pad.Inspect%d.D1G1 = %d\\n' % (Pad_Inspect_Number,obj['D1G1'])\n \n Pad_Inspect_Number += 1\n elif obj['padType'] == 'mainMark':\n height = 0 \n iInterSectionWidth = 0\n \n for block in obj['blocks']:\n if (not 'iInterSectionHeight' in block):\n continue;\n if iInterSectionWidth == 0:\n iInterSectionWidth = block['iInterSectionWidth']\n \n height += block['iInterSectionHeight']\n \n mainMarkWidth += iInterSectionWidth \n strMainMark += 'MainMark'+str(mainMarkNumber)+'.size = '+ str(iInterSectionWidth) +','+str(height)+'\\n'\n strMainMark += 'MainMark'+str(mainMarkNumber)+'.startx = '+ str(mainMarkStartx) + '\\n'\n strMainMark += 'MainMark'+str(mainMarkNumber)+'.pos = '+str((obj['points'][0]['ux'] + obj['points'][1]['ux'])/2-content['dPanelCenterX'])+','+str((obj['points'][0]['uy']+obj['points'][1]['uy'])/2-content['dPanelCenterY'])+'\\n'\n strMainMark += 'MainMark'+str(mainMarkNumber)+'.ipindex = '+str(block['iIPIndex'])+'\\n'\n strMainMark += 'MainMark'+str(mainMarkNumber)+'.scanindex = '+str(block['iScanIndex'])+'\\n'\n \n mainMarkStartx += iInterSectionWidth\n mainMarkHeight = height if height > mainMarkHeight else mainMarkHeight \n mainMarkNumber += 1\n\n elif obj['padType'] == 'subMark':\n 
height = 0 \n iInterSectionWidth = 0\n \n for block in obj['blocks']:\n if (not 'iInterSectionHeight' in block):\n continue;\n if iInterSectionWidth == 0:\n iInterSectionWidth = block['iInterSectionWidth']\n \n height += block['iInterSectionHeight']\n \n subMarkWidth += iInterSectionWidth\n strSubMark += 'SubMark'+str(subMarkNumber)+'.size = '+ str(iInterSectionWidth) +','+str(height)+'\\n'\n strSubMark += 'SubMark'+str(subMarkNumber)+'.startx = '+ str(subMarkStartx) + '\\n'\n strSubMark += 'SubMark'+str(subMarkNumber)+'.pos = '+str((obj['points'][0]['ux'] + obj['points'][1]['ux'])/2-content['dPanelCenterX'])+','+str((obj['points'][0]['uy']+obj['points'][1]['uy'])/2-content['dPanelCenterY'])+'\\n'\n strSubMark += 'SubMark'+str(subMarkNumber)+'.ipindex = '+str(block['iIPIndex'])+'\\n'\n strSubMark += 'SubMark'+str(subMarkNumber)+'.scanindex = '+str(block['iScanIndex'])+'\\n'\n strSubMark += 'SubMark'+str(subMarkNumber)+'.horizontal = '+str(obj['iMarkDirectionType'])+'\\n'\n \n subMarkStartx += iInterSectionWidth\n subMarkHeight = height if height > subMarkHeight else subMarkHeight \n subMarkNumber += 1\n\n elif obj['padType'] == 'region':\n regionLeft = obj['points'][0]['ux'] - content['dPanelCenterX']\n regionBottom = obj['points'][0]['uy'] - content['dPanelCenterY']\n regionRight = obj['points'][1]['ux'] - content['dPanelCenterX']\n regionTop = obj['points'][1]['uy'] - content['dPanelCenterY']\n \n strRegion += 'Region'+str(region_id)+'.region = '+str(regionLeft)+','+str(regionBottom)+';'+str(regionRight)+','+str(regionBottom)+';'+str(regionRight)+','+str(regionTop)+';'+str(regionLeft)+','+str(regionTop)+'\\n'\n strRegion += 'Region'+str(region_id)+'.iFrameNo = '+str(obj['iFrameNo'])+'\\n'\n region_id = region_id + 1\n \n if innerFrame is not None and outrtFrame is not None:\n frameLeft0 = outrtFrame['points'][0]['ux']-content['dPanelCenterX']\n frameRight0 = innerFrame['points'][0]['ux']-content['dPanelCenterX']\n frameTop0 = 
innerFrame['points'][1]['uy']-content['dPanelCenterY'] + content['region_overlap']\n frameBottom0 = innerFrame['points'][0]['uy']-content['dPanelCenterY'] - content['region_overlap']\n strFrame += 'PadFrameNum = 4\\n'\n strFrame += 'PadFrame0.iDirection = 0\\n'\n strFrame += 'PadFrame0.postion_topleft = '+str(frameLeft0)+','+str(frameBottom0)+'\\n'\n strFrame += 'PadFrame0.postion_topright = '+str(frameRight0)+','+str(frameBottom0)+'\\n'\n strFrame += 'PadFrame0.postion_bottomleft = '+str(frameLeft0)+','+str(frameTop0)+'\\n'\n strFrame += 'PadFrame0.postion_bottomright = '+str(frameRight0)+','+str(frameTop0)+'\\n'\n \n frameLeft1 = outrtFrame['points'][0]['ux']-content['dPanelCenterX']\n frameRight1 = outrtFrame['points'][1]['ux']-content['dPanelCenterX']\n frameTop1 = innerFrame['points'][0]['uy']-content['dPanelCenterY']\n frameBottom1 = outrtFrame['points'][0]['uy']-content['dPanelCenterY']\n strFrame += 'PadFrame1.iDirection = 1\\n'\n strFrame += 'PadFrame1.postion_topleft = '+str(frameLeft1)+','+str(frameBottom1)+'\\n'\n strFrame += 'PadFrame1.postion_topright = '+str(frameRight1)+','+str(frameBottom1)+'\\n'\n strFrame += 'PadFrame1.postion_bottomleft = '+str(frameLeft1)+','+str(frameTop1)+'\\n'\n strFrame += 'PadFrame1.postion_bottomright = '+str(frameRight1)+','+str(frameTop1)+'\\n'\n \n frameLeft2 = innerFrame['points'][1]['ux']-content['dPanelCenterX']\n frameRight2 = outrtFrame['points'][1]['ux']-content['dPanelCenterX']\n frameTop2 = innerFrame['points'][1]['uy']-content['dPanelCenterY'] + content['region_overlap']\n frameBottom2 = innerFrame['points'][0]['uy']-content['dPanelCenterY'] - content['region_overlap']\n strFrame += 'PadFrame2.iDirection = 2\\n'\n strFrame += 'PadFrame2.postion_topleft = '+str(frameLeft2)+','+str(frameBottom2)+'\\n'\n strFrame += 'PadFrame2.postion_topright = '+str(frameRight2)+','+str(frameBottom2)+'\\n'\n strFrame += 'PadFrame2.postion_bottomleft = '+str(frameLeft2)+','+str(frameTop2)+'\\n'\n strFrame += 
'PadFrame2.postion_bottomright = '+str(frameRight2)+','+str(frameTop2)+'\\n'\n \n frameLeft3 = outrtFrame['points'][0]['ux']-content['dPanelCenterX']\n frameRight3 = outrtFrame['points'][1]['ux']-content['dPanelCenterX']\n frameTop3 = outrtFrame['points'][1]['uy']-content['dPanelCenterY']\n frameBottom3 = innerFrame['points'][1]['uy']-content['dPanelCenterY']\n strFrame += 'PadFrame3.iDirection = 3\\n'\n strFrame += 'PadFrame3.postion_topleft = '+str(frameLeft3)+','+str(frameBottom3)+'\\n'\n strFrame += 'PadFrame3.postion_topright = '+str(frameRight3)+','+str(frameBottom3)+'\\n'\n strFrame += 'PadFrame3.postion_bottomleft = '+str(frameLeft3)+','+str(frameTop3)+'\\n'\n strFrame += 'PadFrame3.postion_bottomright = '+str(frameRight3)+','+str(frameTop3)+'\\n'\n \n if mainMarkNumber > 0:\n strMainMark = 'MainMarkNumber = '+str(mainMarkNumber)+'\\n' + strMainMark\n for dir in self.directory_ids:\n if not os.path.exists(dir.name +'/'+ pad.name):\n os.mkdir(dir.name +'/'+ pad.name)\n with open(dir.name +'/'+ pad.name +'/MainMark.bmp', 'wb') as f:\n f.write(base64.b64decode(pad.mainMark))\n #mark = Image.new('L', (mainMarkWidth,mainMarkHeight))\n #mark.save(markFile) \n \n if subMarkNumber > 0:\n strSubMark = 'SubMarkNumber = '+str(subMarkNumber)+'\\n' + strSubMark\n for dir in self.directory_ids:\n if not os.path.exists(dir.name +'/'+ pad.name):\n os.mkdir(dir.name +'/'+ pad.name)\n with open(dir.name +'/'+ pad.name +'/SubMark.bmp', 'wb') as f:\n f.write(base64.b64decode(pad.subMark))\n \n if Pad_Filterpos_Number > 0:\n strPad_Filterpos = 'Pad_Filterpos_Number = '+str(Pad_Filterpos_Number) +'\\n' + strPad_Filterpos \n if Pad_Filter_Number > 0:\n strPad_Filter = 'Pad_Filter_Number = '+str(Pad_Filter_Number) +'\\n'+ strPad_Filter \n if Pad_Inspect_Number > 0:\n strPad_Inspect = 'Pad_Inspect_Number = '+str(Pad_Inspect_Number) +'\\n'+ strPad_Inspect \n if region_id > 0:\n strRegion = 'TotalRegionNumber = '+str(region_id) +'\\n'+ strRegion \n \n for dir in self.directory_ids: 
\n with open(dir.name +'/'+ pad.name+'.pad', 'w') as f:\n f.write(strParameter)\n f.write( strFrame )\n f.write( strRegion )\n f.write( strMainMark )\n f.write( strSubMark )\n f.write( strPad_Filterpos )\n f.write( strPad_Filter )\n f.write( strPad_Inspect )\n","sub_path":"odoo/padtool/wizard/pad_publish_wizard.py","file_name":"pad_publish_wizard.py","file_ext":"py","file_size_in_byte":14645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"579297989","text":"import sys\nfrom pathlib import Path\nimport json\n\nLIB_FOLDER = Path(__file__).resolve().parent.parent / 'build/Products/python'\nEXPECTED_JSON = Path(__file__).resolve().parent.parent / 'tests/expected.json'\n\nsys.path.append(str(LIB_FOLDER))\n\nimport mylib\n\ndef test_encoding():\n test_string = \"模型\"\n m = mylib.Model(test_string)\n name_string = m.getName()\n assert test_string == name_string\n\ndef test_json():\n m = mylib.Model(\"John\")\n expected = json.loads(EXPECTED_JSON.read_text())\n d = m.toJSON()\n assert d == expected\n","sub_path":"python/test_python.py","file_name":"test_python.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"201736979","text":"# Computes the first 100,000 digits of the mathematical constant e.\n# Christian Stroh\n\nfrom decimal import *\nfrom fractions import Fraction\nfrom math import factorial\n\nseries = []\nfor count in range(10000):\n series.append(Fraction(1, factorial(count)))\nprint(\"Series constructed ...\")\n\npartial_sum = series[0]\nindex = 1\nwhile index < len(series):\n partial_sum = partial_sum + series[index]\n index += 1\nprint(\"Partial sums added ...\")\n\ngetcontext().prec = 100000\nprint(Decimal(partial_sum.numerator) / Decimal(partial_sum.denominator))\n\n","sub_path":"digits-of-e.py","file_name":"digits-of-e.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"541881558","text":"#!/usr/bin/env python3\n\"\"\"\nError Analysis Module\n\"\"\"\nimport numpy as np\n\n\ndef sensitivity(confusion):\n \"\"\"\n Calculates the sensitivity for each class in a confusion matrix\n\n confusion is a confusion numpy.ndarray of shape (classes, classes) where\n row indices represent the correct labels and column indices represent the\n predicted label\n\n classes is the number of classes\n\n Returns: a numpy.ndarray of shape (classes,) containing the sensitivity of\n each class\n \"\"\"\n tp = np.diagonal(confusion)\n tp_fn = np.sum(confusion, axis=1)\n return tp / tp_fn\n","sub_path":"supervised_learning/0x04-error_analysis/1-sensitivity.py","file_name":"1-sensitivity.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"55880498","text":"from nose.tools import eq_, ok_\n\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.test import TestCase\n\nfrom us_ignite.apps import models\nfrom us_ignite.apps.tests import fixtures\nfrom us_ignite.profiles.tests.fixtures import get_user\n\n\nclass TestApplicationModel(TestCase):\n\n def test_application_creation_is_successful(self):\n user = get_user('us-ignite')\n data = {\n 'name': 'Gigabit app',\n 'owner': user,\n }\n instance = models.Application.objects.create(**data)\n ok_(instance.id)\n eq_(instance.name, 'Gigabit app')\n ok_(instance.slug)\n eq_(instance.owner, user)\n eq_(instance.status, models.Application.DRAFT)\n eq_(instance.stage, models.Application.IDEA)\n eq_(instance.website, '')\n eq_(instance.image, '')\n eq_(instance.summary, '')\n eq_(instance.impact_statement, '')\n eq_(instance.assistance, '')\n eq_(instance.team_description, '')\n eq_(instance.acknowledgments, '')\n eq_(instance.notes, '')\n ok_(instance.created)\n ok_(instance.modified)\n eq_(instance.is_featured, False)\n eq_(list(instance.features.all()), [])\n eq_(instance.features_other, '')\n eq_(instance.domain, None)\n eq_(list(instance.members.all()), [])\n eq_(list(instance.tags.all()), [])\n eq_(instance.team_name, '')\n eq_(instance.awards, '')\n\n def test_application_absolute_url(self):\n user = get_user('app-owner')\n application = fixtures.get_application(owner=user)\n eq_(application.get_absolute_url(), u'/apps/%s/' % application.slug)\n\n def test_application_edit_url(self):\n user = get_user('app-owner')\n application = fixtures.get_application(owner=user)\n eq_(application.get_edit_url(), u'/apps/%s/edit/' % application.slug)\n\n def test_membership_url(self):\n user = get_user('app-owner')\n application = fixtures.get_application(owner=user)\n eq_(application.get_membership_url(),\n u'/apps/%s/membership/' % application.slug)\n\n def test_hub_membership_url(self):\n user = get_user('app-owner')\n application = 
fixtures.get_application(owner=user)\n eq_(application.get_hub_membership_url(),\n u'/apps/%s/hubs-membership/' % application.slug)\n\n def test_domain_url(self):\n user = get_user('app-owner')\n domain = fixtures.get_domain(slug='healthcare')\n application = fixtures.get_application(owner=user, domain=domain)\n eq_(application.get_domain_url(),\n u'/apps/domain/healthcare/')\n\n def test_application_export_url(self):\n user = get_user('app-owner')\n application = fixtures.get_application(owner=user)\n eq_(application.get_export_url(),\n u'/apps/%s/export/' % application.slug)\n\n def test_application_is_public(self):\n user = get_user('app-owner')\n application = fixtures.get_application(\n owner=user, status=models.Application.PUBLISHED)\n ok_(application.is_public())\n\n def test_application_is_draft(self):\n user = get_user('app-owner')\n application = fixtures.get_application(\n owner=user, status=models.Application.DRAFT)\n ok_(application.is_draft())\n\n def test_application_ownership(self):\n user = get_user('app-owner')\n application = fixtures.get_application(owner=user)\n ok_(application.is_owned_by(user))\n\n def test_application_with_no_ownership(self):\n user = get_user('app-owner')\n application = fixtures.get_application(owner=None)\n eq_(application.is_owned_by(AnonymousUser()), False)\n eq_(application.is_owned_by(user), False)\n\n def test_application_owner_membership(self):\n user = get_user('app-owner')\n application = fixtures.get_application(owner=user)\n ok_(application.has_member(user))\n\n def test_application_member_membership(self):\n user = get_user('app-owner')\n member = get_user('app-member')\n application = fixtures.get_application(owner=user)\n models.ApplicationMembership.objects.create(\n application=application, user=member)\n ok_(application.has_member(member))\n\n def test_no_owner_app_membership(self):\n user = get_user('app-owner')\n application = fixtures.get_application(owner=None)\n eq_(application.has_member(user), 
False)\n eq_(application.has_member(AnonymousUser()), False)\n\n def test_published_app_is_visible_by_anon(self):\n user = get_user('app-owner')\n application = fixtures.get_application(\n owner=user, status=models.Application.PUBLISHED)\n ok_(application.is_visible_by(AnonymousUser()))\n\n def test_draft_app_is_visible_by_owner(self):\n user = get_user('app-owner')\n application = fixtures.get_application(\n owner=user, status=models.Application.DRAFT)\n ok_(application.is_visible_by(user))\n\n def test_draft_app_is_visible_by_member(self):\n user = get_user('app-owner')\n member = get_user('app-member')\n application = fixtures.get_application(\n owner=user, status=models.Application.DRAFT)\n models.ApplicationMembership.objects.create(\n application=application, user=member)\n ok_(application.is_visible_by(member))\n\n def test_app_is_editable_by_owner(self):\n user = get_user('app-owner')\n application = fixtures.get_application(\n owner=user, status=models.Application.DRAFT)\n ok_(application.is_editable_by(user))\n\n def test_app_is_editable_by_other_user(self):\n user = get_user('app-owner')\n member = get_user('app-member')\n application = fixtures.get_application(\n owner=user, status=models.Application.DRAFT)\n eq_(application.is_editable_by(member), False)\n\n def test_app_is_not_editable_by_anon(self):\n user = get_user('app-owner')\n application = fixtures.get_application(\n owner=user, status=models.Application.DRAFT)\n eq_(application.is_editable_by(AnonymousUser()), False)\n\n def test_app_is_editable_by_member_with_edit_permissions(self):\n user = get_user('app-owner')\n member = get_user('member')\n application = fixtures.get_application(\n owner=user, status=models.Application.DRAFT)\n models.ApplicationMembership.objects.create(\n application=application, user=member, can_edit=True)\n eq_(application.is_editable_by(member), True)\n\n def test_app_is_not_editable_by_member(self):\n user = get_user('app-owner')\n member = get_user('member')\n 
application = fixtures.get_application(\n owner=user, status=models.Application.DRAFT)\n models.ApplicationMembership.objects.create(\n application=application, user=member, can_edit=False)\n eq_(application.is_editable_by(member), False)\n\n def test_get_summary_returns_existing_summary(self):\n user = get_user('app-owner')\n application = fixtures.get_application(owner=user, summary='summary')\n eq_(application.get_summary(), 'summary')\n\n def test_get_signature_is_generated(self):\n user = get_user('app-owner')\n application = fixtures.get_application(owner=user, summary='summary')\n ok_(application.get_signature())\n\n\nclass TestApplicationMembership(TestCase):\n\n def test_application_membership_creation(self):\n user = get_user('app-owner')\n member = get_user('member')\n application = fixtures.get_application(owner=user)\n data = {\n 'user': member,\n 'application': application,\n }\n instance = models.ApplicationMembership.objects.create(**data)\n eq_(instance.user, member)\n eq_(instance.application, application)\n eq_(instance.can_edit, False)\n ok_(instance.created)\n\n\nclass TestApplicationURL(TestCase):\n\n def test_application_url_creation(self):\n user = get_user('app-owner')\n application = fixtures.get_application(owner=user)\n data = {\n 'application': application,\n 'url': 'http://us-ignite.org',\n }\n instance = models.ApplicationURL.objects.create(**data)\n ok_(instance.id)\n eq_(instance.application, application)\n eq_(instance.name, '')\n eq_(instance.url, 'http://us-ignite.org')\n\n\nclass TestApplicationVersionModel(TestCase):\n\n def test_application_creation_is_successful(self):\n user = get_user('us-ignite')\n application = fixtures.get_application(\n name='Gigabit app', owner=user)\n data = {\n 'application': application,\n 'name': application.name,\n }\n instance = models.ApplicationVersion.objects.create(**data)\n ok_(instance.id)\n ok_(instance.application, application)\n eq_(instance.name, 'Gigabit app')\n eq_(instance.stage, 
models.Application.IDEA)\n eq_(instance.website, '')\n eq_(instance.image, '')\n ok_(instance.slug)\n ok_(instance.created)\n ok_(instance.modified)\n eq_(instance.summary, '')\n eq_(instance.impact_statement, '')\n eq_(instance.assistance, '')\n eq_(instance.team_description, '')\n eq_(instance.acknowledgments, '')\n\n\nclass TestFeatureModel(TestCase):\n\n def test_feature_creation_is_successful(self):\n instance = models.Feature.objects.create(**{\n 'name': 'OpenFlow',\n })\n ok_(instance.id)\n eq_(instance.name, 'OpenFlow')\n ok_(instance.slug)\n\n\nclass TestPageModel(TestCase):\n\n def test_page_creation_is_successful(self):\n data = {\n 'name': 'Featured apps',\n }\n instance = models.Page.objects.create(**data)\n ok_(instance.id)\n eq_(instance.name, 'Featured apps')\n ok_(instance.slug)\n eq_(instance.description, '')\n ok_(instance.created)\n ok_(instance.modified)\n eq_(instance.status, models.Page.DRAFT)\n\n def test_featured_page_is_swapped(self):\n fixtures.get_page(\n name='Awesome apps', status=models.Page.FEATURED)\n new_page = fixtures.get_page(\n name='Gigabit apps', status=models.Page.FEATURED)\n eq_(models.Page.objects.get(status=models.Page.FEATURED),\n new_page)\n\n def test_is_featured_property(self):\n page = fixtures.get_page(\n name='Awesome apps', status=models.Page.FEATURED)\n eq_(page.is_featured(), True)\n\n def test_get_absolute_url_featured(self):\n page = fixtures.get_page(\n name='Awesome apps', status=models.Page.FEATURED)\n eq_(page.get_absolute_url(), '/apps/featured/')\n\n def test_get_absolute_url_published(self):\n page = fixtures.get_page(\n name='older', status=models.Page.PUBLISHED)\n eq_(page.get_absolute_url(), '/apps/featured/archive/older/')\n\n\nclass TestPageApplication(TestCase):\n\n def test_page_item_is_created_successfully(self):\n user = get_user('app-maker')\n application = fixtures.get_application(owner=user)\n page = fixtures.get_page(name='Awesome apps')\n data = {\n 'application': application,\n 'page': 
page,\n }\n instance = models.PageApplication.objects.create(**data)\n ok_(instance.id)\n eq_(instance.application, application)\n eq_(instance.page, page)\n eq_(instance.order, 0)\n","sub_path":"us_ignite/apps/tests/models_tests.py","file_name":"models_tests.py","file_ext":"py","file_size_in_byte":11584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"217634463","text":"from django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.models import Group\n\n\nfrom .models import Cliente\n\ndef cliente_profile(sender, instance, created, **kwargs):\n\tif created:\n\t\tgroup = Group.objects.get(name='cliente')\n\t\tinstance.groups.add(group)\n\t\tCliente.objects.create(\n\t\t\tuser=instance,\n\t\t\tname=instance.username,\n\t\t\t)\n\t\tprint('perfil criado com sucesso!')\n\npost_save.connect(cliente_profile, sender=User)","sub_path":"signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"567692767","text":"import pasqualiFinal\nfrom colors import cores\n\npasqualiFinal.ini('challenge hackerrank')\n\nN = int(input(f'Choose one number\\n'))\n\n\nif N % 2 == 1:\n print(f'{cores[\"red\"]}Weird{cores[\"close\"]}')\nelse:\n if N in range(2, 6):\n print(f'Not Weird')\n elif N in range(6, 21):\n print(f'Weird')\n else:\n print(f'Not Weird')\n\n\npasqualiFinal.close()\n","sub_path":"My_modules/weirdnumber.py","file_name":"weirdnumber.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"350056760","text":"import json\nimport logging\nimport uuid\nfrom datetime import datetime, time as dt_time, timedelta\nfrom functools import partial\nfrom typing import Callable, Dict, List, Optional, Sequence, Tuple\n\nimport flask\nfrom datacube.model import Dataset, Range\nfrom datacube.utils import DocReader, parse_time\nfrom dateutil.tz import tz\nfrom eodatasets3 import serialise, stac as eo3stac\nfrom eodatasets3.model import AccessoryDoc, DatasetDoc, MeasurementDoc, ProductDoc\nfrom eodatasets3.properties import Eo3Dict\nfrom eodatasets3.utils import is_doc_eo3\nfrom flask import abort, request\nfrom shapely.geometry import shape\nfrom shapely.geometry.base import BaseGeometry\nfrom werkzeug.datastructures import TypeConversionDict\nfrom werkzeug.exceptions import BadRequest, HTTPException\n\nfrom cubedash.summary._stores import DatasetItem\n\nfrom . import _model, _utils\nfrom .summary import ItemSort\n\n_LOG = logging.getLogger(__name__)\nbp = flask.Blueprint(\"stac\", __name__, url_prefix=\"/stac\")\n\nPAGE_SIZE_LIMIT = _model.app.config.get(\"STAC_PAGE_SIZE_LIMIT\", 1000)\nDEFAULT_PAGE_SIZE = _model.app.config.get(\"STAC_DEFAULT_PAGE_SIZE\", 20)\n# Should we force all URLs to include the full hostname?\nFORCE_ABSOLUTE_LINKS = _model.app.config.get(\"STAC_ABSOLUTE_HREFS\", True)\n\n# Should searches return the full properties for every stac item by default?\n# These searches are much slower we're forced us to use ODC's own metadata table.\nDEFAULT_RETURN_FULL_ITEMS = _model.app.config.get(\n \"STAC_DEFAULT_FULL_ITEM_INFORMATION\", True\n)\n\nSTAC_VERSION = \"1.0.0\"\n\n\ndef url_for(*args, **kwargs):\n if FORCE_ABSOLUTE_LINKS:\n kwargs[\"_external\"] = True\n return flask.url_for(*args, **kwargs)\n\n\ndef stac_endpoint_information() -> Dict:\n config = _model.app.config\n o = dict(\n id=config.get(\"STAC_ENDPOINT_ID\", \"odc-explorer\"),\n title=config.get(\"STAC_ENDPOINT_TITLE\", \"Default ODC Explorer instance\"),\n )\n description = 
config.get(\n \"STAC_ENDPOINT_DESCRIPTION\",\n \"Configure stac endpoint information in your Explorer `settings.env.py` file\",\n )\n if description:\n o[\"description\"] = description\n return o\n\n\ndef utc(d: datetime):\n if d.tzinfo is None:\n return d.replace(tzinfo=tz.tzutc())\n return d.astimezone(tz.tzutc())\n\n\ndef _stac_response(doc: Dict, content_type=\"application/json\") -> flask.Response:\n \"\"\"Return a stac document as the flask response\"\"\"\n return _utils.as_json(\n _with_stac_properties(doc),\n content_type=content_type,\n )\n\n\ndef _with_stac_properties(doc):\n # Any response without a links array already is a coding problem.\n doc[\"links\"].append(dict(rel=\"root\", href=url_for(\".root\")))\n return {\n # Always put stac version at the beginning for readability.\n \"stac_version\": STAC_VERSION,\n # The given doc may override it too.\n **doc,\n }\n\n\ndef _geojson_stac_response(doc: Dict) -> flask.Response:\n \"\"\"Return a stac item\"\"\"\n return _stac_response(doc, content_type=\"application/geo+json\")\n\n\n@bp.route(\"\", strict_slashes=False)\ndef root():\n \"\"\"\n The root stac page links to each collection (product) catalog\n \"\"\"\n return _stac_response(\n dict(\n **stac_endpoint_information(),\n type=\"Catalog\",\n links=[\n dict(\n title=\"Collections\",\n description=\"All product collections\",\n rel=\"children\",\n type=\"application/json\",\n href=url_for(\".collections\"),\n ),\n dict(\n title=\"Arrivals\",\n description=\"Most recently added items\",\n rel=\"child\",\n type=\"application/json\",\n href=url_for(\".arrivals\"),\n ),\n dict(\n title=\"Item Search\",\n rel=\"search\",\n type=\"application/json\",\n href=url_for(\".stac_search\"),\n ),\n dict(rel=\"self\", href=request.url),\n # Individual Product Collections\n *(\n dict(\n title=product.name,\n description=product.definition.get(\"description\"),\n rel=\"child\",\n href=url_for(\".collection\", collection=product.name),\n )\n for product, product_summary 
in _model.get_products_with_summaries()\n ),\n ],\n conformsTo=[\n \"https://api.stacspec.org/v1.0.0-beta.2/core\",\n \"https://api.stacspec.org/v1.0.0-beta.2/item-search\",\n # Incomplete:\n # \"http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/oas30\",\n ],\n )\n )\n\n\n@bp.route(\"/search\", methods=[\"GET\", \"POST\"])\ndef stac_search():\n \"\"\"\n Search api for stac items.\n \"\"\"\n if request.method == \"GET\":\n args = request.args\n else:\n args = TypeConversionDict(request.get_json())\n\n products = args.get(\"collections\", default=[], type=_array_arg)\n if \"collection\" in args:\n products.append(args.get(\"collection\"))\n # Fallback for legacy 'product' argument\n elif \"product\" in args:\n products.append(args.get(\"product\"))\n\n return _geojson_stac_response(_handle_search_request(args, products))\n\n\ndef _array_arg(arg: str, expect_type=str, expect_size=None) -> List:\n \"\"\"\n Parse an argument that should be a simple list.\n \"\"\"\n if isinstance(arg, list):\n return arg\n\n # Make invalid arguments loud. The default ValueError behaviour is to quietly forget the param.\n try:\n arg = arg.strip()\n # Legacy json-like format. This is what sat-api seems to do too.\n if arg.startswith(\"[\"):\n value = json.loads(arg)\n else:\n # Otherwise OpenAPI non-exploded form style.\n # Eg. \"1, 2, 3\" or \"string1,string2\" or \"string1\"\n args = [a.strip() for a in arg.split(\",\")]\n value = [expect_type(a.strip()) for a in args if a]\n except ValueError:\n raise BadRequest(\n f\"Invalid argument syntax. Expected comma-separated list, got: {arg!r}\"\n )\n\n if not isinstance(value, list):\n raise BadRequest(f\"Invalid argument syntax. 
Expected json list, got: {value!r}\")\n\n if expect_size is not None and len(value) != expect_size:\n raise BadRequest(\n f\"Expected size {expect_size}, got {len(value)} elements in {arg!r}\"\n )\n\n return value\n\n\ndef _geojson_arg(arg: dict) -> BaseGeometry:\n if not isinstance(arg, dict):\n raise BadRequest(\n \"The 'intersects' argument must be a JSON object (and sent over a POST request)\"\n )\n\n try:\n return shape(arg)\n except ValueError:\n raise BadRequest(\"The 'intersects' argument must be valid GeoJSON geometry.\")\n\n\ndef _bool_argument(s: str):\n \"\"\"\n Parse an argument that should be a bool\n \"\"\"\n if isinstance(s, bool):\n return s\n # Copying FastAPI booleans:\n # https://fastapi.tiangolo.com/tutorial/query-params\n return s.strip().lower() in (\"1\", \"true\", \"on\", \"yes\")\n\n\ndef _handle_search_request(\n request_args: TypeConversionDict,\n product_names: List[str],\n include_total_count: bool = True,\n) -> Dict:\n bbox = request_args.get(\n \"bbox\", type=partial(_array_arg, expect_size=4, expect_type=float)\n )\n\n # Stac-api <=0.7.0 used 'time', later versions use 'datetime'\n time = request_args.get(\"datetime\") or request_args.get(\"time\")\n\n limit = request_args.get(\"limit\", default=DEFAULT_PAGE_SIZE, type=int)\n ids = request_args.get(\n \"ids\", default=None, type=partial(_array_arg, expect_type=uuid.UUID)\n )\n offset = request_args.get(\"_o\", default=0, type=int)\n\n # Request the full Item information. This forces us to go to the\n # ODC dataset table for every record, which can be extremely slow.\n full_information = request_args.get(\n \"_full\", default=DEFAULT_RETURN_FULL_ITEMS, type=_bool_argument\n )\n\n intersects = request_args.get(\"intersects\", default=None, type=_geojson_arg)\n\n if limit > PAGE_SIZE_LIMIT:\n abort(\n 400,\n f\"Max page size is {PAGE_SIZE_LIMIT}. 
\"\n f\"Use the next links instead of a large limit.\",\n )\n\n if bbox is not None and len(bbox) != 4:\n abort(400, \"Expected bbox of size 4. [min lon, min lat, max long, max lat]\")\n\n if time is not None:\n time = _parse_time_range(time)\n\n def next_page_url(next_offset):\n return url_for(\n \".stac_search\",\n collections=product_names,\n bbox=\"{},{},{},{}\".format(*bbox) if bbox else None,\n time=_unparse_time_range(time) if time else None,\n ids=\",\".join(map(str, ids)) if ids else None,\n limit=limit,\n _o=next_offset,\n _full=full_information,\n )\n\n feature_collection = search_stac_items(\n product_names=product_names,\n bbox=bbox,\n time=time,\n dataset_ids=ids,\n limit=limit,\n offset=offset,\n intersects=intersects,\n # The /stac/search api only supports intersects over post requests.\n use_post_request=intersects is not None,\n get_next_url=next_page_url,\n full_information=full_information,\n include_total_count=include_total_count,\n )\n feature_collection[\"links\"].extend(\n (\n dict(\n href=url_for(\".stac_search\"),\n rel=\"search\",\n title=\"Search\",\n type=\"application/geo+json\",\n method=\"GET\",\n ),\n dict(\n href=url_for(\".stac_search\"),\n rel=\"search\",\n title=\"Search\",\n type=\"application/geo+json\",\n method=\"POST\",\n ),\n )\n )\n return feature_collection\n\n\ndef search_stac_items(\n get_next_url: Callable[[int], str],\n limit: int = DEFAULT_PAGE_SIZE,\n offset: int = 0,\n dataset_ids: Optional[str] = None,\n product_names: Optional[List[str]] = None,\n bbox: Optional[Tuple[float, float, float, float]] = None,\n intersects: Optional[BaseGeometry] = None,\n time: Optional[Tuple[datetime, datetime]] = None,\n full_information: bool = False,\n order: ItemSort = ItemSort.DEFAULT_SORT,\n include_total_count: bool = False,\n use_post_request: bool = False,\n) -> Dict:\n \"\"\"\n Perform a search, returning a FeatureCollection of stac Item results.\n\n :param get_next_url: A function that calculates a page url for the given 
offset.\n \"\"\"\n offset = offset or 0\n items = list(\n _model.STORE.search_items(\n product_names=product_names,\n time=time,\n bbox=bbox,\n limit=limit + 1,\n dataset_ids=dataset_ids,\n intersects=intersects,\n offset=offset,\n full_dataset=full_information,\n order=order,\n )\n )\n returned = items[:limit]\n there_are_more = len(items) == limit + 1\n\n page = 0\n if limit != 0:\n page = offset // limit\n paging_properties = dict(\n # Stac standard\n numberReturned=len(returned),\n # Compatibility with older implementation. Was removed from stac-api standard.\n # (page numbers + limits are not ideal as they prevent some big db optimisations.)\n context=dict(\n page=page,\n limit=limit,\n returned=len(returned),\n ),\n )\n if include_total_count:\n count_matching = _model.STORE.get_count(\n product_names=product_names, time=time, bbox=bbox, dataset_ids=dataset_ids\n )\n paging_properties[\"numberMatched\"] = count_matching\n paging_properties[\"context\"][\"matched\"] = count_matching\n\n result = dict(\n type=\"FeatureCollection\",\n features=[as_stac_item(f) for f in returned],\n links=[],\n **paging_properties,\n )\n\n if there_are_more:\n if use_post_request:\n next_link = dict(\n rel=\"next\",\n method=\"POST\",\n merge=True,\n # Unlike GET requests, we can tell them to repeat their same request args\n # themselves.\n #\n # Same URL:\n href=flask.request.url,\n # ... 
with a new offset.\n body=dict(\n _o=offset + limit,\n ),\n )\n else:\n # Otherwise, let the route create the next url.\n next_link = dict(rel=\"next\", href=get_next_url(offset + limit))\n\n result[\"links\"].append(next_link)\n\n return result\n\n\n@bp.route(\"/collections\")\ndef collections():\n \"\"\"\n This is like the root \"/\", but has full information for each collection in\n an array (instead of just a link to each collection).\n \"\"\"\n return _stac_response(\n dict(\n links=[],\n collections=[\n _with_stac_properties(_stac_collection(product.name))\n for product, product_summary in _model.get_products_with_summaries()\n ],\n )\n )\n\n\n@bp.route(\"/arrivals\")\ndef arrivals():\n \"\"\"\n Virtual collection of the items most recently indexed into this index\n \"\"\"\n return _stac_response(\n dict(\n id=\"Arrivals\",\n title=\"Dataset Arrivals\",\n type=\"Collection\",\n license=\"various\",\n description=\"The most recently added Items to this index\",\n properties={},\n providers=[],\n # Covers all products, so all possible extent. We *could* be smart and show the whole\n # server's extent range, but that wouldn't be too useful either. ?\n extent=dict(\n temporal=dict(interval=[[None, None]]),\n spatial=dict(bbox=[[-180.0, -90.0, 180.0, 90.0]]),\n ),\n links=[\n dict(\n rel=\"items\",\n href=url_for(\".arrivals_items\"),\n )\n ],\n )\n )\n\n\n@bp.route(\"/arrivals/items\")\ndef arrivals_items():\n \"\"\"\n Get the Items most recently indexed into this Open Data Cube instance.\n\n This returns a Stac FeatureCollection of complete Stac Items, with paging links.\n \"\"\"\n limit = request.args.get(\"limit\", default=DEFAULT_PAGE_SIZE, type=int)\n offset = request.args.get(\"_o\", default=0, type=int)\n if limit > PAGE_SIZE_LIMIT:\n abort(\n 400,\n f\"Max page size is {PAGE_SIZE_LIMIT}. 
\"\n f\"Use the next links instead of a large limit.\",\n )\n\n def next_page_url(next_offset):\n return url_for(\n \".arrivals_items\",\n limit=limit,\n _o=next_offset,\n )\n\n return _geojson_stac_response(\n search_stac_items(\n limit=limit,\n offset=offset,\n get_next_url=next_page_url,\n full_information=True,\n order=ItemSort.RECENTLY_ADDED,\n include_total_count=False,\n )\n )\n\n\n@bp.route(\"/collections/\")\ndef collection(collection: str):\n \"\"\"\n Overview of a WFS Collection (a datacube product)\n \"\"\"\n return _stac_response(_stac_collection(collection))\n\n\ndef _stac_collection(collection: str):\n summary = _model.get_product_summary(collection)\n try:\n dataset_type = _model.STORE.get_dataset_type(collection)\n except KeyError:\n abort(404, f\"Unknown collection {collection!r}\")\n\n all_time_summary = _model.get_time_summary(collection)\n\n begin, end = (\n (summary.time_earliest, summary.time_latest) if summary else (None, None)\n )\n footprint = all_time_summary.footprint_wgs84\n stac_collection = dict(\n id=summary.name,\n title=summary.name,\n type=\"Collection\",\n license=_utils.product_license(dataset_type),\n description=dataset_type.definition.get(\"description\"),\n properties=dict(_build_properties(dataset_type.metadata)),\n providers=[],\n extent=dict(\n temporal=dict(\n interval=[\n [\n utc(begin) if begin else None,\n utc(end) if end else None,\n ]\n ]\n ),\n spatial=dict(\n bbox=[footprint.bounds if footprint else [-180.0, -90.0, 180.0, 90.0]]\n ),\n ),\n links=[\n dict(\n rel=\"items\",\n href=url_for(\".collection_items\", collection=collection),\n )\n ],\n )\n return stac_collection\n\n\n@bp.route(\"/collections//items\")\ndef collection_items(collection: str):\n \"\"\"\n A geojson FeatureCollection of all items in a collection/product.\n\n (with paging)\n \"\"\"\n all_time_summary = _model.get_time_summary(collection)\n if not all_time_summary:\n abort(404, f\"Product {collection!r} not found among summaries.\")\n\n 
feature_collection = _handle_search_request(\n request_args=request.args,\n product_names=[collection],\n )\n\n # Maybe we shouldn't include total count, as it prevents some future optimisation?\n if \"numberMatched\" not in feature_collection:\n feature_collection[\"numberMatched\"] = all_time_summary.dataset_count\n # Backwards compatibility with older stac implementations.\n feature_collection[\"context\"][\"matched\"] = feature_collection[\"numberMatched\"]\n\n return _geojson_stac_response(feature_collection)\n\n\n@bp.route(\"/collections//items/\")\ndef item(collection: str, dataset_id: str):\n dataset = _model.STORE.get_item(dataset_id)\n if not dataset:\n abort(404, f\"No dataset found with id {dataset_id!r}\")\n\n actual_product_name = dataset.product_name\n if collection != actual_product_name:\n # We're not doing a redirect as we don't want people to rely on wrong urls\n # (and we're unkind)\n actual_url = url_for(\n \".item\",\n collection=actual_product_name,\n dataset_id=dataset_id,\n )\n abort(\n 404,\n f\"No such dataset in collection.\\n\"\n f\"Perhaps you meant collection {actual_product_name}: {actual_url})\",\n )\n\n return _geojson_stac_response(as_stac_item(dataset))\n\n\ndef _pick_remote_uri(uris: Sequence[str]) -> Optional[int]:\n \"\"\"\n Return the offset of the first uri with a remote path, if any.\n \"\"\"\n for i, uri in enumerate(uris):\n scheme, *_ = uri.split(\":\")\n if scheme in (\"https\", \"http\", \"ftp\", \"s3\", \"gfs\"):\n return i\n return None\n\n\ndef _parse_time_range(time: str) -> Optional[Tuple[datetime, datetime]]:\n \"\"\"\n >>> _parse_time_range('1986-04-16T01:12:16/2097-05-10T00:24:21')\n (datetime.datetime(1986, 4, 16, 1, 12, 16), datetime.datetime(2097, 5, 10, 0, 24, 21))\n >>> _parse_time_range('1986-04-16T01:12:16')\n (datetime.datetime(1986, 4, 16, 1, 12, 16), datetime.datetime(1986, 4, 16, 1, 12, 17))\n >>> # Time is optional:\n >>> _parse_time_range('2019-01-01/2019-01-01')\n (datetime.datetime(2019, 1, 1, 0, 
0), datetime.datetime(2019, 1, 1, 0, 0))\n >>> _parse_time_range('1986-04-16')\n (datetime.datetime(1986, 4, 16, 0, 0), datetime.datetime(1986, 4, 17, 0, 0))\n >>> # Open ranges:\n >>> _parse_time_range('2019-01-01/..')[0]\n datetime.datetime(2019, 1, 1, 0, 0)\n >>> _parse_time_range('2019-01-01/..')[1] > datetime.now()\n True\n >>> _parse_time_range('../2019-01-01')\n (datetime.datetime(1971, 1, 1, 0, 0), datetime.datetime(2019, 1, 1, 0, 0))\n >>> # Unbounded time is the same as no time filter. (\"None\")\n >>> _parse_time_range('../..')\n >>>\n \"\"\"\n time_period = time.split(\"/\")\n if len(time_period) == 2:\n start, end = time_period\n if start == \"..\":\n start = datetime(1971, 1, 1, 0, 0)\n elif end == \"..\":\n end = datetime.now() + timedelta(days=2)\n # Were they both open? Treat it as no date filter.\n if end == \"..\":\n return None\n\n return parse_time(start), parse_time(end)\n elif len(time_period) == 1:\n t: datetime = parse_time(time_period[0])\n if t.time() == dt_time():\n return t, t + timedelta(days=1)\n else:\n return t, t + timedelta(seconds=1)\n\n\ndef _unparse_time_range(time: Tuple[datetime, datetime]) -> str:\n \"\"\"\n >>> _unparse_time_range((\n ... datetime(1986, 4, 16, 1, 12, 16),\n ... datetime(2097, 5, 10, 0, 24, 21)\n ... 
))\n '1986-04-16T01:12:16/2097-05-10T00:24:21'\n \"\"\"\n start_time, end_time = time\n return f\"{start_time.isoformat()}/{end_time.isoformat()}\"\n\n\ndef _band_to_measurement(band: Dict, dataset_location: str) -> MeasurementDoc:\n \"\"\"Create EO3 measurement from an EO1 band dict\"\"\"\n return MeasurementDoc(\n path=band.get(\"path\"),\n band=band.get(\"band\"),\n layer=band.get(\"layer\"),\n name=band.get(\"name\"),\n alias=band.get(\"label\"),\n )\n\n\ndef as_stac_item(dataset: DatasetItem):\n \"\"\"\n Get a dict corresponding to a stac item\n \"\"\"\n ds: Dataset = dataset.odc_dataset\n\n if ds is not None and is_doc_eo3(ds.metadata_doc):\n dataset_doc = serialise.from_doc(ds.metadata_doc, skip_validation=True)\n dataset_doc.locations = ds.uris\n\n # Geometry is optional in eo3, and needs to be calculated from grids if missing.\n # We can use ODC's own calculation that happens on index.\n if dataset_doc.geometry is None:\n fallback_extent = ds.extent\n if fallback_extent is not None:\n dataset_doc.geometry = fallback_extent.geom\n dataset_doc.crs = str(ds.crs)\n\n if ds.sources:\n dataset_doc.lineage = {classifier: [d.id] for classifier, d in ds.sources}\n # Does ODC still put legacy lineage into indexed documents?\n elif (\"source_datasets\" in dataset_doc.lineage) and len(\n dataset_doc.lineage\n ) == 1:\n # From old to new lineage type.\n dataset_doc.lineage = {\n classifier: [dataset[\"id\"]]\n for classifier, dataset in dataset_doc.lineage[\"source_datasets\"]\n }\n\n else:\n # eo1 to eo3\n\n dataset_doc = DatasetDoc(\n id=dataset.dataset_id,\n # Filled-in below.\n label=None,\n product=ProductDoc(dataset.product_name),\n locations=ds.uris if ds is not None else None,\n crs=str(dataset.geometry.crs) if dataset.geometry is not None else None,\n geometry=dataset.geometry.geom if dataset.geometry is not None else None,\n grids=None,\n # TODO: Convert these from stac to eo3\n properties=Eo3Dict(\n {\n \"datetime\": utc(dataset.center_time),\n 
**(dict(_build_properties(ds.metadata)) if ds else {}),\n \"odc:processing_datetime\": utc(dataset.creation_time),\n }\n ),\n measurements={\n name: _band_to_measurement(\n b,\n dataset_location=ds.uris[0] if ds is not None and ds.uris else None,\n )\n for name, b in ds.measurements.items()\n }\n if ds is not None\n else {},\n accessories=_accessories_from_eo1(ds.metadata_doc)\n if ds is not None\n else {},\n # TODO: Fill in lineage. The datacube API only gives us full datasets, which is\n # expensive. We only need a list of IDs here.\n lineage={},\n )\n\n if dataset_doc.label is None and ds is not None:\n dataset_doc.label = _utils.dataset_label(ds)\n\n item_doc = eo3stac.to_stac_item(\n dataset=dataset_doc,\n stac_item_destination_url=url_for(\n \".item\",\n collection=dataset.product_name,\n dataset_id=dataset.dataset_id,\n ),\n odc_dataset_metadata_url=url_for(\"dataset.raw_doc\", id_=dataset.dataset_id),\n explorer_base_url=url_for(\"default_redirect\"),\n )\n # Add the region code that Explorer inferred.\n # (Explorer's region codes predate ODC's and support\n # many more products.\n item_doc[\"properties\"][\"cubedash:region_code\"] = dataset.region_code\n\n return item_doc\n\n\ndef _accessories_from_eo1(metadata_doc: Dict) -> Dict[str, AccessoryDoc]:\n \"\"\"Create and EO3 accessories section from an EO1 document\"\"\"\n accessories = {}\n\n # Browse image -> thumbnail\n if \"browse\" in metadata_doc:\n for name, browse in metadata_doc[\"browse\"].items():\n accessories[f\"thumbnail:{name}\"] = AccessoryDoc(\n path=browse[\"path\"], name=name\n )\n\n # Checksum\n if \"checksum_path\" in metadata_doc:\n accessories[\"checksum:sha1\"] = AccessoryDoc(\n path=metadata_doc[\"checksum_path\"], name=\"checksum:sha1\"\n )\n return accessories\n\n\ndef field_platform(key, value):\n yield \"eo:platform\", value.lower().replace(\"_\", \"-\")\n\n\ndef field_instrument(key, value):\n yield \"eo:instrument\", value\n\n\ndef field_path_row(key, value):\n # Path/Row fields 
are ranges in datacube but 99% of the time\n # they are a single value\n # (they are ranges in telemetry products)\n # Stac doesn't accept a range here, so we'll skip it in those products,\n # but we can handle the 99% case when lower==higher.\n if key == \"sat_path\":\n kind = \"landsat:wrs_path\"\n elif key == \"sat_row\":\n kind = \"landsat:wrs_row\"\n else:\n raise ValueError(f\"Path/row kind {repr(key)}\")\n\n # If there's only one value in the range, return it.\n if isinstance(value, Range):\n if value.end is None or value.begin == value.end:\n # Standard stac\n yield kind, int(value.begin)\n else:\n # Our questionable output. Only present in telemetry products?\n yield f\"odc:{key}\", [value.begin, value.end]\n\n\n# Other Property examples:\n# collection\t\"landsat-8-l1\"\n# eo:gsd\t15\n# eo:platform\t\"landsat-8\"\n# eo:instrument\t\"OLI_TIRS\"\n# eo:off_nadir\t0\n# datetime\t\"2019-02-12T19:26:08.449265+00:00\"\n# eo:sun_azimuth\t-172.29462212\n# eo:sun_elevation\t-6.62176054\n# eo:cloud_cover\t-1\n# eo:row\t\"135\"\n# eo:column\t\"044\"\n# landsat:product_id\t\"LC08_L1GT_044135_20190212_20190212_01_RT\"\n# landsat:scene_id\t\"LC80441352019043LGN00\"\n# landsat:processing_level\t\"L1GT\"\n# landsat:tier\t\"RT\"\n\n_STAC_PROPERTY_MAP = {\n \"platform\": field_platform,\n \"instrument\": field_instrument,\n # \"measurements\": field_bands,\n \"sat_path\": field_path_row,\n \"sat_row\": field_path_row,\n}\n\n\ndef _build_properties(d: DocReader):\n for key, val in d.fields.items():\n if val is None:\n continue\n converter = _STAC_PROPERTY_MAP.get(key)\n if converter:\n yield from converter(key, val)\n\n\n@bp.errorhandler(HTTPException)\ndef handle_exception(e):\n \"\"\"Return JSON instead of HTML for HTTP errors.\"\"\"\n response = e.get_response()\n response.data = json.dumps(\n {\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n }\n )\n response.content_type = \"application/json\"\n return 
response\n","sub_path":"cubedash/_stac.py","file_name":"_stac.py","file_ext":"py","file_size_in_byte":27493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"82052564","text":"from tkinter import ttk\n\nimport tkinter\n\n\nclass HeaderWidget:\n def __init__(self, frame, currencies=None, on_currency_selected=None, on_refreshed=None):\n self.frame = frame\n self.currencies = currencies\n self.on_currency_selected = on_currency_selected\n self.on_refreshed = on_refreshed\n self.init_ui()\n\n def init_ui(self):\n refresh_btn = tkinter.Button(self.frame, text=\"Refresh\",font=('Arial', 8, 'bold'), width=10, height=2)\n refresh_btn.config(command=self.on_refresh_btn_clicked)\n refresh_btn.pack(side=tkinter.LEFT, padx=(0, 10), pady=(16, 0))\n\n combo_frame = tkinter.Frame(self.frame)\n tkinter.Label(combo_frame, text=\"Currency\").pack()\n # Adding combobox drop down list\n self.crypto_chosen = ttk.Combobox(combo_frame, width=27, values=self.currencies)\n self.crypto_chosen.bind(\"<>\", self.on_currency_checkbox_selected)\n self.crypto_chosen.current(0)\n self.crypto_chosen.pack()\n combo_frame.pack(side=tkinter.LEFT)\n\n def on_currency_checkbox_selected(self, event):\n selected_currency = self.crypto_chosen.get()\n if self.on_currency_selected is not None:\n self.on_currency_selected(selected_currency)\n\n def on_refresh_btn_clicked(self):\n if self.on_refreshed is not None:\n self.on_refreshed()\n","sub_path":"ui/header_widget.py","file_name":"header_widget.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"140960790","text":"#!/usr/bin/python3\n\nimport socket\n\nsize = 512\nhost = ''\nport = 9898\n\n# Family = Internet, type = stream socket mean TCP\nsock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n# We have a socket, we need to bind to an IP address and port\n# to have a place to listen on\nsock.bind((host, port))\nsock.listen(5)\n# We can store information about the other end\n# once we accept the connectio attempt\nc,addr = sock.accept()\ndata = c.recv(size)\nif data:\n f = open(\"storage.dat\", '+w')\n print(\"connection from: \", addr[0])\n f.write(addr[0])\n f.write(\":\")\n f.write(data.decode(\"utf-8\"))\n f.close()\nsock.close()\n\n\n# Listen msg using nc command\n# nc localhost 9898\n# Run this command after running the python script\n","sub_path":"networking/py_network_server.py","file_name":"py_network_server.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"45756674","text":"from shutil import copyfile\n\n\nf = open(\"c1.txt\", \"r\")\nf.readline()\n\nmaxnum = int(f.readline().split()[1])\nmaxnum = maxnum-1\n\nwhile maxnum > 0:\n \n newname = \"c\" + str(maxnum) +\".txt\"\n copyfile(\"cc.txt\", newname)\n newf = open(newname, \"r\")\n newdata = newf.read()\n newdata = newdata.replace(\"fiber_num: 1\", \"fiber_num: \"+ str(maxnum))\n reopen = open(newname, \"w\")\n reopen.write(newdata)\n maxnum = maxnum-1\n\n newf.close()\n reopen.close()\n\nf.close()\n","sub_path":"test/yarn_1/config/config_gen.py","file_name":"config_gen.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"233543015","text":"\n# -*- coding: utf-8 -*-\nfrom config import db\n\n\nclass CounselorManage(db.Model):\n __tablename__ = \"counselor_manage\"\n\n id = db.Column(db.Integer, primary_key=True, nullable=False)\n group_id = db.Column(db.Integer)\n counselor_id = db.Column(db.Integer)\n\n def __init__(self, id,group_id,counselor_id):\n '''Constructor'''\n self.id=id\n self.group_id=group_id\n self.counselor_id=counselor_id\n\n\n def __repr__(self):\n return 'id : %s' % self.id\n\n\n# Client and database attributes dictionary\nclinetHead = {u'id', u'groupId', u'counselorId'}\nCounselorManageChangeDic = {\n \"id\":\"id\",\n \"groupId\":\"group_id\",\n \"counselorId\":\"counselor_id\"\n}\n\nintList = {u'id', u'groupId', u'counselorId'}\n\n# db.create_all()\n","sub_path":"boss_service/models/Counselor/CounselorManage.py","file_name":"CounselorManage.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"495624292","text":"import os\nimport torchvision.models as models\nfrom CompositionalNets.Code.model import resnet_feature_extractor\nfrom CompositionalNets.Code.config import data_path, model_save_dir, vc_num, backbone_type, dataset\nfrom torch import load, device\nfrom torch.cuda import is_available as cuda_is_available\nfrom collections import OrderedDict\nimport re\nfrom functools import reduce\n\nfrom src.config import Directories\nfrom src.models import UNet, set_weights_to_ones\n\n# Setup work\ndevice_ids = [0]\n\n# dataset = 'chaos' # pascal3d+\n\n#vgg, resnet50, resnext, resnet152\n# TODO: Add the ability to use U-Net's bottleneck's output as the feature map\n# The U-Net trained on the CHAOS dataset most definitely extracts more meaningful\n# features than something pre-trained on ImageNet\n\n# nn_type = 'vgg'\nnn_type = backbone_type\nunet_filename = 'unet_liver_2020-10-31_19:19:13.pth'\n\n# vMF parameter\nvMF_kappa = 30\n\n# Number of vMF clusters, used as argument for cls_num in vMFMM init\n# This needs to the same as the extractor's output channel size\n# because the vMF clusters are used as weights for a Convolutional operation \n# applied on the extractor's output feature map\n# Convolutional Function Dimension = [H * W * VC_NUM]\n# Feature Map Dimensions = [VC_NUM * H * W]\n# Convolutional Function * Feature Map Dimensions = vMF Activations\n# vc_num = vc_num\n\ncategories = ['aeroplane', 'bicycle', 'boat', 'bottle', 'bus', 'car', 'chair', 'diningtable', 'motorbike', 'sofa',\n 'train', 'tvmonitor']\ncat_test = ['aeroplane', 'bicycle', 'bus', 'car', 'motorbike', 'train']\n\ndef calculate_receptive_fields(kernel_sizes, strides):\n \n r_fields = []\n prev_r_field = 1\n \n for i, kernel_size in enumerate(kernel_sizes, start=1):\n product_of_strides = reduce(lambda x,y: x*y, strides[:i])\n \n curr_r_field = prev_r_field + (kernel_size - 1) * product_of_strides\n r_fields.append(curr_r_field)\n prev_r_field = curr_r_field\n \n return 
r_fields\n\nif nn_type =='vgg':\n layer = 'pool5' # 'pool5','pool4'\n if layer == 'pool4':\n extractor=models.vgg16(pretrained=True).features[0:24]\n elif layer =='pool5':\n if vc_num == 512:\n extractor = models.vgg16(pretrained=True).features\n elif vc_num == 256:\n extractor = models.vgg16(pretrained=True).features[:12]\n elif vc_num == 128:\n extractor = models.vgg16(pretrained=True).features[:10]\n \nelif nn_type[:6]=='resnet' or nn_type=='resnext' or nn_type=='alexnet':\n layer = 'last' # 'last','second'\n extractor=resnet_feature_extractor(nn_type,layer)\n \nelif nn_type == 'unet':\n layer = 'pool5'\n path_to_unet = os.path.join(Directories.CHECKPOINTS, unet_filename)\n unet = UNet(pretrained=True)\n device = device('cuda:0' if cuda_is_available() else 'cpu')\n unet.load_state_dict(load(path_to_unet, map_location=device)['model_state_dict'])\n \n unet_ones = UNet(pretrained=False)\n unet_ones = unet_ones.get_features()\n \n if vc_num == 1024:\n extractor = unet.get_features()[:24]\n elif vc_num == 512:\n extractor = unet.get_features()[:19]\n elif vc_num == 256:\n extractor = unet.get_features()[:15]\n elif vc_num == 128:\n extractor = unet.get_features()[:10]\n else:\n# extractor = unet.get_features()[:9]\n unet_layer = 15\n extractor = unet.get_features()[:unet_layer] #256\n \n unet_ones = set_weights_to_ones(unet_ones)[:unet_layer]\n# extractor = unet.get_features()[:4] #64\n# extractor = unet.get_features()[:2] #64\n\nelif nn_type == 'unet_lits':\n layer = 'pool5'\n unet_filepath = os.path.join(Directories.CHECKPOINTS, 'model_UNet.pth')\n state_dict = load(unet_filepath)['model_state_dict']\n new_dict = OrderedDict()\n for curr_key, value in state_dict.items():\n new_key = re.findall('conv.+', curr_key)[0]\n new_dict[new_key] = state_dict[curr_key]\n unet_filepath = os.path.join(Directories.CHECKPOINTS, 'model_UNet.pth')\n unet = UNet(in_channels=3)\n unet.load_state_dict(new_dict)\n \n if vc_num == 1024:\n extractor = unet.get_features()[:24]\n elif 
vc_num == 512:\n extractor = unet.get_features()[:19]\n elif vc_num == 256:\n extractor = unet.get_features()[:15]\n elif vc_num == 128:\n extractor = unet.get_features()[:10]\n else:\n# extractor = unet.get_features()[:9]\n extractor = unet.get_features()[:15]\n# extractor = unet.get_features()[:4]\n\nif cuda_is_available():\n extractor.cuda(device_ids[0]).eval()\n\ninit_path = model_save_dir+'init_{}/'.format(nn_type)\nif not os.path.exists(init_path):\n os.makedirs(init_path)\n\ndict_dir = init_path+'dictionary_{}/'.format(nn_type)\nif not os.path.exists(dict_dir):\n os.makedirs(dict_dir)\n\nsim_dir = init_path+'similarity_{}_{}_{}/'.format(nn_type,layer,dataset)\n\nAstride_set = [2, 4, 8, 16, 32] # stride size\nfeatDim_set = [64, 128, 256, 512, 512] # feature dimension\nArf_set = [6, 16, 44, 100, 212] # receptive field size for vgg\nApad_set = [2, 6, 18, 42, 90] # padding size\n\n# Why are these initialized to those values?\n# Why an offset of 3?\nif layer =='pool4' or layer =='second':\n Astride = Astride_set[3]\n Arf = Arf_set[3]\n Apad = Apad_set[3]\n offset = 3\n \nelif layer =='pool5' or layer == 'last':\n# Astride = Astride_set[3]\n# Arf = 170\n# Apad = Apad_set[4]\n# offset = 3\n \n Astride = 32\n Arf = 300\n Apad = 20\n offset = 3\n\nif nn_type == 'unet':\n num_layers = len(extractor)\n kernels = [3] * (num_layers // 2)\n strides = [1] * (num_layers // 2)\n padding = 1\n \n r_fields = calculate_receptive_fields(kernels, strides)\n \n num_max_pools = unet_layer // 5\n \n Astride = 1\n Arf = r_fields[-1] * (2 ** num_max_pools)\n# Apad = Arf*2\n Apad = 0\n offset = 0\n \n","sub_path":"Initialization_Code/config_initialization.py","file_name":"config_initialization.py","file_ext":"py","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"386752667","text":"try:\n # Python2\n import Tkinter as tk\nexcept ImportError:\n # Python3\n import tkinter as tk\n\ncanvas = None\nroot = None\ncell_size = 3\ncells = {}\n\ndef draw_init(grid):\n global canvas, root\n if not canvas:\n dim = grid.dimensions\n root = tk.Tk()\n root.configure()\n canvas = tk.Canvas(root, width=dim[0] * cell_size, height=dim[1] * cell_size, bg='black')\n canvas.pack()\n\n if not cells:\n for idx, cell in grid.cells():\n cells[idx] = canvas.create_rectangle(\n idx[0] * cell_size,\n idx[1] * cell_size,\n (idx[0] + 1) * cell_size,\n (idx[1] + 1) * cell_size,\n #fill = 'red',\n fill = heat_to_color(cell.heat)\n )\n\ndef draw_grid(grid):\n # 2D grid only.\n for idx, cell in grid.cells():\n canvas.itemconfigure(cells[idx], fill = heat_to_color(cell.heat))\n root.update()\n #tk.mainloop()\n\nimport matplotlib.pyplot as plt\ncolor_map = {}\ncolor_steps = 20\ndef heat_to_color(heat, scale=3.0, cm=plt.cm.hot):\n quant = int(min((heat / scale), 1) * 20)\n if not color_map:\n for step in range(color_steps + 1):\n color_map[step] = \"#%02x%02x%02x\" % tuple(x * 255 for x in cm(float(step) / color_steps)[:3])\n return color_map[quant]\n\n#tk.mainloop()","sub_path":"sandbox/v03/drawtk.py","file_name":"drawtk.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"539789732","text":"# Django imports.\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models import Q, QuerySet\nfrom django.utils import timezone\n\n__author__ = 'Jason Parent'\n\n\nclass FriendshipQuerySet(QuerySet):\n def get_friendship(self, user, friend):\n \"\"\"Gets a friendship between the user and the specified friend, if one exists.\"\"\"\n query = Q(\n Q(sender=user, receiver=friend) |\n Q(sender=friend, receiver=user)\n )\n\n try:\n return self.get(query)\n except Friendship.DoesNotExist:\n return None\n\n def get_friendships(self, user, status=None, sent=True, received=True):\n \"\"\"\n Get all friendships associated with the user according to the status and whether the user\n sent and/or received the friendship request.\n \"\"\"\n query = Q()\n\n if sent:\n query = Q(sender=user)\n\n if received:\n query |= Q(receiver=user)\n\n friendships = self.filter(query)\n\n if status:\n friendships = friendships.filter(status=status)\n\n return friendships.order_by('updated')\n\n def pending(self, user):\n \"\"\"Get all pending friendships associated with the user.\"\"\"\n return self.get_friendships(user, Friendship.PENDING)\n\n def pending_sent(self, user):\n \"\"\"Get all pending friendships that were sent by the user.\"\"\"\n return self.get_friendships(user, Friendship.PENDING, sent=True, received=False)\n\n def pending_received(self, user):\n \"\"\"Get all pending friendships that were received by the user.\"\"\"\n return self.get_friendships(user, Friendship.PENDING, sent=False, received=True)\n\n def current(self, user):\n \"\"\"Get all current (accepted) friendships associated with the user.\"\"\"\n return self.get_friendships(user, Friendship.ACCEPTED)\n\n\nclass Friendship(models.Model):\n \"\"\"A friendly relationship between two users.\"\"\"\n\n PENDING = 'PENDING'\n ACCEPTED = 'ACCEPTED'\n REJECTED = 'REJECTED'\n\n STATUS_CHOICES = (\n (PENDING, PENDING),\n (ACCEPTED, ACCEPTED),\n (REJECTED, REJECTED),\n 
)\n\n objects = FriendshipQuerySet.as_manager()\n\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='friendships_by_sender',\n help_text='User who sent the friendship request.')\n\n receiver = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='friendships_by_receiver',\n help_text='User who received the friendship request.')\n\n created = models.DateTimeField(auto_now_add=True)\n\n updated = models.DateTimeField(auto_now=True)\n\n status = models.CharField(max_length=10, choices=STATUS_CHOICES, default=PENDING)\n\n class Meta(object):\n app_label = 'friendship'\n default_related_name = 'friendships'\n unique_together = ('sender', 'receiver')\n\n @classmethod\n def user_has_friend(cls, user, friend):\n \"\"\"Checks whether the user is part of a friendship with the specified friend.\"\"\"\n return cls.objects.get_friendship(user, friend) is not None\n\n @classmethod\n def user_add_friend(cls, user, friend):\n \"\"\"Creates a pending friendship between the user and the specified friend.\"\"\"\n if friend == user or cls.user_has_friend(user, friend):\n return None\n\n return cls.objects.create(sender=user, receiver=friend)\n\n @classmethod\n def user_remove_friend(cls, user, friend):\n \"\"\"Removes a friend, if a friendship exists between the user and the specified friend.\"\"\"\n if friend == user:\n return None\n\n friendship = cls.objects.get_friendship(user, friend)\n\n if not friendship:\n return None\n\n friendship.delete()\n\n return friendship\n\n @classmethod\n def user_list_friends(cls, user, friendships=None):\n \"\"\"Gets a list of friendship from the specified friendships.\"\"\"\n if friendships is None:\n friendships = cls.objects.current(user)\n\n # Extract non-self friendship from friendships.\n return map((lambda f: f.sender if f.receiver == user else f.receiver), friendships)\n\n def __unicode__(self):\n return '{sender} to {receiver}: {status}'.format(\n sender=self.sender.username,\n receiver=self.receiver.username,\n 
status=self.get_status_display()\n )\n\n def accept(self):\n \"\"\"Accepts a pending friendship.\"\"\"\n if self.status != self.PENDING:\n return None\n\n self.status = self.ACCEPTED\n self.updated = timezone.now()\n self.save()\n\n def reject(self):\n \"\"\"Rejects a pending friendship.\"\"\"\n if self.status != self.PENDING:\n return None\n\n self.status = self.REJECTED\n self.updated = timezone.now()\n self.save()\n","sub_path":"friendship/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"447959536","text":"import requests, json\nfrom requests.auth import HTTPBasicAuth\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\n# Disable Warnings from Untrusted TLS keys\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nclass App(object):\n \n def __init__(self, url, apikey,token):\n self.url = url\n self.apikey = apikey\n self.token = token\n self.iam_access_token = None\n \n \n def __doPost(self, url, data, headers=None, auth=None):\n try:\n r = requests.post(url=url, headers=None, data=data, auth=auth, verify=False)\n\n if (r.status_code != 200):\n print('requests.get -> %s = %s\\n' % (r.url, r))\n return None\n return r.content\n except requests.exceptions.RequestException as e:\n print(url, e)\n return None\n\n def __doGet(self, url, data, headers=None, auth=None):\n\n try:\n r = requests.get(url=url, headers=None, auth=auth, verify=False)\n #print('doGet')\n print(r)\n\n if (r.status_code != 200):\n print('requests.get -> %s = %s\\n' % (r.url, r))\n return None\n return r.content\n except requests.exceptions.RequestException as e:\n print(url, e)\n return None\n\n def getIAMToken(self):\n url = 'https://iam.cloud.ibm.com/identity/token'\n headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json'}\n form = { 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',\n 'apikey': self.apikey}\n\n r = requests.post(url, headers=headers, data=form, verify=False) .json()\n #print(r)\n\n if ( r['access_token'] ):\n self.iam_access_token = r['access_token']\n\n def getAllPodsInNamespace(self,namespace):\n '''\n curl -k -H \"Authorization: Bearer $TOKEN\" -H 'Accept: application/json' \\\n https://$ENDPOINT/api/v1/namespaces/$NAMESPACE/pods\n '''\n #authorization = 'Bearer %s' % self.iam_access_token\n authorization = 'Bearer %s' % self.token\n headers = {'Content-type': 'application/json', 'Accept': 'application/json', 'Authorization' : authorization }\n url=self.url + 
'/api/v1/namespaces/' + namespace + '/pods'\n\n #print('url='+ url )\n r = requests.get(url=url, headers=headers, verify=False)\n #print ('Response: ',r.status_code, ' - ', r.json())\n #print (r.json())\n return r.json()\n\n def deleteAllPodsInNamespace(self,namespace):\n '''\n curl -k -X DELETE -H \"Authorization: Bearer $TOKEN\" -H 'Accept: application/json' \\\n https://$ENDPOINT/api/v1/namespaces/$NAMESPACE/pods\n '''\n #authorization = 'Bearer %s' % self.iam_access_token\n authorization = 'Bearer %s' % self.token\n headers = {'Content-type': 'application/json', 'Accept': 'application/json', 'Authorization' : authorization }\n url=self.url + '/api/v1/namespaces/' + namespace + '/pods'\n\n #print('url='+ url )\n r = requests.delete(url=url, headers=headers, verify=False)\n print ('Response: ',r.status_code, ' - ', r.json())\n \n \ndef main(params):\n\n app = App(params['IBMCLOUD_OC_CONSOLE'],params['APIKEY'],params['IBMCLOUD_OC_TOKEN'])\n\n app.getIAMToken()\n r = app.getAllPodsInNamespace(params['IBMCLOUD_OC_PROJECT'])\n\n for pod in r['items']:\n print(pod['metadata']['name'])\n #app.deleteAllPodsInNamespace(IBMCLOUD_OC_PROJECT)\n\n return { }\n\n'''\nif __name__ == '__main__':\n params = {}\n ret = main(params)\n #print (ret)\n'''","sub_path":"example03/list-pods.py","file_name":"list-pods.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"424363963","text":"# *-*coding:utf-8*-*\n\"\"\"\n@version: Python2.7.13\n@author: Attack\n@time: 2017/2/6 18:06\n\"\"\"\nimport time\n\nimport requests\nfrom TestScript import easyMysql\nimport random\n\n\ndef get_asset_id(env,asset_name):\n sql = 'SELECT id FROM t_zc_asset where asset_name = \"{asset_name}\";'.format(asset_name=asset_name)\n mysql = easyMysql.EasyMysql(env)\n result = mysql.call_mysql('finance_p2p',sql)\n asset_id = result[0]['id']\n return asset_id\n\ndef to_fomart_time(timestamp):\n \"\"\"转化时间戳为 年-月-日 时:分:秒 格式\"\"\"\n time_local = time.localtime(timestamp)\n time_fomart = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n return time_fomart\n\ndef to_timestamp(fomart_time):\n \"\"\"转化 年-月-日 时:分:秒 为时间戳\"\"\"\n time_array = time.strptime(fomart_time, \"%Y-%m-%d %H:%M:%S\")\n timestamp = int(time.mktime(time_array))\n return timestamp\n\nroot_url1 = 'http://lcts1.feidee.net/fixin-ms/'\nroot_url2 = 'http://lcts2.feidee.net/fixin-ms/'\n\nusername = 'test1'\npassword = 'Test123456!'\nlogin_param = {'userName':username,'userPwd':password,'validationCode':'1111'}\ndef creat_product(env,login_param,contractAmount=None,loanTime=None,ptype=None,periods=None,\n amountYuan=None,amountTime=None,timeUnitCode=None):\n pname = '自动录标' + str(int(time.time()))[-5:] #产品名称\n if contractAmount == None: #债权包合同金额\n contractAmount = '100000000'\n if loanTime == None: #债权期限 天\n loanTime = '60'\n if ptype == None: #产品类型\n ptype = '1' #1普通 2新手 3抢购\n if periods == None: #产品期限\n periods = '12'\n if amountYuan == None: #产品募集金额\n amountYuan = '150000'\n if amountTime ==None: #产品募集时长(秒 1h=3600 1day=86400 1month=2592000)\n amountTime = random.randint(72000,75600)\n if timeUnitCode == None: #产品类型 T01 月月高 T02季季高 T03周周高 T04双月高\n timeUnitCode = 'T01'\n r = requests.session()\n #登录月月高后台\n response = r.post(root_url1+'sysUser/login',login_param)\n print(u'登陆返回:'+response.text)\n #请求债权包id接口,获取assetCode\n assetCode = 
r.post(root_url1+'zcAsset/getAssetCode').json()['assetCode']\n #构建债权包表单数据\n submit_data1 = {'id':'',\n 'assetCode':assetCode, #债权包id\n 'assetName':'债权包'+str(assetCode), #债权包名称\n 'orgCode':'C0012', #债权包合作机构\n 'objectId':'',\t#资产包ID\n 'contractAmount':contractAmount, #合同金额(需要*100)\n 'loanTime':loanTime, #期限\n 'usedStartTime':'',\t#起息日\n 'paymentMode':'1'} #还款方式\n #请求债权包新建接口,新建债权包\n response = r.post(root_url1+'zcAsset/create',submit_data1)\n print(u'创建资产包返回' + response.text)\n submit_data2 = {'id':''\t,#ID\n 'prodId':''\t,#产品ID\n 'settleBatchId':'',#不知道是啥\n 'status':''\t,#状态\n 'tmpId':'76',#模板id 测试模板01 id 36 test1 id 37\n 'rateList[0].annualRate':'6.00',#第一期利率\n 'rateList[0].period':'1',\n 'rateList[1].annualRate':'6.50',#第二期利率\n 'rateList[1].period':'2',\n 'rateList[2].annualRate':'7.00',#第三期利率\n 'rateList[2].period':'3',\n 'rateList[3].annualRate':'7.50',#第四期利率\n 'rateList[3].period':'4',\n 'rateList[4].annualRate':'8.00',#第五期利率\n 'rateList[4].period':'5',\n 'rateList[5].annualRate':'8.50',#第六期利率\n 'rateList[5].period':'6',\n 'rateList[6].annualRate':'9.00',#第七期利率\n 'rateList[6].period':'7',\n 'rateList[7].annualRate':'9.50',#第八期利率\n 'rateList[7].period':'8',\n 'rateList[8].annualRate':'10.00',#第九期利率\n 'rateList[8].period':'9',\n 'rateList[9].annualRate':'10.50',#第十期利率\n 'rateList[9].period':'10',\n 'rateList[10].annualRate':'11.00',#第十一期利率\n 'rateList[10].period':'11',\n 'rateList[11].annualRate':'12.00',#第十二期利率\n 'rateList[11].period':'12',\n 'pname':pname,\n 'ptype':ptype,#产品类型 1普通 2新手 3抢购\n 'periods':periods,#产品期数\n 'lockPeriods':'1',#锁定时间(月)\n 'reserveDay':'3',#预约截止当期结息日之前期限(天)\n 'amountYuan':amountYuan,#募集金额\n 'minInvestAmountYuan':'100',#起购金额\n 'maxInvestAmountYuan':str(int(amountYuan)-1),#最大金额\n 'raiseStartTimeStr':to_fomart_time(time.time()+random.randint(360,720)),#募集开始时间\n 'raiseEndTimeStr':to_fomart_time(time.time()+amountTime),#募集结束时间\n 'calculInterestDateStr':to_fomart_time(time.time()+172800).split(' ')[0],#起息日期\n 
'timeUnitCode':timeUnitCode,#时间单位类型\n 'saleChannel':'04,',#销售渠道\n 'assetIdsCombobox':get_asset_id(env,submit_data1['assetName']),#资产包 'SELECT id FROM finance_p2p.t_zc_asset where asset_name = \"{asset_name}\";'.fomat(asset_name=asset_name)\n 'publishTimeStr':'',#募集时间文本\n 'activityTag':'',#活动标签\n 'subsidy':'',#\n 'editorValue[]':'',\n 'editorValue[]':'',\n 'editorValue[]':'',\n 'editorValue[]':'',\n 'editorValue[]':'',\n 'description':'',\n 'moneyPurpose':'',\n 'insurance':'',\n 'interestCalculate':'',\n 'redeem':'',\n 'assetIdsStr':get_asset_id(env,submit_data1['assetName'])}\n\n #请求标的新建接口,新建标的\n response = r.post(root_url1+'zcProd/create',submit_data2)\n print(u'创建产品返回' + response.text)\n\n # #构建待发布产品接口请求数据,并获取到之前新建的产品id\n # submit_data3 = {'whatSoEver':'',\n # 'pname'\t:'',\n # 'tmpId'\t:'',\n # 'status':'',\n # 'currentPeriods':'',\n # 'raiseStartTime':'',\n # 'page':'1',\n # 'rows':'15'}\n # zcProd = r.post(root_url2+'zcProd/dataGrid',submit_data3).json()\n # for zc in zcProd['rows']:\n # if zc['pname'] == pname.decode('utf-8'):\n # zc_id = zc['id']\n #\n # #请求发布接口,发布此前新建的产品\n # submit_data4 = {'id':zc_id}\n # response = r.post(root_url2+'zcProd/publish',submit_data4)\n # print u'发布产品返回' + response.text\nfor i in range(1):\n time.sleep(1)\n creat_product('test1',login_param)\n","sub_path":"TestScript/easyZCProduct.py","file_name":"easyZCProduct.py","file_ext":"py","file_size_in_byte":7392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"275537808","text":"#!/usr/bin/env python3\n\n########################################################################\n# \n# Reads environmental data from enviro+ sensor on raspberry pi\n# ESP32 via MQTT\n# OpenWeatherMap Api\n# Writes data to csv, a MySQL server on the Raspberry Pi, and sends it to Azure\n# Checks for low and high values and sends an alert to the user \n# \n########################################################################\n\nimport time\nimport colorsys\nimport sys\nimport ST7735\nimport datetime as dt\nimport requests\nimport urllib\nimport weather_warning_sender\nimport write_to_csv\nimport json\nimport sql_writer\n\nimport weatherscraper\n\nimport mqtt_to_esp32\n\n\n# set variables\nname = \"Ali Dore\"\nemail = \"50031595@belfastmet.ac.uk\" # No one else will see this, you'll be emailed if your server stops sending data\nlatitude = 54.53292600186758\nlongitude = -5.849735364895184\nlocation_description = \"indoors\" #e.g. indoors, outdoors, garage, shed\n\ntry:\n # Transitional fix for breaking change in LTR559\n from ltr559 import LTR559\n ltr559 = LTR559()\nexcept ImportError:\n import ltr559\n\nfrom bme280 import BME280\nfrom pms5003 import PMS5003, ReadTimeoutError as pmsReadTimeoutError, SerialTimeoutError\nfrom enviroplus import gas\nfrom subprocess import PIPE, Popen\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nfrom fonts.ttf import RobotoMedium as UserFont\nimport logging\n\nlogging.basicConfig(\n format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S')\n\nlogging.info(\"\"\"Sends readings from all of Enviro plus' sensors to Azure\"\"\")\n\n# BME280 temperature/pressure/humidity sensor\nbme280 = BME280()\n\n# PMS5003 particulate sensor\npms5003 = PMS5003()\ntime.sleep(1.0)\n\n# Create ST7735 LCD display class\nst7735 = ST7735.ST7735(\n port=0,\n cs=1,\n dc=9,\n backlight=12,\n rotation=270,\n spi_speed_hz=10000000\n)\n\n# 
Initialize display\nst7735.begin()\n\nWIDTH = st7735.width\nHEIGHT = st7735.height\n\n# Set up canvas and font\nimg = Image.new('RGB', (WIDTH, HEIGHT), color=(0, 0, 0))\ndraw = ImageDraw.Draw(img)\nfont_size_small = 10\nfont_size_large = 20\nfont = ImageFont.truetype(UserFont, font_size_large)\nsmallfont = ImageFont.truetype(UserFont, font_size_small)\nx_offset = 2\ny_offset = 2\n\nmessage = \"\"\n\n# The position of the top bar\ntop_pos = 25\n\n# Create a values dict to store the data\nvariables = [\"temperature\",\n \"pressure\",\n \"humidity\",\n \"light\",\n \"oxidised\",\n \"reduced\",\n \"nh3\",\n \"pm1\",\n \"pm25\",\n \"pm10\"]\n\nunits = [\"C\",\n \"hPa\",\n \"%\",\n \"Lux\",\n \"kO\",\n \"kO\",\n \"kO\",\n \"ug/m3\",\n \"ug/m3\",\n \"ug/m3\"]\n\n# Define your own warning limits\n# The limits definition follows the order of the variables array\n# Example limits explanation for temperature:\n# [4,18,28,35] means\n# [-273.15 .. 4] -> Dangerously Low\n# (4 .. 18] -> Low\n# (18 .. 28] -> Normal\n# (28 .. 35] -> High\n# (35 .. MAX] -> Dangerously High\n# DISCLAIMER: The limits provided here are just examples and come\n# with NO WARRANTY. 
The authors of this example code claim\n# NO RESPONSIBILITY if reliance on the following values or this\n# code in general leads to ANY DAMAGES or DEATH.\nlimits = [[4, 18, 28, 35],\n [250, 650, 1013.25, 1015],\n [20, 30, 60, 70],\n [-1, -1, 30000, 100000],\n [-1, -1, 40, 50],\n [-1, -1, 450, 550],\n [-1, -1, 200, 300],\n [-1, -1, 50, 100],\n [-1, -1, 50, 100],\n [-1, -1, 50, 100]]\n\n# RGB palette for values on the combined screen\npalette = [(0, 0, 255), # Dangerously Low\n (0, 255, 255), # Low\n (0, 255, 0), # Normal\n (255, 255, 0), # High\n (255, 0, 0)] # Dangerously High\n\nvalues = {}\n\n\n# Displays data and text on the 0.96\" LCD\ndef display_text(variable, data, unit):\n # Maintain length of list\n values[variable] = values[variable][1:] + [data]\n # Scale the values for the variable between 0 and 1\n vmin = min(values[variable])\n vmax = max(values[variable])\n colours = [(v - vmin + 1) / (vmax - vmin + 1) for v in values[variable]]\n # Format the variable name and value\n message = \"{}: {:.1f} {}\".format(variable[:4], data, unit)\n logging.info(message)\n draw.rectangle((0, 0, WIDTH, HEIGHT), (255, 255, 255))\n for i in range(len(colours)):\n # Convert the values to colours from red to blue\n colour = (1.0 - colours[i]) * 0.6\n r, g, b = [int(x * 255.0) for x in colorsys.hsv_to_rgb(colour, 1.0, 1.0)]\n # Draw a 1-pixel wide rectangle of colour\n draw.rectangle((i, top_pos, i + 1, HEIGHT), (r, g, b))\n # Draw a line graph in black\n line_y = HEIGHT - (top_pos + (colours[i] * (HEIGHT - top_pos))) + top_pos\n draw.rectangle((i, line_y, i + 1, line_y + 1), (0, 0, 0))\n # Write the text at the top in black\n draw.text((0, 0), message, font=font, fill=(0, 0, 0))\n st7735.display(img)\n\n\n# Saves the data to be used in the graphs later and prints to the log\ndef save_data(idx, data):\n variable = variables[idx]\n # Maintain length of list\n values[variable] = values[variable][1:] + [data]\n unit = units[idx]\n message = \"{}: {:.1f} 
{}\".format(variable[:4], data, unit)\n logging.info(message)\n\n\n# Displays all the text on the 0.96\" LCD\ndef display_everything():\n draw.rectangle((0, 0, WIDTH, HEIGHT), (0, 0, 0))\n column_count = 2\n row_count = (len(variables) / column_count)\n for i in range(len(variables)):\n variable = variables[i]\n data_value = values[variable][-1]\n unit = units[i]\n x = x_offset + ((WIDTH // column_count) * (i // row_count))\n y = y_offset + ((HEIGHT / row_count) * (i % row_count))\n message = \"{}: {:.1f} {}\".format(variable[:4], data_value, unit)\n lim = limits[i]\n rgb = palette[0]\n for j in range(len(lim)):\n if data_value > lim[j]:\n rgb = palette[j + 1]\n draw.text((x, y), message, font=smallfont, fill=rgb)\n st7735.display(img)\n\n\n# Get the temperature of the CPU for compensation\ndef get_cpu_temperature():\n process = Popen(['vcgencmd', 'measure_temp'], stdout=PIPE, universal_newlines=True)\n output, _error = process.communicate()\n return float(output[output.index('=') + 1:output.rindex(\"'\")])\n\ndef sendToServer(sensor_temp, sensor_pressure, sensor_humidity, sensor_light, \n sensor_oxidising, sensor_reducing, sensor_nh3, sensor_pm1,\n sensor_pm2_5, sensor_pm10):\n \n url = \"https://sensata-academy-weather-station.azurewebsites.net/api/sensata-weather-station\"\n\n jsonString = '{' + f'''\n\"name\": \"{name}\",\n\"email\": \"{email}\",\n\"latitude\": {latitude},\n\"longitude\": {longitude},\n\"description\": \"{location_description}\", \n\"sensor_temp\": {sensor_temp}, \n\"sensor_pressure\": {sensor_pressure},\n\"sensor_humidity\": {sensor_humidity}, \n\"sensor_light\": {sensor_light},\n\"sensor_oxidising\": {sensor_oxidising},\n\"sensor_reducing\": {sensor_reducing}, \n\"sensor_nh3\": {sensor_nh3}, \n\"sensor_pm1\": {sensor_pm1}, \n\"sensor_pm2_5\": {sensor_pm2_5}, \n\"sensor_pm10\": {sensor_pm10} \n''' + '}'\n\n headers = {'Content-type': 'application/json', 'Accept': 'application/text'}\n \n r = requests.post(url, data=jsonString, 
headers=headers)\n\n if r.status_code == 200:\n logging.info(\"SUCCESSFULLY sent sensor data to server\")\n else: \n logging.info(\"FAILED to post sensor data to server\")\n\ndef main():\n # signal esp at start\n mqtt_to_esp32.control_esp(\"onoff\")\n\n todaysDate = dt.datetime.now()\n baselineDate = dt.datetime.strptime(\"01/04/2021\",\"%d/%m/%Y\")\n\n sensor_temp = 0\n sensor_pressure = 0\n sensor_humidity = 0\n sensor_light = 0\n sensor_oxidising = 0\n sensor_reducing = 0\n sensor_nh3 = 0\n sensor_pm1 = 0\n sensor_pm2_5 = 0 \n sensor_pm10 = 0\n\n\n if todaysDate > baselineDate: \n # Tuning factor for compensation. Decrease this number to adjust the\n # temperature down, and increase to adjust up\n #factor = 2.25\n factor = 0.85\n\n cpu_temps = [get_cpu_temperature()] * 5\n\n delay = 0.5 # Debounce the proximity tap\n last_page = 0\n\n for v in variables:\n values[v] = [1] * WIDTH\n\n # The main loop\n try:\n for i in range(0,3): #first reading contains errors. Talk the 3rd\n proximity = ltr559.get_proximity()\n # Everything on one screen\n cpu_temp = get_cpu_temperature()\n # Smooth out with some averaging to decrease jitter\n cpu_temps = cpu_temps[1:] + [cpu_temp]\n avg_cpu_temp = sum(cpu_temps) / float(len(cpu_temps))\n raw_temp = bme280.get_temperature()\n raw_data = raw_temp - ((avg_cpu_temp - raw_temp) / factor)\n save_data(0, raw_data)\n display_everything()\n sensor_temp = raw_data\n\n raw_data = bme280.get_pressure()\n save_data(1, raw_data)\n display_everything()\n sensor_pressure = raw_data\n\n raw_data = bme280.get_humidity()\n save_data(2, raw_data)\n sensor_humidity = raw_data\n\n if proximity < 10:\n raw_data = ltr559.get_lux()\n sensor_light = raw_data\n else:\n raw_data = 1\n sensor_light = 0\n save_data(3, raw_data)\n display_everything()\n\n\n gas_data = gas.read_all()\n sensor_oxidising = gas_data.oxidising / 1000\n sensor_reducing = gas_data.reducing / 1000\n sensor_nh3 = gas_data.nh3 / 1000\n\n save_data(4, sensor_oxidising)\n save_data(5, 
sensor_reducing)\n save_data(6, sensor_nh3)\n display_everything()\n\n pms_data = None\n try:\n pms_data = pms5003.read()\n except (SerialTimeoutError, pmsReadTimeoutError):\n logging.warning(\"Failed to read PMS5003\")\n else:\n sensor_pm1 = float(pms_data.pm_ug_per_m3(1.0))\n sensor_pm2_5 = float(pms_data.pm_ug_per_m3(2.5))\n sensor_pm10 = float(pms_data.pm_ug_per_m3(10))\n save_data(7, sensor_pm1)\n save_data(8, sensor_pm2_5)\n save_data(9, sensor_pm10)\n display_everything()\n\n # Added by Ali\n\n # Calculate factor to see if you can get a more accurate value\n calculate_factor(sensor_temp, raw_temp, avg_cpu_temp, factor)\n\n nowDate = todaysDate.strftime(\"%d/%m/%Y %H:%M:%S\")\n sensor_data = [sensor_temp, sensor_pressure, sensor_humidity, sensor_light, sensor_oxidising, sensor_reducing, sensor_nh3, sensor_pm1, sensor_pm2_5, sensor_pm10]\n\n # Get high and low limits for alerts\n file_path = '/home/pi/python_scripts/enviroproject/alerts_config.json'\n with open(file_path, 'r') as infile:\n jdata = json.load(infile)\n\n location = jdata['sensor']['location']\n action = jdata['sensor']['action']\n interval = jdata['sensor']['interval']\n weather = jdata['sensor']['weather']\n templow = jdata['notification'][\"templow\"]\n humidhigh = jdata['notification'][\"humidhigh\"]\n temphigh = jdata['notification'][\"temphigh\"]\n\n timestamp = time.time()\n\n # Get actual weather from api\n city = \"Belfast\" \n weather = weatherscraper.getCurrentWeather(\"description\", city)\n api_temperature = weatherscraper.getCurrentWeather(\"temperature\", city)\n api_pressure = weatherscraper.getCurrentWeather(\"pressure\", city)\n api_humidity = weatherscraper.getCurrentWeather(\"humidity\", city)\n\n # convert string values to float\n\n api_temperature = float(api_temperature)\n api_pressure = float(api_pressure)\n api_humidity = float(api_humidity)\n \n # write to csv\n csv_result = write_to_csv.write_csv(nowDate, timestamp, location, action, sensor_data, weather)\n 
print(\"數據寫入csv文件\")\n\n # get esp data via mqtt\n esp_dat_dict = mqtt_to_esp32.get_esp_data()\n esp_temp = esp_dat_dict[\"esp32/temperature\"]\n esp_humidity = esp_dat_dict[\"esp32/humidity\"]\n esp_light = esp_dat_dict[\"esp32/light\"]\n print(esp_temp.decode(), esp_humidity.decode(), esp_light.decode())\n\n # write data to sql database\n sql_object = sql_writer.sql_writer()\n # Create database if one doesn't exist\n sql_object.create_database()\n sql_object.insert_row(location, action, sensor_temp, sensor_pressure, sensor_humidity, sensor_light, sensor_oxidising, sensor_reducing, sensor_nh3, esp_temp.decode(), esp_humidity.decode(), esp_light.decode(), api_temperature, api_pressure, api_humidity, weather)\n print(\"數據寫入sql database\")\n\n # Send notifications if values are above the high or low limits\n if templow[\"on\"]:\n weather_warning_sender.lowtemp(sensor_temp, templow)\n if humidhigh[\"on\"] == \"True\":\n weather_warning_sender.humidhigh(sensor_humidity, humidhigh)\n if temphigh[\"on\"] == \"True\":\n weather_warning_sender.temphigh(sensor_temp, temphigh)\n \n\n sendToServer(sensor_temp, sensor_pressure, sensor_humidity, sensor_light, \n sensor_oxidising, sensor_reducing, sensor_nh3, sensor_pm1,\n sensor_pm2_5, sensor_pm10)\n \n ## Make esp flash at end\n mqtt_to_esp32.control_esp(\"onoff\")\n\n\n ## Signal to Arduino\n \"\"\"\n try:\n import arduino\n arduino.run_arduino()\n\n \n \n except FileNotFoundError as e:\n print(f\"Arduino not attached: {e}\")\n except Exception as e:\n print(f\"Other error: {e}\")\n\n\n \"\"\"\n \n\n\n\n # Exit cleanly\n except KeyboardInterrupt:\n sys.exit(0)\n\n\ndef calculate_factor(sensor_temp, raw_temp, avg_cpu_temp, factor):\n print(f\"sensor_temp {sensor_temp}\\nraw_temp {raw_temp}\\navg_cpu_temp {avg_cpu_temp}\")\n f = (avg_cpu_temp-raw_temp)/(raw_temp-sensor_temp)\n print(f\"這兩個數字應該相同\\nfactor:\\n {f}\\t{factor}\")\n\n # Put actual temp here, read from a thermometer you trust:\n ACTUAL_TEMP = 17.6 \n\n 
calculated_factor = (avg_cpu_temp-raw_temp)/(raw_temp-ACTUAL_TEMP)\n print(f\"Please adjust factor to:\\n{calculated_factor}\")\n\n# check if internet connected\ndef wait_for_internet_connection():\n while True:\n try:\n response = urllib.request.urlopen('http://google.com',timeout=1)\n return\n except: \n print(\"no internet connection\")\n exit()\n break\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"sensor_data_to_server.py","file_name":"sensor_data_to_server.py","file_ext":"py","file_size_in_byte":15469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"642808868","text":"from tkinter import *\r\nfrom tkinter import ttk\r\n\r\nroot = Tk()\r\nroot.title('Codemy.com - Learn To Code!')\r\nroot.geometry(\"700x500\")\r\n\r\nmy_entries = []\r\n\r\ndef something():\r\n entry_list = ''\r\n\r\n for entries in my_entries:\r\n entry_list = entry_list + str(entries.get()) + '\\n'\r\n my_label.config(text = entry_list)\r\n\r\n print(my_entries[0].get())\r\n\r\n# Row loop\r\nfor y in range(5):\r\n # Column loop\r\n for x in range(5):\r\n my_entry = Entry(root)\r\n my_entry.grid(row = y, column = x)\r\n my_entries.append(my_entry)\r\n\r\nmy_button = Button(root, text = \"Button\", command = something)\r\nmy_button.grid(row = 6, column = 0, pady = 20)\r\n\r\nmy_label = Label(root, text = '')\r\nmy_label.grid(row = 7, column = 0, pady = 20)\r\nroot.mainloop()","sub_path":"entryboxes.py","file_name":"entryboxes.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"301238665","text":"# Copyright 2014 DreamHost, LLC\n#\n# Author: DreamHost, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport uuid\n\nimport mock\nimport unittest2 as unittest\n\nfrom six.moves import range\nfrom astara import event\nfrom astara import tenant\nfrom astara.drivers import router\nfrom astara import state\nfrom astara.drivers import states\nfrom astara.test.unit import fakes\n\n\nclass TestTenantResourceManager(unittest.TestCase):\n\n def setUp(self):\n super(TestTenantResourceManager, self).setUp()\n\n self.fake_driver = fakes.fake_driver()\n self.tenant_id = 'cfb48b9c-66f6-11e5-a7be-525400cfc326'\n self.instance_mgr = \\\n mock.patch('astara.instance_manager.InstanceManager').start()\n self.addCleanup(mock.patch.stopall)\n self.notifier = mock.Mock()\n self.deleter = mock.Mock()\n self.trm = tenant.TenantResourceManager(\n '1234',\n delete_callback=self.deleter,\n notify_callback=self.notifier,\n queue_warning_threshold=10,\n reboot_error_threshold=5,\n )\n self.ctx = mock.Mock()\n\n def test_new_resource(self):\n r = event.Resource(\n tenant_id=self.tenant_id,\n id='5678',\n driver=router.Router.RESOURCE_NAME,\n )\n msg = event.Event(\n resource=r,\n crud=event.CREATE,\n body={'key': 'value'},\n )\n sm = self.trm.get_state_machines(msg, self.ctx)[0]\n self.assertEqual(sm.resource_id, '5678')\n self.assertIn('5678', self.trm.state_machines)\n\n def test_get_state_machine_no_resoruce_id(self):\n r = event.Resource(\n 
tenant_id=self.tenant_id,\n id=None,\n driver=router.Router.RESOURCE_NAME,\n )\n msg = event.Event(\n resource=r,\n crud=event.CREATE,\n body={'key': 'value'},\n )\n self.assertRaises(tenant.InvalidIncomingMessage,\n self.trm.get_state_machines, msg, self.ctx)\n\n def test_all_resources(self):\n for i in range(5):\n rid = str(uuid.uuid4())\n driver = fakes.fake_driver(rid)\n sm = state.Automaton(\n driver=driver,\n worker_context=self.ctx,\n resource_id=driver.id,\n tenant_id=self.tenant_id,\n delete_callback=None,\n bandwidth_callback=None,\n queue_warning_threshold=5,\n reboot_error_threshold=5)\n self.trm.state_machines[rid] = sm\n r = event.Resource(\n tenant_id=self.tenant_id,\n id='*',\n driver=router.Router.RESOURCE_NAME,\n )\n msg = event.Event(\n resource=r,\n crud=event.CREATE,\n body={'key': 'value'},\n )\n sms = self.trm.get_state_machines(msg, self.ctx)\n self.assertEqual(5, len(sms))\n\n def test_errored_routers(self):\n self.trm.state_machines.state_machines = {}\n for i in range(5):\n rid = str(uuid.uuid4())\n driver = fakes.fake_driver(rid)\n sm = state.Automaton(\n driver=driver,\n worker_context=self.ctx,\n resource_id=i,\n tenant_id=self.tenant_id,\n delete_callback=None,\n bandwidth_callback=None,\n queue_warning_threshold=5,\n reboot_error_threshold=5)\n self.trm.state_machines[rid] = sm\n\n # Replace the default mock with one that has 'state' set.\n if i == 2:\n status = states.ERROR\n else:\n status = states.UP\n\n sm.instance = mock.Mock(state=status)\n self.trm.state_machines.state_machines[str(i)] = sm\n\n r = event.Resource(\n tenant_id=self.tenant_id,\n id='2',\n driver=router.Router.RESOURCE_NAME,\n )\n msg = event.Event(\n resource=r,\n crud=event.CREATE,\n body={'key': 'value'},\n )\n sms = self.trm.get_state_machines(msg, self.ctx)\n self.assertEqual(1, len(sms))\n self.assertEqual(2, sms[0].resource_id)\n self.assertIs(self.trm.state_machines.state_machines['2'], sms[0])\n\n def test_existing_resource(self):\n r = event.Resource(\n 
tenant_id=self.tenant_id,\n id='5678',\n driver=router.Router.RESOURCE_NAME,\n )\n msg = event.Event(\n resource=r,\n crud=event.CREATE,\n body={'key': 'value'},\n )\n # First time creates...\n sm1 = self.trm.get_state_machines(msg, self.ctx)[0]\n # Second time should return the same objects...\n sm2 = self.trm.get_state_machines(msg, self.ctx)[0]\n self.assertIs(sm1, sm2)\n self.assertIs(sm1._queue, sm2._queue)\n\n def test_existing_resource_of_many(self):\n sms = {}\n for resource_id in ['5678', 'ABCD', 'EFGH']:\n r = event.Resource(\n tenant_id=self.tenant_id,\n id=resource_id,\n driver=router.Router.RESOURCE_NAME,\n )\n msg = event.Event(\n resource=r,\n crud=event.CREATE,\n body={'key': 'value'},\n )\n # First time creates...\n sm1 = self.trm.get_state_machines(msg, self.ctx)[0]\n sms[resource_id] = sm1\n\n # Second time should return the same objects...\n r = event.Resource(\n id='5678',\n tenant_id=self.tenant_id,\n driver=router.Router.RESOURCE_NAME,\n )\n msg = event.Event(\n resource=r,\n crud=event.CREATE,\n body={'key': 'value'},\n )\n sm2 = self.trm.get_state_machines(msg, self.ctx)[0]\n self.assertIs(sm2, sms['5678'])\n\n def test_delete_resource(self):\n r = event.Resource(\n id='1234',\n tenant_id=self.tenant_id,\n driver=router.Router.RESOURCE_NAME,\n )\n self.trm.state_machines['1234'] = mock.Mock()\n self.trm._delete_resource(r)\n self.assertNotIn('1234', self.trm.state_machines)\n self.assertTrue(self.deleter.called)\n\n def test_delete_default_resource(self):\n r = event.Resource(\n id='1234',\n tenant_id=self.tenant_id,\n driver=router.Router.RESOURCE_NAME)\n self.trm._default_resource_id = '1234'\n self.trm.state_machines['1234'] = mock.Mock()\n self.trm._delete_resource(r)\n self.assertNotIn('1234', self.trm.state_machines)\n self.assertIs(None, self.trm._default_resource_id)\n\n def test_delete_not_default_resource(self):\n r = event.Resource(\n id='1234',\n tenant_id=self.tenant_id,\n driver=router.Router.RESOURCE_NAME)\n 
self.trm._default_resource_id = 'abcd'\n self.trm.state_machines['1234'] = mock.Mock()\n self.trm._delete_resource(r)\n self.assertEqual('abcd', self.trm._default_resource_id)\n\n def test_no_update_deleted_resource(self):\n r = event.Resource(\n tenant_id='1234',\n id='5678',\n driver=router.Router.RESOURCE_NAME,\n )\n self.trm._default_resource_id = 'abcd'\n self.trm.state_machines['5678'] = mock.Mock()\n self.trm._delete_resource(r)\n self.assertEqual(self.trm.state_machines.values(), [])\n r = event.Resource(\n tenant_id='1234',\n id='5678',\n driver=router.Router.RESOURCE_NAME,\n )\n msg = event.Event(\n resource=r,\n crud=event.CREATE,\n body={'key': 'value'},\n )\n sms = self.trm.get_state_machines(msg, self.ctx)\n self.assertEqual(sms, [])\n self.assertIn('5678', self.trm.state_machines.deleted)\n\n def test_deleter_callback(self):\n r = event.Resource(\n tenant_id='1234',\n id='5678',\n driver=router.Router.RESOURCE_NAME,\n )\n msg = event.Event(\n resource=r,\n crud=event.CREATE,\n body={'key': 'value'},\n )\n sm = self.trm.get_state_machines(msg, self.ctx)[0]\n self.assertIn('5678', self.trm.state_machines)\n sm._do_delete()\n self.assertNotIn('5678', self.trm.state_machines)\n self.assertTrue(\n self.trm.state_machines.has_been_deleted('5678'))\n\n def test_report_bandwidth(self):\n notifications = []\n self.notifier.side_effect = notifications.append\n self.trm._report_bandwidth(\n '5678',\n [{'name': 'a',\n 'value': 1,\n },\n {'name': 'b',\n 'value': 2,\n }],\n )\n n = notifications[0]\n self.assertEqual('1234', n['tenant_id'])\n self.assertIn('5678', n['uuid'])\n self.assertIn('timestamp', n)\n self.assertEqual('astara.bandwidth.used', n['event_type'])\n self.assertIn('a', n['payload'])\n self.assertIn('b', n['payload'])\n\n def test_get_state_machine_by_resource_id(self):\n fake_sm = mock.Mock()\n self.trm.state_machines['fake_resource_id'] = fake_sm\n self.assertEqual(\n self.trm.get_state_machine_by_resource_id('fake_resource_id'),\n fake_sm\n 
)\n\n def test_unmanage_resource(self):\n fake_sm = mock.Mock()\n self.trm.state_machines['fake-resource_id'] = fake_sm\n self.trm.unmanage_resource('fake-resource-id')\n self.assertNotIn('fake-resource-id', self.trm.state_machines)\n self.assertFalse(\n self.trm.state_machines.has_been_deleted('fake-resource-id'))\n","sub_path":"astara/test/unit/test_tenant.py","file_name":"test_tenant.py","file_ext":"py","file_size_in_byte":10275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"473331700","text":"import http\n\n\nclass HttpClient(object):\n\n def __init__(self):\n self.default_url = \"www.rte.ie\"\n\n def __str__(self):\n return \"Http Client used to fetch rss feeds\"\n\n def get(self, url):\n conn = http.client.HTTPConnection(url)\n conn.request(\"GET\", \"/news/rss/news-headlines.xml\")\n r = conn.getresponse()\n return r.read()\n\nif __name__ == \"__main__\":\n print( 'Initialising the client...')\n\n client = HttpClient()\n\n print( 'Attempting to fetch rte rss feed')\n\n print( 'Received: ', client.get(client.default_url))\n","sub_path":"src/http_client.py","file_name":"http_client.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"601874433","text":"import cv2\nimport numpy as np\nimport functools\n\ndef callback(x, y):\n\tprint(x, y)\n\nframe = np.ones((640, 640))\n\ncv2.namedWindow('test')\ncv2.createTrackbar('thrs0', 'test', 300, 800, functools.partial(callback, 0))\ncv2.createTrackbar('thrs1', 'test', 300, 800, functools.partial(callback, 1))\n# Do whatever you want with contours\ncv2.imshow('test', frame)\n\ncv2.waitKey(0)","sub_path":"opencv_slider_test.py","file_name":"opencv_slider_test.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"97617477","text":"# -*- coding: utf-8 -*-\n'''\n.. _module_mc_rdiffbackup:\n\nmc_rdiff-backup / rdiff-backup functions\n============================================\n\n\n\n'''\n\n# Import python libs\nimport logging\nimport mc_states.api\n\nfrom salt.utils.odict import OrderedDict\n\n__name = 'rdiffbackup'\n\nlog = logging.getLogger(__name__)\n\n\ndef settings():\n '''\n rdiff-backup settings\n\n '''\n @mc_states.api.lazy_subregistry_get(__salt__, __name)\n def _settings():\n grains = __grains__\n pillar = __pillar__\n data = __salt__['mc_utils.defaults'](\n 'makina-states.services.backup.rdiff-backup', {\n }\n )\n return data\n return _settings()\n\n\n\n#\n","sub_path":"doc/sphinx/mc_states/modules/mc_rdiffbackup.py","file_name":"mc_rdiffbackup.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"635419525","text":"import os\n#se cargan variables globales\nfrom dotenv import load_dotenv\nload_dotenv()\n#jwt\nimport jwt\n#token\nfrom functools import wraps\n# import para hacer solicitudes\nimport requests\n# import del manejo de listas\nfrom typing import List, Dict\n# import para el funcionamiento general de flask\nfrom flask import Flask, jsonify\nfrom flask import request\nfrom flask import Response\n# import libreria json\nimport json \n# import de conexion con mysql\nimport mysql.connector\n#para simular lanzar dados\nimport random\n#import para obtener fecha y hora\nfrom datetime import date\n\nerror_message = \"{'respuesta':'Error'}\"\nsuccess_message = \"{'respuesta': 'Success'}\"\napp = Flask(__name__, static_url_path='') # creacion de la app en python de flask\n#configuracion global de base de datos\nconfig = {\n 'user': 'root',\n 'password': 'root',\n 'host': 'db',\n 'port': '3306',\n 'database': 'juegos'\n}\nbasedir = os.path.abspath(os.path.dirname(__file__))\ndata_file = os.path.join(basedir, 'key-public.pem')\ndef check_for_token(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n token = request.headers.get('Authorization')\n TokenArray = token.split(\" \") \n print(TokenArray[1], flush=True)\n if not token:\n return jsonify({'Mensaje':'Falta el token'}), 403\n try:\n f = open(data_file, \"r\")\n public_key = f.read()\n f.close()\n data = jwt.decode(TokenArray[1], public_key, algorithms='RS256')\n except Exception as e:\n print(e, flush=True)\n return jsonify({'Mensaje':'Token Invalido'}), 403\n return func(*args, **kwargs)\n return wrapped\n \ndef updateFinalizarPartida(idjuego):\n try:\n today = date.today()\n # variable de la conexion con la base de datos\n connection = mysql.connector.connect(**config)\n cursor = connection.cursor()\n #consulta hacia que se utilizara en base de datos\n sql_query = \"\"\"UPDATE juego SET estado = 2 WHERE juego = %(juego)s\"\"\"\n # ejecucion de consulta hacia la base de datos \n 
cursor.execute(sql_query, {'juego': idjuego})\n # creacion de objeto donde se almacenara el contenido de la tabla\n connection.commit()\n cursor.close()\n # se cierra tambien con la conexion hacia la BD\n connection.close()\n return Response(success_message, status=201, mimetype='application/json')\n except Exception as e:\n print(e)\n return Response(error_message, status=500, mimetype='application/json')\n\n\ndef juegos() -> List[Dict]:\n # variable de la conexion con la base de datos\n connection = mysql.connector.connect(**config)\n cursor = connection.cursor()\n # ejecucion de consulta hacia la base de datos\n cursor.execute('''SELECT j.juego, j.estado, j.created_at, p.jugador FROM juego j\n INNER JOIN posicion p\n ON p.juego = j.juego''')\n # creacion de objeto donde se almacenara el contenido de la tabla\n results = cursor.fetchall()\n json_data_list = []\n for row in results:\n data = {}\n data['juego'] = str(row[0])\n data['estado'] = str(row[1])\n data['created_at'] = str(row[2])\n data['jugador'] = str(row[3])\n json_data_list.append(data)\n # se cierra el cursor\n cursor.close()\n # se cierra tambien con la conexion hacia la BD\n connection.close()\n # retorno del objeto con el contenido de la tabla\n return json.dumps(json_data_list)\n\n\ndef obtenerTokenDados():\n parametro = {'id': os.getenv(\"ID_TOKENDADO\"), 'secret' : os.getenv('LLAVE_TOKENDADO')}\n token = requests.get(os.getenv(\"JWT_ENDPOINT\"), params=parametro)\n return json.loads(token.text)\n\ndef tirarDado():\n try:\n solicitud = obtenerTokenDados()\n token = solicitud[\"token\"]\n header = {'Authorization': 'Bearer ' + token}\n res = requests.get(os.getenv(\"DADO_ENDPOINT\"), headers=header)\n dadosJson = json.loads(res.text)\n dados = dadosJson[\"dados\"]\n dado1 = dados[0]\n dado2 = dados[1]\n #operacion entre los dados anteriores\n dado3 = dados[2] \n operacion = 0\n if (dado3 % 2) == 0:\n operacion = dado1 + dado2\n else:\n operacion = abs(dado1 - dado2)\n 
guardarBitacoraPartida('DADO', 'VALOR DADO ' + str(operacion))\n return operacion\n except Exception as e:\n print(e, flush=True)\n return 0\n\n\n\ndef simularPartida(idjuego, jugadores):\n try:\n pos_jugador1 = 0\n pos_jugador2 = 0 \n generarNuevaPartida(idjuego, jugadores)\n while pos_jugador1 < 32 and pos_jugador2 < 32:\n #turno jugador1\n dado = tirarDado()\n pos_jugador1 += dado\n guardarBitacoraPartida('SIMULAR', 'JUGADOR ' + jugadores[0] + ' TIRA DADO')\n guardarBitacoraPartida('SIMULAR', 'JUGADOR ' + jugadores[0] + ' NUEVA POSICION ' + str(pos_jugador1))\n #turno jugador2\n dado = tirarDado ()\n pos_jugador2 += dado\n guardarBitacoraPartida('SIMULAR', 'JUGADOR ' + jugadores[1] + ' TIRA DADO')\n guardarBitacoraPartida('SIMULAR', 'JUGADOR ' + jugadores[1] + ' NUEVA POSICION ' + str(pos_jugador2))\n\n if pos_jugador1 > 32:\n marcarGanador(idjuego, 1)\n guardarBitacoraPartida('SIMULAR', 'JUGADOR ' + jugadores[0] + ' HA GANADO PARTIDA')\n elif pos_jugador2 > 32:\n marcarGanador(idjuego, 2)\n guardarBitacoraPartida('SIMULAR', 'JUGADOR ' + jugadores[1] + ' HA GANADO PARTIDA')\n return Response(\"{'respuesta': 'Juego Simulado con Exito'}\", status=201, mimetype='application/json')\n except Exception as e:\n print(e, flush=True)\n return Response(error_message, status=500, mimetype='application/json')\n\ndef guardarBitacoraPartida(nombremicro, accion):\n connection = mysql.connector.connect(**config)\n cursor = connection.cursor()\n today = date.today()\n #query insert hacia juego\n sql_query = \"\"\"INSERT INTO bitacora_juego (nombre_microservicio, accion, created_at)\n VALUES (%(nombre)s, %(accion)s, %(created)s)\"\"\"\n # ejecucion de consulta hacia la base de datos \n cursor.execute(sql_query, {'nombre': nombremicro, 'accion': accion, 'created': today})\n connection.commit()\n cursor.close()\n # se cierra tambien con la conexion hacia la BD\n connection.close()\n\n#se insertar un nuevo registro en la tabla juego\n#se inicializan las posiciones y el marcador de 
los jugadores.\ndef generarNuevaPartida(idjuego, jugadores):\n #verificar si existen los jugadores\n #verificar jugador 1\n '''\n url = os.getenv(\"USERS_ENDPOINT\") + str(jugadores[0])\n r1 = requests.get(url = os.getenv(\"USERS_ENDPOINT\")) \n #verificar jugador2\n\n url = os.getenv(\"USERS_ENDPOINT\") + str(jugadores[1])\n r2 = requests.get(url = os.getenv(\"USERS_ENDPOINT\"))\n if r1.status_code != 200 or r2.status_code != 200:\n return Response(\"{'Respuesta':'Usuario no encontrado'}\", status=404, mimetype='application/json')'''\n try:\n\n # obtener la fecha de hoy\n today = date.today()\n # variable de la conexion con la base de datos\n connection = mysql.connector.connect(**config)\n cursor = connection.cursor()\n #query insert hacia juego\n sql_query = \"\"\"INSERT INTO juego (juego, estado, created_at)\n VALUES (%(id_juego)s, %(estado)s, %(created)s)\"\"\"\n # ejecucion de consulta hacia la base de datos \n cursor.execute(sql_query, {'id_juego': idjuego, 'estado': 0, 'created': today})\n #query insert hacia posicion\n connection.commit()\n sql_query = \"\"\"INSERT INTO posicion (jugador, posicion, juego)\n VALUES (%(jugador)s, %(posicion)s, %(id_juego)s)\"\"\"\n cursor.execute(sql_query, {'jugador': str(jugadores[0]), 'posicion': 0, 'id_juego': idjuego})\n # creacion de objeto donde se almacenara el contenido de la tabla\n connection.commit()\n #se inserta jugador2\n sql_query = \"\"\"INSERT INTO posicion (jugador, posicion, juego)\n VALUES (%(jugador)s, %(posicion)s, %(id_juego)s)\"\"\"\n cursor.execute(sql_query, {'jugador': str(jugadores[1]), 'posicion': 0, 'id_juego': idjuego})\n # creacion de objeto donde se almacenara el contenido de la tabla\n connection.commit()\n #se inserta turno jugador 1, inicia partida\n sql_query = \"\"\"INSERT INTO turno (jugador, turno, juego)\n VALUES (%(jugador)s, %(turno)s, %(id_juego)s)\"\"\"\n cursor.execute(sql_query, {'jugador': str(jugadores[0]), 'turno': True, 'id_juego': idjuego})\n # creacion de objeto donde se 
almacenara el contenido de la tabla\n connection.commit()\n #se inserta turno jugador 1, inicia partida\n sql_query = \"\"\"INSERT INTO turno (jugador, turno, juego)\n VALUES (%(jugador)s, %(turno)s, %(id_juego)s)\"\"\"\n cursor.execute(sql_query, {'jugador': str(jugadores[1]), 'turno': False, 'id_juego': idjuego})\n # creacion de objeto donde se almacenara el contenido de la tabla\n connection.commit()\n cursor.close()\n # se cierra tambien con la conexion hacia la BD\n connection.close()\n # retorno del objeto con el contenido de la tabla\n \n return Response(success_message, status=201, mimetype='application/json')\n except Exception as e:\n print(e)\n return Response(error_message, status=406, mimetype='application/json')\n\ndef cambiarPosicionJugador(idjuego, idjugador, nuevaPosicion):\n try:\n today = date.today()\n # variable de la conexion con la base de datos\n connection = mysql.connector.connect(**config)\n cursor = connection.cursor()\n #consulta hacia que se utilizara en base de datos\n sql_query = \"\"\"UPDATE posicion SET posicion = %(posicion)s WHERE jugador = %(jugador)s and juego = %(juego)s\"\"\"\n # ejecucion de consulta hacia la base de datos \n cursor.execute(sql_query, {'posicion': nuevaPosicion, 'jugador': idjugador, 'juego': idjuego})\n # creacion de objeto donde se almacenara el contenido de la tabla\n connection.commit()\n cursor.close()\n # se cierra tambien con la conexion hacia la BD\n connection.close()\n return Response(success_message, status=201, mimetype='application/json')\n except Exception as e:\n print(e)\n return Response(error_message, status=500, mimetype='application/json')\n\n\ndef obtenerPosicionJugadores(idjuego):\n # variable de la conexion con la base de datos\n connection = mysql.connector.connect(**config)\n cursor = connection.cursor()\n #consulta hacia que se utilizara en base de datos\n sql_query = \"SELECT jugador, posicion FROM posicion WHERE juego = %(juego)s\"\n # ejecucion de consulta hacia la base de datos 
\n cursor.execute(sql_query, {'juego': idjuego})\n # creacion de objeto donde se almacenara el contenido de la tabla\n results = cursor.fetchall()\n json_data_list = []\n for row in results:\n data = {}\n data['jugador'] = str(row[0])\n data['posicion'] = str(row[1])\n json_data_list.append(data)\n # se cierra el cursor\n cursor.close()\n # se cierra tambien con la conexion hacia la BD\n connection.close()\n # retorno del objeto con el contenido de la tabla\n json_data = json.dumps(json_data_list)\n return json_data\n\ndef obtenerTurnoJuego(idjuego, idjugador):\n try:\n connection = mysql.connector.connect(**config)\n cursor = connection.cursor()\n #consulta hacia que se utilizara en base de datos\n sql_query = \"SELECT turno FROM turno WHERE juego = %(juego)s AND jugador = %(jugador)s\"\n # ejecucion de consulta hacia la base de datos \n cursor.execute(sql_query, {'juego': idjuego, 'jugador': idjugador})\n results = cursor.fetchall() \n data = {}\n for row in results:\n data['turno'] = str(row[0])\n json_data = json.dumps(data)\n # se cierra el cursor\n cursor.close()\n # se cierra tambien con la conexion hacia la BD\n connection.close()\n return Response(json_data, status=201, mimetype='application/json')\n except Exception as e:\n print(e)\n return Response(error_message, status=500, mimetype='application/json')\n \n\ndef cambiarTurnoJugador(idjuego, jugador):\n try:\n connection = mysql.connector.connect(**config)\n cursor = connection.cursor()\n #consulta hacia que se utilizara en base de datos\n sql_query = \"UPDATE turno set turno = 0 WHERE juego = %(juego)s and jugador = %(jugador)s\"\n sql_query_update = \"UPDATE turno set turno = 1 WHERE juego = %(juego)s and jugador <> %(jugador)s\"\n # ejecucion de consulta hacia la base de datos \n cursor.execute(sql_query, {'juego': idjuego, 'jugador': jugador})\n connection.commit()\n cursor.execute(sql_query_update, {'juego': idjuego, 'jugador': jugador})\n connection.commit()\n # se cierra el cursor\n 
cursor.close()\n # se cierra tambien con la conexion hacia la BD\n connection.close()\n return Response(\"{'respuesta':'turno cambiado'}\", status=201, mimetype='application/json')\n except Exception as e:\n print(e)\n return Response(\"{'respuesta':'Error'}\", status=500, mimetype='application/json')\n\ndef marcarGanador(idjuego, valor):\n urls = os.getenv(\"TORNEOS_ENDPOINT\") + str(idjuego)\n data = {'marcador': [valor]} \n\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n r1 = requests.put(urls, data=json.dumps(data), headers=headers)\n\n if r1.status_code == 201:\n return Response(\"{'respuesta': 'Marcador guardado en torneos'}\", status=201, mimetype='application/json')\n else:\n print(r1.status_code)\n return Response(\"{'respuesta': 'Error'}\", status=r1.status_code, mimetype='application/json')\n\n\n# se guarda la transaccion\n# funcion raiz obtener la posicion de los jugadores dentro de un \n# determinado juego\n@app.route('/obtenerPosicion/', methods=['GET'])\ndef obtenerPosicion(idjuego):\n #se obtiene el id del juego\n posiciones = obtenerPosicionJugadores(idjuego)\n return posiciones\n\n\n@app.route('/guardarPosicion///', methods=['POST'])\ndef guardarPosicion(idjuego, idjugador, posicion):\n #se obtiene el id del juego\n cambiarPosicionJugador(idjuego, idjugador, posicion)\n return Response(\"{'respuesta': 'Posicion Cambiada'}\", status=201, mimetype='application/json')\n\n# funcion que permite obtener el turno de un jugador en\n# determinado juego\n@app.route('/obtenerTurno//', methods=['GET'])\ndef obtenerTurno(idjuego, idjugador):\n #variable de la conexion con la base de datos\n return obtenerTurnoJuego(idjuego, idjugador)\n \n \n\n@app.route('/cambiarTurno//', methods=['POST'])\ndef cambiarTurno(idjuego, idjugador):\n return cambiarTurnoJugador(idjuego, idjugador)\n\n# Funcion que permite iniciar un nuevo juego creado desde un torneo\n@app.route('/generar', methods=['POST'])\n@check_for_token\ndef generar():\n inputs = 
request.get_json(force=True)\n idjuego = inputs['id']\n jugadores = inputs['jugadores']\n valor = generarNuevaPartida(idjuego, jugadores)\n guardarBitacoraPartida('GENERAR', 'SE GENERO UNA NUEVA PARTIDA')\n return valor\n\n@app.route('/finalizarPartida/', methods=['POST'])\ndef finalizarPartida(idjuego):\n return updateFinalizarPartida(idjuego)\n \n\n@app.route('/simular', methods=['POST'])\n@check_for_token\ndef simular():\n inputs = request.get_json(force=True)\n idjuego = inputs['id']\n jugadores = inputs['jugadores']\n return simularPartida(idjuego, jugadores)\n\n\n\n@app.route('/obtenerJuegos', methods=['GET'])\ndef obtenerJuegos():\n return juegos()\n\n@app.route('/obtenerEnv', methods=['GET'])\ndef obtenerEnv():\n valor = os.getenv(\"SECRET_KEY\")\n return valor \n\n@app.route('/ganador//', methods=['POST'])\ndef obtenerGanador(idjuego, valor):\n return marcarGanador(idjuego, valor)\n\n\n\n #verificar jugador2 \n\nif __name__ == '__main__':\n # comando para configurar la ip del servicio\n app.run(debug=True, host='0.0.0.0')\n","sub_path":"docker_app/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":16415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"383318200","text":"\nfrom enum import Enum, unique\n\n@unique\nclass InPin(Enum):\n \"\"\"List of all current INPUTS to the Autokote Pro system\"\"\"\n TongueSensor = 4\n PileHeightSafety = 14\n TableLowerLimit = 15\n TableUpperLimit = 17\n GapSensor = 27\n LaminatorSafetyCover = 22\n KnifeSafetyCover = 23\n\n# Descriptions of each pin for the user interface\ninpin_desc = {\n InPin.TongueSensor:\"Triggers when the pile reaches proper feeder height\",\n InPin.PileHeightSafety:\"Triggers only if the pile misses the tongue switch and hits the feed head\",\n InPin.TableLowerLimit:\"Triggers when the feeder table is at its lowest position\",\n InPin.TableUpperLimit:\"Triggers when the feeder table is at its highest position\",\n InPin.GapSensor:\"True if there is a gap between sheets fed into the system\",\n InPin.LaminatorSafetyCover:\"True if the laminator safety cover is open\",\n InPin.KnifeSafetyCover:\"True if the knife safety cover is open\"\n}\n","sub_path":"app/enum/inpins.py","file_name":"inpins.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"564900947","text":"import logging\nfrom .libpynexmo.nexmomessage import NexmoMessage\nfrom django.conf import settings\n\nlogger = logging.getLogger(__name__)\n\noutbox = []\n\ndef send_message(to, message):\n \"\"\"Shortcut to send a sms using libnexmo api.\n\n Usage:\n\n >>> from nexmo import send_message\n >>> send_message('+33612345678', 'My sms message body')\n \"\"\"\n params = {\n 'api_key': settings.NEXMO_USERNAME,\n 'api_secret': settings.NEXMO_PASSWORD,\n 'type': 'unicode',\n 'from': settings.NEXMO_FROM,\n 'to': to,\n 'text': message.encode('utf-8'),\n }\n\n sms = NexmoMessage(params)\n\n #if settings.NEXMO_LOG:\n # logger.info(u'Nexmo outbound SMS to: %s, message: %s' % (\n # sms.sms['to'],\n # sms.sms['text'],\n # ))\n\n if settings.NEXMO_TEST_MODE:\n outbox.append(sms)\n else:\n response = sms.send_request()\n return response\n\n return False\n","sub_path":"nexmo/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"489173639","text":"import pandas as pd\nimport os\nimport csv\nimport random\nimport pickle\nfrom collections import defaultdict\nfrom collections import Counter\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\nimport os\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nfloyd_flag = True\n\ndef dna_sequence_reader(INPUT_DIR, FILE):\n genes = []\n line_idxs = []\n text = ''\n gene_seq_dict = {}\n\n input_file = open(os.path.join(INPUT_DIR,FILE),\"r\")\n csvReader = csv.reader(input_file)\n for line_i, row in enumerate(csvReader):\n if '>' in row[0]:\n gene_name = row[0].split(\" \")[0][17:]\n genes.append(gene_name)\n line_idxs.append(line_i)\n text = text + \",\"\n else:\n text = text + row[0].upper()\n\n dna_seqs = text.split(\",\")[1:]\n\n for i, gene in enumerate(genes):\n gene_seq_dict[gene] = dna_seqs[i]\n return gene_seq_dict, dna_seqs, genes\n\n\n\ndef obtain_interactions(floyd):\n interactions = []\n if floyd:\n FILE = '../input/collins-sc-emap-gis.tsv'\n else:\n FILE = '../input/collins-sc-emap-gis.tsv'\n with open(FILE) as tsvfile:\n reader = csv.reader(tsvfile, delimiter='\\t')\n for row in reader:\n interactions.append(row)\n\n interactions = interactions[1:]\n # num_inter_records = len(interactions) # 150636\n # print(\"number of interactions: \", num_inter_records)\n gene1 = [inter[0] for inter in interactions]\n gene2 = [inter[1] for inter in interactions]\n interaction_genes = sorted(list(set(gene1 + gene2)))\n # build interaction matrix\n gene_pairs = []\n pair_scores = []\n interactions_matrix = defaultdict(dict)\n for inter in interactions:\n interactions_matrix[inter[0]][inter[1]] = inter[2]\n gene_pairs.append((inter[0],inter[1]))\n pair_scores.append(inter[2])\n return interaction_genes,interactions_matrix, gene_pairs,pair_scores\n\ndef get_fixed_length_sequence(interaction_genes,dict,seq_len):\n filtered_genes = {}\n for gene in interaction_genes:\n if gene in dict.keys():\n 
# Interaction-score thresholds shared by label_interaction / select_indexes.
# (Previously duplicated as magic numbers in both functions.)
NEGATIVE_THRESHOLD = -2.5  # scores below this count as negative interactions
POSITIVE_THRESHOLD = 2     # scores above this count as positive interactions


def get_fixed_length_sequence(genes, seq_dict, seq_len):
    """Return {gene: sequence} with every sequence truncated to <= seq_len.

    NOTE(review): the original definition starts before this chunk; the
    header and the gene-filtering loop are reconstructed from the visible
    tail and the two call sites below -- confirm against the full file.
    """
    filtered_genes = {}
    for gene in genes:
        if gene in seq_dict:
            filtered_genes[gene] = seq_dict[gene]

    # deal with protein sequence length: truncate anything longer than seq_len
    fixed_len_protein = {}
    for gene in filtered_genes:
        if len(filtered_genes[gene]) > seq_len:
            fixed_len_protein[gene] = filtered_genes[gene][:seq_len]
        else:
            fixed_len_protein[gene] = filtered_genes[gene]
    return fixed_len_protein


def pad_seq(seq, max_length):
    """Right-pad a list of indices with the PAD token (0) up to max_length.

    Mutates and returns `seq`; a sequence already >= max_length long is
    returned unchanged.
    """
    PAD_token = 0
    seq += [PAD_token] * (max_length - len(seq))
    return seq


def construct_feature_vector(gene_pairs, gene_dict, pair_scores, protein2index, seq_len):
    """Build per-pair feature vectors by concatenating the two padded index
    sequences; (g1, g2) is assumed equivalent to (g2, g1).

    Pairs with a gene missing from gene_dict are silently skipped, so the
    output rows align with a *filtered* subset of gene_pairs.
    Returns (feature_vector, target_score) as parallel lists.
    """
    feature_vector = []  # simply concatenate the two index vectors
    target_score = []
    for idx, (gene1, gene2) in enumerate(gene_pairs):
        if gene1 in gene_dict and gene2 in gene_dict:
            protein_i = gene_dict[gene1]
            protein_j = gene_dict[gene2]
            f = (pad_seq([protein2index[p] for p in protein_i], seq_len)
                 + pad_seq([protein2index[p] for p in protein_j], seq_len))
            feature_vector.append(f)
            target_score.append(float(pair_scores[idx]))
    return feature_vector, target_score


def one_hot_encoding(gene_dict, gene, base2index, seq_len):
    """One-hot encode gene_dict[gene] into a (4, seq_len) array.

    base2index values start at 1 (0 is the pad token), hence the -1 offset.
    """
    encoded = np.zeros([4, seq_len])
    seq = gene_dict[gene]
    for i, s in enumerate(seq):
        if base2index[s] > 0:
            encoded[base2index[s] - 1, i] = 1  # base2index starts with 1
    return encoded


def get_one_hot_pairs(gene_pairs, gene_dict, base2index, pair_scores, seq_len):
    """One-hot encode every gene pair whose two genes are both in gene_dict.

    Returns (pairs, target_score) where each pair is an (encoded1, encoded2)
    tuple of (4, seq_len) arrays.
    """
    pair = []
    target_score = []
    for idx, (gene1, gene2) in enumerate(gene_pairs):
        if gene1 in gene_dict and gene2 in gene_dict:
            encoded1 = one_hot_encoding(gene_dict, gene1, base2index, seq_len)
            encoded2 = one_hot_encoding(gene_dict, gene2, base2index, seq_len)
            pair.append((encoded1, encoded2))
            target_score.append(pair_scores[idx])
    return pair, target_score


def build_base_vocab(gene_seq_dict, floyd_flag=False):
    """Build 1-based base<->index vocabularies over all sequence characters
    and persist index2base for later decoding.

    Returns (base2index, index2base).
    """
    unique_bases = set(''.join(gene_seq_dict.values()))
    base2index = {}
    index2base = {}
    for idx, p in enumerate(unique_bases):
        base2index[p] = int(idx) + 1  # index 0 is reserved for padding
    for k, v in base2index.items():
        index2base[v] = k

    # Save index2base.  NOTE: both branches of the original `floyd_flag`
    # test wrote the identical path, so the dead branch is collapsed here
    # (behavior unchanged).
    with open('../output/protein_vocab.pickle', 'wb') as f:
        pickle.dump(index2base, f)
    return base2index, index2base


def pairing_data(X, Y):
    """Zip features and targets into a list of (x, y) tuples."""
    return list(zip(X, Y))


def label_interaction(preds):
    """Map each predicted score to 'negative' / 'positive' / 'no-interaction'
    using the shared module thresholds."""
    labels = []
    for pred in preds:
        if pred < NEGATIVE_THRESHOLD:
            labels.append('negative')
        elif pred > POSITIVE_THRESHOLD:
            labels.append('positive')
        else:
            labels.append('no-interaction')
    return labels


def select_indexes(scores):
    """Partition score indices into (positive, negative, non-interaction)
    index lists, using the same thresholds as label_interaction."""
    negative_indexes = []
    positive_indexes = []
    non_inter_indexes = []
    for i, score in enumerate(scores):
        if score < NEGATIVE_THRESHOLD:
            negative_indexes.append(i)
        elif score > POSITIVE_THRESHOLD:
            positive_indexes.append(i)
        else:
            non_inter_indexes.append(i)
    return positive_indexes, negative_indexes, non_inter_indexes


def get_inputs(feature_vectors, target_scores):
    """Downsample the dominant no-interaction class to 1/5 and return the
    balanced (input_vectors, targets).

    Uses random.sample, so the selection is non-deterministic unless the
    caller seeds `random`.
    """
    pos_indexes, neg_indexes, non_indexes = select_indexes(target_scores)
    input_non_indexes = random.sample(non_indexes, (len(non_indexes) // 5))
    all_indexes = pos_indexes + neg_indexes + input_non_indexes

    input_vectors = [feature_vectors[i] for i in all_indexes]
    targets = [target_scores[i] for i in all_indexes]
    return input_vectors, targets


def _load_fixed_length_inputs(floyd_flag, seq_len):
    """Shared preamble of load_dna_data / load_one_hot_data: read sequences,
    read interactions, fix sequence length, and build the base vocabulary.

    Returns (fixed_length_dict, base2index, gene_pairs, pair_scores).
    """
    if floyd_flag:
        INPUT_DIR = "../input/"
    else:
        INPUT_DIR = "../input"

    FILE = "dna_seqs"
    gene_seq_dict, dna_seqs, genes = dna_sequence_reader(INPUT_DIR, FILE)
    interaction_genes, interactions_matrix, gene_pairs, pair_scores = obtain_interactions(floyd_flag)

    fixed_length_dict = get_fixed_length_sequence(interaction_genes, gene_seq_dict, seq_len)
    base2index, index2base = build_base_vocab(fixed_length_dict, floyd_flag)
    return fixed_length_dict, base2index, gene_pairs, pair_scores


def load_dna_data(floyd_flag=False):
    """Load index-encoded DNA pair data and split it.

    Returns (train, dev, test, input_size) where each split is a list of
    (feature_vector, score) tuples and input_size is the vocabulary size.
    """
    SEQ_LEN = 333  # fixed sequence length for the index representation
    fixed_length_dict, base2index, gene_pairs, pair_scores = \
        _load_fixed_length_inputs(floyd_flag, SEQ_LEN)

    # Transfer base sequences to index sequences (concatenate the two vectors).
    feature_vectors, target_scores = construct_feature_vector(
        gene_pairs, fixed_length_dict, pair_scores, base2index, SEQ_LEN)
    input_vectors, targets = get_inputs(feature_vectors, target_scores)

    # Split into train (72%), dev (8%), test (20%).
    train_X, test_X, train_Y, test_Y = train_test_split(input_vectors, targets, test_size=0.2)
    train_X, dev_X, train_Y, dev_Y = train_test_split(train_X, train_Y, test_size=0.1)
    train = pairing_data(train_X, train_Y)
    dev = pairing_data(dev_X, dev_Y)
    test = pairing_data(test_X, test_Y)

    input_size = len(base2index)
    return train, dev, test, input_size


def load_one_hot_data(floyd_flag):
    """Load one-hot-encoded DNA pair data and split it.

    Returns (train, dev, test), each a list of ((enc1, enc2), score) tuples.
    """
    SEQ_LEN = 1000  # fixed sequence length for the one-hot representation
    fixed_length_dict, base2index, gene_pairs, pair_scores = \
        _load_fixed_length_inputs(floyd_flag, SEQ_LEN)

    pair, targets = get_one_hot_pairs(gene_pairs, fixed_length_dict, base2index,
                                      pair_scores, SEQ_LEN)

    train_X, test_X, train_Y, test_Y = train_test_split(pair, targets, test_size=0.2)
    train_X, dev_X, train_Y, dev_Y = train_test_split(train_X, train_Y, test_size=0.1)
    train = pairing_data(train_X, train_Y)
    dev = pairing_data(dev_X, dev_Y)
    test = pairing_data(test_X, test_Y)

    return train, dev, test
+{"seq_id":"573236662","text":"#!usr/bin/env python\n#coding:utf-8\n\"\"\"\n@author: Haidong Zhang\n@contact: haidong_zhang14@yahoo.com\n@time: 2020/12/10 18:23\n@project: MalariaDetection\n@description: \n\"\"\"\nfrom keras.layers import * #For adding convolutional layer\nfrom keras.layers import Dense, ZeroPadding2D, BatchNormalization, Activation #For adding layers to NN\nfrom keras.optimizers import adam\nfrom keras.models import * #for loading the model\nfrom utils import module as base_model\n\n\ndef pool_block(feats, pool_factor):\n\n pool_size = strides = [int(64/pool_factor),\n int(64/pool_factor)]\n x = AveragePooling2D(pool_size, strides=strides, padding='same')(feats)\n x = Conv2D(512, (1, 1))(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n return x\n\n\ndef decode_pspnet(img_input, o):\n pool_factors = [1, 2, 3, 6]\n pool_outs = []\n\n for p in pool_factors:\n pooled = pool_block(o, p)\n pool_outs.append(pooled)\n\n o = Concatenate()(pool_outs)\n o = UpSampling2D((4, 4))(o)\n o = Concatenate()([o, o])\n o = Conv2D(512, (1, 1), name='decode_pspnet_conv1')(o)\n o = BatchNormalization()(o)\n o = Activation('relu')(o)\n\n o = Flatten()(o)\n o = Dense(512, activation=\"relu\")(o)\n print(f\"pspnet: {o}\")\n\n o = Dense(1, activation=\"sigmoid\")(o)\n\n model = Model(img_input, o)\n model.compile(optimizer=adam(lr=0.00001), loss='binary_crossentropy',\n metrics=['accuracy']) # define optimizer and loss functions as well as required metrics\n return model\n\n\ndef pspnet():\n img_input, [f1, f2, f3, f4, f5] = base_model.base_encode()\n o = f5\n model = decode_pspnet(img_input, o)\n\n return model\n\n\ndef vgg_pspnet():\n img_input, [f1, f2, f3, f4, f5] = base_model.vgg_encode()\n o = f5\n model = decode_pspnet(img_input, o)\n\n return model\n\n\ndef res_pspnet():\n img_input, [f1, f2, f3, f4, f5] = base_model.res50_encode()\n o = f5\n model = decode_pspnet(img_input, o)\n\n return model\n\n\n\n\n","sub_path":"Step 
3/model/PSPNet.py","file_name":"PSPNet.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"604689618","text":"import nashpy as nash\nimport numpy as np\nfrom Edges import *\nimport copy\nfrom NodeRepository import nodeRepository\nfrom Settings import settings\nfrom Choices import *\nfrom Strategy import *\nimport time\nclass Path:\n def __init__(self):\n self._compEdges = []\n self._ua = -1\n self._ub = -1\n def setUtility(self,values):\n print(\"values\",values)\n self._ua = values[0]\n self._ub = values[1]\n def getUtility(self):\n return [self._ua,self._ub]\n def appendEdge(self,ce):\n self._compEdges.append(ce)\n def getPathChoices(self):\n pathChoiceA = []\n pathChoiceB = []\n\n for ce in self._compEdges:\n ChoiceA, ChoiceB = ce.getAllChoices() # TODO we need to add nodeID into CompositeEdge\n if ChoiceA.isEmpty() == False:\n pathChoiceA.append(ChoiceA)\n if ChoiceB.isEmpty() == False:\n pathChoiceB.append(ChoiceB)\n pathChoiceA = Choice.removeDupChoices2(pathChoiceA)\n pathChoiceB = Choice.removeDupChoices2(pathChoiceB)\n\n return pathChoiceA,pathChoiceB\n\n # 如果匹配,则返回 utility 的值 ua1, ub1 (>=0)\n # 否则, 返回 -1, -1 注意, 假设 utility值均为非负\n def findUtility(self, straA, straB):\n pathChoicesA, pathChoicesB = self.getPathChoices()\n if straA.contain(pathChoicesA) and straB.contain(pathChoicesB):\n return self.getUtility()\n else:\n return [-1, -1]\n def toString(self):\n rlt = \"\"\n for ce in self._compEdges:\n rlt += ce.toString()+\"//\"\n return rlt\ndef printChoicesSet(choicesSet):\n for choice in choicesSet:\n print (choice.toString())\n# @leavesUtil 所有叶节点的utility值, [[leafNode1, ua1, ub1],...]\ndef getUtility(node, leavesUtil):\n for item in leavesUtil:\n leaf = item[0]\n if node.getId() == leaf.getId():\n return [item[1], item[2]]\n return [-1,-1]\n# @subtreeRoot : 子树的根节点\n# @path : 从根到 subtreeRoot 的路径\n# @output 从subtreeRoot 展开的到叶节点的所有路径的集合\ndef getAllPaths(subtreeRoot, path,leavesUtil):\n children = nodeRepository.loadNodes(subtreeRoot.getChildrenId()) # 直接取得是策略\n rlt = []\n if len(children) == 0: # 如果是叶子节点,则直接返回 @path\n utils = 
getUtility(subtreeRoot, leavesUtil)\n print(\"zhuangtai :\",subtreeRoot.getStates(),utils)\n path.setUtility(utils)\n path.toString()\n rlt.append(path)\n return rlt\n else:\n for child in children: # 如果不是叶子节点 则在原来的策略上加上一条边\n outEdge = subtreeRoot.getOutEdge(child.getId())\n newPath = copy.deepcopy(path)\n newPath.appendEdge(outEdge)\n rlt.extend(getAllPaths(child, newPath, leavesUtil))\n return rlt\n\n\n#@straSetA, @straSetB 两个player的策略集\n#@leavesUtil 所有叶节点的utility值, [[leafNode1, ua1, ub1],...]\n\ndef createPayoffMatrix(straSetA, straSetB,root,leavesUtil):\n print(\"调用\")\n initPath = Path()\n paths = getAllPaths(root, initPath, leavesUtil) #得到所有的路径\n if settings.DEBUG:\n print(\"num of paths:\", len(paths))\n file = \"./path.text\"\n with open(file, 'a+') as f:\n f.write(str(len(paths)) + '\\n')\n length1 = len(straSetA)\n length2 = len(straSetB)\n matrixA = np.zeros((length1,length2))\n matrixB = np.zeros((length1, length2))\n counter = 0\n # starttime = time.time()\n for i in range(length1):\n for j in range(length2):\n for p in paths:\n print(\"当前的路径\",p.toString())\n ua1, ub1 = p.findUtility(straSetA[i],straSetB[j])\n counter += 1\n # if counter % 100 == 0:\n # endtime = time.time()\n # print(\"已匹配的个数:\", counter)\n # print(\"匹配这些路径:\", endtime - starttime)\n # starttime = endtime\n if ua1 >= 0:\n matrixA[i][j] = ua1\n matrixB[i][j] = ub1\n break\n if settings.DEBUG:\n print(\"收益矩阵A\")\n print(matrixA)\n print(\"收益矩阵B\")\n print(matrixB)\n print(matrixA[0][0])\n print(matrixB[0][0])\n print(straSetA[0].toString())\n print(straSetB[0].toString())\n return matrixA,matrixB\ndef createReducedPayoffMatrix(straSetA, straSetB,leavesUtil):\n length1 = len(straSetA)\n length2 = len(straSetB)\n matrixA = np.zeros((length1,length2))\n matrixB = np.zeros((length1, length2))\n counter = 0\n starttime = time.time()\n for i in range(length1):\n for j in range(length2):\n counter = counter + 1\n if counter % 100 == 0:\n endtime = time.time()\n print(\"已匹配的个数:\", counter)\n 
print(\"匹配���些路径:\", endtime - starttime)\n starttime = endtime\n ua1, ub1 = straSetA[i].findUtility(straSetB[j],leavesUtil)\n if ua1 >= 0:\n matrixA[i][j] = ua1\n matrixB[i][j] = ub1\n if settings.DEBUG:\n print(\"收益矩阵A\")\n print(matrixA)\n print(\"收益矩阵B\")\n print(matrixB)\n return matrixA ,matrixB\n#求解两个矩阵的纳什均衡\ndef nash (matrixA,matrixB): #输入收益矩阵 输出纳什均衡点\n toyple = matrixA.shape\n matrixB = matrixB.T\n print(toyple[0])\n row = toyple[0]\n column = toyple[1]\n Alable = [0]*row #存储每个节点A的lable ,收益矩阵的行数,即第一个人的纯策略\n Blable = [0]*column #存储每个节点B的lable ,收益矩阵的列数,即第二个人的纯策略\n for i in range(row): #找出每个策略的收益,并且找出最大收益\n Max = []\n Alable[i] = []\n for m in range(row):\n if m != i:\n Alable[i].append(m)\n for j in range(column):\n Max.append(matrixB[j][i])\n M = max(Max)\n for q in range(len(Max)): #找到最佳即为lable\n if Max[q] == M:\n Alable[i].append(row+q)\n for j in range(column):\n Blable[j]=[] #初始化y的lable即x分量为0的下标加1\n Max = []\n for m in range(column):\n if m != j:\n Blable[j].append(row+m)\n\n for i in range(row):\n Max.append(matrixA[i][j])\n M = max(Max) #找到最佳即为lable\n for q in range(len(Max)):\n if Max[q] == M:\n Blable[j].append(q)\n for i in range (row):\n print(\"x\"+str(i),Alable[i])\n for j in range(column):\n print(\"y\"+str(j),Blable[j])\n Nash = [] #寻找纳什均衡点 即lable中包含(1到m +n中的所有数值即为纳什均衡点)\n for i in range (row):\n for j in range(column):\n lable = []\n lable.extend(Alable[i])\n lable.extend(Blable[j])\n flage = 1\n for k in range(row + column):\n if k not in lable:\n flage = 0\n break\n if flage == 1:\n nash = [i,j]\n Nash.append(nash)\n print(\"纳什均衡点: \", Nash)\n return Nash\n\nif __name__ == '__main__':\n print()\n","sub_path":"SmartContract1-master/Payoff.py","file_name":"Payoff.py","file_ext":"py","file_size_in_byte":7508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"179103343","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nvalor mais um\n cont = cont + 1\nexpressão equivalente para não repetir\n cont += 1\n\nexpressões matemáticas\n cont += 2 + 3*4 - 8/4\n\nQueremos números naturais que sejam produtos de três\nnúmeros consecutivos\n\nExemplo: 120 é procurado, pois 4.5.6 == 120.\nDado um inteiro não-negativo n, verificar se n é procurado.\n\"\"\"\nnum = int(input(\"Digite um numero: \"))\n\ni = 1\n\nwhile i * (i + 1) * (i + 2) < num:\n i += 1\n j += 1\n k += 1\nif i * (i + 1) * (i + 2) == num:\n print(num, \"eh triangular\")\nelse:\n print(num, \"nao eh triangular\")\n","sub_path":"15/triangular.py","file_name":"triangular.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"454008393","text":"# coding=utf-8\n\nfrom django.contrib import admin\nfrom mps.base.admin import LockableAdmin, TrackableAdmin\nfrom .models import (\n MicrophysiologyCenter,\n Manufacturer,\n Microdevice,\n OrganModel,\n ValidatedAssay,\n OrganModelProtocol,\n GroupDeferral,\n OrganModelReference,\n MicrodeviceReference\n)\nfrom.forms import MicrophysiologyCenterForm, GroupDeferralForm\nfrom django.urls import resolve\nfrom django.db.models.fields.files import FieldFile\n\nfrom django.utils.safestring import mark_safe\n\n\nclass MicrophysiologyCenterAdmin(LockableAdmin):\n \"\"\"Admin for Microphysiology Centers\"\"\"\n form = MicrophysiologyCenterForm\n save_on_top = True\n list_display = (\n 'name', 'center_id', 'institution', 'description', 'pi', 'contact_person', 'center_site'\n )\n search_fields = (\n 'name', 'center_id', 'institution', 'description', 'pi', 'contact_person'\n )\n list_per_page = 300\n filter_horizontal = (\n 'groups',\n 'accessible_groups'\n )\n fieldsets = (\n (\n None, {\n 'fields': (\n 'name',\n 'center_id',\n 'institution',\n 'description',\n 'contact_person',\n 'contact_email',\n 'contact_web_page',\n 'pi',\n 'pi_email',\n 'pi_web_page',\n 'website',\n 'groups',\n 'accessible_groups',\n )\n }\n ),\n (\n 'Change Tracking', {\n 'fields': (\n # 'locked',\n ('created_by', 'created_on'),\n ('modified_by', 'modified_on'),\n ('signed_off_by', 'signed_off_date'),\n )\n }\n ),\n )\n\n @mark_safe\n def center_site(self, obj):\n return '%s' % (obj.website, obj.website)\n center_site.allow_tags = True\n\nadmin.site.register(MicrophysiologyCenter, MicrophysiologyCenterAdmin)\n\n\nclass ManufacturerAdmin(LockableAdmin):\n \"\"\"Admin for Manufacturers\"\"\"\n save_on_top = True\n list_per_page = 300\n list_display = ['name', 'contact_person', 'manufacturer_site']\n search_fields = ['name', 'contact_person']\n fieldsets = (\n (\n None, {\n 'fields': (\n 'name',\n 'contact_person',\n 'website',\n )\n }\n ),\n (\n 'Change Tracking', {\n 
'fields': (\n # 'locked',\n ('created_by', 'created_on'),\n ('modified_by', 'modified_on'),\n ('signed_off_by', 'signed_off_date'),\n )\n }\n ),\n )\n\n @mark_safe\n def manufacturer_site(self, obj):\n return '%s' % (obj.website, obj.website)\n manufacturer_site.allow_tags = True\n\nadmin.site.register(Manufacturer, ManufacturerAdmin)\n\n\nclass MicrodeviceReferenceInline(admin.TabularInline):\n \"\"\"Inline for Microdevices\"\"\"\n model = MicrodeviceReference\n exclude = []\n extra = 1\n\n\nclass MicrodeviceAdmin(LockableAdmin):\n \"\"\"Admin for Microdevices\"\"\"\n class Media(object):\n js = ('microdevices/layout.js',)\n css = {'all': ('assays/customize_admin.css',)}\n\n @mark_safe\n def device_image_display(self, obj):\n if obj.id and obj.device_image:\n return '
' % \\\n obj.device_image.url\n return ''\n\n @mark_safe\n def device_cross_section_image_display(self, obj):\n if obj.id and obj.device_cross_section_image:\n return '
' % \\\n obj.device_cross_section_image.url\n return ''\n\n device_image_display.allow_tags = True\n device_cross_section_image_display.allow_tags = True\n\n save_as = True\n save_on_top = True\n list_per_page = 300\n list_display = ('name', 'organ', 'center', 'manufacturer',\n 'description')\n # TODO REVISE ORGAN_NAME\n search_fields = ['name', 'organ__organ_name', 'center__name', 'description']\n list_filter = ['organ', 'center', ]\n\n fieldsets = (\n (\n None, {\n 'fields': (\n (\n 'device_type'\n ),\n (\n 'center', 'manufacturer',\n ),\n (\n 'name', 'organ',\n ),\n (\n 'description', 'barcode',\n ),\n (\n 'device_image_display',\n 'device_cross_section_image_display',\n ),\n (\n 'references'\n )\n )\n }\n ),\n (\n 'Dimensions', {\n 'fields': (\n (\n 'device_width', 'device_length', 'device_thickness',\n ),\n (\n 'device_fluid_volume',\n ),\n (\n 'substrate_material', 'substrate_thickness',\n ),\n (\n 'device_image', 'device_cross_section_image',\n ),\n )\n }\n ),\n (\n 'Layout', {\n 'fields': (\n (\n 'number_of_rows', 'number_of_columns',\n ),\n (\n 'row_labels', 'column_labels',\n ),\n )\n }\n ),\n (\n None, {\n 'fields': (\n # 'locked',\n 'created_by',\n 'modified_by',\n ('signed_off_by', 'signed_off_date'),\n )\n }\n ),\n )\n actions = ['update_fields']\n inlines = [MicrodeviceReferenceInline]\n readonly_fields = (\n 'device_image_display', 'device_cross_section_image_display',\n )\n\n def save_model(self, request, obj, form, change):\n # Django always sends this when \"Save as new is clicked\"\n if '_saveasnew' in request.POST:\n # Get the ID from the admin URL\n original_pk = resolve(request.path).args[0]\n # Get the original object\n original_obj = obj._meta.concrete_model.objects.get(id=original_pk)\n\n # Iterate through all it's properties\n for prop, value in list(vars(original_obj).items()):\n # if the property is an Image (don't forget to import ImageFieldFile!)\n if isinstance(getattr(original_obj, prop), FieldFile):\n # Copy it!\n setattr(obj, 
prop, getattr(original_obj, prop))\n obj.save()\n\nadmin.site.register(Microdevice, MicrodeviceAdmin)\n\n\nclass OrganModelProtocolInline(admin.TabularInline):\n \"\"\"Admin Inline for Organ Model Protocols\"\"\"\n model = OrganModelProtocol\n fields = ('version', 'protocol_file')\n extra = 1\n\n class Media(object):\n css = {\"all\": (\"css/hide_admin_original.css\",)}\n\n\nclass ValidatedAssayInline(admin.TabularInline):\n \"\"\"Admin Inline for Validated Assays\"\"\"\n # Results calculated from CHIP READOUTS\n model = ValidatedAssay\n verbose_name = 'Validated Assay'\n verbose_name_plural = 'Validated Assays'\n fields = ('assay',)\n extra = 0\n\n class Media(object):\n css = {\"all\": (\"css/hide_admin_original.css\",)}\n\n\nclass OrganModelReferenceInline(admin.TabularInline):\n \"\"\"Inline for MPS Models\"\"\"\n model = OrganModelReference\n exclude = []\n extra = 1\n\n\nclass OrganModelAdmin(LockableAdmin):\n \"\"\"Admin for Organ Models\"\"\"\n class Media(object):\n js = ('js/inline_fix.js',)\n\n list_per_page = 300\n list_display = (\n 'name',\n 'center',\n 'base_model',\n 'organ',\n 'device',\n 'disease',\n 'description',\n 'alt_name',\n 'model_type',\n 'disease_trigger',\n )\n search_fields = [\n 'name', 'organ__organ_name', 'device__name', 'center__name', 'description']\n readonly_fields = ['created_by', 'created_on',\n 'modified_by', 'modified_on']\n\n # fieldsets = (\n # (\n # None, {\n # 'fields': (\n # (\n # 'name', 'organ', 'alt_name', 'base_model', 'model_type'\n # ),\n # (\n # 'disease', 'disease_trigger'\n # ),\n # (\n # 'device', 'description',\n # ),\n # (\n # 'mps', 'epa', 'tctc'\n # ),\n # (\n # 'model_image'\n # ),\n # (\n # 'references'\n # )\n # )\n # }\n # ),\n # (\n # 'Change Tracking', {\n # 'fields': (\n # # 'locked',\n # ('created_by', 'created_on'),\n # ('modified_by', 'modified_on'),\n # ('signed_off_by', 'signed_off_date'),\n # )\n # }\n # )\n # )\n\n actions = ['update_fields']\n save_on_top = True\n inlines = 
[ValidatedAssayInline, OrganModelProtocolInline, OrganModelReferenceInline]\n\nadmin.site.register(OrganModel, OrganModelAdmin)\n\n\nclass GroupDeferralAdmin(TrackableAdmin):\n \"\"\"Admin for Manufacturers\"\"\"\n form = GroupDeferralForm\n save_on_top = True\n list_per_page = 300\n list_display = ['group', 'approval_file', 'notes']\n search_fields = ['notes']\n fieldsets = (\n (\n None, {\n 'fields': (\n 'group',\n 'approval_file',\n 'notes',\n )\n }\n ),\n (\n 'Change Tracking', {\n 'fields': (\n ('created_by', 'created_on'),\n ('modified_by', 'modified_on'),\n ('signed_off_by', 'signed_off_date'),\n )\n }\n ),\n )\n\nadmin.site.register(GroupDeferral, GroupDeferralAdmin)\n","sub_path":"microdevices/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":10628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"135045887","text":"\"\"\"\nLogging module.\n\"\"\"\n\nimport sys\nimport pydoc\nimport logging\nfrom io import BytesIO\n\n\n# Logging levels\nfrom logging import DEBUG # NOQA\nfrom logging import INFO # NOQA\nfrom logging import WARNING # NOQA\nfrom logging import ERROR # NOQA\nfrom logging import CRITICAL # NOQA\n\n\n# Holds the logging configuration\nconfig = {}\n\n# Holds the logger prefix\nprefix = None\n\n# Minimum loging level\nlevel = DEBUG\n\n\nclass LoggerLookupError(LookupError):\n \"\"\"\n Exception when a logger cannot be found.\n \"\"\"\n pass\n\n\nclass LoggerLevelError(ValueError):\n \"\"\"\n Exception when a logger level is invalid.\n \"\"\"\n pass\n\n\nclass LoggerConfigError(SyntaxError):\n \"\"\"\n Exception when a logger is improperly configured.\n \"\"\"\n pass\n\n\nclass StandardIOHandler(logging.StreamHandler):\n \"\"\"\n Logger handler that logs messages to stdout and errors to stderr.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize the handler.\n \"\"\"\n super(StandardIOHandler, self).__init__(BytesIO())\n self.stdout = logging.StreamHandler(sys.stdout)\n self.stderr = logging.StreamHandler(sys.stderr)\n\n def emit(self, record):\n \"\"\"\n Log a record.\n\n record -- the record to log.\n \"\"\"\n if record.levelno < level:\n return\n\n self.stdout.setFormatter(self.formatter)\n self.stderr.setFormatter(self.formatter)\n\n if record.levelno <= 20:\n self.stdout.emit(record)\n else:\n self.stderr.emit(record)\n\n\nclass ColoredStandardIOHandler(StandardIOHandler):\n \"\"\"\n Standard logger handler that uses colors for messages.\n \"\"\"\n\n # Color codes\n C_END = '\\033[0m'\n C_DEBUG = '\\033[2;37m'\n C_INFO = '\\033[0;37m'\n C_WARNING = '\\033[0;33m'\n C_ERROR = '\\033[0;31m'\n C_CRITICAL = '\\033[0;37;41m'\n\n def emit(self, record):\n \"\"\"\n Log a record.\n\n record -- the record to log.\n \"\"\"\n color_const = 'C_' + record.levelname\n color_code = getattr(self, color_const)\n record.msg = 
'{0}{1}{2}'.format(color_code, record.msg, self.C_END)\n\n super(ColoredStandardIOHandler, self).emit(record)\n\n\nclass StandardPathHandler(logging.StreamHandler):\n \"\"\"\n Logger handler that uses the logger name as the filename for the log file.\n \"\"\"\n\n def __init__(self, path, suffix='.log'):\n \"\"\"\n Initialize the handler.\n\n path -- the path to put the log files.\n suffix -- the log file suffix.\n \"\"\"\n super(StandardPathHandler, self).__init__(BytesIO())\n self.path = path\n self.suffix = suffix\n self.streams = {}\n\n def emit(self, record):\n \"\"\"\n Log a record.\n\n record -- the record to log.\n \"\"\"\n name = '{0}{1}'.format(record.name.split('.', 1)[1], self.suffix)\n filename = '{0}/{1}'.format(self.path.rstrip('/'), name)\n\n try:\n stream = self.streams[name]\n except KeyError:\n stream = self.streams[name] = logging.StreamHandler(open(filename, 'a'))\n\n stream.setFormatter(self.formatter)\n stream.emit(record)\n\n\ndef set_config(data):\n \"\"\"\n Set the services configuration.\n\n data -- the configuration data.\n \"\"\"\n config.clear()\n config.update(data)\n\n\ndef set_prefix(value):\n \"\"\"\n Set the logger prefix.\n\n prefix -- the prefix name.\n \"\"\"\n global prefix\n prefix = value\n\n\ndef set_level(value):\n \"\"\"\n Set the minimum logging level.\n\n value -- the level to set.\n \"\"\"\n global level\n level = value\n\n\ndef get(name):\n \"\"\"\n Logger factory.\n\n name -- the name of the logger.\n\n Returns the logger object.\n\n May raise LoggerLookupError if a logger or its handler cannot be found.\n May raise LoggerLevelError if a logger level is invalid.\n May raise LoggerConfigError if a logger is improperly configured.\n \"\"\"\n if prefix is not None:\n fq_name = '{0}.{1}'.format(prefix, name)\n else:\n fq_name = name\n\n logger_obj = logging.getLogger(fq_name)\n if logger_obj.level > logging.NOTSET:\n return logger_obj\n\n logging_config = dict(config)\n try:\n del logging_config['loggers']\n except 
KeyError:\n pass\n\n try:\n raise_errors = config['raise']\n del logging_config['raise']\n except KeyError:\n raise_errors = False\n\n try:\n logger_config = config['loggers'][name]\n logging_config.update(logger_config)\n except KeyError:\n pass\n\n try:\n logger_cls = pydoc.locate(logging_config['class'])\n if logger_cls is None:\n raise LoggerLookupError(logging_config['class'])\n except KeyError:\n if not raise_errors:\n logger_obj = logging.getLogger('nil')\n logger_obj.setLevel(logging.DEBUG + 1000)\n return logger_obj\n\n raise LoggerLookupError(name)\n\n try:\n level_name = logging_config['level']\n level_no = getattr(logging, level_name.upper())\n if level_no <= 0:\n raise AttributeError('invalid logging level')\n except KeyError:\n raise LoggerLevelError(name)\n except AttributeError:\n raise LoggerLevelError(name)\n\n try:\n logger_format = logging_config['format']\n del logging_config['format']\n except KeyError:\n logger_format = '%(message)s'\n\n del logging_config['class'], logging_config['level']\n\n logger_init = logger_cls.__init__.func_code\n logger_args_count = logger_init.co_argcount\n logger_args = logger_init.co_varnames[1:logger_args_count]\n try:\n logger_args_req = logger_args[:0 - len(logger_cls.__init__.func_defaults)]\n except TypeError:\n logger_args_req = logger_args\n\n missing_args = set(logger_args_req) ^ set(logging_config) & set(logger_args_req)\n if len(missing_args) > 0:\n raise LoggerConfigError(name)\n\n extra_args = set(logger_args) & set(logging_config) ^ set(logging_config)\n if len(extra_args) > 0:\n for arg in extra_args:\n del logging_config[arg]\n\n logger_obj.setLevel(level_no)\n formatter = logging.Formatter(logger_format)\n handler_obj = logger_cls(**logging_config)\n handler_obj.setFormatter(formatter)\n logger_obj.addHandler(handler_obj)\n\n # TODO streamline this\n return 
logger_obj\n","sub_path":"loggers.py","file_name":"loggers.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"243350439","text":"class Node:\n #constructor\n def __init__(self, d, nal, left=None, right=None, root=None):\n self.d = d\n self.Value = nal\n self.Left = left\n self.Right = right\n self.root = root\n#function to che\n def hasLeftChild(self):\n return self.Left\n\n def hasRightChild(self):\n return self.Right\n\n def isLeftChild(self):\n return self.root and self.root.Left == self\n\n def isRightChild(self):\n return self.root and self.root.Right == self\n\n def isRoot(self):\n return not self.root\n\n def isLeafNode(self):\n return not (self.Right or self.Left)\n\n def hasAnyChildren(self):\n return self.Right or self.Left\n\n def hasBothChildren(self):\n return self.Right and self.Left\n\n\nclass BST:\n\n def __init__(self):\n self.root = None\n self.size = 0\n\n def length(self):\n return self.size\n\n def __len__(self):\n return self.size\n\n def put(self, d, nal):\n if self.root:\n self._put(d, nal, self.root)\n else:\n self.root = Node(d, nal)\n self.size = self.size + 1\n\n def _put(self, d, nal, currentNode):\n if d < currentNode.d:\n if currentNode.hasLeftChild():\n self._put(d, nal, currentNode.Left)\n else:\n currentNode.Left = Node(d, nal, root=currentNode)\n else:\n if currentNode.hasRightChild():\n self._put(d, nal, currentNode.Right)\n else:\n currentNode.Right = Node(d, nal, root=currentNode)\n\n def __setitem__(self, m, n):\n self.put(m, n)\n\n def get(self, d):\n if self.root:\n res = self._get(d, self.root)\n if res:\n\n return res.Value\n else:\n return None\n else:\n return None\n\n def _get(self, d, currentNode):\n\n if not currentNode:\n return None\n elif currentNode.d == d:\n return currentNode\n elif d < currentNode.d:\n return self._get(d, currentNode.Left)\n else:\n return self._get(d, currentNode.Right)\n\n def __getitem__(self, d):\n return self.get(d)\n\n def __contains__(self, d):\n if self._get(d, self.root):\n return True\n else:\n return False\n\n def delete(self, d):\n\n if self.size > 1:\n\n nodeToRemone = 
self._get(d, self.root)\n if nodeToRemone:\n self.remone(nodeToRemone)\n self.size = self.size - 1\n else:\n raise KeyError('Error, d not in tree')\n elif self.size == 1 and self.root.d == d:\n self.root = None\n self.size = self.size - 1\n else:\n raise KeyError('Error, d not in tree')\n\n def __deleteitem__(self, d):\n\n self.delete(d)\n\n def splice(self):\n if self.isLeafNode():\n if self.isLeftChild():\n\n self.root.Left = None\n else:\n self.root.Right = None\n elif self.hasAnyChildren():\n if self.hasLeftChild():\n\n if self.isLeftChild():\n\n self.root.Left = self.Left\n else:\n\n self.root.Right = self.Left\n self.Left.root = self.root\n else:\n\n if self.isLeftChild():\n\n self.root.Left = self.Right\n else:\n self.root.Right = self.Right\n self.Right.root = self.root\n\n def find(self):\n\n succ = None\n if self.hasRightChild():\n succ = self.Right.findMin()\n else:\n if self.root:\n\n if self.isLeftChild():\n\n succ = self.root\n else:\n self.root.Right = None\n succ = self.root.findSuccessor()\n self.root.Right = self\n \n\n def findMin(self):\n\n current = self\n while current.hasLeftChild():\n current = current.Left\n return current\n\n def remone(self, currentNode):\n\n if currentNode.isLeafNode(): # leaf\n if currentNode == currentNode.root.Left:\n currentNode.root.Left = None\n else:\n currentNode.root.Right = None\n elif currentNode.hasBothChildren(): # interior\n\n succ = currentNode.findSuccessor()\n succ.splice()\n currentNode.d = succ.d\n currentNode.Value = succ.Value\n\n else: # this node has one child\n if currentNode.hasLeftChild():\n if currentNode.isLeftChild():\n currentNode.Left.root = currentNode.root\n currentNode.root.Left = currentNode.Left\n elif currentNode.isRightChild():\n currentNode.Left.root = currentNode.root\n currentNode.root.Right = currentNode.Left\n else:\n\n currentNode.replaceNodeData(currentNode.Left.d,\n currentNode.Left.Value,\n currentNode.Left.Left,\n currentNode.Left.Right)\n else:\n\n if 
currentNode.isLeftChild():\n currentNode.Right.root = currentNode.root\n currentNode.root.Left = currentNode.Right\n elif currentNode.isRightChild():\n currentNode.Right.root = currentNode.root\n currentNode.root.Right = currentNode.Right\n else:\n currentNode.replaceNodeData(currentNode.Right.d,currentNode.Right.Value,\n currentNode.Right.Left,\n currentNode.Right.Right)\n\n \n\ntree = BST()\ntree[3] = \"Amagi\"\ntree[4] = \"Umwembe\"\ntree[6] = \"Imboga\"\ntree[2] = \"nyanya\"\ntree[8] = \"Imbwija\"\n\nprint(tree[2])\nprint(tree[4])\nprint(tree[8])\n","sub_path":"Part3/Binary.py","file_name":"Binary.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"179867126","text":"\"\"\"\nCopyright ©2020. The Regents of the University of California (Regents). All Rights Reserved.\n\nPermission to use, copy, modify, and distribute this software and its documentation\nfor educational, research, and not-for-profit purposes, without fee and without a\nsigned licensing agreement, is hereby granted, provided that the above copyright\nnotice, this paragraph and the following two paragraphs appear in all copies,\nmodifications, and distributions.\n\nContact The Office of Technology Licensing, UC Berkeley, 2150 Shattuck Avenue,\nSuite 510, Berkeley, CA 94720-1620, (510) 643-7201, otl@berkeley.edu,\nhttp://ipira.berkeley.edu/industry-info for commercial licensing opportunities.\n\nIN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,\nINCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF\nTHE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED\nOF THE POSSIBILITY OF SUCH DAMAGE.\n\nREGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE\nSOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED\n\"AS IS\". 
REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,\nENHANCEMENTS, OR MODIFICATIONS.\n\"\"\"\n\nimport json\n\nfrom flask import current_app as app\nfrom nessie.externals import calnet\nfrom nessie.externals import s3\nfrom nessie.jobs.background_job import BackgroundJob\nfrom nessie.lib.queries import get_all_instructor_uids, get_all_student_ids\nfrom nessie.lib.util import get_s3_calnet_daily_path\n\n\nclass ImportCalNetData(BackgroundJob):\n\n def run(self, advisee_csids=None, instructor_uids=None):\n if not advisee_csids:\n advisee_csids = [row['sid'] for row in get_all_student_ids()]\n if not instructor_uids:\n instructor_uids = [row['instructor_uid'] for row in get_all_instructor_uids()]\n _put_advisee_data_to_s3(advisee_csids)\n _put_instructor_data_to_s3(instructor_uids)\n return True\n\n\ndef _put_advisee_data_to_s3(sids):\n app.logger.info(f'Starting CalNet import job for {len(sids)} advisees...')\n all_attributes = calnet.client(app).search_csids(sids)\n if len(sids) != len(all_attributes):\n ldap_sids = [person['csid'] for person in all_attributes]\n missing = set(sids) - set(ldap_sids)\n app.logger.warning(f'Looked for {len(sids)} advisee SIDs but only found {len(all_attributes)} : missing {missing}')\n\n serialized_data = ''\n for index, a in enumerate(all_attributes):\n sid = a['csid']\n affiliations = a['affiliations']\n first_name, last_name = calnet.split_sortable_name(a)\n # JsonSerDe in Redshift schema creation requires one and only one JSON record per line in text file in S3.\n serialized_data += json.dumps({\n 'affiliations': ','.join(affiliations) if isinstance(affiliations, list) else affiliations,\n 'campus_email': a['campus_email'],\n 'email': a['email'],\n 'first_name': first_name,\n 'last_name': last_name,\n 'ldap_uid': a['uid'],\n 'sid': sid,\n }) + '\\n'\n s3.upload_data(serialized_data, f'{get_s3_calnet_daily_path()}/advisees/advisees.json')\n app.logger.info(f'Uploaded data for {len(all_attributes)} 
advisees')\n\n\ndef _put_instructor_data_to_s3(uids):\n app.logger.info(f'Starting CalNet import job for {len(uids)} instructors...')\n all_attributes = calnet.client(app).search_uids(uids)\n if len(uids) != len(all_attributes):\n ldap_uids = [person['uid'] for person in all_attributes]\n missing = set(uids) - set(ldap_uids)\n app.logger.warning(f'Looked for {len(uids)} instructor UIDs but only found {len(all_attributes)} : missing {missing}')\n\n serialized_data = ''\n for index, a in enumerate(all_attributes):\n uid = a['uid']\n affiliations = a['affiliations']\n first_name, last_name = calnet.split_sortable_name(a)\n serialized_data += json.dumps({\n 'affiliations': ','.join(affiliations) if isinstance(affiliations, list) else affiliations,\n 'campus_email': a['campus_email'],\n 'dept_code': calnet.get_dept_code(a),\n 'email': a['email'],\n 'first_name': first_name,\n 'last_name': last_name,\n 'ldap_uid': uid,\n 'csid': a['csid'],\n 'title': a['title'],\n }) + '\\n'\n s3.upload_data(serialized_data, f'{get_s3_calnet_daily_path()}/instructors/instructors.json')\n app.logger.info(f'Uploaded data for {len(all_attributes)} instructors')\n","sub_path":"nessie/jobs/import_calnet_data.py","file_name":"import_calnet_data.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"329110425","text":"import os\nimport pickle as Pickle\nimport numpy as np\nimport math\nimport scipy\nimport scipy.stats\nimport matplotlib.pyplot as plt\nfrom flarestack.shared import name_pickle_output_dir, plot_output_dir, \\\n k_to_flux, inj_dir_name, scale_shortener\nfrom flarestack.core.ts_distributions import plot_background_ts_distribution, \\\n plot_fit_results\nfrom flarestack.utils.neutrino_astronomy import calculate_astronomy\nfrom flarestack.core.minimisation import MinimisationHandler\nfrom flarestack.utils.catalogue_loader import load_catalogue\nimport sys\nimport logging\n\n\nclass ResultsHandler(object):\n\n def __init__(self, rh_dict):\n\n self.sources = load_catalogue(rh_dict[\"catalogue\"])\n\n self.name = rh_dict[\"name\"]\n\n self.results = dict()\n self.pickle_output_dir = name_pickle_output_dir(self.name)\n self.plot_dir = plot_output_dir(self.name)\n self.merged_dir = os.path.join(self.pickle_output_dir, \"merged\")\n\n # Checks if the code should search for flares. 
By default, this is\n # not done.\n # try:\n # self.flare = llh_kwargs[\"Flare Search?\"]\n # except KeyError:\n # self.flare = False\n\n # if self.flare:\n # self.make_plots = self.flare_plots\n # else:\n self.make_plots = self.noflare_plots\n\n # Checks whether negative n_s is fit or not\n #\n # try:\n # self.negative_n_s = llh_kwargs[\"Fit Negative n_s?\"]\n # except KeyError:\n # self.negative_n_s = False\n #\n # try:\n # self.fit_weights = llh_kwargs[\"Fit Weights?\"]\n # except KeyError:\n # self.fit_weights = False\n\n # Sets default Chi2 distribution to fit to background trials\n #\n # if self.fit_weights:\n # self.ts_type = \"Fit Weights\"\n # elif self.flare:\n # self.ts_type = \"Flare\"\n # elif self.negative_n_s:\n # self.ts_type = \"Negative n_s\"\n # else:\n self.ts_type = \"Standard\"\n #\n # print \"negative_ns\", self.negative_n_s\n\n p0, bounds, names = MinimisationHandler.find_parameter_info(rh_dict)\n\n # p0, bounds, names = fit_setup(llh_kwargs, self.sources, self.flare)\n self.param_names = names\n self.bounds = bounds\n self.p0 = p0\n\n # if cleanup:\n # self.clean_merged_data()\n\n self.sensitivity = np.nan\n self.bkg_median = np.nan\n self.frac_over = np.nan\n self.disc_potential = np.nan\n self.disc_potential_25 = np.nan\n self.disc_ts_threshold = np.nan\n self.extrapolated_sens = False\n self.extrapolated_disc = False\n\n # if self.show_inj:\n self.inj = self.load_injection_values()\n # else:\n # self.inj = None\n\n try:\n self.merge_pickle_data()\n except FileNotFoundError:\n logging.warning(\"No files found at {0}\".format(self.pickle_output_dir))\n\n try:\n self.find_sensitivity()\n except ValueError as e:\n logging.warning(\"RuntimeError for discovery potential: \\n {0}\".format(e))\n\n try:\n self.find_disc_potential()\n except RuntimeError as e:\n logging.warning(\"RuntimeError for discovery potential: \\n {0}\".format(e))\n except TypeError as e:\n logging.warning(\"TypeError for discovery potential: \\n {0}\".format(e))\n except 
ValueError as e:\n logging.warning(\"TypeError for discovery potential: \\n {0}\".format(e))\n\n self.plot_bias()\n\n def astro_values(self, e_pdf_dict):\n \"\"\"Function to convert the values calculated for sensitivity and\n discovery potential, which are given in terms of flux at the\n detector, to physical quantities for a source of mean luminosity. The\n fluxes are integrated over an energy range, either specified in\n e_pdf_dict, or by default between 100GeV and 10PeV. They are then\n scaled by the luminosity distance to source, giving the mean\n luminosity of the sources in the catalogue. The assumption is that\n the sources are standard candles, so this value would be the same for\n each source, and is thus only calculated once. To convert further from\n this mean luminosity to the luminosity of a specific source,\n the values must be multiplied by the \"relative injection weight\" of\n the source, which has a mean of 1.\n\n :param e_pdf_dict: Dictionary containing energy PDF information\n :return: Values for the neutrino luminosity sensitivity and\n discovery potential\n \"\"\"\n\n astro_sens = self.nu_astronomy(self.sensitivity, e_pdf_dict)\n astro_disc = self.nu_astronomy(self.disc_potential, e_pdf_dict)\n\n return astro_sens, astro_disc\n\n\n def nu_astronomy(self, flux, e_pdf_dict):\n \"\"\"Function to convert a local flux in the detector at 1GeV to physical\n quantities for a source of mean luminosity. The\n fluxes are integrated over an energy range, either specified in\n e_pdf_dict, or by default between 100GeV and 10PeV. They are then\n scaled by the luminosity distance to source, giving the mean\n luminosity of the sources in the catalogue. The assumption is that\n the sources are standard candles, so this value would be the same for\n each source, and is thus only calculated once. 
To convert further from\n this mean luminosity to the luminosity of a specific source,\n the values must be multiplied by the \"relative injection weight\" of\n the source, which has a mean of 1.\n\n :param flux: Flux to be converted\n :param e_pdf_dict: Dictionary containing energy PDF information\n :return: Value for the neutrino luminosity\n \"\"\"\n return calculate_astronomy(flux, e_pdf_dict, self.sources)\n\n def clean_merged_data(self):\n \"\"\"Function to clear cache of all data\"\"\"\n try:\n for f in os.listdir(self.merged_dir):\n os.remove(self.merged_dir + f)\n except OSError:\n pass\n\n def load_injection_values(self):\n \"\"\"Function to load the values used in injection, so that a\n comparison to the fit results can be made.\n\n :return: Dictionary of injected values.\n \"\"\"\n\n load_dir = inj_dir_name(self.name)\n\n inj_values = dict()\n\n for file in os.listdir(load_dir):\n path = os.path.join(load_dir, file)\n\n with open(path, \"rb\") as f:\n inj_values[os.path.splitext(file)[0]] = Pickle.load(f)\n\n return inj_values\n\n def merge_pickle_data(self):\n\n all_sub_dirs = [x for x in os.listdir(self.pickle_output_dir)\n if x[0] != \".\" and x != \"merged\"]\n\n try:\n os.makedirs(self.merged_dir)\n except OSError:\n pass\n\n for sub_dir_name in all_sub_dirs:\n sub_dir = os.path.join(self.pickle_output_dir, sub_dir_name)\n\n files = os.listdir(sub_dir)\n\n merged_path = os.path.join(self.merged_dir, sub_dir_name + \".pkl\")\n\n if os.path.isfile(merged_path):\n with open(merged_path, \"rb\") as mp:\n merged_data = Pickle.load(mp)\n else:\n merged_data = {}\n\n for filename in files:\n path = os.path.join(sub_dir, filename)\n\n try:\n with open(path, \"rb\") as f:\n data = Pickle.load(f)\n except EOFError:\n logging.warning(\"Failed loading: {0}\".format(path))\n continue\n os.remove(path)\n\n if merged_data == {}:\n merged_data = data\n else:\n for (key, info) in data.items():\n if isinstance(info, list):\n merged_data[key] += info\n else:\n for 
(param_name, params) in info.items():\n try: merged_data[key][param_name] += params\n except KeyError as m:\n logging.warning('Keys [{key}][{param_name}] not found in \\n {merged_data}')\n raise KeyError(m)\n\n with open(merged_path, \"wb\") as mp:\n Pickle.dump(merged_data, mp)\n\n if len(list(merged_data.keys())) > 0:\n self.results[scale_shortener(float(sub_dir_name))] = merged_data\n\n if len(list(self.results.keys())) == 0:\n logging.warning(\"No data was found by ResultsHandler object! \\n\")\n logging.warning(\"Tried root directory: \\n {0} \\n \".format(self.pickle_output_dir))\n sys.exit()\n\n def find_sensitivity(self):\n \"\"\"Uses the results of the background trials to find the median TS\n value, determining the sensitivity threshold. This sensitivity is\n not necessarily zero, for example with negative n_s, fitting of\n weights or the flare search method.\n \"\"\"\n\n try:\n bkg_dict = self.results[scale_shortener(0.0)]\n except KeyError:\n logging.error(\"No key equal to '0'\")\n return\n\n bkg_ts = bkg_dict[\"TS\"]\n\n bkg_median = np.median(bkg_ts)\n self.bkg_median = bkg_median\n\n savepath = os.path.join(self.plot_dir, \"sensitivity.pdf\")\n\n self.sensitivity, self.extrapolated_sens = self.find_overfluctuations(\n bkg_median, savepath)\n\n msg = \"\"\n\n if self.extrapolated_sens:\n msg = \"EXTRAPOLATED \"\n\n logging.info(\"{0}Sensitivity is {1:.3g}\".format(msg, self.sensitivity))\n\n # def set_upper_limit(self, ts_val, savepath):\n # \"\"\"Set an upper limit, based on a Test Statistic value from\n # unblinding, as well as a\n #\n # :param ts_val: Test Statistic Value\n # :param savepath: Path to save plot\n # :return: Upper limit, and whether this was extrapolated\n # \"\"\"\n #\n # try:\n # bkg_dict = self.results[scale_shortener(0.0)]\n # except KeyError:\n # print \"No key equal to '0'\"\n # return\n #\n # bkg_ts = bkg_dict[\"TS\"]\n # bkg_median = np.median(bkg_ts)\n #\n # # Set an upper limit based on the Test Statistic value for an\n # 
# overfluctuation, or the median background for an underfluctuation.\n #\n # ref_ts = max(ts_val, bkg_median)\n #\n # ul, extrapolated = self.find_overfluctuations(\n # ref_ts, savepath)\n #\n # if extrapolated:\n # print \"EXTRAPOLATED\",\n #\n # print \"Upper limit is\", \"{0:.3g}\".format(ul)\n # return ul, extrapolated\n\n def find_overfluctuations(self, ts_val, savepath):\n \"\"\"Uses the values of injection trials to fit an 1-exponential decay\n function to the overfluctuations, allowing for calculation of the\n sensitivity. Where the injected flux was not sufficient to reach the\n sensitivity, extrapolation will be used instead of interpolation,\n but this will obviously have larger associated errors. If\n extrapolation is used, self.extrapolated_sens is set to true. In\n either case, a plot of the overfluctuations as a function of the\n injected signal will be made.\n \"\"\"\n\n x = sorted(self.results.keys())\n x_acc = []\n y = []\n\n x = [scale_shortener(i) for i in sorted([float(j) for j in x])]\n\n for scale in x:\n ts_array = np.array(self.results[scale][\"TS\"])\n frac = float(len(ts_array[ts_array > ts_val])) / (float(len(\n ts_array)))\n \n logging.info(\n \"Fraction of overfluctuations is {0:.2f} above {1:.2f} (N_trials={2}) (Scale={3})\".format(\n frac, ts_val, len(ts_array), scale\n )\n )\n\n if scale == scale_shortener(0.0):\n self.frac_over = frac\n\n if len(ts_array) > 1:\n y.append(frac)\n x_acc.append(float(scale))\n\n self.make_plots(scale)\n\n # raw_input(\"prompt\")\n\n x = np.array(x_acc)\n\n x_flux = k_to_flux(x)\n\n threshold = 0.9\n\n b = (1 - min(y))\n\n def f(x, a):\n value = (1 - b * np.exp(-a * x))\n return value\n\n best_a = scipy.optimize.curve_fit(\n f, x, y, p0=[1./max(x)])[0][0]\n\n def best_f(x):\n return f(x, best_a)\n\n fit = k_to_flux((1./best_a) * np.log(b / (1 - threshold)))\n\n if fit > max(x_flux):\n extrapolated = True\n else:\n extrapolated = False\n\n xrange = np.linspace(0.0, 1.1 * max(x), 1000)\n\n plt.figure()\n 
plt.scatter(x_flux, y, color=\"black\")\n plt.plot(k_to_flux(xrange), best_f(xrange), color=\"blue\")\n plt.axhline(threshold, lw=1, color=\"red\", linestyle=\"--\")\n plt.axvline(fit, lw=2, color=\"red\")\n plt.ylim(0., 1.)\n plt.xlim(0., k_to_flux(max(xrange)))\n plt.ylabel('Overfluctuations above TS=' + \"{:.2f}\".format(ts_val))\n plt.xlabel(r\"Flux strength [ GeV$^{-1}$ cm$^{-2}$ s$^{-1}$]\")\n plt.savefig(savepath)\n plt.close()\n\n return fit, extrapolated\n\n def find_disc_potential(self):\n\n ts_path = os.path.join(self.plot_dir, \"ts_distributions/0.pdf\")\n\n try:\n bkg_dict = self.results[scale_shortener(0.0)]\n except KeyError:\n logging.error(\"No key equal to '0'\")\n return\n\n bkg_ts = bkg_dict[\"TS\"]\n\n disc_threshold = plot_background_ts_distribution(\n bkg_ts, ts_path, ts_type=self.ts_type)\n\n self.disc_ts_threshold = disc_threshold\n\n bkg_median = np.median(bkg_ts)\n x = sorted(self.results.keys())\n y = []\n y_25 = []\n\n x = [scale_shortener(i) for i in sorted([float(j) for j in x])]\n\n for scale in x:\n ts_array = np.array(self.results[scale][\"TS\"])\n frac = float(len(ts_array[ts_array > disc_threshold])) / (\n float(len(ts_array)))\n\n logging.info(\n \"Fraction of overfluctuations is {0:.2f} above {1:.2f} (N_trials={2}) (Scale={3})\".format(\n frac, disc_threshold, len(ts_array), scale\n )\n )\n\n y.append(frac)\n frac_25 = float(len(ts_array[ts_array > 25.])) / (\n float(len(ts_array)))\n\n logging.info(\n \"Fraction of overfluctuations is {0:.2f} above 25 (N_trials={1}) (Scale={2})\".format(\n frac_25, len(ts_array), scale\n )\n )\n\n y_25.append(frac_25)\n\n x = np.array([float(s) for s in x])\n\n x_flux = k_to_flux(x)\n\n threshold = 0.5\n\n sols = []\n\n for i, y_val in enumerate([y, y_25]):\n\n def f(x, a, b, c):\n value = scipy.stats.gamma.cdf(x, a, b, c)\n return value\n\n res = scipy.optimize.curve_fit(\n f, x, y_val, p0=[6, -0.1 * max(x), 0.1 * max(x)])\n\n best_a = res[0][0]\n best_b = res[0][1]\n best_c = res[0][2]\n\n 
def best_f(x):\n return f(x, best_a, best_b, best_c)\n\n sol = scipy.stats.gamma.ppf(0.5, best_a, best_b, best_c)\n setattr(self, [\"disc_potential\", \"disc_potential_25\"][i],\n k_to_flux(sol))\n\n xrange = np.linspace(0.0, 1.1 * max(x), 1000)\n\n savepath = self.plot_dir + \"disc\" + [\"\", \"_25\"][i] + \".pdf\"\n\n plt.figure()\n plt.scatter(x_flux, y_val, color=\"black\")\n plt.plot(k_to_flux(xrange), best_f(xrange), color=\"blue\")\n plt.axhline(threshold, lw=1, color=\"red\", linestyle=\"--\")\n plt.axvline(self.sensitivity, lw=2, color=\"black\", linestyle=\"--\")\n plt.axvline(self.disc_potential, lw=2, color=\"red\")\n plt.ylim(0., 1.)\n plt.xlim(0., k_to_flux(max(xrange)))\n plt.ylabel(r'Overfluctuations relative to 5 $\\sigma$ Threshold')\n plt.xlabel(r\"Flux [ GeV$^{-1}$ cm$^{-2}$ s$^{-1}$]\")\n plt.savefig(savepath)\n plt.close()\n\n if self.disc_potential > max(x_flux):\n self.extrapolated_disc = True\n\n msg = \"\"\n\n if self.extrapolated_disc:\n msg = \"EXTRAPOLATED \"\n\n logging.info(\"{0}Discovery Potential is {1:.3g}\".format(msg, self.disc_potential))\n logging.info(\"Discovery Potential (TS=25) is {0:.3g}\".format(self.disc_potential_25))\n\n def noflare_plots(self, scale):\n ts_array = np.array(self.results[scale][\"TS\"])\n ts_path = os.path.join(self.plot_dir, \"ts_distributions/\" + str(scale) + \".pdf\")\n\n plot_background_ts_distribution(ts_array, ts_path,\n ts_type=self.ts_type)\n\n param_path = os.path.join(self.plot_dir, \"params/\" + str(scale) + \".pdf\")\n\n # if self.show_inj:\n inj = self.inj[str(scale)]\n\n plot_fit_results(self.results[scale][\"Parameters\"], param_path,\n inj=inj)\n\n # def flare_plots(self, scale):\n #\n # sources = [x for x in self.results[scale].keys() if x != \"TS\"]\n #\n # for source in sources:\n #\n # ts_array = np.array(self.results[scale][source][\"TS\"])\n # ts_path = self.plot_dir + source + \"/ts_distributions/\" + str(\n # scale) + \".pdf\"\n #\n # plot_background_ts_distribution(ts_array, 
ts_path,\n # ts_type=self.ts_type)\n #\n # param_path = self.plot_dir + source + \"/params/\" + str(scale) + \\\n # \".pdf\"\n #\n # if self.show_inj:\n # inj = self.inj[str(scale)]\n # else:\n # inj = None\n #\n # plot_fit_results(self.results[scale][source][\"Parameters\"],\n # param_path, inj)\n\n def plot_bias(self):\n x = sorted(self.results.keys())\n raw_x = [scale_shortener(i) for i in sorted([float(j) for j in x])]\n base_x = [k_to_flux(float(j)) for j in raw_x]\n base_x_label = r\"$\\Phi_{1GeV}$ (GeV$^{-1}$ cm$^{-2}$)\"\n\n for i, param in enumerate(self.param_names):\n\n plt.figure()\n\n ax = plt.subplot(111)\n\n meds = []\n ulims = []\n llims = []\n trues = []\n\n for scale in raw_x:\n vals = self.results[scale][\"Parameters\"][param]\n med = np.median(vals)\n meds.append(med)\n sig = np.std(vals)\n ulims.append(med + sig)\n llims.append(med - sig)\n\n true = self.inj[scale][param]\n trues.append(true)\n\n if \"n_s\" in param:\n x = trues\n x_label = r\"$n_{injected}$\" + param.replace(\"n_s\", \"\")\n else:\n x = base_x\n x_label = base_x_label\n\n plt.scatter(x, meds, color=\"orange\")\n plt.plot(x, meds, color=\"black\")\n plt.plot(x, trues, linestyle=\"--\", color=\"red\")\n plt.fill_between(x, ulims, llims, alpha=0.5, color=\"orange\")\n\n ax.set_xlim(left=0.0, right=max(x))\n if min(trues) == 0.0:\n ax.set_ylim(bottom=0.0)\n\n plt.xlabel(x_label)\n plt.ylabel(param)\n plt.title(\"Bias (\" + param + \")\")\n\n savepath = os.path.join(self.plot_dir, \"bias_\" + param + \".pdf\")\n logging.info(\"Saving bias plot to {0}\".format(savepath))\n plt.savefig(savepath)\n plt.close()\n\n\n\n\n\n\n\n\n\n","sub_path":"flarestack/core/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":19865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"322737966","text":"#!/usr/bin/env python\n\n# Output Format:\n# Displacement should be enabled\n\n# Broadcasting:\n# BVH should be enabled, with string format\n\nSERVER_PORT_BVH = 7001\n\nimport socket\n\nclass PnReceiver:\n def connect(self, host, port):\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((host, port))\n\n def get_frames(self):\n for line in self._readlines(delim='||'):\n yield self._process_pn_bvh_line(line)\n\n def _readlines(self, buffer_size=4096, delim='\\n'):\n buffer = ''\n data = True\n while data:\n data = self._socket.recv(buffer_size)\n buffer += data\n\n while buffer.find(delim) != -1:\n line, buffer = buffer.split(delim, 1)\n yield line\n return\n\n def _process_pn_bvh_line(self, line):\n values_as_strings = line.split(\" \")\n # print values_as_strings\n values_as_strings = values_as_strings[2:] # skip ID (?) and name\n values_as_floats = [float(string)\n for string in values_as_strings\n if len(string) > 0]\n return values_as_floats\n","sub_path":"tracking/pn/receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"282379367","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.uic import loadUi\nfrom PyQt5.QtCore import *\nimport sys\nclass SubDlg:\n def __init__(self):\n self.dlg = loadUi('qtTest/dlg.ui')\n self.dlg.pushButton.clicked.connect(self.closeFn)\n # self.dlg.show() #modaless\n self.dlg.exec() # modal\n\n def closeFn(self):\n self.dlg.close()\n\nclass MyDlg:\n def __init__(self):\n self.dlg = loadUi('qtTest/e.ui')\n self.dlg.actiondialog.triggered.connect(self.dlgClick)\n self.dlg.show()\n\n def dlgClick(self):\n sub = SubDlg()\n s = sub.dlg.lineEdit.text()\n self.dlg.lineEditMain.setText( s )\n\n print(\"end....\")\n\napp = QApplication( sys.argv )\ndlg = MyDlg()\napp.exec() #무한loop 큐메모리 감시(윈도우종료시)\nprint('bye!')\n\n","sub_path":"Python/pythonTest_class/PythonClass/qtTest/q7.py","file_name":"q7.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"243345656","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef down(x, lmax):\n j = np.zeros((lmax+2,), float)\n j[lmax+1] = 1\n j[lmax] = 1\n for l in range(lmax, 0, -1):\n j[l-1] = ((2 * l + 1) / x) * j[l] - j[l+1]\n print('j%d is %.4f'%(l,j[l]))\n print(j[0])\n scale_factor = (np.sin(x) / x) / j[0]\n scaled_j = np.multiply(j, scale_factor)\n return scaled_j\n\ndef up(x, lmax):\n j = np.zeros((lmax+2,), float)\n j[0] = np.sin(x) / x\n j[1] = ((np.sin(x) ** 2) / (x * x)) - (np.cos(x) / x)\n for l in range(1, lmax+1, 1):\n j[l+1] = ((2 * l + 1) / x) * j[l] - j[l-1]\n return j\n\nif __name__ == '__main__':\n l = 25\n d1 = down(0.1, l)\n d2 = down(1, l)\n d3 = down(10, l)\n u1 = up(0.1, l)\n u2 = up(1, l)\n u3 = up(10, l)\n print(d1)\n print(d2)\n print(d3)\n print(u1)\n print(u2)\n print(u3)\n","sub_path":"scripts/task9.py","file_name":"task9.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"244034857","text":"test_size = 0.15\r\nn_epochs = 10\r\nbatch_size = 20\r\nlearning_rate = 0.01\r\nfilter_shape_1 = [5,5]\r\nfilter_shape_2 = [4,4]\r\nfilter_shape_3 = [3,3]\r\npool_shape = [2,2]\r\nnum_channels = 1\r\nfc_neurons_size = 256\r\nnum_filters1 = 32\r\nnum_filters2 = 64\r\nnum_filters3 = 128\r\n","sub_path":"nn_config.py","file_name":"nn_config.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"270530290","text":"\"\"\"\nCreated by Ryan Kan\nCopyright Ryan Kan 2018\n\nMain File.\n\"\"\"\n\n# IMPORTS\nimport os\nimport pathlib\n\nfrom modules.classes import *\nfrom modules.extra_functions import *\nfrom modules.items import *\n\n# GLOBAL VARIABLES\nVERSION_NUMBER = \"1.8.0\"\n\nHELP_FILE = \"HELP.txt\"\nCHANGELOG_FILE = \"CHANGELOG.txt\"\nINVENTORY_FILE = os.environ[\"HOME\"] + \"/Documents/Python/MurderMystery.inv\"\n\nDEFAULT_PLAYER_NAMES = [\"Alpha\", \"Bravo\", \"Charlie\", \"Delta\", \"Echo\", \"Foxtrot\", \"Golf\", \"Hotel\", \"India\", \"Juliet\",\n \"Kilo\", \"Lima\", \"Mike\", \"November\", \"Oscar\", \"Papa\", \"Quebec\", \"Romeo\", \"Sierra\", \"Tango\",\n \"Uniform\", \"Victor\", \"Whiskey\", \"X-ray\", \"Yankee\", \"Zulu\"] # The default names\nEXTRA_PLAYER_NAMES = ['Brett', 'Thomas', 'Karl', 'Michael', 'Isaac', 'Markus', 'Susan', 'Orville', 'Kara', 'Oxford',\n 'Troy', 'James', 'William', 'Shaun', 'Brett', 'Richard', 'Scott', 'Stanley', 'Amanda', 'Matthew',\n 'Scarlett', 'Paul', 'Dwayne', 'Anthony', 'Matthew', 'Chris', 'Simon', 'Yankee', 'Marquis',\n 'Roland', 'David', 'Allan', 'Vincent', 'Yorkshire', 'Carol', 'Tyler', 'Randy', 'Clarence', 'Tim',\n 'Hercule', 'North', 'Samantha', 'Bryan', 'Josh', 'Lisa', 'Anderson', 'Sean', 'Samuel', 'Ben',\n 'Connor', 'Neil', 'Frank', 'Andrew', 'Robert', 'Steve', 'Jessica', 'Charles', 'Andy', 'Joey',\n 'Adam'] # Extra custom names\n\nMAP_SIZE = (5, 5)\nMAX_TURN_AMOUNT = 100\nMIN_EVENT_TURN = 20\nMAX_EVENT_TURN = 70\nNO_GOLD_SPAWN_LOCATIONS = 4\nNO_OF_AI_PLAYERS = 5\nTRUST_REDUCTION_AMOUNT = 3\nPREGAME_GOLD = 0\n\nMURDERER_ONLY = False\nDETECTIVE_ONLY = False\nSHOW_ALL_CHAR_POS = False\nADD_EXTRA_NAMES = False\nREPLACE_DEFAULT_NAMES = False\n\n\n# FUNCTIONS\ndef get_player_from_number(computer_player_number, get_local_name_list=False):\n player_index = int(computer_player_number) - 1\n\n if get_local_name_list:\n return aiPlayers[player_index], knowThatIsAliveAI[player_index]\n\n else:\n return 
aiPlayers[player_index]\n\n\ndef make_accusation(accuser, accused):\n # Make accusation\n if not wantToSpectate:\n accusation_speech = random.randint(0, 2)\n if accusation_speech == 0:\n print(pink + accuser.name + \": I think %s is the murderer!\" % accused.name + reset)\n\n elif accusation_speech == 1:\n print(pink + accuser.name + \": %s definitely stabbed someone.\" % accused.name + reset)\n\n else:\n print(pink + accuser.name + \": The murderer is definitely %s!\" % accused.name + reset)\n\n # Create accusation sequence\n if accuser == you:\n accusation_sequence = [0, (aiPlayers + [you]).index(accused)]\n\n else:\n accusation_sequence = [aiPlayers.index(accuser) + 1, (aiPlayers + [you]).index(accused)]\n\n # Calculate trust\n t_player_list = aiPlayers.copy()\n\n if accusation_sequence[0] != 0: # If accuser is NOT \"you\"\n t_player_list.remove(accuser)\n\n if accusation_sequence[0] != 0: # If accuser is NOT \"you\"\n for t_player in t_player_list: # Human player can decide for him/herself\n random_selection = random.random() * 100\n if random_selection <= t_player.trustCounts[aiPlayers[accusation_sequence[0] - 1]] and accused in list(\n t_player.trustCounts.keys()):\n # Remove trust\n t_player.trustCounts[accused] -= TRUST_REDUCTION_AMOUNT\n\n # Give trust equally to other players\n other_players = list(t_player.trustCounts.keys())\n\n # Give trust\n for x_player in other_players:\n t_player.trustCounts[x_player] = t_player.trustCounts[x_player] + (\n TRUST_REDUCTION_AMOUNT / len(other_players))\n\n else: # If accuser is you\n for t_player in t_player_list:\n random_selection = random.random() * 100\n if random_selection <= t_player.trustCounts[\n ([you] + aiPlayers)[accusation_sequence[0]]] and accused in list(t_player.trustCounts.keys()):\n # Remove trust\n t_player.trustCounts[accused] -= TRUST_REDUCTION_AMOUNT\n\n # Give trust equally to other players\n other_players = list(t_player.trustCounts.keys())\n\n # Give trust\n for x_player in other_players:\n 
t_player.trustCounts[x_player] = t_player.trustCounts[x_player] + (\n TRUST_REDUCTION_AMOUNT / len(other_players))\n\n\n# Game functions\ndef player_available_options(player_pos_x, player_pos_y, role):\n left_bool = False\n right_bool = False\n up_bool = False\n down_bool = False\n\n together_count = 0\n together_string = None\n\n kill_bool = False\n shoot_bool = False\n investigate_bool = False\n interact_bool = False\n\n # Get interactives\n if (player_pos_x, player_pos_y) in interactivePos:\n the_interactive = interacts[interactivePos.index((player_pos_x, player_pos_y))]\n print(lightBlue + the_interactive.interact_message + \"\\n\" + reset)\n\n interact_bool = True\n\n for t_player in aiPlayers:\n if (player_pos_x, player_pos_y) == t_player.position:\n together_count += 1\n if t_player.alive is True:\n print(cyan + t_player.name, \"is with you.\\n\" + reset)\n\n else:\n print(purple + \"You see\", t_player.name + \"'s corpse on the floor.\\n\" + reset)\n try:\n knowThatIsAliveYou.remove(t_player.name)\n logs.append(\n str(currentTurn) + \": \" + you.name + \" found \" + t_player.name + \"'s corpse at \" + str(\n you.position))\n\n except ValueError:\n pass\n\n together_string = t_player\n\n # Show map\n if SHOW_ALL_CHAR_POS:\n all_player_pos = []\n all_player_names = []\n\n for x_player in aiPlayers:\n all_player_pos.append(x_player.position)\n all_player_names.append(x_player.name)\n\n show_map(interactivePos, [(player_pos_x, player_pos_y)] + all_player_pos, [you.name] + all_player_names,\n MAP_SIZE)\n\n else:\n show_map(interactivePos, [(player_pos_x, player_pos_y)], [you.name], MAP_SIZE)\n\n print(\"Your available actions are:\\n\")\n\n print(\"1. Do nothing\")\n if player_pos_x != 1 and (player_pos_x, player_pos_y) != (eventLocation[0] + 1, eventLocation[1]):\n left_bool = True\n print(\"2. Go left\")\n\n if player_pos_x != MAP_SIZE[0] and (player_pos_x, player_pos_y) != (eventLocation[0] - 1, eventLocation[1]):\n right_bool = True\n print(\"3. 
Go right\")\n\n if player_pos_y != 1 and (player_pos_x, player_pos_y) != (eventLocation[0], eventLocation[1] + 1):\n down_bool = True\n print(\"4. Go down\")\n\n if player_pos_y != MAP_SIZE[1] and (player_pos_x, player_pos_y) != (eventLocation[0], eventLocation[1] - 1):\n up_bool = True\n print(\"5. Go up\")\n\n if together_count == 1:\n if together_string.alive is True:\n if role == \"Murderer\":\n kill_bool = True\n print(\"6. Kill player\")\n\n elif role == \"Detective\":\n shoot_bool = True\n print(\"7. Shoot player\")\n\n for t_player in aiPlayers:\n if (player_pos_x, player_pos_y) == t_player.position and not t_player.alive:\n investigate_bool = True\n print(\"8. Investigate corpse\")\n\n print(\"9. Make accusation\")\n\n if interact_bool:\n print(\"10. Interact\")\n\n print()\n\n return left_bool, right_bool, up_bool, down_bool, together_string, kill_bool, shoot_bool, investigate_bool, interact_bool\n\n\ndef computers_available_options(computer_player_number, player_pos_x, player_pos_y, role):\n left_bool = False\n right_bool = False\n up_bool = False\n down_bool = False\n investigate_bool = False\n interact_bool = False\n\n together_count = 0\n computer_together_string = \"\"\n\n kill_bool = False\n shoot_bool_combo = (0, 0) # In 1s and 0s because we need a \"truth table\" format\n\n for t_n, t_player in enumerate(aiPlayers):\n player_number = str(t_n + 1)\n\n if computer_player_number != player_number:\n if (player_pos_x, player_pos_y) == t_player.position:\n together_count += 1\n computer_together_string = t_player\n if t_player.alive is False:\n character, local_alive_list = get_player_from_number(computer_player_number,\n get_local_name_list=True)\n\n try:\n local_alive_list.remove(t_player.name)\n logs.append(str(\n currentTurn) + \": \" + character.name + \" found \" + t_player.name + \"'s corpse at \" + str(\n character.position))\n\n except ValueError:\n pass\n\n investigate_bool = True\n\n character, know_that_is_alive = 
get_player_from_number(computer_player_number,\n get_local_name_list=True)\n character.update_trust(aiPlayers + [you], know_that_is_alive)\n\n if (player_pos_x, player_pos_y) == you.position:\n if you.alive is True:\n together_count += 1\n computer_together_string = you\n\n else:\n character, local_alive_list = get_player_from_number(computer_player_number,\n get_local_name_list=True)\n\n try:\n local_alive_list.remove(you.name)\n logs.append(str(currentTurn) + \": \" + character.name + \" found \" + you.name + \"'s corpse at \" + str(\n character.position))\n\n except ValueError:\n pass\n\n investigate_bool = True\n\n if (player_pos_x, player_pos_y) in interactivePos:\n interact_bool = True\n\n if player_pos_x != 1 and (player_pos_x, player_pos_y) != (eventLocation[0] + 1, eventLocation[1]):\n left_bool = True\n\n if player_pos_x != MAP_SIZE[0] and (player_pos_x, player_pos_y) != (eventLocation[0] - 1, eventLocation[1]):\n right_bool = True\n\n if player_pos_y != 1 and (player_pos_x, player_pos_y) != (eventLocation[0], eventLocation[1] + 1):\n down_bool = True\n\n if player_pos_y != MAP_SIZE[1] and (player_pos_x, player_pos_y) != (eventLocation[0], eventLocation[1] - 1):\n up_bool = True\n\n if together_count == 1:\n if role == \"Murderer\":\n kill_bool = True\n\n elif role == \"Detective\":\n character = get_player_from_number(computer_player_number)\n shoot_bool_combo = (1, 0) # First is whether can shoot or not, second is weather is utmost priority\n\n if (player_pos_x, player_pos_y) == list(character.trustCounts.keys())[\n list(character.trustCounts.values()).index(min(\n character.trustCounts.values()))].position: # If the most untrusted person is with the player, then allow him/her to shoot\n shoot_bool_combo = (1, 1)\n\n else:\n investigate_bool = False\n\n return left_bool, right_bool, up_bool, down_bool, computer_together_string, computer_player_number, kill_bool, shoot_bool_combo, investigate_bool, interact_bool\n\n\ndef player_act_on_input(player_input, 
player_pos_x, player_pos_y, together_string):\n if player_input == 2: # Left\n player_pos_x -= 1\n\n elif player_input == 3: # Right\n player_pos_x += 1\n\n elif player_input == 4: # Down\n player_pos_y -= 1\n\n elif player_input == 5: # Up\n player_pos_y += 1\n\n elif player_input == 6: # Murder\n print(red + random.choice(\n [\"You stab the victim using a %s. The victim screams, falling on the floor before bleeding to death.\",\n \"You stab the victim using a %s. The victim becomes limp, falling on the floor before bleeding to death.\"]) % you.weapon.name + reset)\n logs.append(str(\n currentTurn) + \": \" + you.name + \" stabbed \" + together_string.name + \" using a %s at \" % (\n red + you.weapon.name + reset)\n + str(together_string.position))\n together_string.alive = False\n together_string.death_cause = \"stabbed\"\n together_string.killer = you.name\n\n elif player_input == 7: # Shoot\n print(\n red + \"You shot the victim using a %s. The victim becomes limp, falling on the floor before bleeding to death.\" % you.weapon.name + reset)\n logs.append(str(\n currentTurn) + \": \" + you.name + \" shot \" + together_string.name + \" using a %s at \" % (\n red + you.weapon.name + reset) + str(\n together_string.position))\n together_string.alive = False\n together_string.death_cause = \"shot\"\n together_string.killer = you.name\n\n if together_string.role == \"Innocent\" or together_string.role == \"Detective\":\n you.role = \"Innocent\"\n print(yellow + \"The person you shot was not the murderer! You lost your gun!\\n\" + reset)\n\n elif player_input == 8: # Investigate\n print(green + \"\\nAfter investigating the corpse, you found out that\", together_string.name, \"was\",\n together_string.death_cause,\n \"to death. He was a \" + together_string.role + \".\" + reset)\n\n if you.role == \"Innocent\" and together_string.role == \"Detective\":\n print(green + \"You picked up a gun from\",\n together_string.name + \"'s body. 
You are now a detective.\" + reset)\n logs.append(str(currentTurn) + \": \" + you.name + \" picked up a gun at \" + str(you.position))\n\n you.role = \"Detective\"\n together_string.role = \"DETECTIVE\"\n\n if random.randint(0, 3) == 0: # Random tip\n tip = get_partial_killer_name(together_string.killer)\n print(green + \"You learn that the killer of\", together_string.name, \"is\", tip + \".\" + reset)\n\n logs.append(str(\n currentTurn) + \": \" + you.name + \" found out that the killer of \" + together_string.name + \" was \" + tip + \". (Actual name: \" + together_string.killer + \")\")\n\n elif player_input == 9: # Make Accusation\n accusation_index = 0\n while True:\n print(\"Here's the people you can accuse:\", *knowThatIsAliveYou, sep=\"\\n- \")\n print()\n\n try:\n accusation_index = int(input(\"Enter the respective number to accuse: \"))\n\n except SyntaxError and ValueError:\n print(\"Invalid input. Try again.\\n\")\n pass\n\n if 0 < accusation_index < len(knowThatIsAliveYou): # It passed the test\n break\n\n else:\n print(\"Invalid input. 
Try again.\\n\")\n\n # Get character from name\n char_name = knowThatIsAliveYou[accusation_index]\n accusation_char = None\n for t_player in aiPlayers:\n if char_name == t_player.name:\n accusation_char = t_player\n\n make_accusation(you, accusation_char)\n\n elif player_input == 10: # Interact\n i_index = interactivePos.index(you.position)\n appropriate_interactive = interacts[i_index]\n\n appropriate_interactive.attached_function(you)\n\n you.position = (player_pos_x, player_pos_y)\n\n\ndef computers_act_on_input(computer_player_number, player_input, player_pos_x, player_pos_y,\n computer_together_string):\n character, know_list = get_player_from_number(computer_player_number, get_local_name_list=True)\n\n if player_input == 2: # Left\n player_pos_x -= 1\n\n elif player_input == 3: # Right\n player_pos_x += 1\n\n elif player_input == 4: # Down\n player_pos_y -= 1\n\n elif player_input == 5: # Up\n player_pos_y += 1\n\n elif player_input == 6: # Murder\n if computer_together_string.alive is True:\n if computer_together_string.name != you.name:\n if random.randint(0, 4) == 0:\n print(red + \"\\nYou hear a scream in the distance...\\n\" + reset)\n\n else:\n print(\n red + \"\\nYou feel a sharp pain at your neck. 
You let out a scream, before collapsing on the floor.\\n\" + reset)\n\n logs.append(str(\n currentTurn) + \": \" + character.name + \" stabbed \" + computer_together_string.name + \" using a %s at \" % (\n red + character.weapon.name + reset) + str(computer_together_string.position))\n\n computer_together_string.alive = False\n computer_together_string.death_cause = \"stabbed\"\n computer_together_string.killer = character.name\n\n elif player_input == 7: # Shoot\n if computer_together_string.alive is True:\n if computer_together_string.name != you.name:\n if random.randint(0, 2) == 0:\n print(red + \"\\nYou hear a gun fire, followed by a scream...\\n\" + reset)\n\n else:\n print(red + \"\\nYou hear a gun fire...\\n\" + reset)\n else:\n print(red + \"\\nYou heard a gun fire. You black out, before collapsing on the floor, dead.\\n\" + reset)\n\n logs.append(str(\n currentTurn) + \": \" + character.name + \" shot \" + computer_together_string.name + \" using a %s at \" % (\n red + character.weapon.name + reset) + str(computer_together_string.position))\n\n if computer_together_string.role == \"Innocent\" or computer_together_string.role == \"Detective\":\n character.role = \"Innocent\"\n\n computer_together_string.alive = False\n computer_together_string.death_cause = \"shot\"\n computer_together_string.killer = character.name\n\n elif player_input == 8: # Investigate\n if character.role == \"Innocent\" and computer_together_string.role == \"Detective\":\n logs.append(str(currentTurn) + \": \" + character.name + \" picked up a gun at \" + str(\n character.position))\n\n character.role = \"Detective\"\n computer_together_string.role = \"DETECTIVE\"\n\n if random.randint(0, 2) == 0: # Random tip\n tip = get_partial_killer_name(computer_together_string.killer)\n\n logs.append(str(\n currentTurn) + \": \" + character.name + \" found out that the killer of \" + computer_together_string.name + \" was \" + tip + \". 
(Actual name: \" + computer_together_string.killer + \")\")\n\n elif player_input == 9: # Make accusation\n make_accusation(character, list(character.trustCounts.keys())[\n list(character.trustCounts.values()).index(min(list(character.trustCounts.values())))])\n\n elif player_input == 10:\n i_index = interactivePos.index((player_pos_x, player_pos_y))\n appropriate_interactive = interacts[i_index]\n\n appropriate_interactive.attached_function(character)\n\n character.position = (player_pos_x, player_pos_y)\n\n\ndef get_alive_players(): # For the end of the game\n alive_players = []\n\n for t_player in aiPlayers:\n # Get respective role's colour\n if t_player.role == \"Detective\" or t_player.role == \"DETECTIVE\":\n format_colour = blue\n\n elif t_player.role == \"Murderer\":\n format_colour = red\n\n else:\n format_colour = green\n\n # Test player if alive\n if t_player.alive:\n format_alive = \"ALIVE\"\n\n else:\n format_alive = \"DEAD\"\n\n format_string = \"%-7s| %-10s %-10s\" % (\n format_alive, t_player.name, format_colour + t_player.role.upper() + reset)\n alive_players.append(format_string)\n\n if not wantToSpectate:\n # Test player if alive\n if you.alive:\n format_alive = \"ALIVE\"\n\n else:\n format_alive = \"DEAD\"\n\n # Get role\n if you.role == \"Detective\" or you.role == \"DETECTIVE\":\n format_colour = blue\n\n elif you.role == \"Murderer\":\n format_colour = red\n\n else:\n format_colour = green\n\n format_string = \"%-7s| %-10s %-10s\" % (format_alive, you.name, format_colour + you.role.upper() + reset)\n alive_players.append(format_string)\n\n return sorted(alive_players)\n\n\n# Attached functions\ndef att_gold_machine(hitter):\n if hitter.gold >= 1:\n hitter.gold -= 1\n\n if random.randint(0, 3) == 0:\n if hitter == you:\n print(lightBlue + \"You hit the Jackpot! 
You got 5 gold!\" + reset)\n you.gold = 5\n\n logs.append(str(currentTurn) + \": \" + hitter.name + \" hit the Jackpot and got 5 gold!\")\n\n else:\n if hitter == you:\n print(lightBlue + \"You did not hit the Jackpot. You lost 1 gold.\" + reset)\n else:\n if hitter == you:\n print(lightBlue + \"Insufficient gold. You currently have\",\n str(you.gold) + \" gold. Come back when you have at least 1 gold.\" + reset)\n\n\n# CODE\ntempTurnLimit = MAX_TURN_AMOUNT\n\n# Main menu\nwhile True:\n print(\"-\" * 50)\n while True:\n # Generate inventory file if not generated already\n if not pathlib.Path(INVENTORY_FILE).is_file():\n tempInventoryFile = open(INVENTORY_FILE, \"w+\")\n tempInventoryFile.write(str({\"CrateCount\": 0, \"Items\": [\"Kitchen Knife\", \"Desert Eagle\"],\n \"SelectedItems\": [\"Kitchen Knife\", \"Desert Eagle\"]}))\n tempInventoryFile.close()\n\n inventory = parse_text_inventory(open(INVENTORY_FILE, \"r\").read())\n itemsOwned = inventory[\"Items\"]\n crateCount = inventory[\"CrateCount\"]\n selectedItems = inventory[\"SelectedItems\"]\n\n # Print menu\n wantToSpectate = False\n print(\"MURDER MYSTERY\")\n print(\"VERSION\", VERSION_NUMBER)\n print(\"\\n1. Start Game\\n2. Quit\\n3. Help\\n4. Changelog\\n5. Spectate a game\\n6. Console\\n7. Crates\\n\")\n\n try:\n playerMenuSelection = int(input(\"Enter the respective option here: \"))\n break\n\n except SyntaxError and ValueError:\n print(\"\\nInvalid input. 
Please try again.\\n\")\n\n if playerMenuSelection == 2: # No\n print(\"-\" * 50)\n exit()\n\n elif playerMenuSelection == 3: # Help\n print()\n print(open(HELP_FILE, \"r\").read())\n\n elif playerMenuSelection == 4: # Changelog\n print()\n print(open(CHANGELOG_FILE, \"r\").read())\n\n elif playerMenuSelection == 5:\n playerMenuSelection = 1\n wantToSpectate = True\n\n elif playerMenuSelection == 6: # Console\n print(green + \"STARTING CONSOLE\")\n try:\n while True:\n cmd = input(\"\\nENTER CONSOLE COMMAND: \")\n\n if cmd == \"exit\":\n print(\"CLOSING GAME\" + reset)\n print(\"-\" * 50)\n exit()\n\n elif cmd == (\"close\" or \"close_console\"):\n print(\"CLOSING CONSOLE\" + reset)\n break\n\n elif cmd == \"debug --mode murderer_only\":\n MURDERER_ONLY = not MURDERER_ONLY\n DETECTIVE_ONLY = False\n print(\"TOGGLED MURDERER ONLY FROM\", str(not MURDERER_ONLY).upper(), \"TO\",\n str(MURDERER_ONLY).upper())\n\n elif cmd == \"debug --mode detective_only\":\n DETECTIVE_ONLY = not DETECTIVE_ONLY\n MURDERER_ONLY = False\n print(\"TOGGLED DETECTIVE ONLY FROM\", str(not DETECTIVE_ONLY).upper(), \"TO\",\n str(DETECTIVE_ONLY).upper())\n\n elif cmd == \"debug --setting show_all_char_pos\":\n SHOW_ALL_CHAR_POS = not SHOW_ALL_CHAR_POS\n print(\"TOGGLED SHOWING OF ALL CHARACTER POSITIONS, FROM\", str(not SHOW_ALL_CHAR_POS).upper(), \"TO\",\n str(SHOW_ALL_CHAR_POS).upper())\n\n elif cmd == \"debug --setting add_extra_names\":\n ADD_EXTRA_NAMES = not ADD_EXTRA_NAMES\n print(\"TOGGLED ADDING OF EXTRA NAMES, FROM\", str(not ADD_EXTRA_NAMES).upper(), \"TO\",\n str(ADD_EXTRA_NAMES).upper())\n\n elif cmd == \"debug --setting replace_default_names\":\n REPLACE_DEFAULT_NAMES = not REPLACE_DEFAULT_NAMES\n print(\"TOGGLED REPLACING OF DEFAULT NAMES, FROM\", str(not REPLACE_DEFAULT_NAMES).upper(), \"TO\",\n str(REPLACE_DEFAULT_NAMES).upper())\n\n else:\n print(\"\\nERROR: UNKNOWN COMMAND '\" + cmd + \"'\\n\")\n\n except ValueError or SyntaxError:\n print(\"\\nERROR: INAPPROPRIATE 
SYNTAX\\n\")\n\n elif playerMenuSelection == 7: # Crates\n while True:\n while True:\n try:\n print(\"\\nYou have %s%s%s crates.\" % (blue, crateCount, reset))\n print(\"What would you like to do?\")\n print(\"\\n1. Close Menu\\n2. Open Crate\\n3. Change Selected Items\")\n crateMenuOption = int(input(\"\\nEnter the respective option here: \"))\n break\n\n except ValueError or SyntaxError:\n print(\"\\nInvalid input. Please try again.\\n\")\n\n if crateMenuOption == 1: # Close Menu\n finalItemsOwned = []\n for item in itemsOwned:\n finalItemsOwned.append(item.name)\n\n finalSelectionItems = []\n for item in selectedItems:\n finalSelectionItems.append(item.name)\n\n tempInventoryFile = open(INVENTORY_FILE, \"w\")\n tempInventoryFile.write(\n str({\"CrateCount\": crateCount, \"Items\": finalItemsOwned, \"SelectedItems\": finalSelectionItems}))\n tempInventoryFile.close()\n break\n\n elif crateMenuOption == 2: # Open Crate\n crateCount, itemsOwned = open_crate(crateCount, itemsOwned)\n\n elif crateMenuOption == 3: # Change selected item\n while True:\n print(\"\\nCurrently, your selected weapons are the %s and the %s.\" % (\n RARITY_TABLE[selectedItems[0].rarity] + \" \" + selectedItems[0].name + reset,\n RARITY_TABLE[selectedItems[1].rarity] + \" \" + selectedItems[1].name + reset))\n print(\"Here are your weapons that you own:\\n\")\n\n for n, item in enumerate(itemsOwned):\n print(\"%s. %s\" % (n + 1, RARITY_TABLE[item.rarity] + \" \" + item.name + reset))\n print()\n\n try:\n selectItemResponse = int(\n input(\"Enter the respective number to set it as your selected weapons: \"))\n\n if (selectItemResponse > len(itemsOwned)) or (selectItemResponse < 1):\n raise ValueError\n\n break\n except SyntaxError and ValueError:\n print(\"\\nInvalid input. 
Try again.\")\n\n print(\"You set the %s as your default weapon for the %s role.\" % (\n itemsOwned[selectItemResponse - 1].name, itemsOwned[selectItemResponse - 1].role))\n\n for item in selectedItems:\n if item.role == itemsOwned[selectItemResponse - 1].role:\n selectedItems.remove(item)\n selectedItems.append(itemsOwned[selectItemResponse - 1])\n\n if playerMenuSelection == 1: # Yes\n haveNotPassedThroughEndScreen = True\n optForPlaying = None\n gameNo = 0\n\n while True:\n if wantToSpectate:\n haveNotPassedThroughEndScreen = not haveNotPassedThroughEndScreen # Toggle\n optForPlaying = 1\n break\n\n print(\"Game modes:\")\n print(\"\\n1. Single Game\\n2. Murder Spree\\n\")\n\n try:\n optForPlaying = int(input(\"Enter the respective option here: \"))\n\n if optForPlaying > 2 or optForPlaying < 1:\n raise ValueError\n break\n\n except SyntaxError and ValueError:\n print(\"\\nInvalid input. Please try again.\\n\")\n\n while True:\n # Update game count\n gameNo += 1\n\n # Update turn limit\n MAX_TURN_AMOUNT = tempTurnLimit\n\n # Map selection\n mapNames = [\"Mine\", \"Aircraft\", \"Skyscraper\", \"Herald Express\", \"War Zone\", \"Casino\"]\n\n if optForPlaying == 1:\n print(\"\\nSelect a map:\\n\")\n\n for i in range(len(mapNames)):\n print(str(i + 1) + \". \" + mapNames[i])\n print()\n\n while True:\n try:\n playerMapSelection = int(input(\"Enter the respective option here: \"))\n\n if (playerMapSelection > len(mapNames)) or (playerMapSelection < 1):\n raise ValueError\n break\n\n except SyntaxError and ValueError:\n print(\"\\nInvalid input. 
Please try again.\\n\")\n\n print()\n print(yellow + \"Welcome, players, to the \" + mapNames[playerMapSelection - 1] + \"!\" + reset)\n\n else:\n playerMapSelection = random.randint(1, len(mapNames))\n\n print(red + \"\\nGAME\", gameNo, reset)\n print(yellow + \"For map number\", str(gameNo) + \", it is the\",\n mapNames[playerMapSelection - 1] + \"!\" + reset)\n\n if ADD_EXTRA_NAMES: # Add the extra names\n for extra_name in EXTRA_PLAYER_NAMES:\n DEFAULT_PLAYER_NAMES.append(extra_name)\n\n if REPLACE_DEFAULT_NAMES:\n DEFAULT_PLAYER_NAMES = EXTRA_PLAYER_NAMES\n\n # Check if enough names\n assert NO_OF_AI_PLAYERS <= len(DEFAULT_PLAYER_NAMES), \"Insufficient names to be assigned to players.\"\n\n print(\"\\nStarting the game!\\n\")\n print(\"-\" * 50)\n\n # Per-game variables\n roles = [\"Detective\", \"Murderer\"]\n for _ in range(NO_OF_AI_PLAYERS - 1):\n roles.append(\"Innocent\")\n\n tempNames = DEFAULT_PLAYER_NAMES.copy()\n gameEnded = False\n eventTurn = random.randint(MIN_EVENT_TURN, MAX_EVENT_TURN)\n\n eventLocation = (-1, -1) # Temporary before the event occurs\n\n # Game section\n logs = [] # For end-game events\n detectives = set([]) # For printing later; not for storing the roles\n\n # Get roles\n playerRoles = []\n for _ in range(NO_OF_AI_PLAYERS + 1):\n playerRole, roles = choose_role(roles)\n playerRoles.append(playerRole)\n\n # Setup players\n you = Player(random.randint(1, MAP_SIZE[0]), random.randint(1, MAP_SIZE[1]), playerRoles[0],\n name_selector(tempNames), 0)\n\n aiPlayers = []\n for index in range(NO_OF_AI_PLAYERS):\n aiPlayers.append(\n Player(random.randint(1, MAP_SIZE[0]), random.randint(1, MAP_SIZE[1]), playerRoles[index + 1],\n name_selector(tempNames), PREGAME_GOLD))\n\n # Update trust\n for aiPlayer in aiPlayers:\n aiPlayer.initial_trust([you] + aiPlayers)\n\n if MURDERER_ONLY is True: # Murderer Only\n if you.role == \"Innocent\":\n for player in aiPlayers:\n if player.role == \"Murderer\":\n player.role = \"Innocent\"\n break\n\n elif 
you.role == \"Detective\":\n for player in aiPlayers:\n if player.role == \"Innocent\":\n player.role = \"Detective\"\n break\n\n you.role = \"Murderer\"\n logs.append(\"0: \" + you.name.upper() + \" TURNED INTO A MURDERER USING THE DEBUG MENU.\")\n\n if DETECTIVE_ONLY is True: # Detective Only\n if you.role == \"Innocent\":\n for player in aiPlayers:\n if player.role == \"Detective\":\n player.role = \"Innocent\"\n break\n\n elif you.role == \"Murderer\":\n for player in aiPlayers:\n if player.role == \"Innocent\":\n player.role = \"Murderer\"\n break\n\n you.role = \"Detective\"\n logs.append(\"0: \" + you.name.upper() + \" TURNED INTO A DETECTIVE USING THE DEBUG MENU.\")\n\n Murderer = \"\"\n for player in aiPlayers + [you]:\n if player.role == \"Murderer\":\n Murderer = player.name\n break\n\n # Setup \"you\" list\n knowThatIsAliveYou = []\n\n for player in aiPlayers:\n knowThatIsAliveYou.append(player.name)\n\n random.shuffle(knowThatIsAliveYou)\n\n # Setup ai list\n knowThatIsAliveAI = []\n for index in range(NO_OF_AI_PLAYERS):\n aiPlayersCopy = aiPlayers.copy()\n del aiPlayersCopy[index]\n\n knowList = [you.name]\n\n for player in aiPlayersCopy:\n knowList.append(player.name)\n\n random.shuffle(knowList)\n knowThatIsAliveAI.append(knowList)\n\n # Check if the player wants to be a spectator or not.\n if wantToSpectate is True:\n you.alive = False\n if you.role == \"Murderer\":\n for player in aiPlayers:\n if player.role == \"Innocent\":\n player.role = \"Murderer\"\n break\n\n elif you.role == \"Detective\":\n for player in aiPlayers:\n if player.role == \"Innocent\":\n player.role = \"Detective\"\n break\n\n you.role = \"Spectator\"\n you.name = \"that of a spectator\" # The end result will be \"Your name is that of a spectator.\"\n you.position = (-2, -2)\n\n # Remove \"you\" from trust counts\n for player in aiPlayers:\n del player.trustCounts[you]\n\n # Game starting point\n currentTurn = 1\n doNotHaveWeapon = [you] + aiPlayers\n\n print(\"Your name is\", 
you.name + \". You are a\", you.role + \".\\nThe map size is\", str(MAP_SIZE) + \".\")\n\n while True:\n # Pre-turn computations\n interactivePos = []\n\n # Map events\n if playerMapSelection == 1: # The mine\n interacts = []\n\n for interactive in interacts:\n interactivePos.append(interactive.position)\n\n if currentTurn == eventTurn:\n eventLocation = (random.randint(1, MAP_SIZE[0]), random.randint(1, MAP_SIZE[1]))\n print(lightBlue + \"The part of the mine at\", eventLocation,\n \"has collapsed, crushing any players at that location!\" + reset)\n\n logs.append(\n str(currentTurn) + \": The part of the mine at \" + str(eventLocation) + \" has collapsed!\")\n\n for player in aiPlayers + [you]:\n if player.position == eventLocation:\n player.alive = False\n player.killer = \"Rocks\"\n player.death_cause = \"crushed\"\n\n logs.append(\n str(currentTurn) + \": \" + player.name + \" was crushed by the collapsing mine. \")\n\n elif playerMapSelection == 2: # Aircraft\n interacts = []\n\n for interactive in interacts:\n interactivePos.append(interactive.position)\n if currentTurn == eventTurn:\n if random.randint(0, 2) == 2:\n eventLocation = (random.randint(1, MAP_SIZE[0]), random.randint(1, MAP_SIZE[1]))\n MAX_TURN_AMOUNT = random.randint(eventTurn, eventTurn + 30)\n\n print(\n lightBlue + \"There has been a fire on the aircraft at\",\n str(eventLocation) + \"! It will land by the\",\n str(MAX_TURN_AMOUNT) + \"th turn!\" + reset)\n logs.append(str(\n currentTurn) + \": The aircraft has turned back due to a fire at \" + str(\n eventLocation) + \", killing any players at that location! It will land by the \" + str(\n MAX_TURN_AMOUNT) + \"th turn.\")\n\n for player in aiPlayers + [you]:\n if player.position == eventLocation:\n player.alive = False\n player.killer = \"Fire\"\n player.death_cause = \"burned\"\n\n logs.append(\n str(\n currentTurn) + \": \" + player.name + \" was burnt by the fire on the aircraft. 
\")\n\n else:\n MAX_TURN_AMOUNT = random.randint(eventTurn, MAX_TURN_AMOUNT - 20)\n print(\n lightBlue + \"The aircraft has not enough fuel to continue its flight! It will land back at the airport on the\",\n str(MAX_TURN_AMOUNT) + \"th turn!\" + reset)\n logs.append(str(\n currentTurn) + \": The aircraft has turned back due to fuel shortage, it will land by the \" + str(\n MAX_TURN_AMOUNT) + \"th turn.\")\n\n elif playerMapSelection == 3: # Skyscraper\n interacts = []\n\n for interactive in interacts:\n interactivePos.append(interactive.position)\n if currentTurn == eventTurn:\n eventTurn = random.randint(eventTurn, MAX_TURN_AMOUNT)\n\n eventType = random.randint(0, 4)\n if eventType == 0:\n for player in aiPlayers + [you]:\n player.position = (1, player.position[1])\n\n print(lightBlue + \"The skyscraper is collapsing, the building is tilting left!\" + reset)\n logs.append(str(currentTurn) + \": The skyscraper tilted left!\")\n\n elif eventType == 1:\n for player in aiPlayers + [you]:\n player.position = (MAP_SIZE[0], player.position[1])\n\n print(lightBlue + \"The skyscraper is collapsing, the building is tilting right!\" + reset)\n logs.append(str(currentTurn) + \": The skyscraper tilted right!\")\n\n elif eventType == 2:\n for player in aiPlayers + [you]:\n player.position = (player.position[0], 1)\n\n print(lightBlue + \"The skyscraper is collapsing, the building is tilting down!\" + reset)\n logs.append(str(currentTurn) + \": The skyscraper tilted down!\")\n\n elif eventType == 3:\n for player in aiPlayers + [you]:\n player.position = (player.position[0], MAP_SIZE[1])\n\n print(lightBlue + \"The skyscraper is collapsing, the building is tilting up!\" + reset)\n logs.append(str(currentTurn) + \": The skyscraper tilted up!\")\n\n elif eventType == 4:\n eventLocation = (random.randint(1, MAP_SIZE[0]), random.randint(1, MAP_SIZE[1]))\n print(lightBlue + \"The skyscraper caught fire at\", str(eventLocation) + \"!\" + reset)\n logs.append(\n str(currentTurn) + \": 
The skyscraper caught fire at\" + str(eventLocation) + \".\")\n\n for player in aiPlayers + [you]:\n if player.position == eventLocation:\n player.alive = False\n player.killer = \"Fire\"\n player.death_cause = \"burned\"\n\n logs.append(\n str(\n currentTurn) + \": \" + you.name + \" was burnt by the fire on the skyscraper. \")\n\n elif playerMapSelection == 4: # Herald Express\n interacts = []\n\n for interactive in interacts:\n interactivePos.append(interactive.position)\n if currentTurn == 1:\n print(lightBlue + \"Conductor: All aboard the Herald Express!\" + reset)\n\n if currentTurn == eventTurn:\n if eventTurn < tempTurnLimit:\n MAX_TURN_AMOUNT = random.randint(MAX_TURN_AMOUNT + 10, MAX_TURN_AMOUNT + 50)\n eventTurn = random.randint(tempTurnLimit, MAX_TURN_AMOUNT)\n\n print(\n lightBlue + \"Conductor: Ladies and Gentlemen, the train has encountered a track fault. The time taken to reach our destination has been extended.\" + reset)\n print(\n lightBlue + \"Conductor: We estimate the train to arrive on the \" + str(\n MAX_TURN_AMOUNT) + \"th turn!\" + reset)\n print(lightBlue + \"Conductor: Many apologies.\" + reset)\n\n # Move players right\n for player in aiPlayers + [you]:\n player.position = (MAP_SIZE[0], player.position[1])\n\n logs.append(str(\n currentTurn) + \": The train stopped; it will arrive by the \" + str(\n MAX_TURN_AMOUNT) + \"th turn.\")\n logs.append(str(\n currentTurn) + \": All players were pushed to the right!\")\n\n debounced = True\n else:\n print(lightBlue + \"Conductor: We got the train moving! Full speed ahead!\" + reset)\n\n # Move players left\n for player in aiPlayers + [you]:\n player.position = (1, player.position[1])\n\n logs.append(str(\n currentTurn) + \": The train is moving again! 
All players were pushed to the left!\")\n\n debounced = None\n\n elif playerMapSelection == 5: # War Zone\n interacts = []\n\n for interactive in interacts:\n interactivePos.append(interactive.position)\n eventLocation = (random.randint(1, MAP_SIZE[0]), random.randint(1, MAP_SIZE[1]))\n\n if random.randint(0, 2) == 0:\n deathType = random.randint(0, 4)\n weapon = None\n if deathType == 0:\n weapon = \"artillery shell struck\"\n\n elif deathType == 1:\n weapon = \"random RPG struck\"\n\n elif deathType == 2:\n weapon = \"mounted turret emptied its rounds at\"\n\n elif deathType == 3:\n weapon = \"missile struck\"\n\n else:\n weapon = \"grenade blew up\"\n\n print(lightBlue + \"A(n)\", weapon,\n str(eventLocation) + \"! Anyone there has been killed!\" + reset)\n\n for player in aiPlayers + [you]:\n if player.position == eventLocation:\n logs.append(str(currentTurn) + \": A(n) \" + weapon + \" \" + str(\n eventLocation) + \". \" + player.name + \" was killed as a result!\")\n\n player.alive = False\n player.killer = \"War\"\n player.death_cause = \"exploded/shot\"\n\n elif playerMapSelection == 6: # Casino\n interacts = [Interactive(int(round((MAP_SIZE[0] + 1) / 2)) - 1, int(round((MAP_SIZE[1] + 1) / 2)),\n att_gold_machine, \"You see a slot machine.\"),\n Interactive(int(round((MAP_SIZE[0] + 1) / 2)) + 1, int(round((MAP_SIZE[1] + 1) / 2)),\n att_gold_machine, \"You see a slot machine.\")]\n for interactive in interacts:\n interactivePos.append(interactive.position)\n\n # Spawn gold at GOLD_SPAWN_LOCATIONS locations\n goldLocations = []\n while True:\n goldLocation = (random.randint(1, MAP_SIZE[0]), random.randint(1, MAP_SIZE[1]))\n if goldLocation not in goldLocations:\n goldLocations.append(goldLocation)\n\n if len(goldLocations) == NO_GOLD_SPAWN_LOCATIONS:\n break\n\n # Calculate if any player is in these locations. 
If so, give player the gold if the player is an innocent\n for player in aiPlayers + [you]:\n if player.role == \"Innocent\" and player.alive is True and player.position in goldLocations:\n player.gold += 1\n if player == you:\n print(orange + \"You received 1 gold. (\" + str(5 - you.gold),\n \"more before you become a detective)\" + reset)\n\n logs.append(str(currentTurn) + \": \" + player.name + \" received 1 gold at \" + str(\n player.position))\n\n # Check if any player has 5 gold. If so, let that player be a detective.\n for player in aiPlayers + [you]:\n if player.gold == 5:\n if player == you:\n print(lightBlue + \"Since you had 5 gold, you became a detective!\" + reset)\n\n player.gold = 0\n player.role = \"Detective\"\n\n logs.append(\n str(currentTurn) + \": \" + player.name + \" became a detective at \" + str(\n player.position))\n\n # Add current and new detectives to the detectives' set\n for player in aiPlayers + [you]:\n if player.role == \"Detective\":\n detectives.add(player.name)\n\n # Set all players' weapons\n for player in doNotHaveWeapon:\n if player.role == \"Murderer\" or player.role == \"Detective\":\n if player != you:\n while True:\n weaponOfChoice = random.choice(CURRENT_ITEMS)\n if weaponOfChoice.role == player.role:\n player.weapon = weaponOfChoice\n break\n\n else:\n for item in selectedItems:\n if you.role == item.role:\n you.weapon = item\n\n doNotHaveWeapon.remove(player)\n\n # Execute turns\n if you.alive: # Your turn\n print(\"\\nThe current turn is\", str(currentTurn), \"out of\", str(MAX_TURN_AMOUNT),\n \"turns.\\n\")\n left, right, up, down, together, kill, shoot, investigate, interact = player_available_options(\n you.position[0],\n you.position[1],\n you.role)\n print('Other players that you think are still alive:', *knowThatIsAliveYou, sep='\\n- ')\n print()\n players_input = player_selected_option(left, right, up, down, kill, shoot, investigate, interact)\n player_act_on_input(players_input, you.position[0], 
you.position[1], together)\n\n for n, player in enumerate(aiPlayers):\n if player.alive:\n computerIdentifier = str(n + 1)\n\n left, right, up, down, computer_together, computer_together_number, kill, shoot, investigate, interact = computers_available_options(\n computerIdentifier, player.position[0], player.position[1], player.role)\n computers_act_on_input(computerIdentifier,\n computers_selected_option(left, right, up, down, kill, shoot,\n investigate, interact), player.position[0],\n player.position[1], computer_together)\n\n # End-game conditions\n for player in aiPlayers:\n if player.role == \"Murderer\" and player.alive is False:\n logs.append(str(currentTurn) + \": Game ended!\")\n\n trueAlivePlayers = get_alive_players()\n detectives = list(detectives) # Convert from set to list\n\n print(\"\\nThe murderer was killed! Innocents win! The murderer was\",\n Murderer + \"! The detective(s) was/were\",\n ', '.join(detectives) + \"!\")\n print('Players:', *trueAlivePlayers, sep='\\n- ')\n print('\\nEvents of this game:', *logs, sep='\\n- ')\n gameEnded = True\n\n if haveNotPassedThroughEndScreen is True:\n crateCount += 1\n print(blue + \"You got a crate.\" + reset)\n\n break\n if gameEnded:\n break\n\n elif you.role == \"Murderer\" and you.alive is False:\n logs.append(str(currentTurn) + \": Game ended!\")\n\n trueAlivePlayers = get_alive_players()\n detectives = list(detectives) # Convert from set to list\n\n print(\"\\nYou died! Innocents win! The detective(s) was/were\",\n ', '.join(detectives) + \"!\")\n print('Players:', *trueAlivePlayers, sep='\\n- ')\n print('\\nEvents of this game:', *logs, sep='\\n- ')\n break\n\n elif you.role == \"Murderer\" and you.alive is False:\n logs.append(str(currentTurn) + \": Game ended!\")\n\n trueAlivePlayers = get_alive_players()\n detectives = list(detectives) # Convert from set to list\n\n print(\"\\nYou died! Innocents win! 
The detective(s) was/were\",\n ', '.join(detectives) + \"!\")\n print('Players:', *trueAlivePlayers, sep='\\n- ')\n print('\\nEvents of this game:', *logs, sep='\\n- ')\n break\n\n if gameEnded:\n break\n\n elif MAX_TURN_AMOUNT == currentTurn:\n # Have custom ending text for some maps\n if playerMapSelection == 4: # Herald Express\n print(lightBlue + \"Conductor: Ladies and gentlemen, we have arrived!\" + reset)\n\n logs.append(str(currentTurn) + \": Game ended!\")\n\n trueAlivePlayers = get_alive_players()\n detectives = list(detectives) # Convert from set to list\n\n if you.role == \"Murderer\":\n print(\"\\nYou ran out of time! Innocents win! The murderer was\", Murderer,\n \"and the detective(s) was/were\",\n ', '.join(detectives) + \"!\")\n print('Players:', *trueAlivePlayers, sep='\\n- ')\n print('\\nEvents of this game:', *logs, sep='\\n- ')\n break\n else:\n print(\"\\nThe murderer ran out of time! Innocents win! The murderer was\", Murderer,\n \"and the detective(s) was/were\", ', '.join(detectives) + \"!\")\n print('Players:', *trueAlivePlayers, sep='\\n- ')\n print('\\nEvents of this game:', *logs, sep='\\n- ')\n\n if haveNotPassedThroughEndScreen is True:\n crateCount += 1\n print(blue + \"You got a crate.\" + reset)\n\n break\n\n else:\n for person in aiPlayers + [you]:\n otherPlayers = aiPlayers + [you]\n otherPlayers.remove(person)\n\n alivePlayers = []\n\n for p in otherPlayers:\n if p.alive is True:\n alivePlayers.append(p)\n\n if len(alivePlayers) == 0:\n logs.append(str(currentTurn) + \": Game ended!\")\n\n trueAlivePlayers = get_alive_players()\n detectives = list(detectives) # Convert from set to list\n\n print(\"\\nThe murderer won! 
The murderer was\", Murderer, \"and the detective(s) was/were\",\n ', '.join(detectives) + \"!\")\n print('Players:', *trueAlivePlayers, sep='\\n- ')\n print('\\nEvents of this game:', *logs, sep='\\n- ')\n\n gameEnded = True\n break\n\n if gameEnded:\n break\n\n # If you died.\n if you.alive is False and haveNotPassedThroughEndScreen is True: # This will appear only if the player is dead and he is considered to be dead.\n print(\"\\nYou died! The murderer was\", Murderer, \"and The current detective(s) was/were\",\n ', '.join(detectives) + \"!\")\n\n while True:\n print(\"Would you like for the game to continue without you?\\n\\n1. Yes\\n2. No\\n\")\n try:\n deathResponse = int(input(\"Enter the respective option here: \"))\n break\n except SyntaxError and ValueError:\n print(\"\\nInvalid input. Try again.\\n\")\n\n if deathResponse == 2:\n trueAlivePlayers = get_alive_players()\n\n print(\"\\nSince you decided to end the game prematurely, here's the round stats:\")\n print('Players:', *trueAlivePlayers, sep='\\n- ')\n print('\\nEvents of this game:', *logs, sep='\\n- Turn ')\n\n haveNotPassedThroughEndScreen = True\n break\n\n else:\n haveNotPassedThroughEndScreen = False\n print(\"\\nContinuing the game...\\n\")\n pass\n\n currentTurn += 1\n\n if not wantToSpectate:\n print()\n if optForPlaying == 1:\n break\n\n else:\n while True:\n print(\"Would you like for the game to continue?\\n\\n1. Yes\\n2. No\\n\")\n try:\n deathResponse = int(input(\"Enter the respective option here: \"))\n\n break\n except SyntaxError and ValueError:\n print(\"\\nInvalid input. Try again.\\n\")\n\n if deathResponse == 2:\n print(\"You have ended the Murderer Spree.\")\n print(\"You have played\", gameNo, \"games.\")\n break\n\n else:\n pass\n else:\n break\n","sub_path":"Previous Versions/Python 3/1.8.0/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":57805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"11116689","text":"__author__ = 'jacob'\r\nfrom django import template\r\nfrom videos.models import Video\r\n\r\nregister = template.Library()\r\n\r\n@register.inclusion_tag('videos/front_page_video.html')\r\ndef front_page_video(slug):\r\n obj = Video.objects.get(slug=slug)\r\n\r\n return {'video':obj}","sub_path":"videos/templatetags/videotemplatetags.py","file_name":"videotemplatetags.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"214786314","text":"list1 = input()[1:-1].split(\",\")\nlist1 = [int(i) for i in list1]\nlist2 = []\nfor m in range(len(list1) - 1):\n count = 0\n for n in range(m + 1, len(list1)):\n if list1[n] < list1[m]:\n count += 1\n list2.append(count)\nlist2.append(0)\nprint(list2)","sub_path":"Code/CodeRecords/2456/60692/256067.py","file_name":"256067.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"279746219","text":"from pymongo import MongoClient\nfrom bson.son import SON\nimport pprint\nfrom bson.code import Code\n\ndb = MongoClient().aggregation_example\nresult = db.things.insert_many(\n [\n {\"x\":1,\"tags\":[\"dog\",\"cat\"]},\n {\"x\":2,\"tags\":[\"cat\"]},\n {\"x\":2,\"tags\":[\"mouse\",\"cat\",\"dog\"]},\n {\"x\":3,\"tags\":[]}\n ]\n )\n\nprint(result.inserted_ids)\n\npipeline = [\n {\"$unwind\":\"$tags\"},\n {\"$group\":{\"_id\":\"$tags\",\"count\":{\"$sum\":1}}},\n {\"$sort\":SON([(\"count\",-1),(\"_id\",-1)])}\n ]\n\npprint.pprint(list(db.things.aggregate(pipeline)))\n\nprint(db.command('aggregate','things',pipeline = pipeline,explain=True))\n\nmapper = Code(\"\"\"\n function(){\n this.tags.forEach(function(z){\n emit(z,1);\n });\n }\n \"\"\"\n )\n\nreducer = Code(\"\"\"\n function(key,values){\n var total =0;\n for (var i = 0;i < values.length;i++){\n total +=values[i];\n }\n return total;\n }\n \"\"\")\n#map reduce\nresult =db.things.map_reduce(mapper,reducer,\"myresults\")\n\nfor doc in result.find():\n pprint.pprint(doc)\n#Advanced map/reduce\npprint.pprint(db.things.map_reduce(mapper,reducer,\"myresults\",full_response=True))\n\n\n#clear all messages\nclear_state=db.things.delete_many({})\nprint(clear_state.deleted_count)\n\n","sub_path":"shijingxian/demo/exercises/pymongo_test/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"503106051","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nfrom .base import Base\nfrom deoplete.util import load_external_module\n\n\ntry:\n current = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n sys.path.insert(0, current)\n\n from clang_source_base import import_library\n from clang_source_base import ClangDeopleteSourceBase\n from clang_source_base import ClangCompletionWrapper\nexcept Exception:\n pass\n\n\nclass Source(Base, ClangDeopleteSourceBase):\n def __init__(self, vim):\n Base.__init__(self, vim)\n ClangDeopleteSourceBase.__init__(self, vim)\n\n # The description of a source.\n self.description = 'clang completion'\n # Available filetype list.\n self.filetypes = ['objcpp']\n # The mark of a source\n self.mark = '[objcpp]'\n # The unique name of a source.\n self.name = 'objcpp'\n # Source priority. Higher values imply higher priority.\n self.rank = 600\n\n def setup_arg_manager(self, vim):\n clang_completer = import_library()\n argument_manager = clang_completer.OBJCPPArgumentManager()\n\n v = vim.vars\n definitions = v['deoplete#sources#objc#definitions']\n include_paths = v['deoplete#sources#objc#include_paths']\n include_paths += self.search_for_includes()\n\n for ip in include_paths:\n argument_manager.AddIncludePath(ip)\n for d in definitions:\n argument_manager.AddDefinition(d)\n return argument_manager\n\n def on_init(self, context):\n argument_manager = self.setup_arg_manager(self.vim)\n completer = ClangCompletionWrapper(argument_manager)\n self.set_completer(completer)\n\n def on_event(self, context):\n self.update(context)\n\n def gather_candidates(self, context):\n return self.get_candidates(context)\n","sub_path":"rplugin/python3/deoplete/sources/objcpp.py","file_name":"objcpp.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"88123452","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 24 11:52:04 2018\n\n@author: Chad\n\"\"\"\n\nimport sys\nimport random\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn import datasets\ndef readCSV2(): \n filename = './pima-indians-diabetes.data'\n data = pd.read_csv(filename, skipinitialspace=True)\n df = pd.DataFrame(data = data)\n df.columns = ['pregnant', 'plasma_glucose_level','Diastolic blood pressure', 'skin fold', 'serum insulin', 'BMI', 'Diabetes pedigree function', 'age(years)','targets1']\n return df\n\ndef separateDataAndTargetsEnd(dataframe,a):\n target = dataframe.iloc[:,a]\n target = pd.DataFrame.as_matrix(target,columns=None)\n dataframe.drop(dataframe.columns[len(dataframe.columns)-1], axis=1, inplace=True)\n dat = pd.DataFrame.as_matrix(dataframe,columns=None)\n return dat, target\n\ndef experimentalShell(data_train, data_test , targets_train, targets_test, classifier):\n model = classifier.fit(data_train, targets_train)\n targets_predicted = model.predict(data_test)\n return targets_predicted\n\n\n\nclass perception:\n accuricy = 0\n def __init__(self, forLayerParameter, numInputs, data, targets, df2):\n \n df2 = df2[df2.iloc[:,2] != 0]\n \n data = preprocessing.normalize(data, norm='l2') \n \n #number of inputs\n \n Classi = NNClassifier()\n model = Classi.fit(data, targets)\n targets = targets\n \n layerList = model.createLayer(forLayerParameter, numInputs)\n array = []\n for i in range(100):\n listtry = model.feedForward(layerList, data[i])\n layerList,predictons = model.backwardProp(listtry, targets[i])\n if predictons[1] > predictons[0]:\n array.append(1) \n else:\n array.append(0)\n \n count = 0.\n for i in range(len(array)):\n if int(array[i]) == int(targets[i]):\n count += 1\n self.accuracy = count/len(array)\n return \n \n def get(self):\n return self.accuracy\n\n 
#####################################################################\n # Neural Network Model\n # \n##################################################################### \nclass NNModel:\n def __init__(self, data_train, targets_train):\n self.data = data_train\n self.targets = targets_train\n return None\n \n def addBias(self,inputs):\n return np.append(inputs, -1)\n \n def checkActivation(self,inputs, weights):\n return np.dot(inputs,weights)\n \n def getActivation(self, hval):\n return 1/(1 + np.exp(-hval))\n \n def makeNode(self, size):\n weights = []\n activationVal = 0\n for i in range(size+1): \n weights.append(random.uniform(-.5, .5))\n return weights, activationVal\n \n def createNodelist(self, numNodes, numWeights):\n nodeList = []\n for i in range(numNodes): \n nodeList.append(self.makeNode(numWeights))\n return nodeList\n \n def createLayer(self, layerArray, numInputs):\n layerList = []\n layerList.append(self.createNodelist(layerArray[0],numInputs))\n \n for i in range(len(layerArray)-1):\n layerList.append(self.createNodelist(layerArray[i+1],layerArray[i]))\n \n \n return layerList\n \n def feedForward(self, layerList, inputs):\n inputAndBias = self.addBias(inputs)\n array = []\n array.append(-1)\n for i in range(len(layerList[0])):\n hval = self.checkActivation(inputAndBias,np.transpose(layerList[0][i][0]))\n activationVal = self.getActivation(hval)\n layerList[0][i]= layerList[0][i][0],activationVal\n array.append(layerList[0][i][1])\n \n activationArray = [[] for d in range(len(layerList))]\n weightsArray = [[] for d in range(len(layerList))]\n for i in range(len(layerList)):\n for j in range(len(layerList[i])):\n weightsArray[i].append(layerList[i][j][0])\n activationArray[i].append(layerList[i][j][1])\n activationArray[0].append(-1) \n \n for i in range(len(layerList)-1):\n if i > 0:\n activationArray[i].append(-1) \n for i in range(len(layerList)-1): \n for j in range(len(layerList[i+1])):\n hval = 
self.checkActivation(activationArray[i],np.transpose(weightsArray[i+1][j]))\n activationVal = self.getActivation(hval)\n activationArray[i+1][j] = activationVal\n for i in range(len(layerList)): \n for j in range(len(layerList[i])): \n layerList[i][j] = weightsArray[i][j],activationArray[i][j]\n return layerList\n# \n def backwardProp(self, layerList, target):\n errorArray = [[] for d in range(len(layerList))]\n for j in range(len(layerList[-1])):\n errorArray[-1].append(layerList[-1][j][1]*(1-layerList[-1][j][1])*(layerList[-1][j][1] - target))\n predictions = []\n for i in range(len(layerList[-1])):\n predictions.append(layerList[-1][i][1])\n for i in range(len(layerList)-1):\n for j in range(len(layerList[-(i+2)])):\n errorArray[-(i+2)].append(0) \n for i in range(len(layerList)-1):\n for j in range(len(layerList[-(i+2)])):\n for k in range(len(layerList[-(i+1)])):\n errorArray[-(i+2)][j] += layerList[-(i+2)][j][1]*(1-layerList[-(i+2)][j][1])*(errorArray[-(i+1)][k] * layerList[-(i+1)][k][0][j])\n lr = .2\n for i in range(len(layerList)):\n for j in range(len(layerList[i])):\n for k in range(len(layerList[i][j][0])):\n layerList[i][j][0][k] = layerList[i][j][0][k] - lr*errorArray[i][j]*layerList[i][j][1] \n for i in range(len(layerList)):\n for j in range(len(layerList[i])):\n layerList[i][j] = layerList[i][j][0],0 \n self.saveError(errorArray[-1][0], errorArray[-1][1], errorArray[-1][2])\n \n return layerList,predictions\n \n def saveError(self, valOne, valTwo, valThree):\n file = open('report2.csv', 'a')\n file.write('%f, %f, %f,\\n' % (valOne, valTwo, valThree))\n file.close()\n \n \nclass NNClassifier:\n def __init__(self):\n pass\n return\n \n def fit(self, data, targets):\n m = NNModel(data, targets)\n return m\n\n\ndef main(argv):\n avg = 0\n total = 0\n count = 0\n df1 = pd.DataFrame(datasets.load_iris().data)\n df2 = pd.DataFrame(datasets.load_iris().target)\n df_c = pd.concat([df1, df2], axis=1)\n print(df_c)\n \n for i in range(1) :\n df2 = df_c\n 
data, targets = separateDataAndTargetsEnd(df2,4)\n data = MinMaxScaler().fit_transform(data)\n numInputs = len(data[0])\n\n forLayerParameter = [3,2,6,3]\n nuralNetwork = perception(forLayerParameter, numInputs, data, targets, df2)\n acc = nuralNetwork.get()\n total += acc\n count += 1\n print(count, \" Accuracy =, \", acc) \n avg = total/count\n print(\"avg = \", avg)\n\n \n\n\nif __name__== \"__main__\":\n main(sys.argv)\n \n \n","sub_path":"Neuron Iris.py","file_name":"Neuron Iris.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"20523712","text":"from logistics_project.apps.tanzania.tests.base import TanzaniaTestScriptBase\nfrom logistics_project.apps.tanzania.tests.util import register_user\n\nclass TestYes(TanzaniaTestScriptBase):\n \n def testYes(self):\n contact = register_user(self, \"778\", \"someone\")\n script = \"\"\"\n 778 > ndio\n 778 < Kama umetuma R&R fomu yako jibu 'nimetuma', kama umepokea vifaa jibu 'nimepokea'\n \"\"\"\n self.runScript(script)","sub_path":"logistics_project/apps/tanzania/tests/yes.py","file_name":"yes.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"634592064","text":"# Zerynth - libs - microchip-mcp2515/mcp2515.py\n#\n# Zerynth library for mcp2515 component.\n#\n# @Author: m.cipriani\n#\n# @Date: 2017-11-24 10:47:11\n# @Last Modified by: m.cipriani\n# @Last Modified time: 2018-03-30 12:39:16\n\n\nimport spi\n\nTIMEOUTVALUE = 50\nMCP_SIDH = 0\nMCP_SIDL = 1\nMCP_EID8 = 2\nMCP_EID0 = 3\n\nMCP_TXB_EXIDE_M = 0x08\nMCP_DLC_MASK = 0x0F\nMCP_RTR_MASK = 0x40\n\n# Define SPI Instruction Set\nMCP_WRITE = 0x02\nMCP_READ = 0x03\nMCP_BITMOD = 0x05\nMCP_LOAD_TX0 = 0x40\nMCP_LOAD_TX1 = 0x42\nMCP_LOAD_TX2 = 0x44\nMCP_RTS_TX0 = 0x81\nMCP_RTS_TX1 = 0x82\nMCP_RTS_TX2 = 0x84\nMCP_RTS_ALL = 0x87\nMCP_READ_RX0 = 0x90\nMCP_READ_RX1 = 0x94\nMCP_READ_STATUS = 0xA0\nMCP_RX_STATUS = 0xB0\nMCP_RESET = 0xC0\n\n# CANCTRL Register Values\nMCP_MODE = {\n \"NORMAL\" : 0x00,\n \"SLEEP\" : 0x20,\n \"LOOPBACK\" : 0x40,\n \"LISTENONLY\": 0x60,\n \"CONFIG\" : 0x80,\n \"POWERUP\" : 0xE0,\n \"ONE_SHOT\" : 0x08\n}\n\nMODE_MASK = 0xE0\nABORT_TX = 0x10\nCLKOUT_ENABLE = 0x04\nCLKOUT_DISABLE = 0x00\nCLKOUT_PS1 = 0x00\nCLKOUT_PS2 = 0x01\nCLKOUT_PS4 = 0x02\nCLKOUT_PS8 = 0x03\n\n\n# Define MCP2515 register addresses\nMCP_RXF0SIDH = 0x00\nMCP_RXF0SIDL = 0x01\nMCP_RXF0EID8 = 0x02\nMCP_RXF0EID0 = 0x03\nMCP_RXF1SIDH = 0x04\nMCP_RXF1SIDL = 0x05\nMCP_RXF1EID8 = 0x06\nMCP_RXF1EID0 = 0x07\nMCP_RXF2SIDH = 0x08\nMCP_RXF2SIDL = 0x09\nMCP_RXF2EID8 = 0x0A\nMCP_RXF2EID0 = 0x0B\nMCP_BFPCTRL = 0x0C\nMCP_TXRTSCTRL = 0x0D\nMCP_CANSTAT = 0x0E\nMCP_CANCTRL = 0x0F\nMCP_RXF3SIDH = 0x10\nMCP_RXF3SIDL = 0x11\nMCP_RXF3EID8 = 0x12\nMCP_RXF3EID0 = 0x13\nMCP_RXF4SIDH = 0x14\nMCP_RXF4SIDL = 0x15\nMCP_RXF4EID8 = 0x16\nMCP_RXF4EID0 = 0x17\nMCP_RXF5SIDH = 0x18\nMCP_RXF5SIDL = 0x19\nMCP_RXF5EID8 = 0x1A\nMCP_RXF5EID0 = 0x1B\nMCP_TEC = 0x1C\nMCP_REC = 0x1D\nMCP_RXM0SIDH = 0x20\nMCP_RXM0SIDL = 0x21\nMCP_RXM0EID8 = 0x22\nMCP_RXM0EID0 = 0x23\nMCP_RXM1SIDH = 0x24\nMCP_RXM1SIDL = 0x25\nMCP_RXM1EID8 = 0x26\nMCP_RXM1EID0 = 0x27\nMCP_CNF3 = 0x28\nMCP_CNF2 = 0x29\nMCP_CNF1 = 
0x2A\nMCP_CANINTE = 0x2B\nMCP_CANINTF = 0x2C\nMCP_EFLG = 0x2D\n\nMCP_TXB0CTRL = 0x30\nMCP_TXB1CTRL = 0x40\nMCP_TXB2CTRL = 0x50\nMCP_TXB = [\n MCP_TXB0CTRL,\n MCP_TXB1CTRL,\n MCP_TXB2CTRL\n]\n\nMCP_RXB0CTRL = 0x60\nMCP_RXB0SIDH = 0x61\nMCP_RXB1CTRL = 0x70\nMCP_RXB1SIDH = 0x71\n\n\nMCP_TX_INT = 0x1C #Enable all transmit interrup ts\nMCP_TX01_INT = 0x0C #Enable TXB0 and TXB1 interru pts\nMCP_RX_INT = 0x03 #Enable receive interrupts\nMCP_NO_INT = 0x00 #Disable all interrupts\n\nMCP_TX01_MASK = 0x14\nMCP_TX_MASK = 0x54\n\n#Bits in the TXBnCTRL registers.\nMCP_TXB_TXBUFE_M = 0x80\nMCP_TXB_ABTF_M = 0x40\nMCP_TXB_MLOA_M = 0x20\nMCP_TXB_TXERR_M = 0x10\nMCP_TXB_TXREQ_M = 0x08\nMCP_TXB_TXIE_M = 0x04\nMCP_TXB_TXP10_M = 0x03\n#In TXBnDLC\nMCP_TXB_RTR_M = 0x40 \nMCP_RXB_IDE_M = 0x08 \nMCP_RXB_RTR_M = 0x40 \n#In RXBnSIDL, RXBnDLC\nMCP_STAT_RXIF_MASK = (0x03)\nMCP_STAT_RX0IF = (1<<0)\nMCP_STAT_RX1IF = (1<<1)\n\nMCP_EFLG_RX1OVR = (1<<7)\nMCP_EFLG_RX0OVR = (1<<6)\nMCP_EFLG_TXBO = (1<<5)\nMCP_EFLG_TXEP = (1<<4)\nMCP_EFLG_RXEP = (1<<3)\nMCP_EFLG_TXWAR = (1<<2)\nMCP_EFLG_RXWAR = (1<<1)\nMCP_EFLG_EWARN = (1<<0)\nMCP_EFLG_ERRORMASK = (0xF8)\n\nMCP_BxBFS_MASK = 0x30\nMCP_BxBFE_MASK = 0x0C\nMCP_BxBFM_MASK = 0x03\n\nMCP_BxRTS_MASK = 0x38\nMCP_BxRTSM_MASK = 0x07\n\n#CANINTF Register Bits\nMCP_RX0IF = 0x01\nMCP_RX1IF = 0x02\nMCP_TX0IF = 0x04\nMCP_TX1IF = 0x08\nMCP_TX2IF = 0x10\nMCP_ERRIF = 0x20\nMCP_WAKIF = 0x40\nMCP_MERRF = 0x80\n\nMCP_DLC_MASK = 0x0F \nMCP_RTR_MASK = 0x40 \n\nMCP_RXB_RX_ANY = 0x60\nMCP_RXB_RX_EXT = 0x40\nMCP_RXB_RX_STD = 0x20\nMCP_RXB_RX_STDEXT = 0x00\nMCP_RXB_RX_MASK = 0x60\nMCP_RXB_BUKT_MASK = (1<<2)\n\n#CNF1 Register Values\nSJW1 = 0x00\nSJW2 = 0x40\nSJW3 = 0x80\nSJW4 = 0xC0\n\n#CNF2 Register Values\nBTLMODE = 0x80\nSAMPLE_1X = 0x00\nSAMPLE_3X = 0x40\n\n#CNF3 Register Values\nSOF_ENABLE = 0x80\nSOF_DISABLE = 0x00\nWAKFIL_ENABLE = 0x40\nWAKFIL_DISABLE = 0x00\n\nMCPDEBUG = (0)\nMCPDEBUG_TXBUF = (0)\nMCP_N_TXBUFFERS = (3)\n\nMCP_RXBUF_0 = (MCP_RXB0SIDH)\nMCP_RXBUF_1 = 
(MCP_RXB1SIDH)\n\nCANSENDTIMEOUT = (200)\n\n#initial value of gCANAutoProcess\nCANAUTOPROCESS = (1)\nCANAUTOON = (1)\nCANAUTOOFF = (0)\n\nCAN_STDID = (0)\nCAN_EXTID = (1)\n\nCANDEFAULTIDENT = (0x55CC)\nCANDEFAULTIDENTEXT = (CAN_EXTID)\n\nMCP_STDEXT = 0\nMCP_STD = 1\nMCP_EXT = 2\nMCP_ANY = 3\n\nMAX_CHAR_IN_MESSAGE = 8\n\nCAN_RATE = {\n \"8MHZ\" : {\n \"5KBPS\" : [0xA7, 0xF6, 0x84],\n \"10KBPS\" : [0x93, 0xF6, 0x84],\n \"20KBPS\" : [0x89, 0xF6, 0x84],\n \"31KBPS\" : [0x87, 0xE5, 0x83],\n \"33KBPS\" : [0x85, 0xF6, 0x84],\n \"40KBPS\" : [0x84, 0xF6, 0x84],\n \"50KBPS\" : [0x84, 0xE5, 0x83],\n \"80KBPS\" : [0x84, 0xD3, 0x81],\n \"100KBPS\" : [0x81, 0xF6, 0x84],\n \"125KBPS\" : [0x81, 0xE5, 0x83],\n \"200KBPS\" : [0x80, 0xF6, 0x84],\n \"250KBPS\" : [0x80, 0xE5, 0x83],\n \"500KBPS\" : [0x00, 0xD1, 0x81],\n \"1000KBPS\": [0x00, 0xC0, 0x80],\n },\n \"16MHZ\" : {\n \"5KBPS\" : [0x3F, 0xFF, 0x87],\n \"10KBPS\" : [0x67, 0xF6, 0x84],\n \"20KBPS\" : [0x53, 0xF6, 0x84],\n \"33KBPS\" : [0x4E, 0xE5, 0x83],\n \"40KBPS\" : [0x49, 0xF6, 0x84],\n \"50KBPS\" : [0x47, 0xF6, 0x84],\n \"80KBPS\" : [0x44, 0xF6, 0x84],\n \"100KBPS\" : [0x44, 0xE5, 0x83],\n \"125KBPS\" : [0x43, 0xE5, 0x83],\n \"200KBPS\" : [0x41, 0xF6, 0x84],\n \"250KBPS\" : [0x41, 0xE5, 0x83],\n \"500KBPS\" : [0x40, 0xE5, 0x83],\n \"1000KBPS\": [0x00, 0xCA, 0x81],\n },\n \"20MHZ\" : {\n \"40KBPS\" : [0x18, 0xD3, 0x81],\n \"50KBPS\" : [0x49, 0xF6, 0x84],\n \"80KBPS\" : [0xC4, 0xFF, 0x87],\n \"100KBPS\" : [0x44, 0xF6, 0x84],\n \"125KBPS\" : [0x44, 0xE5, 0x83],\n \"200KBPS\" : [0x44, 0xD3, 0x81],\n \"250KBPS\" : [0x41, 0xF6, 0x84],\n \"500KBPS\" : [0x40, 0xF6, 0x84],\n \"1000KBPS\": [0x00, 0xD9, 0x82],\n }\n}\n\n\"\"\"\n.. module:: mcp2515\n\n***************\n MCP2515 Module\n***************\n.. _datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20001801H.pdf\n\nThis module contains the driver for Microchip MCP2515, a second generation \nstand-alone CAN controller. 
It is pin and function compatible with the MCP2510\nand also includes upgraded features like faster throughput, databyte\nfiltering, and support for time-triggered protocols (datasheet_).\n\nExample: ::\n \n from microchip.mcp2515 import mcp2515\n \n ...\n \n can = mcp2515.MCP2515(SPI0, D17, D16, clk=10000000)\n can.init(mcp2515.MCP_ANY, \"500KBPS\", \"16MHZ\")\n can.set_mode(\"NORMAL\")\n\n ...\n \n can.send(canid, data)\n \n \"\"\"\n\ndebug = False\n\nclass MCP2515(spi.Spi):\n \"\"\"\n===============\n MCP2515 class\n===============\n\n\n.. class:: MCP2515(dvr, cs, rst=None, clk=800000):\n\n Creates an instance of the MCP2515 class. This class allows the control of MCP2515 devices.\n \n :param drv: SPI Driver used '(SPI0, ...)'\n :param cs: Chip select of the SPI driver\n :param rst: Reset pin (default None)\n :param clk: Clock speed, default 800 kHz\n\n\n \"\"\"\n def __init__(self, dvr, cs, rst=None, clk=800000):\n\n spi.Spi.__init__(self,cs,dvr,clock=clk)\n if rst:\n self.rst = rst\n pinMode(self.rst, OUTPUT)\n self._reset()\n sleep(100)\n #Identifier Type\n self.ext = 0\n #CAN ID - Extended (29 bit) or Standard (11 bit)\n self.canid = bytearray([0x00, 0x00, 0x00, 0x00])\n #Data Length Code\n self.dlc = 0\n #Data array\n self.data = bytearray(MAX_CHAR_IN_MESSAGE)\n #Remote request flag\n self.rtr = 0\n self.mcpmode = MCP_MODE[\"LOOPBACK\"]\n self.buf = bytearray(1)\n\n def _reset(self):\n digitalWrite(self.rst,0)\n sleep(20)\n digitalWrite(self.rst,1)\n sleep(500)\n\n def _sw_reset(self):\n self.select()\n sleep(10)\n self.buf[0] = MCP_RESET\n self.write(self.buf)\n sleep(10)\n self.unselect()\n sleep(10)\n\n def _read_regs(self, addr,n=1):\n self.buf = bytearray(2)\n self.buf[0] = MCP_READ\n self.buf[1] = addr\n self.select()\n self.write(self.buf)\n res = self.read(n)\n self.unselect()\n self.buf = bytearray(1)\n return res\n\n def _set_regs(self, addr, data):\n if type(data) != PLIST:\n data = [data]\n self.buf = bytearray(len(data)+2)\n self.buf[0] = 
MCP_WRITE\n self.buf[1] = addr\n for i,elem in enumerate(data):\n self.buf[i+2] = elem\n self.select()\n self.write(self.buf)\n self.unselect()\n self.buf = bytearray(1)\n\n def _read_status(self):\n self.buf[0] = MCP_READ_STATUS\n self.select()\n self.write(self.buf)\n res = self.read(1)[0]\n # print(res)\n self.unselect()\n return res\n\n def _modify_register(self, addr, mask, value):\n self.buf = bytearray(4)\n self.select()\n for i,elem in enumerate([MCP_BITMOD, addr, mask, value]):\n self.buf[i] = elem\n self.write(self.buf)\n self.unselect()\n self.buf = bytearray(1)\n\n def _set_canctrl_mode(self, mode):\n self._modify_register(MCP_CANCTRL, MODE_MASK, mode)\n if debug:\n print(\"debug\")\n i = self._read_regs(MCP_CANCTRL)[0]\n i &= MODE_MASK\n if i == mode:\n print(\"ok\")\n else:\n raise InvalidHardwareStatusError\n\n def _config_rate(self, spd, clk):\n if clk not in CAN_RATE or spd not in CAN_RATE[clk]:\n raise ValueError\n self._set_regs(MCP_CNF1, CAN_RATE[clk][spd][0])\n self._set_regs(MCP_CNF2, CAN_RATE[clk][spd][1])\n self._set_regs(MCP_CNF3, CAN_RATE[clk][spd][2])\n\n def _write_masks_and_filters(self, addr, ext, canid):\n # print(canid)\n data = [0]*4\n idl = canid[0] + (canid[1] << 8)\n idh = canid[2] + (canid[3] << 8)\n data[MCP_EID0] = idl & 0xFF\n data[MCP_EID8] = idl >> 8 & 0xFF\n if ext:\n data[MCP_SIDL] = (idh & 0x03) & 0xFF\n data[MCP_SIDL] += ((idh & 0x1C) << 3) & 0xFF\n data[MCP_SIDL] |= MCP_TXB_EXIDE_M\n data[MCP_SIDH] = (idh >> 5 ) & 0xFF\n else:\n data[MCP_SIDL] = ((idh & 0x07) << 5) & 0xFF\n data[MCP_SIDH] = (idh >> 3 ) & 0xFF\n self._set_regs(addr, data)\n\n def _init_can_buffers(self):\n \n std = 0\n ext = 1\n #set both mask to 0\n self._write_masks_and_filters(MCP_RXM0SIDH, ext, self.canid)\n self._write_masks_and_filters(MCP_RXM1SIDH, ext, self.canid)\n #set all filters to 0\n self._write_masks_and_filters(MCP_RXF0SIDH, ext, self.canid)\n self._write_masks_and_filters(MCP_RXF1SIDH, std, self.canid)\n 
self._write_masks_and_filters(MCP_RXF2SIDH, ext, self.canid)\n self._write_masks_and_filters(MCP_RXF3SIDH, std, self.canid)\n self._write_masks_and_filters(MCP_RXF4SIDH, ext, self.canid)\n self._write_masks_and_filters(MCP_RXF5SIDH, std, self.canid)\n\n #clear, deactivate the three transmit buffers\n self._set_regs(MCP_TXB0CTRL, [0]*14)\n self._set_regs(MCP_TXB1CTRL, [0]*14)\n self._set_regs(MCP_TXB2CTRL, [0]*14)\n\n self._set_regs(MCP_RXB0CTRL, 0)\n self._set_regs(MCP_RXB1CTRL, 0)\n\n def _write_id(self, addr, ext, canid):\n idl = canid[0] + (canid[1] << 8)\n idh = canid[2] + (canid[3] << 8)\n data = [0]*4\n if ext:\n data[MCP_EID0] = idl & 0xFF\n data[MCP_EID8] = (idl >> 8) & 0xFF\n data[MCP_SIDL] = (idh & 0x03) & 0xFF\n data[MCP_SIDL] += ((idh & 0x1C) << 3) & 0xFF\n data[MCP_SIDL] |= MCP_TXB_EXIDE_M\n data[MCP_SIDH] = (idh >> 5 ) & 0xFF\n else:\n data[MCP_EID0] = 0\n data[MCP_EID8] = 0\n data[MCP_SIDL] = ((idh & 0x07) << 5) & 0xFF\n data[MCP_SIDH] = (idh >> 3) & 0xFF\n self._set_regs(addr, data)\n\n def _read_id(self, addr):\n buf = self._read_regs(addr, n=4)\n self.ext = 0\n idl = 0\n idh = ((buf[MCP_SIDH]<<3) + (buf[MCP_SIDL]>>5)) & 0xFFFF\n if (buf[MCP_SIDL] & MCP_TXB_EXIDE_M) == MCP_TXB_EXIDE_M:\n #extended id\n idh = (idh<<2) + (buf[MCP_SIDL] & 0x03)\n idl = buf[MCP_EID8]\n idl = idl<<8 + buf[MCP_EID0]\n self.ext = 1\n self.canid = bytearray([idl & 0xFF, (idl >> 8) & 0xFF, idh & 0xFF, (idh >> 8) & 0xFF])\n\n def _write_can_msg(self, sidh_addr):\n self._set_regs(sidh_addr+5, self.data, self.dlc)\n if self.rtr:\n self.dlc |= MCP_RTR_MASK\n self._set_regs(sidh_addr+4, self.dlc)\n self._write_id(sidh_addr, self.ext, self.canid)\n\n def _read_can_msg(self, sidh_addr):\n self._read_id(sidh_addr)\n ctrl = self._read_regs(sidh_addr-1)[0]\n self.dlc = self._read_regs(sidh_addr+4)[0]\n self.rtr = 0\n if ctrl == 0x08:\n self.rtr = 1\n self.dlc &= MCP_DLC_MASK\n return self._read_regs(sidh_addr+5, self.dlc)\n\n def _find_empty_transmit_buffer(self):\n res = None 
#ALLTXB_BUSY\n for elem in MCP_TXB:\n ctrlval = self._read_regs(elem)[0]\n if ctrlval & MCP_TXB_TXREQ_M == 0:\n res = elem+1\n break\n return res\n\n def _set_msg(self, canid, rtr, ext, data):\n self.canid = canid\n self.rtr = rtr\n self.ext = ext\n self.dlc = len(data)\n self.data = data\n\n def _clear_msg(self):\n self.canid = bytearray(4)\n self.dlc = 0\n self.ext = 0\n self.rtr = 0\n self.data = bytearray(MAX_CHAR_IN_MESSAGE)\n\n def _send_msg(self):\n attempts = 0\n txbuf = None\n while txbuf == None and attempts < 100:\n txbuf = self._find_empty_transmit_buffer()\n attempts += 1\n \n if attempts >= 100:\n raise TimeoutError\n\n self._write_can_msg(txbuf)\n self._modify_register(txbuf-1, MCP_TXB_TXREQ_M, MCP_TXB_TXREQ_M)\n\n attempts = 0\n full = 1\n while full and attempts < 100:\n full = self._read_regs(txbuf-1)[0]\n full = full & MCP_TXB_TXREQ_M\n attempts += 1\n \n # print(full,attempts,full & MCP_TXB_TXREQ_M)\n\n if attempts >= 150:\n raise TimeoutError\n\n def _read_msg(self):\n msg = None\n status = self._read_status()\n # print(\"status\",status)\n if status & MCP_STAT_RX0IF:\n # print(\"ch0\")\n msg = self._read_can_msg(MCP_RXBUF_0)\n self._modify_register(MCP_CANINTF, MCP_RX0IF, 0)\n elif status & MCP_STAT_RX1IF:\n # print(\"ch1\")\n msg = self._read_can_msg(MCP_RXBUF_1)\n self._modify_register(MCP_CANINTF, MCP_RX1IF, 0)\n return msg\n\n def init(self, idmode, speed, clock):\n \"\"\"\n.. 
method:: init(idmode, speed, clock) \n\n Initializes the MCP2515 chip \n \n :param idmode: set the RX buffer id mode (selectable from mcp2515.MCP_STDEXT, mcp2515.MCP_STD, mcp2515.MCP_EXT, or mcp2515.MCP_ANY\n :param speed: set the speed of the CAN communication \n :param clock: set the clock of the CAN Communication\n\n Possible combination of values for Clock and Speed are:\n\n * Clock --> 8MHZ\n \n =========== ================\n Clock Speed\n =========== ================\n \"8MHZ\" \"5KBPS\" \n \"8MHZ\" \"10KBPS\" \n \"8MHZ\" \"20KBPS\" \n \"8MHZ\" \"31KBPS\" \n \"8MHZ\" \"33KBPS\" \n \"8MHZ\" \"40KBPS\" \n \"8MHZ\" \"50KBPS\" \n \"8MHZ\" \"80KBPS\" \n \"8MHZ\" \"100KBPS\" \n \"8MHZ\" \"125KBPS\" \n \"8MHZ\" \"200KBPS\" \n \"8MHZ\" \"250KBPS\" \n \"8MHZ\" \"500KBPS\"\n \"8MHZ\" \"1000KBPS\"\n =========== ================\n\n * Clock --> 16MHZ\n \n =========== ================\n Clock Speed\n =========== ================\n \"16MHZ\" \"5KBPS\" \n \"16MHZ\" \"10KBPS\" \n \"16MHZ\" \"20KBPS\" \n \"16MHZ\" \"33KBPS\" \n \"16MHZ\" \"40KBPS\" \n \"16MHZ\" \"50KBPS\" \n \"16MHZ\" \"80KBPS\" \n \"16MHZ\" \"100KBPS\" \n \"16MHZ\" \"125KBPS\" \n \"16MHZ\" \"200KBPS\" \n \"16MHZ\" \"250KBPS\" \n \"16MHZ\" \"500KBPS\" \n \"16MHZ\" \"1000KBPS\"\n =========== ================\n\n * Clock --> 20MHZ\n \n =========== ================\n Clock Speed\n =========== ================\n \"20MHZ\" \"40KBPS\" \n \"20MHZ\" \"50KBPS\" \n \"20MHZ\" \"80KBPS\" \n \"20MHZ\" \"100KBPS\" \n \"20MHZ\" \"125KBPS\" \n \"20MHZ\" \"200KBPS\" \n \"20MHZ\" \"250KBPS\" \n \"20MHZ\" \"500KBPS\" \n \"20MHZ\" \"1000KBPS\" \n =========== ================\n\n \"\"\"\n self._sw_reset()\n self._set_canctrl_mode(MCP_MODE[\"CONFIG\"])\n self._config_rate(speed, clock)\n self._init_can_buffers()\n #interrupt mode\n self._set_regs(MCP_CANINTE, MCP_RX0IF | MCP_RX1IF)\n #set BF pins as GPO\n self._set_regs(MCP_BFPCTRL, MCP_BxBFS_MASK | MCP_BxBFE_MASK)\n #set RTS pin as GPI\n self._set_regs(MCP_TXRTSCTRL,0x00)\n\n 
#set mode\n if idmode == MCP_ANY:\n self._modify_register(MCP_RXB0CTRL, MCP_RXB_RX_MASK | MCP_RXB_BUKT_MASK, MCP_RXB_RX_ANY | MCP_RXB_BUKT_MASK)\n self._modify_register(MCP_RXB1CTRL, MCP_RXB_RX_MASK, MCP_RXB_RX_ANY)\n elif idmode == MCP_STD:\n self._modify_register(MCP_RXB0CTRL, MCP_RXB_RX_MASK | MCP_RXB_BUKT_MASK, MCP_RXB_RX_STD | MCP_RXB_BUKT_MASK)\n self._modify_register(MCP_RXB1CTRL, MCP_RXB_RX_MASK, MCP_RXB_RX_STD)\n elif idmode == MCP_EXT:\n self._modify_register(MCP_RXB0CTRL, MCP_RXB_RX_MASK | MCP_RXB_BUKT_MASK, MCP_RXB_RX_EXT | MCP_RXB_BUKT_MASK)\n self._modify_register(MCP_RXB1CTRL, MCP_RXB_RX_MASK, MCP_RXB_RX_EXT)\n elif idmode == MCP_STDEXT:\n self._modify_register(MCP_RXB0CTRL, MCP_RXB_RX_MASK | MCP_RXB_BUKT_MASK, MCP_RXB_RX_STDEXT | MCP_RXB_BUKT_MASK)\n self._modify_register(MCP_RXB1CTRL, MCP_RXB_RX_MASK, MCP_RXB_RX_STDEXT)\n else:\n raise ValueError\n\n self._set_canctrl_mode(MCP_MODE[\"LOOPBACK\"])\n\n def set_mode(self, mode):\n \"\"\"\n.. method:: set_mode(mode) \n\n Sets the operation mode of the MCP2515\n \n :param mode: operation mode (\"NORMAL\", \"SLEEP\", \"LOOPBACK\", \"LISTENONLY\", \"CONFIG\", \"POWERUP\", \"ONE_SHOT\")\n \n \"\"\"\n if mode not in MCP_MODE:\n raise ValueError\n self.mcpmode = mode\n self._set_canctrl_mode(MCP_MODE[mode])\n\n def init_mask(self, num, data, ext):\n \"\"\"\n.. method:: init_mask(num, data, ext) \n\n Initializes Masks\n \n :param num: 0 to set mask 0 on RX buffer, 1 to set mask 1 on RX buffer\n :param data: Data Mask\n :param ext: 0 for standard ID, 1 for Extended ID \n \n \"\"\"\n res = self._set_canctrl_mode(MCP_MODE[\"CONFIG\"])\n if res > 0:\n raise HardwareInitializationError\n\n if num == 0:\n self._write_masks_and_filters(MCP_RXM0SIDH, ext, data)\n elif num == 1:\n self._write_masks_and_filters(MCP_RXM1SIDH, ext, data)\n else:\n raise ValueError\n\n self._set_canctrl_mode(self.mcpmode)\n\n def init_filter(self, num, data, ext):\n \"\"\"\n.. 
method:: init_filter(num, data, ext) \n\n Initializes Filters\n \n :param num: number of filter to be set in RX buffer (from 0 to 5)\n :param data: Data Filter\n :param ext: 0 for standard ID, 1 for Extended ID \n \n \"\"\"\n res = self._set_canctrl_mode(MCP_MODE[\"CONFIG\"])\n if res > 0:\n raise HardwareInitializationError\n\n if num == 0:\n self._write_masks_and_filters(MCP_RXF0SIDH, ext, data);\n elif num == 1:\n self._write_masks_and_filters(MCP_RXF1SIDH, ext, data);\n elif num == 2:\n self._write_masks_and_filters(MCP_RXF2SIDH, ext, data);\n elif num == 3:\n self._write_masks_and_filters(MCP_RXF3SIDH, ext, data);\n elif num == 4:\n self._write_masks_and_filters(MCP_RXF4SIDH, ext, data);\n elif num == 5:\n self._write_masks_and_filters(MCP_RXF5SIDH, ext, data);\n else:\n raise ValueError\n\n self._set_canctrl_mode(self.mcpmode)\n\n def send(self, canid, data, ext=None):\n \"\"\"\n.. method:: send(canid, data, ext=None) \n\n Sends CAN messages\n \n :param canid: ID of the CAN message (bytearray of 4 bytes)\n :param data: Data to be sent (list of 8 bytes)\n :param ext: 0 for standard ID, 1 for Extended ID (default None - auto detected) \n \n \"\"\"\n if type(canid) != PBYTEARRAY:\n raise TypeError\n elif len(canid) != 4:\n raise ValueError\n if type(data) != PLIST:\n raise TypeError\n if len(data) > 8:\n raise IndexError\n rtr = 0\n if ext is None:\n ext = 0\n if canid[3] & 0x80 == 0x80:\n ext = 1\n if canid[3] & 0x40 == 0x40:\n rtr = 1\n self._set_msg(canid, rtr, ext, data)\n self._send_msg()\n\n def recv(self):\n \"\"\"\n.. 
method:: recv() \n\n Receives CAN messages returnung CAN id value and related data message\n \n Returns canid, msg \n \"\"\"\n msg = self._read_msg()\n canid = self.canid\n if msg is not None:\n if self.ext:\n canid[3] |= 0x80\n if self.rtr:\n canid[3] |= 0x40\n return canid, msg\n\n def check_recv(self):\n status = self._read_status()\n if status & MCP_STAT_RXIF_MASK:\n return True\n return None\n\n def check_error(self):\n eflag = self._read_regs(MCP_EFLG)[0]\n if eflag & MCP_EFLG_ERRORMASK:\n return True\n return None\n\n def get_error(self):\n return self._read_regs(MCP_EFLG)[0]\n\n def error_count_rx(self):\n return self._read_regs(MCP_REC)[0]\n\n def error_count_tx(self):\n return self._read_regs(MCP_TEC)[0]\n\n def enable_one_shot_tx(self):\n self._modify_register(MCP_CANCTRL, MCP_MODE[\"ONE_SHOT\"], MCP_MODE[\"ONE_SHOT\"])\n if (self._read_regs(MCP_CANCTRL)[0] & MCP_MODE[\"ONE_SHOT\"]) != MCP_MODE[\"ONE_SHOT\"]:\n return False\n return True\n\n def disable_one_shot_tx(self):\n self._modify_register(MCP_CANCTRL, MCP_MODE[\"ONE_SHOT\"], 0)\n if (self._read_regs(MCP_CANCTRL)[0] & MCP_MODE[\"ONE_SHOT\"]) != 0:\n return False\n return True\n\n def abort_tx(self):\n self._modify_register(MCP_CANCTRL, ABORT_TX, ABORT_TX)\n if(self._read_regs(MCP_CANCTRL)[0] & ABORT_TX) != ABORT_TX:\n return False\n return True\n\n def set_GPO(self, data):\n self._modify_register(MCP_BFPCTRL, MCP_BxBFS_MASK, (data<<4))\n\n def set_GPI(self):\n res = self.read_regs(MCP_TXRTSCTRL) & MCP_BxRTS_MASK\n return res\n \n\n\n\n\n\n","sub_path":"mcp2515.py","file_name":"mcp2515.py","file_ext":"py","file_size_in_byte":24210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"327423827","text":"#!/usr/bin/env python3\n\n# This software was developed at the National Institute of Standards\n# and Technology by employees of the Federal Government in the course\n# of their official duties. Pursuant to title 17 Section 105 of the\n# United States Code this software is not subject to copyright\n# protection and is in the public domain. NIST assumes no\n# responsibility whatsoever for its use by other parties, and makes\n# no guarantees, expressed or implied, about its quality,\n# reliability, or any other characteristic.\n#\n# We would appreciate acknowledgement if the software is used.\n\n\"\"\"\nThis tool parses the RDF output of ExifTool, mapping it into UCO properties and relationships-of-assumption. An analyst should later annotate the output with their beliefs on its verity.\n\"\"\"\n\n__version__ = \"0.2.0\"\n\nimport argparse\nimport contextlib\nimport logging\nimport os\n\nimport rdflib.plugins.sparql\n\ntry:\n from case_exiftool import local_uuid\nexcept ImportError:\n if __name__ != \"__main__\":\n raise\n import local_uuid\n\n_logger = logging.getLogger(os.path.basename(__file__))\n\nNS_EXIFTOOL_COMPOSITE = \"http://ns.exiftool.ca/Composite/1.0/\"\nNS_EXIFTOOL_ET = \"http://ns.exiftool.ca/1.0/\"\nNS_EXIFTOOL_EXIFTOOL = \"http://ns.exiftool.ca/ExifTool/1.0/\"\nNS_EXIFTOOL_GPS = \"http://ns.exiftool.ca/EXIF/GPS/1.0/\"\nNS_EXIFTOOL_SYSTEM = \"http://ns.exiftool.ca/File/System/1.0/\"\nNS_EXIFTOOL_FILE = \"http://ns.exiftool.ca/File/1.0/\"\nNS_EXIFTOOL_IFD0 = \"http://ns.exiftool.ca/EXIF/IFD0/1.0/\"\nNS_EXIFTOOL_EXIFIFD = \"http://ns.exiftool.ca/EXIF/ExifIFD/1.0/\"\nNS_EXIFTOOL_NIKON = \"http://ns.exiftool.ca/MakerNotes/Nikon/1.0/\"\nNS_EXIFTOOL_PREVIEWIFD = \"http://ns.exiftool.ca/MakerNotes/PreviewIFD/1.0/\"\nNS_EXIFTOOL_INTEROPIFD = \"http://ns.exiftool.ca/EXIF/InteropIFD/1.0/\"\nNS_EXIFTOOL_IFD1 = \"http://ns.exiftool.ca/EXIF/IFD1/1.0/\"\nNS_RDF = rdflib.RDF\nNS_RDFS = rdflib.RDFS\nNS_UCO_CORE = 
rdflib.Namespace(\"https://unifiedcyberontology.org/ontology/uco/core#\")\nNS_UCO_LOCATION = rdflib.Namespace(\"https://unifiedcyberontology.org/ontology/uco/location#\")\nNS_UCO_OBSERVABLE = rdflib.Namespace(\"https://unifiedcyberontology.org/ontology/uco/observable#\")\nNS_UCO_TYPES = rdflib.Namespace(\"https://unifiedcyberontology.org/ontology/uco/types#\")\nNS_UCO_VOCABULARY = rdflib.Namespace(\"https://unifiedcyberontology.org/ontology/uco/vocabulary#\")\nNS_XSD = rdflib.namespace.XSD\n\nargument_parser = argparse.ArgumentParser(epilog=__doc__)\nargument_parser.add_argument(\"--base-prefix\", default=\"http://example.org/kb/\")\nargument_parser.add_argument(\"--debug\", action=\"store_true\")\nargument_parser.add_argument(\"--output-format\", help=\"RDF syntax to use for out_graph. Passed to rdflib.Graph.serialize(format=). The format will be guessed based on the output file extension, but will default to Turtle.\")\nargument_parser.add_argument(\"--print-conv-xml\", help=\"A file recording the output of ExifTool run against some file. Expects exiftool was run as for --raw-xml, but also with the flag --printConv (note the double-dash).\")\nargument_parser.add_argument(\"--raw-xml\", help=\"A file recording the output of ExifTool run against some file. 
def guess_graph_format(filename):
    """
    Guess an rdflib serialization format from a file name's extension.

    :param filename: output path whose extension selects the format.
    :return: "json-ld" for JSON-LD style extensions, otherwise "turtle"
      (which is also the fallback for unrecognized extensions).
    """
    # Strip the leading dot; lowercase so "graph.JSON" is recognized too
    # (the original comparison was case-sensitive).
    ext = os.path.splitext(filename)[-1].replace(".", "").lower()
    if ext in ("json", "json-ld", "jsonld"):
        return "json-ld"
    # "ttl"/"turtle" and anything unrecognized both serialize as Turtle,
    # so the former explicit elif branch was redundant.
    return "turtle"
    def __init__(self, graph, ns_base):
        """
        :param graph: rdflib.Graph that all mapped triples are written into.
        :param ns_base: rdflib.Namespace used as the IRI prefix for nodes
          minted by this mapper (validated by the ns_base property setter).
        """
        assert isinstance(graph, rdflib.Graph)

        # TODO Build n_file_facet and n_content_data_facet from new case_file function, or inherit from graph that is just that file.
        # Lazily-populated state: each _n_* node and the EXIF dictionary are
        # created on first access by the corresponding @property, so only the
        # objects actually needed for a given input file get serialized.
        self._exif_dictionary_dict = None
        self._graph = graph
        self._kv_dict_raw = None
        self._kv_dict_printconv = None
        self._mime_type = None
        self._n_camera_object = None
        self._n_camera_object_device_facet = None
        self._n_content_data_facet = None
        self._n_exif_dictionary_object = None
        self._n_exif_facet = None
        self._n_file_facet = None
        self._n_location_object = None
        self._n_location_object_latlong_facet = None
        self._n_observable_object = None
        self._n_raster_picture_facet = None
        self._n_relationship_object_location = None
        self._oo_slug = None
        self.ns_base = ns_base
v_printconv) = self.pop_iri(exiftool_iri)\n self.graph.add((\n self.n_camera_object_device_facet,\n NS_UCO_OBSERVABLE.manufacturer,\n v_printconv\n ))\n elif exiftool_iri == \"http://ns.exiftool.ca/EXIF/IFD0/1.0/Model\":\n (v_raw, v_printconv) = self.pop_iri(exiftool_iri)\n self.graph.add((\n self.n_camera_object_device_facet,\n NS_UCO_OBSERVABLE.model,\n v_raw\n ))\n elif exiftool_iri == \"http://ns.exiftool.ca/File/1.0/MIMEType\":\n (v_raw, v_printconv) = self.pop_iri(exiftool_iri)\n self.mime_type = v_raw.toPython()\n # Special case - graph logic is delayed for this IRI, because of needing to initialize the base ObservableObject based on the value.\n elif exiftool_iri == \"http://ns.exiftool.ca/File/System/1.0/FileSize\":\n (v_raw, v_printconv) = self.pop_iri(exiftool_iri)\n self.graph.add((\n self.n_content_data_facet,\n NS_UCO_OBSERVABLE.sizeInBytes,\n rdflib.Literal(v_raw.toPython(), datatype=NS_XSD.long)\n ))\n elif exiftool_iri == \"http://ns.exiftool.ca/Composite/1.0/GPSAltitude\":\n (v_raw, v_printconv) = self.pop_iri(exiftool_iri)\n l_altitude = rdflib.Literal(v_raw.toPython(), datatype=NS_XSD.decimal)\n self.graph.add((\n self.n_location_object_latlong_facet,\n NS_UCO_LOCATION.altitude,\n l_altitude\n ))\n elif exiftool_iri == \"http://ns.exiftool.ca/Composite/1.0/GPSLatitude\":\n (v_raw, v_printconv) = self.pop_iri(exiftool_iri)\n l_latitude = rdflib.Literal(v_raw.toPython(), datatype=NS_XSD.decimal)\n self.graph.add((\n self.n_location_object_latlong_facet,\n NS_UCO_LOCATION.latitude,\n l_latitude\n ))\n elif exiftool_iri == \"http://ns.exiftool.ca/Composite/1.0/GPSLongitude\":\n (v_raw, v_printconv) = self.pop_iri(exiftool_iri)\n l_longitude = rdflib.Literal(v_raw.toPython(), datatype=NS_XSD.decimal)\n self.graph.add((\n self.n_location_object_latlong_facet,\n NS_UCO_LOCATION.longitude,\n l_longitude\n ))\n elif exiftool_iri == \"http://ns.exiftool.ca/Composite/1.0/GPSPosition\":\n (v_raw, v_printconv) = self.pop_iri(exiftool_iri)\n 
self.graph.add((\n self.n_location_object,\n NS_RDFS.label,\n v_printconv\n ))\n elif exiftool_iri in {\n \"http://ns.exiftool.ca/EXIF/GPS/1.0/GPSAltitudeRef\",\n \"http://ns.exiftool.ca/EXIF/GPS/1.0/GPSAltitude\",\n \"http://ns.exiftool.ca/EXIF/GPS/1.0/GPSLatitudeRef\",\n \"http://ns.exiftool.ca/EXIF/GPS/1.0/GPSLatitude\",\n \"http://ns.exiftool.ca/EXIF/GPS/1.0/GPSLongitudeRef\",\n \"http://ns.exiftool.ca/EXIF/GPS/1.0/GPSLongitude\"\n }:\n (v_raw, v_printconv) = self.pop_iri(exiftool_iri)\n dict_key = exiftool_iri.replace(\"http://ns.exiftool.ca/EXIF/GPS/1.0/GPS\", \"\")\n self.exif_dictionary_dict[dict_key] = v_raw\n elif exiftool_iri == \"http://ns.exiftool.ca/EXIF/ExifIFD/1.0/ExifImageHeight\":\n (v_raw, v_printconv) = self.pop_iri(exiftool_iri)\n self.exif_dictionary_dict[\"Image Height\"] = v_raw\n if not self._n_raster_picture_facet is None:\n self.graph.add((\n self.n_raster_picture_facet,\n NS_UCO_OBSERVABLE.pictureHeight,\n rdflib.Literal(int(v_raw.toPython()))\n ))\n elif exiftool_iri == \"http://ns.exiftool.ca/EXIF/ExifIFD/1.0/ExifImageWidth\":\n (v_raw, v_printconv) = self.pop_iri(exiftool_iri)\n self.exif_dictionary_dict[\"Image Width\"] = v_raw\n if not self._n_raster_picture_facet is None:\n self.graph.add((\n self.n_raster_picture_facet,\n NS_UCO_OBSERVABLE.pictureWidth,\n rdflib.Literal(int(v_raw.toPython()))\n ))\n else:\n # Somewhat in the name of information preservation, somewhat as a progress marker on converting data: Attach all remaining unconverted properties directly to the ObservableObject. 
Provide both values to assist with mapping decisions.\n (v_raw, v_printconv) = self.pop_iri(exiftool_iri)\n if not v_raw is None:\n self.graph.add((\n self.n_observable_object,\n rdflib.URIRef(exiftool_iri),\n v_raw\n ))\n if not v_printconv is None:\n self.graph.add((\n self.n_observable_object,\n rdflib.URIRef(exiftool_iri),\n v_printconv\n ))\n\n def map_raw_and_printconv_rdf(self, filepath_raw_xml, filepath_printconv_xml):\n \"\"\"\n Loads the print-conv and raw graphs into a dictionary for processing by consuming known IRIs.\n\n This function has a side effect of mutating the internal variables:\n * self._kv_dict_raw\n * self._kv_dict_raw\n * self._exiftool_predicate_iris\n \"\"\"\n # Output key: Graph predicate from file RDF-corrected IRI.\n # Output value: Object (whether Literal or URIRef).\n def _xml_file_to_dict(xml_file):\n kv_dict = dict()\n with contextlib.closing(rdflib.Graph()) as in_graph:\n in_graph.parse(xml_file, format=\"xml\")\n query = rdflib.plugins.sparql.prepareQuery(\"\"\"\\\nSELECT ?s ?p ?o\nWHERE {\n ?s ?p ?o .\n}\"\"\")\n for (result_no, result) in enumerate(in_graph.query(query)):\n # v_object might be a literal, might be an object reference. \"v\" for \"varying\". 
Because some properties are binary, do not decode v_object.\n (\n n_subject,\n p_predicate,\n v_object,\n ) = result\n subject_iri = n_subject.toPython()\n predicate_iri = p_predicate.toPython()\n kv_dict[predicate_iri] = v_object\n return kv_dict\n self._kv_dict_raw = _xml_file_to_dict(filepath_raw_xml)\n self._kv_dict_printconv = _xml_file_to_dict(filepath_printconv_xml)\n self._exiftool_predicate_iris = set(self._kv_dict_raw.keys()) | set(self._kv_dict_printconv.keys())\n\n # Start by mapping some IRIs that affect the base observable object.\n self.map_raw_and_printconv_iri(\"http://ns.exiftool.ca/File/1.0/MIMEType\")\n\n # Determine slug by MIME type.\n self.oo_slug = \"file-\" # The prefix \"oo_\" means generic observable object.\n if self.mime_type == \"image/jpeg\":\n self.oo_slug = \"picture-\"\n else:\n _logger.warning(\"TODO - MIME type %r not yet implemented.\" % mime_type)\n\n # Access observable object to instantiate it with the oo_slug value.\n _ = self.n_observable_object\n\n # Finish special case MIME type processing left undone by map_raw_and_printconv_iri.\n if not self.mime_type is None:\n self.graph.add((\n self.n_content_data_facet,\n NS_UCO_OBSERVABLE.mimeType,\n rdflib.Literal(self.mime_type)\n ))\n # Define the raster picture facet depending on MIME type.\n mime_type_to_picture_type = {\n \"image/jpeg\": \"jpg\"\n }\n if self.mime_type in mime_type_to_picture_type:\n l_picture_type = rdflib.Literal(mime_type_to_picture_type[self.mime_type])\n self.graph.add((\n self.n_raster_picture_facet,\n NS_UCO_OBSERVABLE.pictureType,\n l_picture_type\n ))\n\n # Create independent sorted copy of IRI set, because this iteration loop will mutate the set.\n sorted_exiftool_predicate_iris = sorted(self._exiftool_predicate_iris)\n for exiftool_predicate_iri in sorted_exiftool_predicate_iris:\n self.map_raw_and_printconv_iri(exiftool_predicate_iri)\n\n # Derive remaining objects.\n if not self._exif_dictionary_dict is None:\n _ = 
self.n_exif_dictionary_object\n if not self._n_location_object is None:\n _ = self.n_relationship_object_location\n\n def pop_iri(self, exiftool_iri):\n \"\"\"\n Returns: (raw_object, printconv_object) from input graphs.\n\n This function has a side effect of mutating the internal variables:\n * self._kv_dict_raw\n * self._kv_dict_raw\n * self._exiftool_predicate_iris\n The exiftool_iri is removed from each of these dicts and set.\n \"\"\"\n assert isinstance(exiftool_iri, str)\n v_raw = None\n v_printconv = None\n if exiftool_iri in self._exiftool_predicate_iris:\n self._exiftool_predicate_iris -= {exiftool_iri}\n if exiftool_iri in self._kv_dict_raw:\n v_raw = self._kv_dict_raw.pop(exiftool_iri)\n if exiftool_iri in self._kv_dict_printconv:\n v_printconv = self._kv_dict_printconv.pop(exiftool_iri)\n return (v_raw, v_printconv)\n\n @property\n def exif_dictionary_dict(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._exif_dictionary_dict is None:\n self._exif_dictionary_dict = dict()\n return self._exif_dictionary_dict\n\n @property\n def graph(self):\n \"\"\"\n No setter provided.\n \"\"\"\n return self._graph\n\n @property\n def mime_type(self):\n return self._mime_type\n\n @mime_type.setter\n def mime_type(self, value):\n assert isinstance(value, str)\n self._mime_type = value\n return self._mime_type\n\n @property\n def n_camera_object(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._n_camera_object is None:\n self._n_camera_object = rdflib.URIRef(self.ns_base[\"device-\" + local_uuid.local_uuid()])\n self.graph.add((\n self._n_camera_object,\n NS_RDF.type,\n NS_UCO_OBSERVABLE.CyberItem\n ))\n return self._n_camera_object\n\n @property\n def n_camera_object_device_facet(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._n_camera_object_device_facet is None:\n self._n_camera_object_device_facet = rdflib.BNode()\n self.graph.add((\n self._n_camera_object_device_facet,\n NS_RDF.type,\n NS_UCO_OBSERVABLE.Device\n 
))\n self.graph.add((\n self.n_camera_object,\n NS_UCO_CORE.facets,\n self._n_camera_object_device_facet\n ))\n return self._n_camera_object_device_facet\n\n @property\n def n_content_data_facet(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._n_content_data_facet is None:\n self._n_content_data_facet = rdflib.BNode()\n self.graph.add((\n self._n_content_data_facet,\n NS_RDF.type,\n NS_UCO_OBSERVABLE.ContentData\n ))\n self.graph.add((\n self.n_observable_object,\n NS_UCO_CORE.facets,\n self._n_content_data_facet\n ))\n return self._n_content_data_facet\n\n @property\n def n_exif_dictionary_object(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._n_exif_dictionary_object is None:\n self._n_exif_dictionary_object = controlled_dictionary_object_to_node(self.graph, self.exif_dictionary_dict)\n self.graph.add((\n self.n_exif_facet,\n NS_UCO_OBSERVABLE.exifData,\n self._n_exif_dictionary_object\n ))\n return self._n_exif_dictionary_object\n\n @property\n def n_exif_facet(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._n_exif_facet is None:\n self._n_exif_facet = rdflib.BNode()\n self.graph.add((\n self._n_exif_facet,\n NS_RDF.type,\n NS_UCO_OBSERVABLE.EXIF\n ))\n self.graph.add((\n self.n_observable_object,\n NS_UCO_CORE.facets,\n self._n_exif_facet\n ))\n return self._n_exif_facet\n\n @property\n def n_file_facet(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._n_file_facet is None:\n self._n_file_facet = rdflib.BNode()\n self.graph.add((\n self._n_file_facet,\n NS_RDF.type,\n NS_UCO_OBSERVABLE.File\n ))\n self.graph.add((\n self.n_observable_object,\n NS_UCO_CORE.facets,\n self._n_file_facet\n ))\n return self._n_file_facet\n\n @property\n def n_location_object(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._n_location_object is None:\n self._n_location_object = rdflib.URIRef(self.ns_base[\"location-\" + local_uuid.local_uuid()])\n self.graph.add((\n self._n_location_object,\n 
NS_RDF.type,\n NS_UCO_LOCATION.Location\n ))\n return self._n_location_object\n\n @property\n def n_location_object_latlong_facet(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._n_location_object_latlong_facet is None:\n self._n_location_object_latlong_facet = rdflib.BNode()\n self.graph.add((\n self._n_location_object_latlong_facet,\n NS_RDF.type,\n NS_UCO_LOCATION.LatLongCoordinates\n ))\n self.graph.add((\n self.n_location_object,\n NS_UCO_CORE.facets,\n self._n_location_object_latlong_facet\n ))\n return self._n_location_object_latlong_facet\n\n @property\n def n_observable_object(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._n_observable_object is None:\n self._n_observable_object = rdflib.URIRef(self.ns_base[self.oo_slug + local_uuid.local_uuid()])\n # TODO Prepare list of more interesting types on adoption of the UCO release providing the ObservableObject subclass hierarchy.\n self.graph.add((\n self._n_observable_object,\n NS_RDF.type,\n NS_UCO_OBSERVABLE.CyberItem\n ))\n return self._n_observable_object\n\n @property\n def n_raster_picture_facet(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._n_raster_picture_facet is None:\n self._n_raster_picture_facet = rdflib.BNode()\n self.graph.add((\n self._n_raster_picture_facet,\n NS_RDF.type,\n NS_UCO_OBSERVABLE.RasterPicture\n ))\n self.graph.add((\n self.n_observable_object,\n NS_UCO_CORE.facets,\n self._n_raster_picture_facet\n ))\n return self._n_raster_picture_facet\n\n @property\n def n_relationship_object_location(self):\n \"\"\"\n Initialized on first access.\n \"\"\"\n if self._n_relationship_object_location is None:\n self._n_relationship_object_location = rdflib.URIRef(self.ns_base[\"relationship-\" + local_uuid.local_uuid()])\n self.graph.add((\n self._n_relationship_object_location,\n NS_RDF.type,\n NS_UCO_CORE.Relationship\n ))\n self.graph.add((\n self._n_relationship_object_location,\n NS_UCO_CORE.source,\n self.n_location_object\n ))\n 
def main():
    """
    CLI entry point: parse arguments, map the ExifTool RDF output files into
    a UCO graph, and serialize it to the requested output file/format.
    """
    local_uuid.configure()

    args = argument_parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    NS_BASE = rdflib.Namespace(args.base_prefix)
    out_graph = rdflib.Graph()

    # Bind readable prefixes for every namespace that may appear in the
    # output, so the serialized graph is human-reviewable.
    out_graph.namespace_manager.bind("exiftool-Composite", NS_EXIFTOOL_COMPOSITE)
    out_graph.namespace_manager.bind("exiftool-et", NS_EXIFTOOL_ET)
    out_graph.namespace_manager.bind("exiftool-ExifTool", NS_EXIFTOOL_EXIFTOOL)
    out_graph.namespace_manager.bind("exiftool-System", NS_EXIFTOOL_SYSTEM)
    out_graph.namespace_manager.bind("exiftool-File", NS_EXIFTOOL_FILE)
    out_graph.namespace_manager.bind("exiftool-GPS", NS_EXIFTOOL_GPS)
    out_graph.namespace_manager.bind("exiftool-IFD0", NS_EXIFTOOL_IFD0)
    out_graph.namespace_manager.bind("exiftool-ExifIFD", NS_EXIFTOOL_EXIFIFD)
    out_graph.namespace_manager.bind("exiftool-Nikon", NS_EXIFTOOL_NIKON)
    out_graph.namespace_manager.bind("exiftool-PreviewIFD", NS_EXIFTOOL_PREVIEWIFD)
    out_graph.namespace_manager.bind("exiftool-InteropIFD", NS_EXIFTOOL_INTEROPIFD)
    out_graph.namespace_manager.bind("exiftool-IFD1", NS_EXIFTOOL_IFD1)
    out_graph.namespace_manager.bind("kb", NS_BASE)
    out_graph.namespace_manager.bind("uco-core", NS_UCO_CORE)
    out_graph.namespace_manager.bind("uco-location", NS_UCO_LOCATION)
    out_graph.namespace_manager.bind("uco-observable", NS_UCO_OBSERVABLE)
    out_graph.namespace_manager.bind("uco-types", NS_UCO_TYPES)
    out_graph.namespace_manager.bind("uco-vocabulary", NS_UCO_VOCABULARY)

    # Run the mapping; triples are accumulated into out_graph as a side effect.
    exiftool_rdf_mapper = ExifToolRDFMapper(out_graph, NS_BASE)
    exiftool_rdf_mapper.map_raw_and_printconv_rdf(args.raw_xml, args.print_conv_xml)

    #_logger.debug("args.output_format = %r." % args.output_format)
    # Explicit --output-format wins; otherwise guess from the file extension.
    output_format = args.output_format or guess_graph_format(args.out_graph)

    out_graph.serialize(destination=args.out_graph, format=output_format)
+{"seq_id":"486953194","text":"game_type = 'input_output'\n\nparameter_list = [ ['$i1','string'],['$y0','int'] ]\n\ntuple_list = [\n\t['KnR_1-5-2b_',[None,4]]\n]\n\nglobal_code_template = '''\\\nd\t#include <stdio.h>\nx\t#include \ndx\t\ndx\t/* count characters in input; 2nd version */\n'''\n\nmain_code_template = '''\\\ndx\tdouble nc;\ndx\t\ndx\tfor (nc = 0; getchar() != EOF; ++nc)\ndx\t\t;\ndx\tprintf(\"%.0f\\\\n\", nc);\n'''\n\nargv_template = ''\n\nstdin_template = '''\\\n$i1\nb\n'''\n\nstdout_template = '''\\\n$y0\n'''\n\n","sub_path":"Assignment2/ttt/archive/_old/KnR/KnR_1-5-2b.py","file_name":"KnR_1-5-2b.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
import time
import serial
import sys,tty,termios

# Serial link to the XBee-attached car controller.
serdev = '/dev/ttyUSB0'
s = serial.Serial(serdev, 9600)

# Direction word -> heading flag expected by the /parking/run command.
DIRECTION_FLAG = {"west": "1", "east": "0"}

# Improvements over the original: `while(1)` -> `while True`, dropped the
# redundant str() around input() (input already returns str), replaced the
# if/elif direction ladder with a dict lookup, and deleted the large block
# of commented-out legacy keyboard-control code.
while True:
    # Expected input: "<d1> <d2> <direction>", e.g. "10 20 west".
    init_position = input("Enter your position:(d1 d2 direction) ").split()

    # Note the d2/d1 order swap: the controller expects "d2 d1 flag".
    command = "/parking/run " + init_position[1] + " " + init_position[0] + " "
    flag = DIRECTION_FLAG.get(init_position[2])
    # As in the original, an unknown direction sends the command without a
    # flag or terminator -- TODO confirm the controller tolerates this.
    if flag is not None:
        command = command + flag + " \n"
    print(command)
    s.write(command.encode())
    time.sleep(2)
def test(request, silent=True):
    """Pytest entry point: simulate the AXI matrix-add design and require
    that the testbench reports a PASSED verification line."""
    veriloggen.reset()

    simtype = request.config.getoption('--sim')
    outfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'

    rslt = matrix_add_axi_delayed.run(a_shape, b_shape,
                                      a_dtype, b_dtype, c_dtype,
                                      par, axi_datawidth, silent,
                                      filename=None, simtype=simtype,
                                      outputfile=outfile)

    verify_lines = [line for line in rslt.splitlines() if line.startswith('# verify:')]
    assert verify_lines[0] == '# verify: PASSED'
def get_log_nodes(dataloader_name, image):
    """Return existing LoadingLog nodes matching this exact Docker image
    (repository name, tag, and content hash)."""
    tag_parts = image.tags[0].split(":")
    matcher = py2neo.NodeMatcher(get_graph())
    matches = matcher.match(
        "LoadingLog",
        dockerhub_image_name=tag_parts[0],
        dockerhub_image_tag=tag_parts[1],
        dockerhub_image_hash=image.id,
    )
    return list(matches)
rid of old {}\".format(name))\n try:\n c.kill()\n except docker.errors.APIError:\n pass\n c.remove()\n except docker.errors.NotFound:\n pass\n\n\ndef get_sorted_data_sources(datasources_unsorted):\n \"\"\"Returns the data sources sorted by dependecies and filters out non environemnt relevant datasources\n \n \"\"\"\n sorted_datasources = []\n try:\n current_env = os.environ[\"ENV\"]\n except:\n current_env = \"DEV\"\n if config.DATALOADER_LIST:\n # filter out non listed datasources\n datasources_unsorted = [\n datasource\n for datasource in datasources_unsorted\n if datasource[\"name\"] in config.DATALOADER_LIST\n ]\n if config.DATALOADER_SINGLE:\n datasources_unsorted = [\n datasource\n for datasource in datasources_unsorted\n if datasource[\"name\"] == config.DATALOADER_SINGLE\n ]\n return datasources_unsorted\n\n def add_data_source(datasource):\n for index, dep in enumerate(datasource[\"dependencies\"]):\n add_data_source(\n next(item for item in datasources_unsorted if item[\"name\"] == dep)\n )\n if (\n next(\n (\n item\n for item in sorted_datasources\n if item[\"name\"] == datasource[\"name\"]\n ),\n None,\n )\n is None\n and current_env not in datasource[\"exlude_in_env\"]\n ):\n sorted_datasources.append(datasource)\n\n for datasource in datasources_unsorted:\n add_data_source(datasource)\n return sorted_datasources\n\n\ndef pull_image(image_name):\n log.info(\"Pull image '{}'...\".format(image_name))\n try:\n docker_client.images.remove(image_name)\n except docker.errors.ImageNotFound:\n pass\n docker_client.images.pull(image_name, tag=\"latest\")\n log.info(\"...image '{}' pulled.\".format(image_name))\n\n\ndef absolute_volume_path(volumes):\n absolute_volumes = {}\n for vol, mount in volumes.items():\n if vol.startswith(\".\"):\n absolute_volumes[os.path.abspath(vol)] = mount\n return absolute_volumes\n\n\ndef run_datasource_containers():\n\n create_log_dir()\n # gather env vars\n env_vars = {}\n try:\n env_vars[\"ENV\"] = os.environ[\"ENV\"]\n 
except:\n env_vars[\"ENV\"] = \"DEV\"\n if config.NEO4J_URL is not None:\n env_vars[\"GC_NEO4J_URL\"] = config.NEO4J_URL\n if config.NEO4J_USER is not None:\n env_vars[\"GC_NEO4J_USER\"] = config.NEO4J_USER\n if config.NEO4J_PASSWORD is not None:\n env_vars[\"GC_NEO4J_PASSWORD\"] = config.NEO4J_PASSWORD\n env_vars.update(config.OTHER_ENV_IN_DOCKER_CONTAINERS.items())\n\n for datasource in get_sorted_data_sources(DataSourcesRegistry):\n\n envs = env_vars.copy()\n if \"envs\" in datasource:\n envs.update(datasource[\"envs\"])\n log.info(\"###########################\".format(datasource[\"dockerimage\"]))\n container_name = \"ML_{}\".format(datasource[\"name\"])\n log.info(\"Run Datasource container '{}'...\".format(datasource[\"dockerimage\"]))\n\n clean_up_container(container_name)\n pull_image(datasource[\"dockerimage\"])\n\n image = docker_client.images.get(datasource[\"dockerimage\"])\n log.info(\"'{}' using image '{}'\".format(image.tags[0], image.id))\n log_nodes = get_log_nodes(datasource[\"name\"], image)\n if log_nodes and not config.FORCE_RERUN_PASSED_DATALOADERS:\n # we skip this dataloader as it allready did a run\n log.info(\n \"[{}]: Skip Dataloader. 
Did allready run at {}\".format(\n datasource[\"name\"], log_nodes[0][\"loading_finished_at\"]\n )\n )\n continue\n\n container = docker_client.containers.run(\n image,\n environment=envs,\n detach=True,\n name=container_name,\n volumes=absolute_volume_path(datasource[\"volumes\"]),\n )\n log_file_path = os.path.join(\n config.LOADING_LOGS_DIRECTORY, \"{}.log\".format(datasource[\"name\"])\n )\n for l in container.logs(\n stream=True, timestamps=True, follow=True, stderr=True, stdout=True\n ):\n log.info(\"[{}]: {}\".format(datasource[\"name\"], l.decode()))\n log_file = open(log_file_path, \"a\")\n log_file.write(l.decode())\n log_file.close()\n res = container.wait()\n\n log_file = open(log_file_path, \"a\")\n log_file.write(\"================================================\")\n log_file.write(\"EXITED with status: {}\".format(res))\n log_file.close()\n log.info(\"[{}]: Finished with Exit Code:\".format(res[\"StatusCode\"]))\n if res[\"StatusCode\"] != 0 and not config.CONTINUE_WHEN_ONE_DATALOADER_FAILS:\n log.error(\"[{}]: Cancel Motherlode:\".format(datasource[\"name\"]))\n exit(res[\"StatusCode\"])\n else:\n create_log_node(dataloader_name=datasource[\"name\"], image=image)\n\n\nif __name__ == \"__main__\":\n run_datasource_containers()\n # print(config.NEO4J_URL)\n","sub_path":"motherlode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"197540553","text":"import os\nimport time\nimport random\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom pathlib import Path\nfrom tqdm import tqdm\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, Dataset, Subset\n#from torch.utils.tensorboard import SummaryWriter\n\n# fixed the seed for reproducibility\nseed = 999\nrandom.seed(seed)\nnp.random.seed(seed)\ntorch.manual_seed(seed)\ntorch.backends.cudnn.deterministic = True\n\n# some constants\nCHECK = False # True #\nROOT_DIR = Path(os.getcwd()) / 'assets'\n\n# hyperparameters\nMAX_EPOCH = 1000 # 2000 # 682 #\n\ndef mean_absolute_precentage_error(y_pred, y_target):\n y_target = y_target.view_as(y_pred)\n e = torch.abs(y_target - y_pred) / torch.abs(y_target)\n return 100.0 * torch.mean(e).item()\n\nclass TrainData(Dataset):\n def __init__(self, csv_path):\n super().__init__()\n self.anns = pd.read_csv(csv_path).to_dict('records')\n\n def __len__(self):\n '''Return the number of sample\n '''\n return len(self.anns)\n\n def __getitem__(self, idx):\n '''Map index 'idx' to a sample, i.e., a design case and its performances\n Args:\n idx: (int) index\n Return:\n dcase: (torch.FloatTensor) design case\n kpi : (torch.FloatTensor) performance\n '''\n ann = self.anns[idx]\n dcase = [ann['AV1'],\n ann['AV2'],\n ann['AV3'],\n ann['DD_factor'],\n ann['DP_rule']\n ]\n dcase = torch.tensor(dcase).float()\n kpi = [ann['Uti_A'],\n ann['Uti_B'],\n ann['Uti_C'],\n ann['Uti_D']] # [ann['avgWIP']] # [ann['avgTardi']] #\n kpi = torch.tensor(kpi).float()\n\n return dcase, kpi\n\nclass TestData(TrainData):\n def __init__(self, csv_path):\n super().__init__(csv_path)\n\n# Do some check for TrainData\nif CHECK is True:\n # train_dir = Path(os.getcwd() + '/assets/training_data')\n # data = TrainData(train_dir / 'train_labels.csv')\n data = TrainData(ROOT_DIR / 'train_labels.csv')\n 
print('========================= Check TrainData ==========================')\n print(len(data))\n dcase, kpi = data[-1]\n print(dcase.size())\n print(kpi.size())\n print('------'); print('case: '); print(dcase)\n print('------------'); print('performance: '); print(kpi)\n print('====================================================================')\n print()\n\n\n\nclass Net(nn.Module):\n def __init__(self):\n '''Defines parameters (what layers you gonna use)\n '''\n super().__init__()\n self.fc = nn.Linear(3, 1, bias=True)\n self.regression1 = nn.Sequential(\n nn.Linear(5, 55), nn.ReLU(), nn.Linear(55, 4)#, nn.Sigmoid()\n )\n self.regression2 = nn.Sequential(\n nn.Linear(5, 6), nn.ReLU(), nn.Linear(6, 7), nn.ReLU(),\n nn.Linear(7, 6), nn.ReLU(), nn.Linear(6,1)#, nn.Sigmoid()\n )\n self.regression3 = nn.Sequential(\n nn.Linear(5, 12), nn.ReLU(), nn.Linear(12, 14), nn.ReLU(),\n nn.Linear(14, 8), nn.ReLU(), nn.Linear(8,1)#, nn.Sigmoid()\n )\n self.regression4 = nn.Sequential(\n nn.Linear(5, 6), nn.ReLU(), nn.Linear(6, 7), nn.ReLU(),\n nn.Linear(7, 8), nn.ReLU(), nn.Linear(8, 7), nn.ReLU(),\n nn.Linear(7, 6), nn.ReLU(), nn.Linear(6, 1)#, nn.Sigmoid()\n )\n self.regression5 = nn.Sequential(\n nn.Linear(5, 20), nn.ReLU(), nn.Linear(20, 21), nn.ReLU(),\n nn.Linear(21, 22), nn.ReLU(), nn.Linear(22, 11), nn.ReLU(),\n nn.Linear(11, 6), nn.ReLU(), nn.Linear(6, 1)#, nn.Sigmoid()\n )\n self.regression6 = nn.Sequential(\n nn.Linear(5, 66), nn.ReLU(), nn.Linear(66, 77), nn.ReLU(),\n nn.Linear(77, 66), nn.ReLU(), nn.Linear(66,1)#, nn.Sigmoid()\n )\n\n def forward(self, dcase_b):\n '''Define how layers are interact, that is, the forward function.\n Args:\n dcase_b: design cases (mini-batch), shaped [N, 3]\n Return:\n pred_b: the predictions (mini-batch) of the performances, shaped [N, 1]\n '''\n pred_b = self.regression1(dcase_b)\n # pred_b = self.regression1(dcase_b)\n # pred_b = self.fc(dcase_b)#.flatten() # [N, 1] -> [N,]\n return pred_b\n\n# Do some check\nif CHECK is True:\n 
print('========================= Check DataLoader ==========================')\n loader = DataLoader(data, batch_size=10)\n dcase_b, target_b = next(iter(loader))\n print(dcase_b.size())\n print(target_b.size())\n\n # Do a forward\n device = 'cpu'\n model = Net().to(device)\n criterion = nn.L1Loss()\n\n dcase_b = dcase_b.to(device)\n target_b = target_b.to(device)\n pred_b = model(dcase_b)\n loss = criterion(pred_b, target_b)\n print(loss)\n print('====================================================================')\n print()\n\n\nclass Trainer:\n def __init__(self, log_dir):\n '''Initialize the varibles for training\n Args:\n log_dir: (pathlib.Path) the direction used for logging\n '''\n self.log_dir = log_dir\n\n # Datasets and dataloaders\n data = TrainData(ROOT_DIR / 'train_labels.csv')\n # 1. Split the whole training data into train and valid (validation)\n pivot = len(data) * 7 // 10\n self.train_set = Subset(data, range(0, pivot))\n self.valid_set = Subset(data, range(pivot, len(data)))\n # 2. 
Make the corresponding dataloaders\n self.train_loader = DataLoader(self.train_set, 50, shuffle=True, num_workers=2)\n self.valid_loader = DataLoader(self.valid_set, 10, shuffle=False, num_workers=2)\n\n # model, loss function, optimizer\n self.device = 'cpu'\n self.model = Net().to(self.device)\n self.criterion = nn.L1Loss() # nn.MSELoss()\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=5e-3)\n self.max_epoch = MAX_EPOCH #5 #20\n\n def run(self):\n metrics = {'train_loss': [], 'valid_loss': [], 'train_acc': [], 'valid_acc': []}\n\n for self.epoch in tqdm(range(self.max_epoch)):\n self.train_loss, self.train_acc = self.train() # train 1 epoch\n self.valid_loss, self.valid_acc = self.valid() # valid 1 epoch\n\n # print(f'Epoch {self.epoch:03d}:')\n # print('train loss: {} acc: {} %'.format(self.train_loss.item(), self.train_acc))\n # print('valid loss: {} acc: {} %'.format(self.valid_loss.item(), self.valid_acc)); print('')\n metrics['train_loss'].append(self.train_loss.item()); metrics['train_acc'].append(self.train_acc)\n metrics['valid_loss'].append(self.valid_loss.item()); metrics['valid_acc'].append(self.valid_acc)\n\n # Save the parameters(weights) of the model to disk\n self.weights = self.model.state_dict()\n torch.save(self.weights, self.log_dir / 'model_weights.pth')\n pd.DataFrame(metrics).to_excel(self.log_dir / 'train_log.xlsx')\n # print('Finished Training')\n\n # Plot the loss curve against epoch\n fig, ax = plt.subplots(1, 1, sharex=True)\n ax.plot(range(self.max_epoch), np.array(metrics['train_loss']), label='train_loss')\n ax.plot(range(self.max_epoch), np.array(metrics['valid_loss']), label='valid_loss')\n ax.set_ylabel(\"loss\")\n ax.set_xlabel(\"epoch\")\n ax.legend(loc='upper right')\n fig.savefig(log_dir / 'lossCurve.png')\n\n def train(self):\n self.model.train()\n step = 0; losses = 0; mape_total = 0\n for inp_b, tgt_b in (iter(self.train_loader)):\n inp_b = inp_b.to(self.device)\n tgt_b = tgt_b.to(self.device)\n\n # Standard 
steps of training flow\n self.optimizer.zero_grad()\n pred_b = self.model(inp_b)\n loss = self.criterion(pred_b, tgt_b)\n loss.backward()\n self.optimizer.step()\n\n mape = mean_absolute_precentage_error(pred_b, tgt_b)\n\n # To compute the average loss\n mape_total += mape\n losses += loss\n step += 1\n return (losses / step), 100 - (mape_total / step)\n\n @torch.no_grad()\n def valid(self):\n self.model.eval()\n step = 0; losses = 0; mape_total = 0\n for inp_b, tgt_b in (iter(self.valid_loader)):\n inp_b = inp_b.to(self.device)\n tgt_b = tgt_b.to(self.device)\n\n # Just do forwarding\n pred_b = self.model(inp_b)\n loss = self.criterion(pred_b, tgt_b)\n mape = mean_absolute_precentage_error(pred_b, tgt_b)\n\n # To compute the average loss\n mape_total += mape\n losses += loss\n step += 1\n return (losses / step), 100 - (mape_total / step)\n\n# Do some check\nif CHECK is True:\n print(\"========================= Let's do Training ==========================\")\n log_dir = Path('./runs/') / f'{datetime.now():%b.%d %H:%M:%S}'\n log_dir.mkdir(parents=True)\n Trainer(log_dir).run()\n print('======================================================================')\n print()\n\n test_data = TestData(ROOT_DIR / 'test_labels.csv')\n print('========================= Check TestData ==========================')\n print(len(data))\n dcase, tgt = data[-1]\n print(dcase.size())\n print(tgt.size())\n print('------'); print('case: '); print(dcase)\n print('------------'); print('performance: '); print(tgt)\n print('====================================================================')\n print()\n\n\nclass Tester:\n def __init__(self, csv_path, model, criterion, device='cpu'):\n self.test_data = TestData(Path(csv_path))\n self.data_loader = DataLoader(self.test_data, batch_size=10)\n # hyperparameters\n self.model = model\n self.criterion = criterion\n self.device = device\n # statistics\n self.loss, self.mape = 0, 0\n\n def run(self):\n with torch.no_grad():\n self.model.eval()\n 
step, losses, mape_total = 0, 0 ,0\n for inp_b, tgt_b in (iter(self.data_loader)):\n inp_b = inp_b.to(self.device)\n tgt_b = tgt_b.to(self.device)\n\n # Just do forwarding\n pred_b = self.model(inp_b)\n loss = self.criterion(pred_b, tgt_b)\n loss = loss.cpu()\n loss = loss.detach().numpy()\n # MAPE\n mape = mean_absolute_precentage_error(pred_b, tgt_b)\n\n # To compute the average loss and maperror\n mape_total += mape\n losses += loss\n step += 1\n self.loss = (losses / step)\n self.mape = (mape_total / step)\n\n\nif __name__ == '__main__':\n # Do training\n if CHECK is False:\n print(\"========================= Let's do Training ==========================\")\n log_dir = Path('./runs/') / f'{datetime.now():%b.%d %H:%M:%S}'\n log_dir.mkdir(parents=True)\n\n t0 = time.process_time()\n\n trainer = Trainer(log_dir)\n trainer.run()\n\n t_end = time.process_time()\n print('====================')\n print('* Training results *')\n print('====================')\n print('Train Loss: {} acc: {} %'.format(trainer.train_loss.item(), trainer.train_acc))\n print('Valid Loss: {} acc: {} %'.format(trainer.valid_loss.item(), trainer.valid_acc))\n print('Cost {} seconds.'.format(t_end - t0)); print()\n print()\n\n # # prepare the testing model\n # device = 'cpu'\n # metamodel = Net().to(device)\n # metamodel.load_state_dict(torch.load(log_dir / 'model_weights.pth'))\n # criterion = nn.L1Loss()\n #\n # # Do testing\n # print('===========')\n # print('* Testing *')\n # print('===========')\n # tester = Tester(ROOT_DIR / 'test_labels.csv', metamodel, criterion)\n # t0 = time.process_time()\n # tester.run()\n # t_end = time.process_time()\n # print('Testing Loss: ', tester.loss)\n # print(\"Acc : {} %\".format(100 - tester.mape))\n # print('Cost {} seconds.'.format(t_end - t0)); 
print()\n","sub_path":"metamodel_with_ann.py","file_name":"metamodel_with_ann.py","file_ext":"py","file_size_in_byte":12307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"549971347","text":"\n\nprint(\"Type 1 for walk forward, 0 to walk backward and type E to exit\")\nlist = []\nwhile(1):\n v = input()\n if v == 'E':\n break\n else:\n list.append(v)\n\nprint(len(list))\n\nB = list.count('0')\nF = list.count('1')\nprint(F - B)\n\n\n\n\n\n\n\n","sub_path":"Ex01/RobotRL2.py","file_name":"RobotRL2.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"118247207","text":"\"\"\" Main Access to VT calls. Should make for sinple access to VT with with a quick interface to the VT system.\n\"\"\"\n\n__author__ = 'obrien@webroot.com (David OBrien)'\n__name__ = 'VTI_API'\n__version__ = '2.1.0'\n\n\nfrom pyReq import require\nfrom VT import VTConfig\n#depends on:\nVTI = require('VT.VTI_API', '1.0.0')\nVTP = require('VT.VTPublic_API', '1.0.0')\nThreatEnv = require('Threader', '1.2.0')\nWSA_API = require('WSA_API', '1.0.0')\nSimpleLogger = require('Logger', '2.0.0')\n\nimport os\nimport time\nimport hashlib\nfrom functools import partial\n\nVTLogger = SimpleLogger('VTLog', 'debug')\n\nWSA = WSA_API(logger=VTLogger)\nVTI_API = VTI.VTI_API(logger=VTLogger)\n\nclass VT_API(object):\n def __init__(self, apiKey=VTConfig.API_KEY, maxRetryOn=None, timeOut=None, logger=None):\n \"\"\" Class for managing the VT API. works as the Gatway for the VTI and VTP APIs to provided a more seamless interface.\n\n \"\"\"\n self.filesDownloaded = []\n self.logger = SimpleLogger('VTLog', 'debug', exsistingLog=logger)\n self.VTI = VTI.VTI_API(logger=logger, apiKey=apiKey, timeOut=timeOut)\n self.VTP = VTP.VIP_API(logger=logger, apiKey=apiKey, maxRetryOnREST=maxRetryOn, timeOut=timeOut)\n self.env = ThreatEnv(timeout=5)\n self.WSA = WSA_API(logger=logger, env=self.env)\n\n self.storage = {}\n\n\n def searchVIT(self, query, searchLimit=None, callPerPage=None):\n \"\"\"\n\n :param query: Query to search\n :param searchLimit: Max results\n :return: All of the found Data\n \"\"\"\n resultHashes = []\n\n\n results = self.VTI.search(query, searchLimit=searchLimit or 1000000)\n nextPage = results['nextPage']\n resultHashes += results[\"hashes\"]\n\n if callPerPage:\n callPerPage(results[\"hashes\"])\n while nextPage:\n results = self.VTI.search()\n if callPerPage:\n callPerPage(results[\"hashes\"])\n nextPage = results['nextPage']\n resultHashes += results[\"hashes\"]\n\n return {\"hashes\":resultHashes, 
'totalFound':results['totalFound']}\n\n\n\n def searchVTP(self, hashes):\n \"\"\"\n\n :param hashes:\n :return:\n \"\"\"\n\n return self.VTP.search(hashes)\n\n\n def download(self, hashs, downloadLocation=None, callback=None, callPerFile=None, env=None, scanWSA=True):\n \"\"\"Function to download from VT\n\n :param hashs: List of hashes to download\n :param downloadLocation: Where to send the files to\n :param callback: something to call at the end.\n :param callPerFile: Something that will be called on each file\n :param env: A custom Threat envirement\n :param scanWSA: If they will be Scan with WSA\n :return:\n \"\"\"\n\n runOnEmpty = None\n def callWrapper():\n callback(self.filesDownloaded)\n\n if callback:\n runOnEmpty = callWrapper\n\n if not env:\n env = self.env\n env.addGroup('downLoadThreads', runOnEmpty=runOnEmpty)\n\n\n\n def getFile(hash, downloadLocation):\n\n fileLocation = self.VTI.download_file(hash)\n\n #scan with WSA\n if scanWSA:\n self.WSA.scan(fileLocation)\n #self.WSA.scanAndReport(fileLocation)\n\n\n # something that can be called for Each file that is downloaded\n if callPerFile:\n callPerFile(fileLocation)\n #what to do if a custom download location is provided.\n if downloadLocation:\n try:\n fullPath = downloadLocation + hash\n with open(fileLocation, 'rb') as temp:\n with open(fullPath, 'wb') as new:\n new.write(temp.read())\n\n self.filesDownloaded += [fullPath]\n\n #Ensure the download actually happens.\n removeCheck = False\n while not removeCheck:\n try:\n os.remove(fileLocation)\n removeCheck = True\n except PermissionError as e:\n self.logger.debug(e)\n\n\n except FileNotFoundError as e:\n self.logger.error(e)\n self.logger.stack()\n else:\n self.filesDownloaded += [fileLocation]\n\n\n\n\n\n for hash in hashs:\n #env.run(getFile, {\"hash\": hash, \"downloadLocation\": downloadLocation})\n env.setRun('downLoadThreads', getFile, {\"hash\": hash, \"downloadLocation\": downloadLocation})\n\n def searchStoreScan(self, query, 
downloadLocation, searchLimit=None):\n \"\"\" Tool to use a Quert to download files from VT by a query and scan them with WSA\n\n :param query:\n :param downloadLocation:\n :param searchLimit:\n :return: list of MD5 Hashes\n \"\"\"\n self.storage['MD5'] = []\n self.storage['done'] = False\n def finished(results):\n self.storage['done'] = True\n\n def getMD5(location):\n with open(location, mode='rb') as f:\n d = hashlib.md5()\n for buf in iter(partial(f.read, 1024), b''):\n d.update(buf)\n self.storage['MD5'] += [d.hexdigest()]\n\n\n def localDownload(hashes):\n self.download(hashes, downloadLocation=downloadLocation, callback=finished, callPerFile=getMD5)\n\n self.searchVIT(query, searchLimit=searchLimit, callPerPage=localDownload)\n\n while not self.storage['done']:\n time.sleep(1)\n\n return self.storage['MD5']\n\n def searchScan(self, query, searchLimit=1000000, callPerFile=None):\n \"\"\" Tool to download Search VT, Scan the files with WSA and retu the MD5 of those files.\n\n :param query: what to find in VT\n :param searchLimit: Max number of items to find from VT\n :return: list of MD5 Hashes\n \"\"\"\n self.storage['MD5'] = []\n self.storage['done'] = False\n def finished(results):\n self.storage['done'] = True\n\n def getMD5(location):\n with open(location, 'rb') as fAsB:\n hash = hashlib.md5(fAsB.read()).hexdigest()\n\n if callPerFile:\n try:\n callPerFile(hash)\n except Exception as e:\n self.logger.error(e)\n self.logger.stack()\n else:\n self.storage['MD5'] += [hash]\n os.remove(location)\n\n def localDownload(hashes):\n self.download(hashes, callback=finished, callPerFile=getMD5)\n\n self.searchVIT(query, searchLimit=searchLimit, callPerPage=localDownload)\n\n while not self.storage['done']:\n time.sleep(1)\n #self.WSA.isWatching = False\n return self.storage['MD5']\n\n def search(self, query, searchLimit=None, filter=None):\n \"\"\" General search method. Takes a VTI query and Returns the VT 2.0 Results. 
\n \n :param query: VTI query string \n \n :param searchLimit: max items to return \n \n :param filter: List of what fields to return from the VTP 2.0 results \n \n :return: the Filters VTP 2.0 results. \n \"\"\"\n self.storage['reports'] = []\n def getReport(hashes):\n if hashes:\n self.storage['reports'] = self.VTP.search(hashes, filter=filter)\n\n\n self.searchVIT(query, searchLimit=searchLimit, callPerPage=getReport)\n return self.storage['reports']\n\n def rescan(self, queryOrHashes, searchLimit=None):\n \"\"\"Rescan in VT 2.0 \n \n :param queryOrHashes: Either a List of hashes to Rescan or a query to search and rescan \n \n :param searchLimit: if a Query is given, the search limit for that query \n \n :return: List of dicts containing all items from the Respons. \n \"\"\"\n if type(queryOrHashes) == str:\n hashes = self.searchVIT(queryOrHashes, searchLimit=searchLimit)[\"hashes\"]\n elif type(queryOrHashes) == list:\n hashes = queryOrHashes\n else:\n print(type(queryOrHashes))\n raise TypeError('queryOrHashes not of required type str or list')\n\n return self.VTP.rescan(hashes)\n\n\n\n\"\"\"Everything from below here is Old and will be removed at some point.\n\n\n\n\n\"\"\"\n\ndef search(query, searchLimit=None, fileOut=None, logger=VTLogger):\n \"\"\"\n\n :param query:\n :return: results\n \"\"\"\n var = {}\n var['totalProcessed'] = 0\n var['finished'] = False\n var['searchResults'] = []\n var['currentPage'] = {}\n\n def GetResults():\n\n if'next_page' in var['currentPage']:\n logger.info('Processing Next Page')\n var['currentPage'] = VTI.searchVT(query, var['currentPage']['next_page'])\n else:\n logger.info('Processing First or Last Page')\n var['currentPage'] = VTI.searchVT(query)\n\n hashs = [var['currentPage']['hashes'][i:i + VTConfig.CALL_LIMIT['REPORTS']] for i in range(0, len(var['currentPage']['hashes']), VTConfig.CALL_LIMIT['REPORTS'])]\n for hashBlock in hashs:\n var['totalProcessed'] += len(hashBlock)\n if searchLimit:\n if var['totalProcessed'] 
>= searchLimit:\n newlimit = var['totalProcessed'] - searchLimit\n hashBlock = hashBlock[0:len(hashBlock) - newlimit:]\n var['finished'] = True\n\n if len(hashBlock) < 1:\n raise \"Has Block empty\"\n results = VTP.reports(hashBlock)\n #print(hashBlock)\n #results = hashBlock\n\n if (fileOut):\n reportFile = open(fileOut, 'a')\n for result in results:\n if result is not None and result['response_code'] > 0:\n reportFile.write(str(result) + \"\\n\")\n reportFile.close()\n var['searchResults'] = ['Results sent to file: ' + fileOut]\n\n else:\n var['searchResults']+=results\n\n if var['finished']:\n break\n\n while not var['finished']:\n GetResults()\n\n logger.info('Finished getting Query results')\n return var['searchResults']\n\n\ndef searchVTI(query, searchLimit=1000000, fileOut=None, nextPage=None, logger=VTLogger):\n \"\"\"Function to just search VTI\n\n :param query:\n :param searchLimit:\n :param fileOut:\n :return:\n \"\"\"\n var = {}\n\n var['totalProcessed'] = 0\n var['finished'] = False\n var['searchResults'] = []\n var['currentPage'] = {}\n if nextPage:\n var['currentPage']['next_page'] = nextPage\n\n def GetResults():\n\n if 'next_page' in var['currentPage']:\n logger.info('Processing Next Page')\n var['currentPage'] = VTI.searchVT(query, var['currentPage']['next_page'])\n\n else:\n logger.info('Processing First or Last Page')\n var['currentPage'] = VTI.searchVT(query)\n\n hashs = var['currentPage']['hashes']\n\n if not 'next_page' in var['currentPage'] or len(var['searchResults'])+len(hashs) > searchLimit:\n\n\n var['finished'] = True\n if fileOut:\n reportFile = open(fileOut, 'a')\n reportFile.write(hashs)\n reportFile.close()\n return ['Writing Page of Hashes to File']\n\n return hashs\n\n while not var['finished']:\n var['searchResults'] += GetResults()\n\n if len(var['searchResults']) > searchLimit:\n var['searchResults'] = var['searchResults'][:searchLimit]\n return {'hashes':var['searchResults'], 
'nextPage':var['currentPage']['next_page']}\n\n\ndef rescan(hashList, logger=VTLogger):\n \"\"\"\n\n :param hashList:\n :return:\n \"\"\"\n var = {}\n var['totalProcessed'] = 0\n var['finished'] = False\n hashs = [hashList[i:i + VTConfig.CALL_LIMIT['REPORTS']] for i in range(0, len(hashList), VTConfig.CALL_LIMIT['REPORTS'])]\n for hashBlock in hashs:\n var['totalProcessed'] += len(hashBlock)\n results = VTP.rescanInVT(hashBlock)\n logger.debug(results)\n\n\ndef download(hashs, downloadLocation=None, callback=None, env=ThreatEnv(), scanWSA=True, logger=VTLogger):\n \"\"\"Function to just download from VT\n\n :param hash:\n :param downloadLocation:\n :return:\n \"\"\"\n\n results = {\"return\": []}\n\n def getFile(hash, downloadLocation):\n\n fileLocation = VTI.download_file(hash)\n if scanWSA:\n WSA.scan(fileLocation)\n\n if downloadLocation:\n try:\n fullPath = downloadLocation + hash\n with open(fileLocation, 'rb') as temp:\n with open(fullPath, 'wb') as new:\n new.write(temp.read())\n os.remove(fileLocation)\n results[\"return\"] += [fullPath]\n\n except FileNotFoundError as e:\n logger.error(e)\n logger.stack()\n else:\n results[\"return\"] += [fileLocation]\n\n\n\n def callWrapper():\n callback(results[\"return\"])\n\n if callback:\n env.runOnEmpty = callWrapper\n for hash in hashs:\n env.run(getFile, {\"hash\": hash, \"downloadLocation\": downloadLocation})\n\n\n\n\n\n__export__ = VT_API\n","sub_path":"VT_API.py","file_name":"VT_API.py","file_ext":"py","file_size_in_byte":13334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"25696445","text":"from django.conf.urls import url\r\nfrom . import views\r\n\r\napp_name = 'shop'\r\n\r\nurlpatterns = [\r\n\r\n # index/dashboard\r\n url(r'^$', views.IndexView.as_view(), name='index'),\r\n\r\n # profile\r\n url(r'^profile/$', views.ProfileView.as_view(), name='profile'),\r\n\r\n # vehicle management\r\n url(r'^vehicle-mgt/$', views.VehicleView.as_view(), name='vehicle'),\r\n\r\n # vehicle -add\r\n url(r'^vehicle-add/$', views.VehicleAdd.as_view(), name='vehicle-add'),\r\n\r\n # vehicle -upd\r\n url(r'^vehicle-upd/(?P\\d+)/$', views.VehicleUpd.as_view(), name='vehicle-upd'),\r\n\r\n # vehicle -dlt\r\n url(r'^vehicle-dlt/(?P\\d+)/delete/$', views.VehicleDlt.as_view(), name='vehicle-dlt'),\r\n\r\n # customer management\r\n url(r'^customer-mgt/$', views.CustomerView.as_view(), name='customer'),\r\n\r\n # customer -add\r\n url(r'^customer-add/$', views.CustomerAdd.as_view(), name='cust-add'),\r\n\r\n # customer -update\r\n url(r'^customer-upd/(?P\\d+)/$', views.CustomerUpd.as_view(), name='cust-upd'),\r\n\r\n # customer -dlt\r\n url(r'^customer-dlt/(?P\\d+)/delete/$', views.CustomerDlt.as_view(), name='cust-dlt'),\r\n\r\n # technician management\r\n url(r'^technician-mgt/$', views.TechnicianView.as_view(), name='technician'),\r\n\r\n # technician -add\r\n url(r'^technician-add/$', views.TechnicianAdd.as_view(), name='tech-add'),\r\n\r\n # technician -upd\r\n url(r'^technician-upd/(?P\\d+)/$', views.TechnicianUpd.as_view(), name='tech-upd'),\r\n\r\n # technician -dlt\r\n url(r'^technician-dlt/(?P\\d+)/delete/$', views.TechnicianDlt.as_view(), name='tech-dlt'),\r\n\r\n # Supplier management\r\n url(r'^supplier-mgt/$', views.SupplierView.as_view(), name='supplier'),\r\n\r\n # supplier -add\r\n url(r'^supplier-add/$', views.SupplierAdd.as_view(), name='supplier-add'),\r\n\r\n # supplier -upd\r\n url(r'^supplier-upd/(?P\\d+)/$', views.SupplierUpd.as_view(), name='supplier-upd'),\r\n\r\n # supplier -dlt\r\n 
url(r'^supplier-dlt/(?P\\d+)/delete/$', views.SupplierDlt.as_view(), name='supplier-dlt'),\r\n\r\n # Jobs management\r\n url(r'^jobs-mgt/$', views.JobsView.as_view(), name='jobs'),\r\n\r\n # Jobs -add\r\n url(r'^jobs-add/$', views.JobsAdd.as_view(), name='jobs-add'),\r\n\r\n # Jobs -upd\r\n url(r'^jobs-upd/(?P\\d+)/$', views.JobsUpd.as_view(), name='jobs-upd'),\r\n\r\n # Jobs -dlt\r\n url(r'^jobs-dlt/(?P\\d+)/delete/$', views.JobsDlt.as_view(), name='jobs-dlt'),\r\n\r\n # Parts management\r\n url(r'^parts-mgt/$', views.PartsView.as_view(), name='parts'),\r\n\r\n # Parts -add\r\n url(r'^parts-add/$', views.PartsAdd.as_view(), name='parts-add'),\r\n\r\n # Parts -upd\r\n url(r'^parts-upd/(?P\\d+)/$', views.PartsUpd.as_view(), name='parts-upd'),\r\n\r\n # Parts -dlt\r\n url(r'^parts-dlt/(?P\\d+)/delete/$', views.PartsDlt.as_view(), name='parts-dlt'),\r\n\r\n\r\n\r\n\r\n]\r\n","sub_path":"djangow-master/FirstProj/shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"369607980","text":"from argparse import ArgumentParser\nfrom io import FileIO\nfrom os import path, remove\nfrom typing import Dict\n\nimport numpy\nfrom tqdm import tqdm\n\nfrom random import shuffle, seed\n\nfrom pathlib import Path\n\nimport multiprocessing as mp\n\n\ndef _get_id2value_from_csv(path_: str) -> Dict[str, str]:\n return dict(numpy.genfromtxt(path_, delimiter=\",\", dtype=(str, str))[1:])\n\n\ndef preprocess_csv(data_folder: str, holdout_name: str, is_shuffled: bool, c2s_output: FileIO):\n \"\"\"\n Preprocessing for files tokens.csv, paths.csv, node_types.csv\n \"\"\"\n id_to_token_data_path = path.join(data_folder, f\"tokens.csv\")\n id_to_type_data_path = path.join(data_folder, f\"node_types.csv\")\n id_to_paths_data_path = path.join(data_folder, f\"paths.csv\")\n path_contexts_path = path.join(data_folder, f\"path_contexts.csv\")\n\n id_to_paths_stored = _get_id2value_from_csv(id_to_paths_data_path)\n id_to_paths = {index: [n for n in nodes.split()] for index, nodes in id_to_paths_stored.items()}\n\n id_to_node_types = _get_id2value_from_csv(id_to_type_data_path)\n id_to_node_types = {index: node_type.rsplit(\" \", maxsplit=1)[0] for index, node_type in id_to_node_types.items()}\n\n id_to_tokens = _get_id2value_from_csv(id_to_token_data_path)\n\n with open(path_contexts_path, \"r\") as path_contexts_file:\n output_lines = []\n for line in path_contexts_file:\n label, *path_contexts = line.split()\n parsed_line = [label]\n for path_context in path_contexts:\n from_token_id, path_types_id, to_token_id = path_context.split(\",\")\n from_token, to_token = id_to_tokens[from_token_id], id_to_tokens[to_token_id]\n nodes = [id_to_node_types[p_] for p_ in id_to_paths[path_types_id]]\n parsed_line.append(\",\".join([from_token, \"|\".join(nodes), to_token]))\n output_lines.append(\" \".join(parsed_line + [\"\\n\"]))\n if is_shuffled:\n shuffle(output_lines)\n c2s_output.write(\"\".join(output_lines))\n\n\nif __name__ == \"__main__\":\n 
arg_parser = ArgumentParser()\n arg_parser.add_argument(\"data\", type=str)\n arg_parser.add_argument(\"output\", type=str)\n arg_parser.add_argument(\"holdout\", type=str)\n arg_parser.add_argument(\"--shuffle\", action=\"store_true\")\n args = arg_parser.parse_args()\n\n seed(7)\n\n output_c2s_path = path.join(args.output, f\"data.{args.holdout}.c2s\")\n\n if path.exists(output_c2s_path):\n remove(output_c2s_path)\n\n paths = list(Path(args.data).glob(\"*/\"))\n\n with open(output_c2s_path, \"a+\") as c2s_output, mp.Pool(4) as p:\n for project_path in tqdm(paths):\n try:\n preprocess_csv(path.join(str(project_path), \"kt\"), args.holdout, args.shuffle, c2s_output)\n\n except ValueError:\n print(project_path)\n","sub_path":"pycode2seq/training/astminer_to_code2seq.py","file_name":"astminer_to_code2seq.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"37083720","text":"\nfrom .socketwrapper import SocketWrapper\n\nfrom os import path\nfrom . import log\n\nclass FormFile:\n\n def __init__(self, soc : SocketWrapper, boundary : str):\n self.soc=soc\n self.boundary=bytes(\"--\"+boundary, \"ascii\")\n self.filename=\"\"\n self.name=\"\"\n self.mime=\"\"\n self.attrs={}\n self.has_next=False\n\n def parse_head(self):\n bound=self.soc.readline()\n if bound.endswith(\"--\\r\") : return False\n head=self.soc.readline()[:-1]\n head+=\"\\n\"+self.soc.readline()[:-1]\n head=head.split(\"\\n\")\n for h in head:\n tmp=h.split(\":\")\n key = tmp[0]\n val = tmp[1][1:]\n if val.find(\";\")>0:\n v={}\n for k in val.split(\";\"):\n k=k.lstrip()\n if k.find(\"=\")>0:\n v[k[:k.find(\"=\")]]=k[k.find(\"=\")+1:]\n else: v[k]=None\n val=v\n self.attrs[key]=val\n self.soc.readline()\n self.name=self.attrs[\"Content-Disposition\"][\"name\"][1:-1]\n self.filename=self.attrs[\"Content-Disposition\"][\"filename\"][1:-1]\n return True\n NO_BOUND=0\n SIMPLE_BOUND=1\n END_BOUND=2\n #\n #\n #\n def is_bound(self):\n tmp = self.soc.read(len(self.boundary))\n self.soc.rewind(tmp)\n if tmp != self.boundary:\n return FormFile.NO_BOUND\n if self.soc.read(2) == bytes(\"\\r\\n\", \"ascii\"): return FormFile.SIMPLE_BOUND\n self.soc.read(2)\n return FormFile.END_BOUND\n\n def parse_content(self):\n x=self.soc.read(1)\n out=x\n while True:\n x=self.soc.read(1)\n if x==bytes(\"\\n\", \"ascii\"):\n bound=self.is_bound()\n out+=x\n if bound==FormFile.END_BOUND:\n self.has_next=False\n return out\n elif bound == FormFile.SIMPLE_BOUND:\n self.has_next=True\n return out\n else: out+=x\n\n\n def save(self, p, forcePath=False):\n out=path.normpath(p+(\"\" if forcePath else \"/\"+self.filename))\n\n # si 'out\" est un dossier => filename = \"\" => Pas de fichier\n if self.filename==\"\":\n while True:\n self.soc.read(1)\n bound=self.is_bound()\n if bound == FormFile.END_BOUND:\n self.has_next=False\n return False\n elif bound == 
FormFile.SIMPLE_BOUND:\n self.has_next=True\n return True\n log.debug(\"Writing file to '\"+out+\"'\")\n with open(out, \"wb\") as f:\n x=self.soc.read(1)\n while True:\n f.write(x)\n x=self.soc.read(1)\n if x==bytes(\"\\n\", \"ascii\"):\n bound=self.is_bound()\n\n if bound==FormFile.END_BOUND:\n self.has_next=False\n return False\n elif bound == FormFile.SIMPLE_BOUND:\n self.has_next=True\n return True","sub_path":"formfile.py","file_name":"formfile.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"33432044","text":"import random\nfrom textblob import TextBlob\n\n# The text file for this project is called \"blackcat.txt\" \n# referring to the story by Edgar Allan Poe.\nwith open('blackcat.txt', 'r') as file:\n text = file.read()\n\nblob = TextBlob(text)\n#five word lists for our poetry\nadject = []\nnoun = []\nverb = []\nprep = []\ndeterm = []\n#loading lists based on word tags\nfor word,pos in blob.tags:\n if (pos == 'JJ'):\n adject.append(word)\n if (pos == 'NN'):\n noun.append(word)\n if (pos == 'VB'):\n verb.append(word)\n if (pos == 'IN'):\n prep.append(word)\n if (pos == 'DT'):\n determ.append(word)\n#Takes a determiner, a noun, two adjectives, two prepositions and a verb\n#to form four couplets for each poem\nfor i in range(4):\n dt = random.choice(determ)\n n1 = random.choice(noun)\n a1 = random.choice(adject)\n a2 = random.choice(adject)\n p1 = random.choice(prep)\n p2 = random.choice(prep)\n vb = random.choice(verb)\n print(dt + \" \" + n1 + \" \" + p1 + \" \" + a2 +\"\\n\" +\n vb + \" \" + p2 + \" \" + a1 + \"\\n\")\n","sub_path":"poem/blackcat.py","file_name":"blackcat.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"514633522","text":"# coding=utf-8\n# 石头剪刀布游戏\n\nimport random\nupc = random.randint(0, 2)\nplayer = input(\"请输入 剪刀(0) 石头(1) 布(2)\")\nplayer = int(player)\nif (player == 0 and upc == 2) or (player == 1 and upc == 0) or(player == 2 and upc == 1):\n print(\"upc = %d,你赢了\" % upc)\nelif player == upc:\n print(\"upc = %d,平局\" % upc)\nelse:\n print(\"upc = %d,你输了\" % upc)\n","sub_path":"1216/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"354350934","text":"import cv2\nimport numpy as np \n\n# open image and scale to reasonable size for viewing\nimg = cv2.imread(\"..\\Photos\\space_image.jpg\")\nimg = cv2.resize(img, (int(img.shape[1]*0.5), int(img.shape[0]*0.5)))\ncv2.imshow(\"Original image\", img)\n\n# rescale to half size\nwidth = int(img.shape[1] * 0.5)\nheight = int(img.shape[0] * 0.5)\nnew_dim = (width, height)\n\nimg_rescaled = cv2.resize(img, new_dim)\ncv2.imshow(\"Image scaled to half size\", img_rescaled)\n\n\n# draw rectangle at centre of image\nimg_drawn = np.copy(img)\nbottom_corner = (int(img_drawn.shape[1] * 0.5 - 200), int(img_drawn.shape[0] * 0.5 - 150))\ntop_corner = (int(img_drawn.shape[1] * 0.5 + 200), int(img_drawn.shape[0] * 0.5 + 150))\n\ncv2.rectangle(img_drawn, bottom_corner, top_corner, (255, 255, 255), thickness=cv2.BORDER_DEFAULT)\ncv2.imshow(\"Image with rectangle drawn at centre\", img_drawn)\n\n\n# pass through blur, greyscale and canny\nimg_processed = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nimg_processed = cv2.GaussianBlur(img_processed, (9,9), cv2.BORDER_DEFAULT)\n# probably not the best image choice for this....\nimg_processed = cv2.Canny(img_processed, 10, 20)\ncv2.imshow(\"Image after passing through greyscale, blur and canny\", img_processed)\n\n\n# rotate by 45 degrees\nrotMat = cv2.getRotationMatrix2D((img.shape[1]//2, img.shape[0]//2), 45, scale=1.0)\n\nimg_rotated = cv2.warpAffine(img, rotMat, (img.shape[1], img.shape[0]))\ncv2.imshow(\"Image rotated 45 degrees\", img_rotated)\n\ncv2.waitKey(0)\n","sub_path":"week4_challenge.py","file_name":"week4_challenge.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"72593227","text":"import cv2\nimport subprocess\nimport os\nimport numpy as np\n#file_name='/scratch/KurcGroup/mazhao/wsi_tiles_prediction/O3936-multires/96001_72001_4000_4000_0.25_1_SEG_0_pred.png'\n#save_path='/scratch/KurcGroup/mazhao/quip4_files/'+os.path.basename(os.path.dirname(file_name)+'/'+cell_type[stain_num])\n#save_path='.'\ndef get_poly(pair):\n thre_mode = 0\n print('len(pair)',len(pair))\n file_name,save_path,stain_index,argmax_name = pair\n print(pair)\n if argmax_name==None:\n thre_mode = 1\n else:\n print('argmax mode!')\n #file_name is the heatmap absolute path,save_path is the folder to save the result\n #if not os.path.isfile(os.path.join(save_path,file_id+'-features.csv')):\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n global_xy_offset= [int(x) for x in os.path.basename(file_name).split('--')[1].split('_')[0:2]]\n if thre_mode==1:\n img=cv2.imread(file_name,0)\n print('file_name',file_name)\n thre,img=cv2.threshold(img,210,255,cv2.THRESH_BINARY)\n else:\n\n argmax_map = np.load(argmax_name)\n binary_mask = np.zeros((argmax_map.shape[0],argmax_map.shape[1])).astype('uint8')\n binary_mask[argmax_map == stain_index+1]=255\n img = binary_mask\n #resizing to 2 times!!!!!!!!!!!!!!!!!\n #heat_map = cv2.imread(file_name)\n #img = cv2.resize(img,(heat_map.shape[1],heat_map.shape[0]),cv2.INTER_NEAREST)\n #cv2.imwrite(os.path.join(save_path,os.path.basename(file_name)[0:-10]+'-binary.png'),img)\n\n #print(np.max(img),img.shape)\n #poly = cv2.findContours(img.astype('uint8'), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n poly = cv2.findContours(img.astype('uint8'), cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\n contour,hia=poly\n num_contour=len(contour)\n file_id=os.path.basename(file_name)[0:-len('_1_SEG_argmax.png')]\n fid = open(os.path.join(save_path,file_id+'-features.csv'), 'w')\n fid.write('AreaInPixels,PhysicalSize,Polygon\\n')\n for idx in range(num_contour):\n contour_i = contour[idx]\n physical_size = 
cv2.contourArea(contour_i)\n #print(physical_size)\n #if physical_size>4000 or physical_size<5:\n # continue\n contour_i = contour_i[:,0,:].astype(np.float32)\n\n contour_i[:, 0] = contour_i[:, 0] + global_xy_offset[0]\n\n contour_i[:, 1] = contour_i[:, 1] + global_xy_offset[1]\n poly_str = ':'.join(['{:.1f}'.format(x) for x in contour_i.flatten().tolist()])\n #print(poly_str)\n fid.write('{},{},[{}]\\n'.format(\n\t\tint(physical_size), int(physical_size), poly_str))\n fid.close()\n return 1\n","sub_path":"prediction_phase_for_WSI/wsi_pred_code/4_generating_polygons_and_meta_files_for_quip4/get_poly_6slides.py","file_name":"get_poly_6slides.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"532684918","text":"#!/usr/bin/env python\n\ndef getDisaster():\n import random\n \n disasters = (\n #natural\n {'description' : 'Small fire'},\n {'description' : 'Whole room on fire'},\n {'description' : 'Building on fire'},\n {'description' : 'Tornado warning'},\n {'description' : 'Flood warning'},\n {'description' : 'Volcanic eruption'},\n {'description' : 'Earthquake'},\n {'description' : 'Blizzard'},\n {'description' : 'Icy roads'},\n {'description' : 'Zero visibility'},\n \n ##silly\n #{'description' : 'Zombie apocalypse'},\n #{'description' : 'Single raptor attack'},\n #{'description' : 'Multiple raptor attack'},\n \n #social / military / crime\n {'description' : 'Riot in progress'},\n {'description' : 'Unruly crowd approaching'},\n {'description' : 'Enemy air raid'},\n {'description' : 'Drunk driver spotted'},\n {'description' : 'Two men fighting'},\n {'description' : 'Two women fighting'},\n {'description' : 'Child alone and crying'},\n {'description' : 'Man beating man'},\n {'description' : 'Man beating woman'},\n {'description' : 'Woman beating man'},\n {'description' : 'Group beating man'},\n {'description' : 'Group beating cop'},\n {'description' : 'Rape in progress nearby, one rapist'},\n {'description' : 'Rape in progress nearby, multiple rapists'},\n {'description' : 'Police officer shot'},\n {'description' : 'Police foot chase'},\n {'description' : 'Police car chase'},\n {'description' : 'Being pursued on foot by individual'},\n {'description' : 'Being pursued on foot by group'},\n {'description' : 'Being pursued in vehicle by vehicle'},\n \n #terror\n {'description' : 'Shooter in the area'},\n {'description' : 'Car bomb explosion'},\n {'description' : 'Car bomb threat'},\n {'description' : 'Pipe bomb threat'},\n {'description' : 'Mail bomb threat'},\n {'description' : 'Car bomb discovered, detonation in 5 minutes'},\n {'description' : 'Pipe bomb discovered, detonation in 5 minutes'},\n {'description' : 'Mail bomb discovered, detonation 
in 5 minutes'},\n {'description' : 'Incoming sniper fire from long range'},\n \n #mystery / misc\n {'description' : 'Distant explosion'},\n {'description' : 'Toxic gas release'},\n {'description' : 'Nerve gas release'},\n {'description' : 'Strong smell of natural gas'},\n {'description' : 'Unidentified loud noise'},\n {'description' : 'Radiation leak'},\n {'description' : 'Woman screaming in distance'},\n {'description' : 'Loud shouts in distance'},\n {'description' : 'Loud bang, close by'},\n \n #accidents\n {'description' : 'Small aircraft crash nearby'},\n {'description' : 'Airliner crash nearby'},\n {'description' : 'Train derailed nearby'},\n {'description' : 'Car-train collision nearby'},\n {'description' : 'Head-on car accident nearby'},\n {'description' : 'T-bone car accident nearby'},\n \n #medical\n {'description' : 'Nosebleed'},\n {'description' : 'Man bleeding without obvious cause'},\n {'description' : 'Nauseating smell'},\n {'description' : 'Sudden vomiting'},\n {'description' : 'Person begins choking nearby'},\n {'description' : 'Person unresponsive after falling, hitting head'}\n \n )\n \n return random.choice(disasters)['description']\n\ndef getStatus():\n '''Returns various status effects'''\n \n import random\n \n # Chances (out of 100)\n armedChance = 50\n hurtChance = 20\n aloneChance = 60\n groupChance = 40\n \n isArmed = \"Armed, \" if (random.randint(1,100) < armedChance) else \"Unarmed, \"\n \n isHurt = \"injured, \" if (random.randint(1,100) < hurtChance) else \"\"\n \n #must determine this first, might or might not be used\n isGroup = \"with group.\" if (random.randint(1,100) < groupChance) else \"with friend.\"\n \n isAlone = \"alone.\" if (random.randint(1,100) < aloneChance) else isGroup\n \n body = isArmed + isHurt + isAlone\n \n #print(body)\n \n return(body)\n \ndef disaster():\n from sendText import sendText, getCredentials, getRecepients\n \n subject = 'SIMULATED Alert'\n \n recepients = getRecepients()\n \n #recepients = 
('litwhistle@mailinator.com')\n \n body = 'SIMULATED alert. Situation: {}. Your status: {}'.format( getDisaster(), getStatus() )\n \n print (body)\n \n #print (recepients[0])\n \n sendText(subject,body,getCredentials(),recepients[0])\n \n #for eachRecepient in recepients:\n #print (eachRecepient)\n #sendText(subject, body, getCredentials(), eachRecepient)\n\ndef exits():\n from sendText import sendText, getCredentials, getRecepients\n \n subject = 'Exits'\n \n recepients = getRecepients()\n \n #recepients = ('litwhistle@mailinator.com')\n \n body = 'Reminder: Nearest Exit?'\n \n print (body)\n \n #print (recepients[0])\n \n sendText(subject,body,getCredentials(),recepients[0])\n\ndef runDebug():\n from sendText import getRecepients\n print( getRecepients() )\n print( getRecepients()[0] )\n\ndef main():\n \n import random\n if random.randint(1,4000) < 10:\n if random.randint(1,100) < 80:\n exits()\n else:\n disaster()\n else:\n print(\"You got lucky this time\")\n\nif __name__ == '__main__':\n main()\n #runDebug()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"292995325","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys, os, re\nimport json, csv\nimport shutil, string\nimport numpy as np\nfrom lxml import etree\nfrom collections import Counter\nfrom operator import itemgetter\nfrom inout.utils.helper import *\nfrom random import shuffle\nimport json\n\nfrom inout.dta.document import Document\nfrom inout.dta.poem import Poem\n\n\n\n#import pyphen\n#from textblob_de import TextBlobDE as tb\n#from sklearn.feature_extraction import DictVectorizer as DV\n\n\nclass Corpus(object):\n def __init__(self, corpuspath, debug=False):\n self.debug = debug\n self.corpuspath = corpuspath\n print('TEI corpus at path ' + corpuspath + ' initialized.')\n #self.allpoems = self.find_poems(self.corpuspath, debug=True)\n #self.all_rhyme_pairs = self.find_rhyme_pairs()\n #self.all_non_rhyme_pairs = self.find_non_rhyme_pairs()\n\n def doc_iter(self):\n paths = [os.path.join(self.corpuspath, fn) for fn in next(os.walk(self.corpuspath))[2]]\n corpus_files = []\n alldocs = []\n for path in paths:\n # print path\n if not path.endswith('.swp'): # and '18' in path:\n doc = Document(path)\n #doc.read()\n alldocs.append(doc)\n return alldocs\n\n def get_all_g2p(self):\n init_dict = {}\n paths = [os.path.join(self.corpuspath, fn) for fn in next(os.walk(self.corpuspath))[2]]\n c = 0\n for path in paths:\n c += 1\n print(c, len(paths), path)\n d = Document(path)\n g2p_dict = d.get_graphem_phonem_dict(d.get_path())\n init_dict.update(g2p_dict)\n return init_dict\n\n def print_rhyme_pairs(self):\n for rp in self.all_rhyme_pairs:\n print (rp)\n print (len(self.all_rhyme_pairs))\n\n def get_authors(self):\n authors = []\n for poem in self.allpoems:\n authors.append(poem.get_author())\n for i, j in sorted(Counter(authors).items(), key=itemgetter(1))[::-1]:\n print (\"H: \", j, \" ... 
\", \"S: \", i, \"\\\\\\\\\")\n\n def read_poems(self):\n a_dir = self.corpuspath\n if self.debug==True:\n paths = [os.path.join(a_dir, fn) for fn in next(os.walk(a_dir))[2]][:5]\n if self.debug==False:\n paths = [os.path.join(a_dir, fn) for fn in next(os.walk(a_dir))[2]]\n corpus_files = []\n allpoems = []\n print('Loading ' + str(len(paths)) + ' documents.')\n c = 0\n n_poems = 0\n for path in paths:\n c += 1\n print('\\nLoaded ' + str(n_poems) + ' poems in ' + str(c) + ' documents of ' + str(len(paths)) + ' documents so far.')\n #print path\n if not path.endswith('.swp'): # and '18' in path:\n doc = Document(path)\n doc.read()\n n_poems += doc.get_amountof_poems()\n for poem in doc.get_poems():\n yield poem\n #return allpoems\n\n def find_rhyme_pairs(self):\n allpairs = []\n for poem in self.allpoems:\n for stanza in poem.get_stanzas():\n pairs = stanza.get_rhyme_pairs()\n for pair in pairs:\n allpairs.append(pair)\n return allpairs\n \n def find_non_rhyme_pairs(self):\n allpairs = []\n for poem in self.allpoems:\n for stanza in poem.get_stanzas():\n pairs = stanza.get_non_rhyme_pairs()\n for pair in pairs:\n allpairs.append(pair)\n return allpairs\n\n def get_rhyme_pairs(self):\n return self.rhyme_pairs\n\n def get_poems(self):\n return self.allpoems\n\n def get_all_poems(self):\n return self.allpoems\n\n def get_corpuspath(self):\n return self.corpuspath\n \n \n def get_4_schemas(self):\n schemas = []\n for poem in self.allpoems:\n stanzas = poem.get_stanzas()\n for stanza in stanzas:\n schema = stanza.get_rhyme_schema()\n try:\n if len(schema) == 4:\n if schema == \"None\":\n print (poem.get_author())\n print (poem.get_year())\n #print(etree.tostring(poem.get_lg_element(), pretty_print=True))\n else:\n schemas.append(schema)\n except TypeError:\n continue\n #for schema in schemas:\n # print schema\n for i, j in sorted(Counter(schemas).items(), key=itemgetter(1))[::-1]:\n print (\"H: \", j, \" ... 
\", \"S: \", i, \"\\\\\\\\\")\n print (len(schemas))\n \n def get_all_schemas(self):\n all_schemas = {}\n for poem in self.allpoems:\n stanzas = poem.get_stanzas()\n for stanza in stanzas:\n schema = stanza.get_rhyme_schema()\n if not schema == \"None\":\n try:\n group = all_schemas.setdefault(len(schema), [])\n group.append(schema)\n except TypeError:\n continue\n for length, schemas in all_schemas.items():\n print()\n print (\"LENGTH:\", length)\n alllen = 0\n for i, j in sorted(Counter(schemas).items(), key=itemgetter(1))[::-1]:\n alllen += j\n print(\"#ALL:\", int(alllen/2))\n for i, j in sorted(Counter(schemas).items(), key=itemgetter(1))[::-1]:\n print (\"W:\", round(j/float(alllen),3), \" ... \", \"H: \", int(j/2), \" ... \", \"S: \", i, \"\\\\\\\\\")\n \n\n\n\n def get_stats(self):\n no_poems = len(self.allpoems)\n no_stanzas = 0\n no_lines = 0\n no_token = 0\n no_syllables = 0\n no_line_syllables = []\n authors = []\n\n for poem in self.allpoems:\n author = poem.get_author()\n authors.append(author)\n no_stanzas += len(poem.get_stanzas())\n for stanza in poem.get_stanzas():\n lines = stanza.get_lines()\n no_lines += len(lines)\n for line in lines:\n tokens = line.split()\n no_token += len(tokens)\n syll_per_line = 0\n for token in tokens:\n hyphenated = syllabify(token)\n syllables = hyphenated.split('-')\n no_syllables += len(syllables)\n syll_per_line += len(syllables)\n no_line_syllables.append(syll_per_line)\n\n author_distr = sorted(Counter(authors).items(), key=itemgetter(1))[::-1]\n\n print (\"Syllables per line Distrib: \", str(sorted(Counter(no_line_syllables).items(), key=itemgetter(1))))\n\n for author, count in author_distr:\n print (author, \" & \", count, \" \\\\\\\\\")\n\n print (\"Authors Distrib: \", str(author_distr))\n print (\"POEMS: \", str(no_poems))\n print (\"STANZAS: \", str(no_stanzas))\n print (\"VERSES: \", str(no_lines))\n print (\"TOKENS: \", str(no_token))\n print (\"Syllables: \", str(no_syllables))\n print (\"Syllables per 
line average: \" + str(np.mean(no_line_syllables)))\n print (\"Syllables per line median: \" + str(np.median(no_line_syllables)))\n print (\"No of Authors: \", str(len(dict(author_distr).keys())))\n print (\"Poems per Author Mean: \", str(np.mean(dict(author_distr).values())))\n print (\"Poems per Author Median: \", str(np.median(dict(author_distr).values())))\n print (\"Min Max Poems per Author: \", str(np.min(dict(author_distr).values())), str(np.max(dict(author_distr).values())))\n\n\n \nif __name__ == '__main__':\n \n c = Corpus(sys.argv[1])\n #c.get_all_schemas()\n \n '''\n #g2p = c.get_all_g2p()\n #for item in g2p.items():\n # print(item)\n \n #f = open('g2p.dict.json', 'w')\n #json.dump(g2p, f)\n \n \n all_rhyme_pairs = c.find_rhyme_pairs()\n all_non_rhyme_pairs = c.find_non_rhyme_pairs()\n #print (all_non_rhyme_pairs)\n \n anrp = []\n for r1, r2 in all_non_rhyme_pairs:\n if str(r1) != str(r2):\n anrp.append((r1, r2))\n all_non_rhyme_pairs = list(set(anrp))\n \n print(all_rhyme_pairs)\n #print(all_non_rhyme_pairs)\n \n \n \n mixed = []\n shuffle(all_non_rhyme_pairs)\n \n for r1, r2 in set(all_rhyme_pairs):\n if r1.startswith(\"xmlns\") or r2.startswith(\"xmlns\") or \" \" in r1 or \" \" in r2: \n continue\n else:\n mixed.append([r1, r2, 'y'])\n \n #c = 0\n #while c < len(all_rhyme_pairs):\n for nr1, nr2 in all_non_rhyme_pairs:\n if \"xmlns\" in nr1 or \"xmlns\" in nr2 or \" \" in nr1 or \" \" in nr2:\n continue\n else:\n mixed.append([nr1, nr2, 'n'])\n #c += 1\n \n \n newmixed = [] \n for w1, w2, label in mixed:\n if w1.endswith('ln'):\n w1 = w1[:-2]\n if w2.endswith('ln'):\n w2 = w2[:-2]\n newmixed.append((w1.lower(),w2.lower(),label))\n \n \n rp_file = open('hiphop_full_train_with_assonance.csv', 'w')\n rp0_file = open('hiphop_full_test_with_assonance.csv', 'w')\n #nrp_file = open('non_rhyme_pairs_dta.csv', 'w')\n \n shuffle(newmixed)\n \n for r1, r2, i in newmixed:\n line = r1 + u\"\\t\" + r2 + u\"\\t\" + i\n rp_file.write(line)\n rp_file.write('\\n')\n 
\n for r1, r2, i in newmixed:\n if i == 'n':\n i = '0'\n elif i == 'y':\n i = '1'\n line = i+ u\"\\t\" + r1 + u\"\\t\" + r2\n rp0_file.write(line)\n rp0_file.write('\\n')\n \n #counter = 0\n #for r1, r2 in all_non_rhyme_pairs:\n # counter += 1\n # line = str(str(counter) + u\"\\t\" + str(r1) + u\"\\t\" + str(r2))\n # nrp_file.write(line)\n # nrp_file.write('\\n')\n '''\n \n \n","sub_path":"dta/corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":9976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"296515087","text":"import discord\nimport asyncio\nimport string\nimport logging\n\nclient = discord.Client()\n\n#token =\n\ndef is_me(m):\n return m.author == client.user\n\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\n@client.event\nasync def on_message(message):\n if message.content.startswith('!hello'):\n await client.send_message(message.channel, 'Hello World!')\n\n elif message.content.startswith('!clear'): \n deleted = await client.purge_from(channel, limit=30, check=is_me)\n await client.send_message(channel, 'Deleted {} message(s)'.format(len(deleted)))\n\n elif message.content.startswith('!aesthetic'):\n trans = message.content.split(' ', 1)[1]\n intable = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-=!@#$%&*()+;',./:? \"\n outtable = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-=!@#$%&*()+;',./:? \"\n transtable = str.maketrans(intable, outtable)\n await client.send_message(message.channel, trans.translate(transtable))\n\n #elif message.content.startswith('!leave'):\n\n\nclient.run(token)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"358726612","text":"piFile = 'pi_dec_1m.txt'\nwith open(piFile) as file_object:\n lines = file_object.readlines()\n\npi_string = ''\nfor line in lines:\n pi_string += line.strip()\n\nbirthday = input('Enter your B\\'day, in the form mmddyy: ')\n\nif birthday in pi_string:\n print('Your B\\'day appears in the first million digits of pi!')\nelse:\n print('Your B\\'day does not appear in the first million digits of pi!')\n","sub_path":"dob_pi/pi.py","file_name":"pi.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"198440656","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.shortcuts import render_to_response\nfrom django.http import JsonResponse\nfrom .models import Movie,Comment,UserMovie\nfrom collections import OrderedDict\nfrom django.views.decorators.csrf import csrf_exempt\nimport time\nimport json\nimport random\nimport os\nimport re\n\npath = os.path.dirname(os.path.abspath(__file__)).replace('\\\\','/').replace(\"Movies\",'Sign')\n\ncontents = {'sign_in':'/signIn/',\n 'sign_up':'/signUp/',\n 'index':'/index/',\n 'logo_url':'../static/image/logo.png',\n 'user_url':'../static/image/user.png'}\n\ntags = ['大陆电影','美国电影','香港电影','台湾电影','日本电影','韩国电影','英国电影','法国电影','德国电影','印度电影','泰国电影',\n '剧情','爱情','喜剧','科幻','动作','悬疑','犯罪','恐怖','青春','励志','战争','文艺','黑色幽默','传记','历史','情色','暴力',\n '音乐','家庭','经典','冷门佳片','魔幻','黑帮','女性']\n\ninfo_tags = {'评分':'score','导演':'diretor','编剧':'screenwriter','类型':'types',\n '主演':'actor','官方网站':'website','制片国家/地区':'country',\n '语言':'language','上映日期':'date','片长':'length',\n '又名':'name','imdb链接':'imdb','集数':'episodes','单集片长':'length_episodes'}\n\norder_tags = OrderedDict(info_tags)\n\n# 得到image_id地址,只在下面的loadAllInfo()中使用\ndef getPath(num):\n now_path = \"\"\n now_dir = 100000\n while now_dir>1 and num//now_dir==0:\n now_path += \"0\"\n now_dir //= 10 \n now_path += str(num)\n return now_path\n\n# 将电影信息储存到数据库\ndef loadAllInfo():\n num = 0\n true_num = 0\n for x in tags:\n fp = open(path + \"/static/items/\" + x + \"/\" + x + \".txt\",\"r\",encoding='utf-8')\n nowtxt = str(fp.read()).split(\"-----------------------------------------------------------------------------------\\n\")\n fp.close()\n for info in nowtxt:\n if info==\"\" or len(info)==0 or str(info).isspace():\n break\n flag = 0\n now_info = {}\n now_title = info.split('\\n',1)[0]\n now_obj = Movie.objects.filter(title__exact = now_title)\n if len(now_obj)!=0:\n flag = 1\n if flag == 0:\n now_info['title'] = info.split('\\n',1)[0]\n for y in 
info_tags:\n pattern = re.compile(y + '.*?\\n')\n nowtxt = re.findall(pattern, info)\n if len(nowtxt)>0:\n nowtxt = nowtxt[0].split(\":\", 1)\n if len(nowtxt)>=2:\n nowtxt = nowtxt[1]\n if len(nowtxt)>=2:\n nowtxt = nowtxt[1:-1]\n if len(nowtxt)==0:\n now_info[info_tags[y]] = \"无\"\n else:\n now_info[info_tags[y]] = nowtxt\n else:\n now_info[info_tags[y]] = \"无\"\n else:\n now_info[info_tags[y]] = \"无\"\n else:\n now_info[info_tags[y]] = \"无\"\n now_info['search_tag'] = x\n now_info['image_id'] = getPath(num)\n if Movie.objects.get_or_create(**now_info)[1]==True:\n true_num += 1\n else:\n now_tag = str(now_obj[0].search_tag)\n if now_tag.find(x)==-1:\n now_obj[0].search_tag += \", \" + x\n now_obj[0].save()\n num += 1\n print(\"Save %s finished. The total num is %d. The True number is %d.\"%(x, num-1, true_num-1))\n \n# loadAllInfo()\n\n@csrf_exempt\ndef pullMovieList(request):\n try:\n content = request.GET\n now_movie = {}\n now_tag = str(content['tag'])\n if 'isChange' in content:\n is_change = str(content['isChange']) # isChange用于判断前端页面是否已被渲染\n else:\n is_change = '1'\n now_movie['now_title'] = now_tag\n is_vis = [0 for i in range(550)] # 用于判断是否重复\n all_movies = Movie.objects.filter(search_tag__contains = now_tag)\n for i in range(12):\n if now_tag!=\"All\":\n now_type = now_tag # 若不为All,说明已经确定了类型\n else:\n now_type = tags[random.randint(0,34)] # 否则,在35种类型中随机选一种\n all_movies = Movie.objects.filter(search_tag__contains = now_type) # 找到所有符合对应类型的电影\n length = len(all_movies)\n now_num = random.randint(0, length-1)\n now_movie_info = all_movies[now_num]\n while is_vis[now_num]==1 or now_movie_info.score==\"无\" or float(now_movie_info.score)<7: # 在相应的类型中找到评分大于7的电影\n now_num = random.randint(0, length-1)\n now_movie_info = all_movies[now_num]\n is_vis[now_num] = 1\n # 传递给前端页面用��渲染的信息\n now_movie['movie_show_' + str(i+1)] = \"poster/\" + now_movie_info.image_id + \".jpg\"\n now_movie['movie_show_' + str(i+1) + '_id'] = now_movie_info.image_id\n now_movie['movie_show_' 
+ str(i+1) + '_title'] = now_movie_info.title\n now_movie['movie_show_' + str(i+1) + '_score'] = now_movie_info.score\n now_movie['movie_show_' + str(i+1) + '_type'] = now_movie_info.types\n now_movie['movie_show_' + str(i+1) + '_country'] = now_movie_info.country\n if request.method=='POST' and str(request.user)!=\"AnonymousUser\":\n if len(UserMovie.objects.filter(user_name__exact=request.user, movie_title__exact=request.POST['movie_title'], movie_tag__exact=request.POST['tag']))==0:\n UserMovie.objects.create(user_name=request.user, movie_title=request.POST['movie_title'], movie_tag=request.POST['tag'])\n if is_change=='1':\n if str(request.user)==\"AnonymousUser\": # 用于判断当前页面的用户是否为匿名用户\n return render_to_response('Index.html',dict(now_movie, **contents))\n now_movie['now_user'] = request.user\n return render_to_response('Index_User.html',dict(now_movie, **contents))\n else:\n return JsonResponse(now_movie) # 若已被渲染,则发送Json到前端\n except Exception as e:\n print(e)\n return render_to_response(None)\n\n@csrf_exempt\ndef showPerMovie(request, id):\n now_movie = Movie.objects.filter(image_id__exact = id)[0]\n other_info = {'index':'/index/',\n 'logo_url':'../static/image/logo.png',\n 'title':now_movie.title,\n 'image_id':'../static/items/poster/' + now_movie.image_id + \".jpg\",\n 'sign_in':'/signIn/',\n 'sign_up':'/signUp/',\n 'user_url':'../static/image/user.png',\n }\n movie = {}\n movie['score'] = now_movie.score\n movie['diretor'] = now_movie.diretor\n movie['screenwriter'] = now_movie.screenwriter\n movie['types'] = now_movie.types\n movie['actor'] = now_movie.actor\n movie['website'] = now_movie.website\n movie['country'] = now_movie.country\n movie['language'] = now_movie.language\n movie['date'] = now_movie.date\n movie['length'] = now_movie.length\n movie['name'] = now_movie.name\n movie['imdb'] = now_movie.imdb\n movie['episodes'] = now_movie.episodes\n movie['length_episodes'] = now_movie.length_episodes\n return_dict = {\n 'Info':json.dumps(movie),\n }\n 
if request.method=='POST' and str(request.user)!=\"AnonymousUser\":\n if request.POST['comment_flag']=='t':\n Comment.objects.create(user_name=request.user, content=request.POST['post_comment'], movie_name=now_movie.title)\n else:\n if len(UserMovie.objects.filter(user_name__exact=request.user, movie_title__exact=request.POST['movie_title'], movie_tag__exact=request.POST['tag']))==0:\n UserMovie.objects.create(user_name=request.user, movie_title=request.POST['movie_title'], movie_tag=request.POST['tag'])\n comment_list=Comment.objects.filter(movie_name__exact = now_movie.title)\n post_comment = []\n for x in comment_list:\n now_comment={}\n now_comment['user_name']=str(x.user_name)\n now_comment['content']=str(x.content)\n now_comment['date']=x.date\n now_comment['user_image_url']='../static/image/user.png'\n post_comment.append(now_comment)\n if len(post_comment) > 0:\n return_dict['comment_list'] = post_comment\n return_dict['movie_title'] = now_movie.title\n if str(request.user)==\"AnonymousUser\":\n return_dict['Other'] = json.dumps(other_info)\n return render(request, 'Movie.html', return_dict)\n else:\n other_info['now_user'] = str(request.user)\n return_dict['Other'] = json.dumps(other_info)\n return render(request, 'Movie_User.html', return_dict)\n\n# 简单电影推荐\ndef simpleRecommend(seen, liked):\n types_dict = {}\n country_dict = {}\n types_total_scores = 0\n country_total_scores = 0\n movies_set = set()\n for liked_movie in liked:\n now_types = liked_movie['type'].split(\"/\")\n now_countries = liked_movie['country'].split(\"/\")\n for now_type in now_types:\n if not now_type.strip() in types_dict.keys():\n types_dict[now_type.strip()] = 2\n for movie in Movie.objects.filter(types__icontains = now_type.strip()):\n movies_set.add(movie)\n else:\n types_dict[now_type.strip()] += 2\n types_total_scores += 2\n for now_country in now_countries:\n if not now_country.strip() in country_dict.keys():\n types_dict[now_country.strip()] = 2\n for movie in 
Movie.objects.filter(country__icontains = now_country.strip()):\n movies_set.add(movie)\n else:\n types_dict[now_country.strip()] += 2\n country_total_scores += 2\n\n for seen_movie in seen:\n now_types = seen_movie['type'].split(\"/\")\n now_countries = seen_movie['country'].split(\"/\")\n for now_type in now_types:\n if not now_type.strip() in types_dict.keys():\n types_dict[now_type.strip()] = 1\n for movie in Movie.objects.filter(types__icontains = now_type.strip()):\n movies_set.add(movie)\n else:\n types_dict[now_type.strip()] += 1\n types_total_scores += 1\n for now_country in now_countries:\n if not now_country.strip() in country_dict.keys():\n types_dict[now_country.strip()] = 1\n for movie in Movie.objects.filter(country__icontains = now_country.strip()):\n movies_set.add(movie)\n else:\n types_dict[now_country.strip()] += 1\n country_total_scores += 1\n if len(movies_set) >= 60:\n select_movies = random.sample(movies_set, 60)\n else:\n select_movies = []\n movie_scores = {}\n for movie in select_movies:\n now_types = movie.types.split(\"/\")\n now_countries = movie.country.split(\"/\")\n if movie.score.strip() != \"无\":\n now_score = float(movie.score.strip()) / 10\n else:\n now_score = 0\n type_score, country_score = 0, 0\n for now_type in now_types:\n if now_type.strip() in types_dict:\n type_score += types_dict[now_type.strip()] / types_total_scores\n for now_country in now_countries:\n if now_country.strip() in country_dict:\n country_score += types_dict[now_country.strip()] / country_total_scores\n movie_scores[movie] = now_score * 10 + type_score * 6 + country_score * 3\n\n movie_info = []\n num = 0\n for now_movie, score in sorted(movie_scores.items(), key = lambda x:x[1], reverse = True):\n if num >= 6:\n break\n now_movie_info={}\n now_movie_info['id']=now_movie.image_id\n now_movie_info['picture']=\"poster/\"+now_movie.image_id+\".jpg\"\n now_movie_info['title']=now_movie.title\n now_movie_info['score']=now_movie.score\n 
now_movie_info['type']=now_movie.types\n now_movie_info['country']=now_movie.country\n movie_info.append(now_movie_info)\n num += 1\n return movie_info\n\ndef getTagMovie(user, now_tag):\n movies = UserMovie.objects.filter(user_name__exact=user, movie_tag__exact=now_tag)\n movies_info = []\n for x in movies:\n now_movie=Movie.objects.filter(title__exact=x.movie_title)[0]\n now_movie_info={}\n now_movie_info['id']=now_movie.image_id\n now_movie_info['picture']=\"poster/\"+now_movie.image_id+\".jpg\"\n now_movie_info['title']=now_movie.title\n now_movie_info['score']=now_movie.score\n now_movie_info['type']=now_movie.types\n now_movie_info['country']=now_movie.country\n movies_info.append(now_movie_info)\n return movies_info\n\ndef updateUserMovie(request):\n if request.method == \"GET\":\n now_tag = str(request.path).split(\"/\")[2]\n movies_info = getTagMovie(request.user, now_tag)\n return JsonResponse({'More':movies_info})\n return None\n\ndef changeRecommend(request):\n if request.method == \"GET\":\n seen_movies = getTagMovie(request.user, 'seen')\n liked_movies = getTagMovie(request.user, 'liked')\n return JsonResponse({'Changed':simpleRecommend(seen_movies, liked_movies)})","sub_path":"Movies/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":13830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"241517769","text":"'''\nCreated on 26.10.2018\n\n@author: fabian\n'''\nimport unittest\nimport unittest.mock as mock\nimport CLI\nfrom FileSet import FileSet\nfrom test.testing_tools import mock_assert_msg\n\n\nmock_fix = mock.MagicMock(name=\"FileSet.fix\")\n\n@mock.patch(\"FileSet.FileSet.fix\", new=mock_fix)\nclass CliFixTests(unittest.TestCase):\n \n @classmethod\n def setUpClass(cls):\n cls.test_set = FileSet(('test (', ')'), [])\n \n def tearDown(self):\n mock_fix.reset_mock()\n \n mock_fix.side_effect = None\n \n \n def test_normal_fix(self):\n \"\"\"The method should perform a gap-only fix if no further arguments are given.\"\"\"\n test_args = ['fix']\n \n CLI.fix(self.test_set, test_args)\n \n mock_assert_msg(mock_fix.assert_called_once_with, [], \"The method fails to initiate a basic fix if no further arguments are given.\")\n \n def test_full_fix(self):\n \"\"\"The method should perform a complete fix if the additional keyword 'all' is given.\"\"\"\n test_args = ['fix', 'all']\n \n CLI.fix(self.test_set, test_args)\n \n mock_assert_msg(mock_fix.assert_called_once_with, [True], \"The method fails to initiate a full fix if the appropriate arguments are given.\")\n \n def test_no_file_set_selected(self):\n \"\"\"The method should recognize and raise an error if no file set is selected.\"\"\"\n test_args = ['fix']\n \n with self.assertRaises(CLI.CLIRuntimeError, msg=\"The method fails to recognize when no file set is selected.\"):\n CLI.fix(None, test_args)\n \n mock_assert_msg(mock_fix.assert_not_called, [], \"The method tries to perform a fix operation even though an error was raised.\")\n \n def test_too_many_arguments(self):\n \"\"\"The method should recognize and raise an error if too many arguments are given.\"\"\"\n test_args = ['fix', 'all', 'now']\n \n with self.assertRaises(CLI.ArgumentAmountError, msg=\"The method fails to recognize when too many arguments are given.\"):\n CLI.fix(self.test_set, test_args)\n \n 
mock_assert_msg(mock_fix.assert_not_called, [], \"The method tries to perform a fix operation even though an error was raised.\")\n \n def test_invalid_second_argument(self):\n \"\"\"The method should recognize and raise an error if the second given argument is not 'all'.\"\"\"\n test_args = ['fix', 'arg']\n \n with self.assertRaises(CLI.InputProcessingError, msg=\"The method fails to recognize when an invalid second argument is given.\"):\n CLI.fix(self.test_set, test_args)\n \n mock_assert_msg(mock_fix.assert_not_called, [], \"The method tries to perform a fix operation even though an error was raised.\")\n \n def test_set_with_too_many_files(self):\n \"\"\"The method should raise a CLIRuntimeError if the file set has too many files to perform 'fix all'.\"\"\"\n test_args = ['fix', 'all']\n mock_fix.side_effect = FileSet.TooManyFilesError(\"Too many files.\")\n \n with self.assertRaises(CLI.CLIRuntimeError, msg=\"The method fails to react correctly when the file set contains too many files to fix all.\"):\n CLI.fix(self.test_set, test_args)\n\n mock_assert_msg(mock_fix.assert_called_once_with, [True], \"The method doesn't actually try to perform the fix operation.\")\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()","sub_path":"src/test/CLI_tests/test_cli_fix.py","file_name":"test_cli_fix.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"58332346","text":"def read_square_matrix():\n matrix = []\n rows_count = int(input())\n\n for row in range(rows_count):\n matrix.append([int(el) for el in input().split()])\n\n return matrix\n\n\ndef get_bombs_coordinates():\n raw_coordinates = input().split()\n bombs_coordinates = []\n for i in range(len(raw_coordinates)):\n current_coord = tuple(map(int, raw_coordinates[i].split(',')))\n bombs_coordinates.append(current_coord)\n\n return bombs_coordinates\n\n\ndef detonate_bomb(bomb_coordinates, matrix):\n row, col = bomb_coordinates\n bomb_value = matrix[row][col]\n cells_to_reduce = [(row, col), (row-1, col-1), (row-1, col), (row-1, col+1),\n (row, col+1), (row+1, col+1), (row+1, col),\n (row+1, col-1), (row, col-1)]\n if bomb_value > 0:\n for cell in cells_to_reduce:\n cell_row, cell_col = cell\n if 0 <= cell_row < len(matrix) and 0 <= cell_col < len(matrix):\n if not matrix[cell_row][cell_col] <= 0:\n matrix[cell_row][cell_col] -= bomb_value\n\n\ndef find_alive_cells(matrix):\n alive_cells_positions = []\n for row in range(len(matrix)):\n for col in range(len(matrix)):\n if not matrix[row][col] <= 0:\n alive_cells_positions.append((row, col))\n\n return alive_cells_positions\n\n\ndef get_alive_cells_sum(matrix):\n alive_cells_sum = 0\n for cell in find_alive_cells(matrix):\n cell_row, cell_col = cell\n alive_cells_sum += matrix[cell_row][cell_col]\n\n return f\"Sum: {alive_cells_sum}\"\n\n\ndef main():\n matrix = read_square_matrix()\n\n all_bombs = get_bombs_coordinates()\n\n for i in range(len(all_bombs)):\n bomb_coordinates = all_bombs[i]\n detonate_bomb(bomb_coordinates, matrix)\n\n print(f\"Alive cells: {len(find_alive_cells(matrix))}\")\n print(get_alive_cells_sum(matrix))\n\n for row in range(len(matrix)):\n print(*matrix[row], sep=' ')\n\n\nmain()\n","sub_path":"Python Advanced/Multidimensional Lists - 
Exercise/Bombs.py","file_name":"Bombs.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"518603194","text":"import unittest\nfrom unittest.mock import Mock\n\n# ATS\nfrom pyats.topology import Device\n\n# Metaparset\nfrom genie.metaparser.util.exceptions import SchemaEmptyParserError,\\\n SchemaMissingKeyError\n\n# Parser\nfrom genie.libs.parser.iosxe.show_sdwan_omp import ShowSdwanOmpSummary\n\n\n# ============================================\n# Parser for the following commands\n# * 'show bfd sessions'\n# ============================================\nclass TestShowSdwanOmpSummary(unittest.TestCase):\n device = Device(name='aDevice')\n maxDiff = None \n empty_output = {'execute.return_value' : ''}\n golden_output = {'execute.return_value': '''\n #show sdwan omp summary \noper-state UP\nadmin-state UP\npersonality vedge\nomp-uptime 34:03:00:35\nroutes-received 5\nroutes-installed 3\nroutes-sent 2\ntlocs-received 3\ntlocs-installed 2\ntlocs-sent 1\nservices-received 3\nservices-installed 0\nservices-sent 3\nmcast-routes-received 0\nmcast-routes-installed 0\nmcast-routes-sent 0\nhello-sent 146344\nhello-received 146337\nhandshake-sent 2\nhandshake-received 2\nalert-sent 1\nalert-received 0\ninform-sent 16\ninform-received 16\nupdate-sent 79\nupdate-received 157\npolicy-sent 0\npolicy-received 2\ntotal-packets-sent 146442\ntotal-packets-received 146514\nvsmart-peers 1\n'''}\n\n golden_parsed_output = {\n 'oper_state': 'UP',\n 'admin_state': 'UP',\n 'personality': 'vedge',\n 'omp_uptime': '34:03:00:35',\n 'routes_received': 5,\n 'routes_installed': 3,\n 'routes_sent': 2,\n 'tlocs_received': 3,\n 'tlocs_installed': 2,\n 'tlocs_sent': 1,\n 'services_received': 3,\n 'services_installed': 0,\n 'services_sent': 3,\n 'mcast_routes_received': 0,\n 'mcast_routes_sent': 0,\n 'hello_sent': 146344,\n 'hello_received': 146337,\n 'handshake_sent': 2,\n 'handshake_received': 2,\n 'alert_sent': 1,\n 'alert_received': 0,\n 'inform_sent': 16,\n 'inform_received': 16,\n 'update_sent': 79,\n 'update_received': 157,\n 'policy_sent': 0,\n 'policy_received': 2,\n 
'total_packets_sent': 146442,\n 'vsmart_peers': 1}\n\n\n def test_empty(self):\n self.device = Mock(**self.empty_output)\n obj = ShowSdwanOmpSummary(device=self.device)\n with self.assertRaises(SchemaEmptyParserError):\n parsed_output = obj.parse()\n \n def test_golden(self):\n self.device = Mock(**self.golden_output)\n obj = ShowSdwanOmpSummary(device=self.device)\n parsed_output = obj.parse()\n self.assertEqual(parsed_output,self.golden_parsed_output)\n\n\nif __name__ == '__main__':\n\t\tunittest.main() \n","sub_path":"src/genie/libs/parser/iosxe/tests/test_show_sdwan_omp.py","file_name":"test_show_sdwan_omp.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"189396037","text":"from .models import PatientProfile\nfrom user.models import UserProfile\nfrom django import forms\n\nclass ProfileUpdateform(forms.ModelForm):\n class Meta:\n model = PatientProfile\n fields = ['Phone', 'Gender', 'Age', 'Address', 'Blood_Group', 'Case_Paper_no', 'Profile_Picture']\n def __init__(self, *args, **kwargs):\n model = PatientProfile\n user = kwargs.pop('user','')\n super(ProfileUpdateform, self).__init__(*args, **kwargs)\n self.fields['user']=forms.ModelChoiceField(queryset=UserProfile.objects.all())","sub_path":"patient/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"304555450","text":"__author__ = 'anna'\n\nfrom selenium import webdriver\nimport time\nimport datetime\n\ndef ff_setup(url):\n driver = webdriver.Firefox()\n driver.maximize_window()\n driver.get(url)\n driver.implicitly_wait(4)\n return driver\n\ndef take_screenshot(driver):\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d:%H:%M:%S')\n destination = '/home/anna/Documents/{}.png'.format(st)\n driver.save_screenshot(destination)\n","sub_path":"Udemy_python_anyone_can_code/tools/custom_helpers.py","file_name":"custom_helpers.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"126846466","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThis script creates graphical representations of three-voice textures.\nIt uses the csv output of the vertical interval indexer. It shades perfect\nand mixed sonorities (according to Fuller (1986)) dark and light grey,\nrespectively. Dissonant sonorities are shaded with the function hatch_2\nand imperfect sonorities are shaded with hatch_1. White bands indicate\nrests or solos within the texture. CSV files should be passed as arguments\nwhen executing the script, e.g., > 3vv_graphs.py hugo_12.csv nazarea_18.csv\n\"\"\"\n\nimport sys, csv\nfrom PIL import Image, ImageColor\n\ncolP = (65, 65, 65)\ncolM = (160, 160, 160)\ncolR = (255, 255, 255)\ncolours = {'R': colR, 'P': colP, 'RRP': colP, 'RPR': colP, 'PRR': colP, 'M': colM,}\n\nSPECIAL_PERFECTS = [\n\t['P4', '-P5'],\n\t['-P5', 'P4']]\n\nSPECIAL_IMPERFECTS = [\n\t['-P4', '-m6'],\n\t['-m6', '-P4'],\n\t['-P4', '-M6'],\n\t['-M6', '-P4'],\n\t['P4', '-m3'],\n\t['-m3', 'P4'],\n\t['P4', '-M3'],\n\t['-M3', 'P4']]\n\nSPECIAL_DISSONANTS = [\n\t['P5', 'm6'],\n\t['m6', 'P5'],\n\t['P5', 'M6'],\n\t['M6', 'P5'],\n\t['-P5', '-m6'],\n\t['-m6', '-P5'],\n\t['-P5', '-M6'],\n\t['-M6', '-P5']]\n\nif len(sys.argv) == 1:\n\tprint('\\n> Please specify a file(s)')\n\nfor arg in sys.argv[1:]:\n\n\trows = [] # a list of the rows in the CSV file\n\tsonorities = [] # a list of the sonorities in each row (as strings)\n\n\twith open(arg) as csvfile:\n\t\treadCSV = csv.reader(csvfile, delimiter = ',')\n\t\tfor row in readCSV:\n\n\t\t\t# skip the first three rows that don't have any data\n\t\t\tif row[0] != 'Indexer' and row[0] != 'Parts' and row[0] != '':\n\n\t\t\t\t# make a list of the rows\n\t\t\t\tif 'Rest' in row:\n\t\t\t\t\trows.append(row[-3:])\n\n\t\t\t\t# if there are no rests, just use intervals above the lowest voice\n\t\t\t\telse:\n\t\t\t\t\trows.append(row[-2:])\n\n\tfor r in rows:\n\n\t\tif 'Rest' in r:\n\t\t\n\t\t\t# first two columns are rests (single interval between 
A & T)\n\t\t\tif r[0] == r[1] == 'Rest' and r[2] != 'Rest':\n\t\t\t\tif any(x in str(r) for x in ['d', 'A', '2', '7', '4']):\n\t\t\t\t\tsonorities.append('RRD')\n\t\t\t\telif 'P' in str(r):\n\t\t\t\t\tsonorities.append('RRP')\n\t\t\t\telse:\n\t\t\t\t\tsonorities.append('RRI')\n\n\t\t\t# first and last columns are rests (single interval between S & T)\n\t\t\telif r[0] == r[2] == 'Rest' and r[1] != 'Rest':\n\t\t\t\tif any(x in str(r) for x in ['d', 'A', '2', '7', '4']):\n\t\t\t\t\tsonorities.append('RDR')\n\t\t\t\telif 'P' in str(r):\n\t\t\t\t\tsonorities.append('RPR')\n\t\t\t\telse:\n\t\t\t\t\tsonorities.append('RIR')\n\n\t\t\t# last two columns are rests (single interval between S & A)\n\t\t\telif r[0] != 'Rest' and r[1] == r[2] == 'Rest':\n\t\t\t\tif any(x in str(r) for x in ['d', 'A', '2', '7', '4']):\n\t\t\t\t\tsonorities.append('DRR')\n\t\t\t\telif 'P' in str(r):\n\t\t\t\t\tsonorities.append('PRR')\n\t\t\t\telse:\n\t\t\t\t\tsonorities.append('IRR')\n\n\t\t\t# all three columns are rests\n\t\t\telse:\n\t\t\t\tsonorities.append('R')\n\n\t\t# special cases\n\t\telif r in SPECIAL_DISSONANTS:\n\t\t\tsonorities.append('D')\n\t\telif r in SPECIAL_IMPERFECTS:\n\t\t\tsonorities.append('I')\n\t\telif r in SPECIAL_PERFECTS:\n\t\t\tsonorities.append('P')\n\n\t\t# now what remain are full sonorities with no rests and no consonant fourths\n\t\t# dissonant first\n\t\telif any(x in str(r) for x in ['d', 'A', '2', '7', '4']):\n\t\t\tsonorities.append('D')\n\n\t # then perfect\n\t\telif '3' not in str(r) and '6' not in str(r):\n\t\t\tsonorities.append('P')\n\n\t\t# then imperfect\n\t\telif '1' not in str(r) and '5' not in str(r) and '8' not in str(r):\n\t\t\tsonorities.append('I')\n\n\t\t# finally mixed (all that's left)\n\t\telse:\n\t\t\tsonorities.append('M')\n\n\tprint(sonorities)\n\n\t# making a graph of the sonorities\n\tsize = (len(sonorities)*22, 99)\n\timg = Image.new('RGB', size, 'white')\n\n\tFULL = ['R', 'P', 'M', 'I', 'D']\n\tSA = ['PRR', 'IRR', 'DRR']\n\tST = 
['RPR', 'RIR', 'RDR']\n\tAT = ['RRP', 'RRI', 'RRD']\n\n\tDISS = ['D', 'RRD', 'RDR', 'DRR']\n\tIMP = ['I', 'RRI', 'RIR', 'IRR']\n\n\tdef draw_black_line(img, X2):\n\t\t\"draws a black line\"\n\t\tfor x in range(X2, (X2 + 2)):\n\t\t\tfor y in range(0, 99):\n\t\t\t\timg.putpixel((x, y), ImageColor.getcolor('black', 'RGB'))\n\t\treturn None\n\n\tdef draw_band(img, X1, Y1, X2, Y2):\n\t\t\"fills in the specified band with solid colour\"\n\t\tfor x in range(X1, X2):\n\t\t\tfor y in range(Y1, Y2):\n\t\t\t\timg.putpixel((x, y), colours[sonorities[i]])\n\t\treturn None\n\n\tdef draw_hatch_1(img, X1, Y1, X2, Y2):\n\t\t\"draws a horizontal hatch pattern\"\n\t\tk = 0\n\t\tfor y in range(Y1, Y2):\n\t\t\tfor x in range(X1, X2):\n\t\t\t\tif k % 5 == 0:\n\t\t\t\t\timg.putpixel((x, y), ImageColor.getcolor('black', 'RGB'))\n\t\t\tk = k + 1\n\t\treturn None\n\n\tdef draw_hatch_2(img, X1, Y1, X2, Y2):\n\t\t\"draws a diagonal hatch pattern\"\n\t\tm = 0\n\t\tfor y in range(Y1, Y2):\n\t\t\tfor x in range(X1, X2):\n\t\t\t\tif m % 7 == 0:\n\t\t\t\t\timg.putpixel((x, y), ImageColor.getcolor('black', 'RGB'))\n\t\t\t\tm = m + 1\n\t\treturn None\n\n\ti = 0\n\twhile i < len(sonorities):\n\n\t\tif sonorities[i] in FULL:\n\t\t\tX1 = i*22\n\t\t\tY1 = 0\n\t\t\tX2 = X1 + 20\n\t\t\tY2 = 99\n\t\t\tsplit = False \n\t\telif sonorities[i] in SA:\n\t\t\tX1 = i*22\n\t\t\tY1 = 0\n\t\t\tX2 = X1 + 20\n\t\t\tY2 = 66\n\t\t\tsplit = False\n\t\telif sonorities[i] in AT:\n\t\t\tX1 = i*22\n\t\t\tY1 = 33\n\t\t\tX2 = X1 + 20\n\t\t\tY2 = 99\n\t\t\tsplit = False\n\t\telse:\n\t\t\tX1 = i*22\n\t\t\tY1 = 0\n\t\t\tX2 = X1 + 20\n\t\t\tY2 = 33\n\t\t\tsplit = True\n\n\t\tif sonorities[i] in DISS:\n\t\t\tdraw_hatch_2(img, X1, Y1, X2, Y2)\n\t\t\tdraw_black_line(img, X2)\n\n\t\telif sonorities[i] in IMP:\n\t\t\tdraw_hatch_1(img, X1, Y1, X2, Y2)\n\t\t\tdraw_black_line(img, X2)\n\n\t\telse:\n\t\t\tdraw_band(img, X1, Y1, X2, Y2)\n\t\t\tdraw_black_line(img, X2)\n\t\t\n\t\tif split:\n\t\t\t\n\t\t\tif sonorities[i] in 
DISS:\n\t\t\t\tdraw_hatch_2(img, X1, Y1 + 66, X2, Y2 + 66)\n\n\t\t\telif sonorities[i] in IMP:\n\t\t\t\tdraw_hatch_1(img, X1, Y1 + 66, X2, Y2 + 66)\n\n\t\t\telse:\n\t\t\t\tdraw_band(img, X1, Y1 + 66, X2, Y2 + 66)\n\t\t\n\t\ti = i + 1\n\n\timg.save(str(arg[:-4]) + '.png')","sub_path":"Scripts/3vv_timelines.py","file_name":"3vv_timelines.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"654206038","text":"from flask import Flask, render_template, request, send_from_directory, Response, abort\nfrom slideatlas.version import get_version\nfrom slideatlas import slconn as conn, admindb\nfrom werkzeug.routing import BaseConverter\nimport mongokit\n\nimport sys, os\n\nfrom bson import ObjectId\nfrom slideatlas.common_utils import jsonify\n\nimport base64\n\n\n# Create App\nsys.path.append(os.path.dirname(__file__))\napp = Flask(__name__)\n#app.debug = True\n\napp.config.from_object(\"site_local\")\n\n# Connection settings for local demo database for testing (VM)\nslconn = mongokit.Connection(app.config[\"MONGO_SERVER\"], tz_aware=False, auto_start_request=False)\nadmindb = slconn[\"admin\"]\n\nif app.config[\"LOGIN_REQUIRED\"]:\n admindb.authenticate(app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n\n# set the secret key. keep this really secret:\napp.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'\n\nclass RegexConverter(BaseConverter):\n def __init__(self, url_map, *items):\n super(RegexConverter, self).__init__(url_map)\n self.regex = items[0]\n\n\napp.url_map.converters['regex'] = RegexConverter\n\nimport glviewer\napp.register_blueprint(glviewer.mod)\n\n@app.route('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'),\n 'favicon.ico', mimetype='image/vnd.microsoft.icon')\n\n\n@app.route('/home')\ndef home():\n \"\"\"\n All routes get redirected here\n - / Says Hello \n - / Says Hello \n\n \"\"\"\n return render_template('home.html', git=get_version(), host=app.config[\"MONGO_SERVER\"])\n\n\n#==============================================================================\n# I am getting rid of the special paths in favor of just using the type to select the viewer.\n# this is legarcy code.\n@app.route('/viewer')\ndef viewer():\n \"\"\"\n - /viewer?db=507619bb0a3ee10434ae0827\n \"\"\"\n\n #\n dbName = request.args.get('db', '')\n collectionName = request.args.get('col', '')\n\n return 
render_template('connectome.html', db=dbName, col=collectionName)\n\n\ndef encodeSection(sectionObj) :\n sectionObj[\"_id\"] = str(sectionObj[\"_id\"])\n if sectionObj.has_key(\"worldPointsFloat64\") :\n sectionObj[\"worldPointsFloat64\"] = base64.b64encode(str(sectionObj[\"worldPointsFloat64\"]))\n for imageObj in sectionObj[\"images\"] :\n if imageObj.has_key(\"meshPointsInt32\") :\n imageObj[\"meshPointsInt32\"] = base64.b64encode(str(imageObj[\"meshPointsInt32\"]))\n if imageObj.has_key(\"meshPointIdsInt32\") :\n imageObj[\"meshPointIdsInt32\"] = base64.b64encode(str(imageObj[\"meshPointIdsInt32\"]))\n if imageObj.has_key(\"meshTrianglesInt32\") :\n imageObj[\"meshTrianglesInt32\"] = base64.b64encode(str(imageObj[\"meshTrianglesInt32\"]))\n\n return jsonify(sectionObj)\n\n\n# List of sections (with id, waferName and section)\n@app.route('/getsections')\ndef getsections():\n dbName = request.args.get('db', '')\n collectionName = request.args.get('col', '')\n sectionId = request.args.get('id', None)\n objType = request.args.get('type', 'Section')\n # passed to server to be returned to client (hack)\n sectionIndex = request.args.get('idx', None)\n\n db = conn[dbName]\n\n if sectionId :\n sectionObj = db[collectionName].find_one({'_id':ObjectId(sectionId)})\n if sectionIndex :\n sectionObj[\"index\"] = int(sectionIndex)\n r = encodeSection(sectionObj)\n return r\n else :\n sectionCursor = db[collectionName].find({\"type\":objType},{\"waferName\":1, \"section\":1}).sort([(\"waferName\", 1), (\"section\", 1)])\n # make a new structure to return. 
Convert the ids to strings.\n sectionArray = [];\n for section in sectionCursor:\n section[\"_id\"] = str(section[\"_id\"])\n sectionArray.append(section)\n data = {}\n data[\"sections\"] = sectionArray\n return jsonify(data)\n\n\n# Tile that uses database name.\n@app.route('/tile')\ndef tile():\n\n # Get variables\n dbName = request.args.get('db', None)\n imgName = request.args.get('img', None)\n name = request.args.get('name', None)\n\n\n imgdb = conn[dbName]\n colImage = imgdb[imgName]\n docImage = colImage.find_one({'name':name})\n\n if docImage == None:\n abort(403)\n return Response(str(docImage['file']), mimetype=\"image/jpeg\")\n\n# Correlations for section.\n@app.route('/getcorrelations')\ndef getcorrelations():\n dbName = request.args.get('db', '')\n collectionName = request.args.get('col', '')\n wafer = request.args.get('wafer', None)\n section = int(request.args.get('sect', 1))\n\n db = conn[dbName]\n\n data = {};\n data[\"CorrelationArray0\"] = [];\n data[\"CorrelationArray1\"] = [];\n sectionObj0 = db[collectionName].find_one({'montage0.waferName':wafer, 'montage0.sectionNumber':section})\n if sectionObj0 :\n if \"correlations\" in sectionObj0 :\n data[\"CorrelationArray0\"] = sectionObj0[\"correlations\"];\n\n sectionObj1 = db[collectionName].find_one({'montage1.waferName':wafer, 'montage1.sectionNumber':section})\n if sectionObj1 :\n if \"correlations\" in sectionObj1 :\n data[\"CorrelationArray1\"] = sectionObj1[\"correlations\"];\n\n return jsonify(data)\n\n# Remove an object from the database collection.\n@app.route('/removeobject')\ndef removeobject():\n dbName = request.args.get('db', '')\n collectionName = request.args.get('col', '')\n idStr = request.args.get('id', '')\n\n db = conn[dbName]\n\n db[collectionName].remove({'_id': ObjectId(idStr)})\n\n return\n\n\n@app.route('/correlation')\ndef debugcorrelation():\n return 
render_template('correlation.html')\n","sub_path":"connectome/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"192530487","text":"from unittest import mock\n\nimport numpy as np\nimport pytest\n\nfrom autocnet.matcher import cpu_ring_matcher as rm\n\n@pytest.mark.parametrize('arr, expected', [\n (np.array([[1,0],[1,1], [2,3]]), (1,2)),\n (np.array([[0,0], [1,1], [2,2]]), (3,2)\n )])\ndef test_check_pidx_duplicates(arr, expected):\n pidx = rm.check_pidx_duplicates(arr)\n assert pidx.shape == expected\n\n@pytest.mark.parametrize(\"a, b, threshold, expected\", [\n # Tests standard call\n (np.array([1,2,3]), \n np.array([[1,2,3], [4,5,6], [7,8,9]]),\n 1.5, \n np.array([0])),\n # Tests call where distances are too close\n (np.array([1,2,3]),\n np.array([[7,8,9], [1,2,4], [1,2,4.1]]),\n 1.5, \n None),\n # Tests call with close distances where the threshold is low\n (np.array([1,2,3]),\n np.array([[7,8,9], [1,2,4], [1,2,4.1]]),\n 1., \n 1),\n # Tests call when np.argmin will fail\n (np.array([np.nan, np.nan]),\n np.array([[np.nan, np.nan], [np.nan, np.nan]]),\n 1.5,\n None),\n # Tests call where descriptors are identical\n (np.array([1,2,3]),\n np.array([[1,2,3], [1,2,3], [1,2,3]]),\n 1.5,\n None)\n])\ndef test_sift_match(a, b, threshold, expected):\n assert rm.sift_match(a, b, thresh=threshold) == expected \n\n@pytest.mark.parametrize(\"x,y, eidx\",[(np.array([[1,1],[2,2],[3,3], [4,4], [5,5]]),\n np.array([[1.1,1.0],[1.9,1.95],[3,3], [-4,-4], [5,5]]),\n np.array([[0,1,2,4]])),\n (np.array([[1,1], [5,5]]),\n np.array([[1,1], [3,3]]),\n [])\n ])\ndef test_ransac_permute(x, y, eidx):\n xp, yp, idx = rm.ransac_permute(x, y, 0.2, 2)\n np.testing.assert_array_equal(idx, eidx)\n\n\ndef test_add_correspondences():\n func = 'autocnet.matcher.cpu_ring_matcher.ring_match_one'\n with mock.patch(func, return_value=1):\n in_feats = np.array([[1,1], [2,2]])\n ref_feats = np.array([[1,1],[2,2],[3,3], [4,4], [5,5]])\n tar_feats = np.array([[1.1,1.0],[1.9,1.95],[3,3], [-4,-4], [5,5]])\n \n rm.add_correspondences(in_feats, ref_feats, tar_feats, None, None,\n (0,6), (0,6),(0,1))\n\ndef 
test_dynamically_grow():\n x = np.ones((3,3))\n y = rm.dynamically_grow_array(x,6)\n assert y.shape == (9,3)\n \ndef test_dynamically_grow_dtype():\n x = np.ones((3,3), dtype=np.int8)\n y = rm.dynamically_grow_array(x,6)\n assert np.issubdtype(y.dtype, np.float64)\n\n y = rm.dynamically_grow_array(x,6,dtype=np.int8)\n assert np.issubdtype(y.dtype, np.int8)\n\ndef test_points_in_ring():\n x = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4])\n for i in np.arange(0.5, 4.5):\n assert np.sum(rm.points_in_ring(x, i, i+1)) == 5\n\ndef test_ring_match():\n ref_feats = np.array([[1,1,1,1],\n [2,2,2,2],\n [3,3,3,3],\n [4,4,4,4]])\n tar_feats = np.array([[2,2,1.1,1],\n [2.5, 2.5, 1.1, 1.1],\n [3,3,2.1,2.1],\n [3.5, 3.5, 2.2, 2.2],\n [4,4,2.9,2.9],\n [4.5, 4.5, 3.0, 3.0],\n [5,5, 4.0, 4.1],\n [5.5, 5.5, 4.1, 4.1]])\n ref_desc = np.array([[0,0,0,0],\n [1,1,1,1],\n [2,2,2,2],\n [3,3,3,3]])\n tar_desc = np.array([[0,0,0,0],\n [6,7,8,9],\n [1,1,1,1],\n [6,7,8,9],\n [2,2,2,2],\n [6,7,8,9],\n [3,3,3,3],\n [6,7,8,9]])\n\n ring_radius = 0.5\n max_radius = 1\n target_points = 2\n tolerance = 0.1\n gr, gt, p_idx, ring = rm.ring_match(ref_feats, tar_feats, ref_desc, tar_desc,\n ring_radius=ring_radius, max_radius=max_radius,\n target_points=target_points, tolerance_val=tolerance,\n iteration_break_point=2)\n assert ring == (0.0, 0.5)\n sorted_pidx = p_idx[p_idx[:,0].astype(np.int).argsort()]\n np.testing.assert_array_equal(sorted_pidx,\n np.array([[0,0],[1,2],[2,4],[3,6]]))\n","sub_path":"autocnet/matcher/tests/test_cpu_ring_matcher.py","file_name":"test_cpu_ring_matcher.py","file_ext":"py","file_size_in_byte":5013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"37194463","text":" \nimport pybullet as p\nimport numpy as np\nimport pybullet_data\nimport IPython\nimport math\nimport time\nimport random\n\nfrom scipy import spatial\nimport pickle as pickle\n\nclass Mesh:\n\tdef __init__(self, m, objName, objIdx, hypoIdx, pos, quat, prob, objRole):\n\t\tself.m = m\n\t\tself.objName = objName\n\t\tself.objIdx = objIdx\n\t\tself.hypoIdx = hypoIdx\n\t\tself.pos = pos\n\t\tself.quat = quat\n\t\tself.prob = prob\n\t\tself.objRole = objRole\n\n\tdef setProb(self, prob):\n\t\tself.prob = prob\n\n'''\n## yaw(Z), pitch (Y), roll(X) \n## This function come from online wiki link\n## https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles\ndef euler_to_quaternion(yaw, pitch, roll):\n cy = math.cos(yaw * 0.5)\n sy = math.sin(yaw * 0.5)\n cp = math.cos(pitch * 0.5)\n sp = math.sin(pitch * 0.5)\n cr = math.cos(roll * 0.5)\n sr = math.sin(roll * 0.5)\n\n qw = round(cy * cp * cr + sy * sp * sr, 4)\n qx = round(cy * cp * sr - sy * sp * cr, 4)\n qy = round(sy * cp * sr + cy * sp * cr, 4)\n qz = round(sy * cp * cr - cy * sp * sr, 4)\n\n return [qx, qy, qz, qw]\n## Pybullet provides a built-in function getQuaternionFromEuler() which\n## follows a new rule [Y, Xnew, Znew] and return the quaternion in format [x,y,z,w]\n'''\n\ndef comp_prob(pos, angles, temp_pos, temp_angles):\n\t## parameter tunning for mimicing sensors\n\talpha = 15.0\n\tbeta = 15.0\n\tgamma = 0.003\n\ttemp_prob = math.exp(-alpha*math.fabs(pos[0]-temp_pos[0]) \\\n\t\t\t\t- beta*math.fabs(pos[1]-temp_pos[1]) \\\n\t\t\t\t- gamma*math.fabs(angles[2]-temp_angles[2]))\n\treturn float(format(temp_prob, '.3f'))\n\ndef createTrueMesh(hypoIdx, objIdx, meshFile, objName, objRole, scale, pos, angles, mass, clientId):\n\t### starting hypo index for the current obs\n\t### with the mesh file and scale\n\t### create the collision and visual shape of the object\n\tobjMesh = []\n\n\tif objRole == \"phantom\":\n\t\treturn objMesh\n\t_c = 
p.createCollisionShape(shapeType=p.GEOM_MESH, \n\t\t\t\t\t\t\t\tfileName=meshFile, meshScale=[scale, scale, scale], physicsClientId=clientId)\n\t_v = p.createVisualShape(shapeType=p.GEOM_MESH, \n\t\t\t\t\t\t\t\tfileName=meshFile, meshScale=[scale, scale, scale], physicsClientId=clientId)\n\ttemp_quat = p.getQuaternionFromEuler([angles[0], angles[1], angles[2]])\n\tquat = []\n\tfor tq in temp_quat:\n\t\tquat.append(float(format(tq, '.3f')))\n\t_m = p.createMultiBody(baseCollisionShapeIndex=_c, baseVisualShapeIndex=_v, \n\t\t\t\t\t\t\t\t\t\t\tbasePosition=pos, baseOrientation=quat, physicsClientId=clientId)\n\tobjMesh.append(Mesh(_m, objName, objIdx, hypoIdx, pos, quat, 1.0, objRole))\n\n\treturn objMesh\n\ndef createHypoMesh(currentlabelIdx, objIdx, meshFile, objName, objRole, scale, pos, angles, \n\t\t\t\t\t\t\t\ttransErrors, orientErrors, known_geometries, benchmarkType, nHypos, clientId, notInSceneProb):\n\t### starting label index for the current obs (currentlabelIdx)\n\t### with the mesh file and scale\n\t### create the collision and visual shape of the object\n\thypotheses = []\n\n\t### first specify the collision (they are identical to poses of the same object)\n\t_c = p.createCollisionShape(shapeType=p.GEOM_MESH, \n\t\t\tfileName=meshFile, meshScale=[scale, scale, scale], physicsClientId=clientId)\n\n\tlargest_prob = 0.0\n\tlargest_prob_idx = currentlabelIdx\n\th = 0\n\n\t### for each hypothesis\n\twhile (h < nHypos):\n\t\t### generate new pos and new quat according to transErrors and orientErrors\n\t\ttemp_px = float(format(random.uniform(pos[0]-transErrors, pos[0]+transErrors), '.3f'))\n\t\ttemp_py = float(format(random.uniform(pos[1]-transErrors, pos[1]+transErrors), '.3f'))\n\t\ttemp_oz = float(format(random.uniform(angles[2]-orientErrors, angles[2]+orientErrors), '.3f'))\n\t\ttemp_pos = [temp_px, temp_py, pos[2]] ### so far no uncertainty in height (z axis)\n\t\ttemp_angles = [angles[0], angles[1], temp_oz] ### so far only uncertainty in the orientation 
around z axis\n\t\ttemp_quat = p.getQuaternionFromEuler(temp_angles)\n\n\t\ttemp_prob = comp_prob(pos, angles, temp_pos, temp_angles)\n\t\t\n\t\t### create the hypothesis mesh and save it into the hypotheses\n\t\t_v = p.createVisualShape(shapeType=p.GEOM_MESH, fileName=meshFile, \n\t\t\t\tmeshScale=[scale, scale, scale], rgbaColor=[1.0, 1.0, 1.0, temp_prob*(1-notInSceneProb)], physicsClientId=clientId)\n\t\t_m = p.createMultiBody(baseCollisionShapeIndex=_c, baseVisualShapeIndex=_v, \n\t\t\t\t\t\t\t\t\t\tbasePosition=temp_pos, baseOrientation=temp_quat, physicsClientId=clientId)\n\t\t### Before adding the mesh into the hypotheses, check if it collides with known geometries\n\t\ttemp_collided = collisionCheck_amongObjects(_m, known_geometries, benchmarkType, clientId)\n\t\tif (temp_collided):\n\t\t\tp.removeBody(_m, clientId)\n\t\t\tcontinue\n\t\t# print objName + \":\" + str(_m)\n\n\t\t### reached here since it has no collision with known obstacles\t\t\n\t\tif temp_prob > largest_prob:\n\t\t\tlargest_prob = temp_prob\n\t\t\tlargest_prob_idx = currentlabelIdx + h\n\t\t### add this hypothesis\n\t\thypotheses.append( Mesh(_m, objName, objIdx, currentlabelIdx+h, temp_pos, temp_quat, temp_prob, objRole) )\n\t\th = h + 1\n\n\t### last step: normalize the probability\n\ttemp_probs = []\n\tfor hp in hypotheses:\n\t\ttemp_probs.append(hp.prob)\n\ttemp_sum = sum(temp_probs)\n\tfor i in range(0, len(hypotheses)):\n\t\thypotheses[i].setProb(round(temp_probs[i]*(1-notInSceneProb)/temp_sum, 3))\n\n\treturn hypotheses, largest_prob_idx\n\n\ndef printPoses(meshes):\n\tfor mesh in meshes:\n\t\tprint(\"hypo \" + str(mesh.hypoIdx) + \" \" + str(mesh.pos) + \" \" + str(mesh.quat) + \" \" + \\\n\t\t\t\t\t\t\tstr(mesh.prob) + \"\\tfor object \" + str(mesh.objIdx) + \": \" + mesh.objName)\n\tprint(\"--------------------------------------\\n\")\n\ndef collisionCheck_amongObjects(m, known_geometries, benchmarkType, clientId):\n\tisCollision = False\n\tfor g in known_geometries:\n\t\tif 
benchmarkType == \"table\" and str(g) == \"1\":\n\t\t\tcontinue\n\t\tif benchmarkType == \"shelf\" and str(g) == \"1\":\n\t\t\tcontinue\n\t\tif benchmarkType == \"shelf\" and str(g) == \"5\":\n\t\t\tcontinue\n\t\tcontacts = p.getClosestPoints(m, g, 0.01, physicsClientId=clientId)\n\t\tif len(contacts) != 0:\n\t\t\tisCollision = True\n\t\t\tprint(\"collision between \" + m.objName + \"and \" + \"known geometries \" + str(g))\n\t\t\tbreak\n\n\treturn isCollision\n\ndef collisionCheck_selfCollision(motomanID, clientId):\n\tisCollision = False\n\tcontacts = p.getContactPoints(bodyA=motomanID, bodyB=motomanID, physicsClientId=clientId)\n\tif len(contacts) != 0:\n\t\tisCollision = True\n\t\t# print \"robot self collision occurs!\"\n\t\t# print contacts\n\treturn isCollision\n\ndef collisionCheck_knownObs(motomanID, known_geometries, clientId):\n\tisCollision = False\n\t### loop through all known obstacles\n\tfor g in known_geometries:\n\t\tif g == 0: ## no need to change robot self-collision again\n\t\t\tcontinue\n\t\tcontacts = p.getContactPoints(motomanID, g, physicsClientId=clientId)\n\t\tif len(contacts) != 0:\n\t\t\tisCollision = True\n\t\t\t# print \"collision with known obstacle \" + str(g)\n\t\t\tbreak\n\treturn isCollision\n\ndef collisionCheck_hypos(motomanID, hypotheses, clientId):\n\tcollidedHypos = []\n\t### loop through all hypotheses\n\tfor hp in hypotheses:\n\t\tcontacts = p.getContactPoints(motomanID, hp.m, physicsClientId=clientId)\n\t\tif len(contacts) != 0:\n\t\t\t### add the index of that hypo\n\t\t\tcollidedHypos.append(hp.hypoIdx)\n\n\treturn collidedHypos\n\n\ndef checkEdgeValidity(n1, n2, motomanID, known_geometries, clientId):\n\tstep = 8 * math.pi / 180\n\tnseg = int(math.ceil(max(math.fabs(n1[0]-n2[0]), math.fabs(n1[1]-n2[1]), \n\t\t\tmath.fabs(n1[2]-n2[2]), math.fabs(n1[3]-n2[3]), math.fabs(n1[4]-n2[4]), \n\t\t\tmath.fabs(n1[5]-n2[5]), math.fabs(n1[6]-n2[6]))) / step)\n\tif nseg == 0:\n\t\tnseg = 1\n\tisEdgeValid = True\n\tfor i in range(1, 
nseg):\n\t\tinterm_j0 = n1[0] + (n2[0]-n1[0]) / nseg * i\n\t\tinterm_j1 = n1[1] + (n2[1]-n1[1]) / nseg * i\n\t\tinterm_j2 = n1[2] + (n2[2]-n1[2]) / nseg * i\n\t\tinterm_j3 = n1[3] + (n2[3]-n1[3]) / nseg * i\n\t\tinterm_j4 = n1[4] + (n2[4]-n1[4]) / nseg * i\n\t\tinterm_j5 = n1[5] + (n2[5]-n1[5]) / nseg * i\n\t\tinterm_j6 = n1[6] + (n2[6]-n1[6]) / nseg * i\n\t\tintermNode = [interm_j0, interm_j1, interm_j2, interm_j3, interm_j4, interm_j5, interm_j6]\n\t\tfor j in range(1, 8):\n\t\t\tresult = p.resetJointState(motomanID, j, intermNode[j-1], physicsClientId=clientId)\n\t\tfor j in range(11, 18):\n\t\t\tresult = p.resetJointState(motomanID, j, 0.0, physicsClientId=clientId)\n\t\tp.stepSimulation(clientId)\n\t\tisCollisionSelf = collisionCheck_selfCollision(motomanID, clientId)\n\t\tisCollisionKnownObs = collisionCheck_knownObs(motomanID, known_geometries, clientId)\n\n\t\tif isCollisionSelf or isCollisionKnownObs:\n\t\t\tisEdgeValid = False\n\t\t\tbreak\n\n\treturn isEdgeValid\n\ndef label_the_edge(n1, n2, motomanID, hypotheses, clientId):\n\ttemp_labels = []\n\tlabels_status = [False] * len(hypotheses)\n\tstep = 8 * math.pi / 180\n\tnseg = int(math.ceil(max(math.fabs(n1[0]-n2[0]), math.fabs(n1[1]-n2[1]), \n\t\t\tmath.fabs(n1[2]-n2[2]), math.fabs(n1[3]-n2[3]), math.fabs(n1[4]-n2[4]), \n\t\t\tmath.fabs(n1[5]-n2[5]), math.fabs(n1[6]-n2[6]))) / step)\n\tif nseg == 0:\n\t\tnseg = 1\n\tfor i in range(0, nseg+1):\n\t\tinterm_j0 = n1[0] + (n2[0]-n1[0]) / nseg * i\n\t\tinterm_j1 = n1[1] + (n2[1]-n1[1]) / nseg * i\n\t\tinterm_j2 = n1[2] + (n2[2]-n1[2]) / nseg * i\n\t\tinterm_j3 = n1[3] + (n2[3]-n1[3]) / nseg * i\n\t\tinterm_j4 = n1[4] + (n2[4]-n1[4]) / nseg * i\n\t\tinterm_j5 = n1[5] + (n2[5]-n1[5]) / nseg * i\n\t\tinterm_j6 = n1[6] + (n2[6]-n1[6]) / nseg * i\n\t\tintermNode = [interm_j0, interm_j1, interm_j2, interm_j3, interm_j4, interm_j5, interm_j6]\n\t\tfor j in range(1, 8):\n\t\t\tresult = p.resetJointState(motomanID, j, intermNode[j-1], physicsClientId=clientId)\n\t\tfor 
j in range(11, 18):\n\t\t\tresult = p.resetJointState(motomanID, j, 0.0, physicsClientId=clientId)\n\t\tp.stepSimulation(clientId)\n\t\tfor hp in hypotheses:\n\t\t\t### Before you do collision checker\n\t\t\t### check if that hypo has been checked before\n\t\t\tif labels_status[hp.hypoIdx] == False:\n\t\t\t\tcontacts = p.getContactPoints(motomanID, hp.m, physicsClientId=clientId)\n\t\t\t\tif len(contacts) != 0:\n\t\t\t\t\ttemp_labels.append(hp.hypoIdx)\n\t\t\t\t\tlabels_status[hp.hypoIdx] = True\n\n\treturn temp_labels\n\ndef collisionCheck_truePoses(motomanID, truePoses, clientId):\n\ttruePosesIdx = []\n\t### loop through truePoses\n\tfor tp in truePoses:\n\t\tcontacts = p.getContactPoints(motomanID, tp.m, physicsClientId=clientId)\n\t\tif len(contacts) != 0:\n\t\t\t### add the index of that true pose\n\t\t\ttruePosesIdx.append(tp.objIdx)\n\treturn set(truePosesIdx)\n\ndef trueScene_generation(benchmarkType, scene, Objects, clientId):\n\ttruePoses = []\n\tnObjectInExecuting = 0\n\tfor i in range(len(Objects)):\n\t\tif Objects[i][3] != \"phantom\":\n\t\t\ttruePoses += createTrueMesh(-1, Objects[i][0], Objects[i][1], Objects[i][2], Objects[i][3], \n\t\t\t\t\t\t\t\t\t\t\t\tObjects[i][4], Objects[i][5], Objects[i][6], Objects[i][7], clientId)\n\t\t\tnObjectInExecuting += 1\n\tprint(\"Number of Objects in the ground truth (execution): \" + str(nObjectInExecuting))\n\tprint(\"-------true poses: \" + str(benchmarkType) + \", \" + str(scene) + \"-------\")\n\tprintPoses(truePoses)\n\treturn truePoses, nObjectInExecuting\n\ndef planScene_generation(Objects, benchmarkType, known_geometries, transErrors, orientErrors, clientId):\n\thypotheses = []\n\tmostPromisingHypoIdxes = []\n\tcurrentlabelIdx = 0\n\tnObjectInPlanning = 0\n\tfor i in range(len(Objects)):\n\t\tif Objects[i][3] == \"invisible\":\n\t\t\tcontinue\n\t\tmm, pp = createHypoMesh(currentlabelIdx, Objects[i][0], Objects[i][1], Objects[i][2], Objects[i][3], Objects[i][4], \n\t\t\tObjects[i][5], Objects[i][6], 
transErrors, orientErrors, known_geometries, benchmarkType, Objects[i][8], clientId, Objects[i][9])\n\t\thypotheses += mm\n\t\tmostPromisingHypoIdxes.append(pp)\n\t\tnObjectInPlanning += 1\n\t\tcurrentlabelIdx = len(hypotheses)\n\tprint(\"Number of Objects in the planning scene: \" + str(nObjectInPlanning))\n\tprint(\"-------all hypotheses: \" + str(benchmarkType) + \"-------\")\n\tif clientId == 0:\n\t\tprintPoses(hypotheses)\n\treturn hypotheses, mostPromisingHypoIdxes, nObjectInPlanning\n\ndef calculateEuclidean(previous_state, current_state):\n\ttempSquare = 0.0\n\tfor ii in range(len(previous_state)):\n\t\ttempSquare += math.pow(previous_state[ii]-current_state[ii] ,2)\n\ttempSquare = math.sqrt(tempSquare)\n\treturn tempSquare\n\ndef local_move(n1, n2, motomanID, truePoses, clientId):\n\tlocal_poseIdx = set()\n\t# step = 5 * math.pi / 180\n\tstep = 3.0 * math.pi / 180\n\tnseg = int(math.ceil(max(math.fabs(n1[0]-n2[0]), \n\t\tmath.fabs(n1[1]-n2[1]), math.fabs(n1[2]-n2[2]), \n\t\tmath.fabs(n1[3]-n2[3]), math.fabs(n1[4]-n2[4]), \n\t\tmath.fabs(n1[5]-n2[5]), math.fabs(n1[6]-n2[6]))) / step)\n\tif nseg == 0:\n\t\tnseg = 1\n\tfor i in range(1, nseg):\n\t\tinterm_j0 = n1[0] + (n2[0]-n1[0]) / nseg * i\n\t\tinterm_j1 = n1[1] + (n2[1]-n1[1]) / nseg * i\n\t\tinterm_j2 = n1[2] + (n2[2]-n1[2]) / nseg * i\n\t\tinterm_j3 = n1[3] + (n2[3]-n1[3]) / nseg * i\n\t\tinterm_j4 = n1[4] + (n2[4]-n1[4]) / nseg * i\n\t\tinterm_j5 = n1[5] + (n2[5]-n1[5]) / nseg * i\n\t\tinterm_j6 = n1[6] + (n2[6]-n1[6]) / nseg * i\n\t\tintermNode = [interm_j0, interm_j1, interm_j2, interm_j3, interm_j4, interm_j5, interm_j6]\n\t\tfor j in range(1, 8):\n\t\t\tresult = p.resetJointState(motomanID, j, intermNode[j-1], physicsClientId=clientId)\n\t\tfor j in range(11, 18):\n\t\t\tresult = p.resetJointState(motomanID, j, 0.0, physicsClientId=clientId)\n\t\tp.stepSimulation(clientId)\n\t\t## add collision checker\n\t\ttemp_poseIdx = collisionCheck_truePoses(motomanID, truePoses, clientId)\n\t\tlocal_poseIdx = 
local_poseIdx.union(temp_poseIdx)\n\n\t\ttime.sleep(0.05)\n\treturn local_poseIdx\n\ndef executeTrajectory(traj_file, motomanID, truePoses, clientId):\n\ttraj = []\n\tf = open(traj_file)\n\tprevious_state = None\n\t### Meanwhile compute the cost as well\n\ttrajectoryCost = 0.0\n\ttrajectoryCollision = set()\n\tisSuccess = 1\n\n\tfor line in f:\n\t\tcurrent_state = line.split()\n\t\tcurrent_state = list(map(float, current_state))\n\t\tif (previous_state is not None):\n\t\t\ttrajectoryCost += calculateEuclidean(previous_state, current_state)\n\t\t\ttrajectoryCollision = trajectoryCollision.union(\n\t\t\t\t\t\t\t\t\t\t\tlocal_move(previous_state, current_state, motomanID, truePoses, clientId))\n\t\t\ttime.sleep(0.04)\n\t\tprevious_state = current_state\n\tprint(\"collisions: \" + str(trajectoryCollision) + \", total: \" + str(len(trajectoryCollision)))\n\n\t### now work on success evaluation\n\tif len(trajectoryCollision) != 0:\n\t\tisSuccess = 0\n\n\treturn len(trajectoryCollision), isSuccess, trajectoryCost\t\n\ndef executeAllTraj_example(home_configuration, motomanID, truePoses, path, clientId):\n\tstatistics_file = path + \"/statistics.txt\"\n\t### write in (#obs, success, cost) information for each search method\n\tf = open(statistics_file, \"w\")\n\n\tinput(\"Press to put Motoman to home configuration\")\n\t### Put the motoman back to its home configuration\n\tfor j in range(1, 8):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-1], physicsClientId=clientId)\n\tfor j in range(11, 18):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-4], physicsClientId=clientId)\n\tinput(\"Press to execute A star trajectory\")\n\tastar_traj_file = path + \"/Astartraj.txt\"\n\ttemp_ncollision, temp_isSuccess, temp_cost = executeTrajectory(astar_traj_file, motomanID, truePoses, clientId)\n\tf.write( str(temp_ncollision) + \" \" + str(temp_isSuccess) + \" \" + str(temp_cost) + \"\\n\" )\n\n\tinput(\"Press to put Motoman to home 
configuration\")\n\t### Put the motoman back to its home configuration\n\tfor j in range(1, 8):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-1], physicsClientId=clientId)\n\tfor j in range(11, 18):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-4], physicsClientId=clientId)\n\tinput(\"Press to execute MCR Greedy trajectory\")\n\tmcrg_traj_file = path + \"/MCRGtraj.txt\"\n\ttemp_ncollision, temp_isSuccess, temp_cost = executeTrajectory(mcrg_traj_file, motomanID, truePoses, clientId)\n\tf.write( str(temp_ncollision) + \" \" + str(temp_isSuccess) + \" \" + str(temp_cost) + \"\\n\" )\n\n\tinput(\"Press to put Motoman to home configuration\")\n\t### Put the motoman back to its home configuration\n\tfor j in range(1, 8):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-1], physicsClientId=clientId)\n\tfor j in range(11, 18):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-4], physicsClientId=clientId)\n\tinput(\"Press to execute MCR exact trajectory\")\n\tmcre_traj_file = path + \"/MCREtraj.txt\"\n\ttemp_ncollision, temp_isSuccess, temp_cost = executeTrajectory(mcre_traj_file, motomanID, truePoses, clientId)\n\tf.write( str(temp_ncollision) + \" \" + str(temp_isSuccess) + \" \" + str(temp_cost) + \"\\n\" )\n\n\tinput(\"Press to put Motoman to home configuration\")\n\t### Put the motoman back to its home configuration\n\tfor j in range(1, 8):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-1], physicsClientId=clientId)\n\tfor j in range(11, 18):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-4], physicsClientId=clientId)\n\tinput(\"Press to execute MaxSuccess greedy trajectory\")\n\tmsg_traj_file = path + \"/MSGtraj.txt\"\n\ttemp_ncollision, temp_isSuccess, temp_cost = executeTrajectory(msg_traj_file, motomanID, truePoses, clientId)\n\tf.write( str(temp_ncollision) + \" \" + str(temp_isSuccess) + \" \" + str(temp_cost) + \"\\n\" )\n\n\tinput(\"Press to put 
Motoman to home configuration\")\n\t### Put the motoman back to its home configuration\n\tfor j in range(1, 8):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-1], physicsClientId=clientId)\n\tfor j in range(11, 18):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-4], physicsClientId=clientId)\n\tinput(\"Press to execute MaxSuccess exact trajectory\")\n\tmse_traj_file = path + \"/MSEtraj.txt\"\n\ttemp_ncollision, temp_isSuccess, temp_cost = executeTrajectory(mse_traj_file, motomanID, truePoses, clientId)\n\tf.write( str(temp_ncollision) + \" \" + str(temp_isSuccess) + \" \" + str(temp_cost) + \"\\n\" )\t\n\n\tinput(\"Press to put Motoman to home configuration\")\n\t### Put the motoman back to its home configuration\n\tfor j in range(1, 8):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-1], physicsClientId=clientId)\n\tfor j in range(11, 18):\n\t\tresult = p.resetJointState(motomanID, j, home_configuration[j-4], physicsClientId=clientId)\n\tinput(\"Press to execute MCR-MLC trajectory\")\n\tmcrmcg_traj_file = path + \"/MCRMCGtraj.txt\"\n\ttemp_ncollision, temp_isSuccess, temp_cost = executeTrajectory(mcrmcg_traj_file, motomanID, truePoses, clientId)\n\tf.write( str(temp_ncollision) + \" \" + str(temp_isSuccess) + \" \" + str(temp_cost) + \"\\n\" )\n\n\tf.close()\n\tprint(\"trajectories all executed and time record.\")","sub_path":"utils_motoman.py","file_name":"utils_motoman.py","file_ext":"py","file_size_in_byte":18106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"536503621","text":"import logging\nimport numpy as np\nimport os\nimport pickle\nimport typing\nfrom typing import Any, Dict, List, Optional, Text, Tuple\nimport warnings\n\nfrom rasa.nlu.classifiers import LABEL_RANKING_LENGTH\nfrom rasa.nlu.components import Component\nfrom rasa.utils import train_utils\nfrom rasa.nlu.constants import (\n MESSAGE_RESPONSE_ATTRIBUTE,\n MESSAGE_INTENT_ATTRIBUTE,\n MESSAGE_TEXT_ATTRIBUTE,\n MESSAGE_TOKENS_NAMES,\n MESSAGE_ATTRIBUTES,\n MESSAGE_SPACY_FEATURES_NAMES,\n MESSAGE_VECTOR_FEATURE_NAMES,\n)\n\nimport tensorflow as tf\n\n# avoid warning println on contrib import - remove for tf 2\ntf.contrib._warning = None\n\nlogger = logging.getLogger(__name__)\n\nif typing.TYPE_CHECKING:\n from rasa.nlu.config import RasaNLUModelConfig\n from rasa.nlu.training_data import TrainingData\n from rasa.nlu.model import Metadata\n from rasa.nlu.training_data import Message\n\n\nclass EmbeddingIntentClassifier(Component):\n \"\"\"Intent classifier using supervised embeddings.\n\n The embedding intent classifier embeds user inputs\n and intent labels into the same space.\n Supervised embeddings are trained by maximizing similarity between them.\n It also provides rankings of the labels that did not \"win\".\n\n The embedding intent classifier needs to be preceded by\n a featurizer in the pipeline.\n This featurizer creates the features used for the embeddings.\n It is recommended to use ``CountVectorsFeaturizer`` that\n can be optionally preceded by ``SpacyNLP`` and ``SpacyTokenizer``.\n\n Based on the starspace idea from: https://arxiv.org/abs/1709.03856.\n However, in this implementation the `mu` parameter is treated differently\n and additional hidden layers are added together with dropout.\n \"\"\"\n\n provides = [\"intent\", \"intent_ranking\"]\n\n requires = [MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]]\n\n # default properties (DOC MARKER - don't remove)\n defaults = {\n # nn architecture\n # sizes of hidden layers 
before the embedding layer for input words\n # the number of hidden layers is thus equal to the length of this list\n \"hidden_layers_sizes_a\": [256, 128],\n # sizes of hidden layers before the embedding layer for intent labels\n # the number of hidden layers is thus equal to the length of this list\n \"hidden_layers_sizes_b\": [],\n # Whether to share the hidden layer weights between input words and labels\n \"share_hidden_layers\": False,\n # training parameters\n # initial and final batch sizes - batch size will be\n # linearly increased for each epoch\n \"batch_size\": [64, 256],\n # how to create batches\n \"batch_strategy\": \"balanced\", # string 'sequence' or 'balanced'\n # number of epochs\n \"epochs\": 300,\n # set random seed to any int to get reproducible results\n \"random_seed\": None,\n # embedding parameters\n # dimension size of embedding vectors\n \"embed_dim\": 20,\n # the type of the similarity\n \"num_neg\": 20,\n # flag if minimize only maximum similarity over incorrect actions\n \"similarity_type\": \"auto\", # string 'auto' or 'cosine' or 'inner'\n # the type of the loss function\n \"loss_type\": \"softmax\", # string 'softmax' or 'margin'\n # how similar the algorithm should try\n # to make embedding vectors for correct labels\n \"mu_pos\": 0.8, # should be 0.0 < ... < 1.0 for 'cosine'\n # maximum negative similarity for incorrect labels\n \"mu_neg\": -0.4, # should be -1.0 < ... 
< 1.0 for 'cosine'\n # flag: if true, only minimize the maximum similarity for incorrect labels\n \"use_max_sim_neg\": True,\n # scale loss inverse proportionally to confidence of correct prediction\n \"scale_loss\": True,\n # regularization parameters\n # the scale of L2 regularization\n \"C2\": 0.002,\n # the scale of how critical the algorithm should be of minimizing the\n # maximum similarity between embeddings of different labels\n \"C_emb\": 0.8,\n # dropout rate for rnn\n \"droprate\": 0.2,\n # visualization of accuracy\n # how often to calculate training accuracy\n \"evaluate_every_num_epochs\": 20, # small values may hurt performance\n # how many examples to use for calculation of training accuracy\n \"evaluate_on_num_examples\": 0, # large values may hurt performance\n }\n # end default properties (DOC MARKER - don't remove)\n\n def __init__(\n self,\n component_config: Optional[Dict[Text, Any]] = None,\n inverted_label_dict: Optional[Dict[int, Text]] = None,\n session: Optional[\"tf.Session\"] = None,\n graph: Optional[\"tf.Graph\"] = None,\n message_placeholder: Optional[\"tf.Tensor\"] = None,\n label_placeholder: Optional[\"tf.Tensor\"] = None,\n similarity_all: Optional[\"tf.Tensor\"] = None,\n pred_confidence: Optional[\"tf.Tensor\"] = None,\n similarity: Optional[\"tf.Tensor\"] = None,\n message_embed: Optional[\"tf.Tensor\"] = None,\n label_embed: Optional[\"tf.Tensor\"] = None,\n all_labels_embed: Optional[\"tf.Tensor\"] = None,\n ) -> None:\n \"\"\"Declare instant variables with default values\"\"\"\n\n super(EmbeddingIntentClassifier, self).__init__(component_config)\n\n self._load_params()\n\n # transform numbers to labels\n self.inverted_label_dict = inverted_label_dict\n # encode all label_ids with numbers\n self._encoded_all_label_ids = None\n\n # tf related instances\n self.session = session\n self.graph = graph\n self.a_in = message_placeholder\n self.b_in = label_placeholder\n self.sim_all = similarity_all\n self.pred_confidence = 
pred_confidence\n self.sim = similarity\n\n # persisted embeddings\n self.message_embed = message_embed\n self.label_embed = label_embed\n self.all_labels_embed = all_labels_embed\n\n # internal tf instances\n self._iterator = None\n self._train_op = None\n self._is_training = None\n\n # config migration warning\n def _check_old_config_variables(self, config: Dict[Text, Any]) -> None:\n\n removed_tokenization_params = [\n \"intent_tokenization_flag\",\n \"intent_split_symbol\",\n ]\n for removed_param in removed_tokenization_params:\n if removed_param in config:\n warnings.warn(\n \"Intent tokenization has been moved to Tokenizer components. \"\n \"Your config still mentions '{}'. Tokenization may fail if you specify the parameter here.\"\n \"Please specify the parameter 'intent_tokenization_flag' and 'intent_split_symbol' in the \"\n \"tokenizer of your NLU pipeline\".format(removed_param)\n )\n\n # init helpers\n def _load_nn_architecture_params(self, config: Dict[Text, Any]) -> None:\n self.hidden_layer_sizes = {\n \"a\": config[\"hidden_layers_sizes_a\"],\n \"b\": config[\"hidden_layers_sizes_b\"],\n }\n self.share_hidden_layers = config[\"share_hidden_layers\"]\n if (\n self.share_hidden_layers\n and self.hidden_layer_sizes[\"a\"] != self.hidden_layer_sizes[\"b\"]\n ):\n raise ValueError(\n \"If hidden layer weights are shared,\"\n \"hidden_layer_sizes for a and b must coincide\"\n )\n\n self.batch_size = config[\"batch_size\"]\n self.batch_strategy = config[\"batch_strategy\"]\n\n self.epochs = config[\"epochs\"]\n\n self.random_seed = self.component_config[\"random_seed\"]\n\n def _load_embedding_params(self, config: Dict[Text, Any]) -> None:\n self.embed_dim = config[\"embed_dim\"]\n self.num_neg = config[\"num_neg\"]\n\n self.similarity_type = config[\"similarity_type\"]\n self.loss_type = config[\"loss_type\"]\n if self.similarity_type == \"auto\":\n if self.loss_type == \"softmax\":\n self.similarity_type = \"inner\"\n elif self.loss_type == 
\"margin\":\n self.similarity_type = \"cosine\"\n\n self.mu_pos = config[\"mu_pos\"]\n self.mu_neg = config[\"mu_neg\"]\n self.use_max_sim_neg = config[\"use_max_sim_neg\"]\n\n self.scale_loss = config[\"scale_loss\"]\n\n def _load_regularization_params(self, config: Dict[Text, Any]) -> None:\n self.C2 = config[\"C2\"]\n self.C_emb = config[\"C_emb\"]\n self.droprate = config[\"droprate\"]\n\n def _load_visual_params(self, config: Dict[Text, Any]) -> None:\n self.evaluate_every_num_epochs = config[\"evaluate_every_num_epochs\"]\n if self.evaluate_every_num_epochs < 1:\n self.evaluate_every_num_epochs = self.epochs\n self.evaluate_on_num_examples = config[\"evaluate_on_num_examples\"]\n\n def _load_params(self) -> None:\n\n self._check_old_config_variables(self.component_config)\n self._tf_config = train_utils.load_tf_config(self.component_config)\n self._load_nn_architecture_params(self.component_config)\n self._load_embedding_params(self.component_config)\n self._load_regularization_params(self.component_config)\n self._load_visual_params(self.component_config)\n\n # package safety checks\n @classmethod\n def required_packages(cls) -> List[Text]:\n return [\"tensorflow\"]\n\n # training data helpers:\n @staticmethod\n def _create_label_id_dict(\n training_data: \"TrainingData\", attribute: Text\n ) -> Dict[Text, int]:\n \"\"\"Create label_id dictionary\"\"\"\n\n distinct_label_ids = set(\n [example.get(attribute) for example in training_data.intent_examples]\n ) - {None}\n return {\n label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids))\n }\n\n @staticmethod\n def _find_example_for_label(label, examples, attribute):\n for ex in examples:\n if ex.get(attribute) == label:\n return ex\n return None\n\n @staticmethod\n def _check_labels_features_exist(\n labels_example: List[Tuple[int, \"Message\"]], attribute_feature_name: Text\n ) -> bool:\n \"\"\"Check if all labels have features set\"\"\"\n for (label_idx, label_example) in labels_example:\n if 
label_example.get(attribute_feature_name) is None:\n return False\n return True\n\n @staticmethod\n def _extract_labels_precomputed_features(\n label_examples: List[Tuple[int, \"Message\"]], attribute_feature_name: Text\n ) -> np.ndarray:\n\n # Collect precomputed encodings\n encoded_id_labels = [\n (label_idx, label_example.get(attribute_feature_name))\n for (label_idx, label_example) in label_examples\n ]\n\n # Sort the list of tuples based on label_idx\n encoded_id_labels = sorted(encoded_id_labels, key=lambda x: x[0])\n\n encoded_all_labels = [encoding for (index, encoding) in encoded_id_labels]\n\n return np.array(encoded_all_labels)\n\n def _compute_default_label_features(\n self, labels_example: List[Tuple[int, \"Message\"]]\n ) -> np.ndarray:\n \"\"\"Compute one-hot representation for the labels\"\"\"\n\n return np.eye(len(labels_example))\n\n def _create_encoded_label_ids(\n self,\n training_data: \"TrainingData\",\n label_id_dict: Dict[Text, int],\n attribute: Text,\n attribute_feature_name: Text,\n ) -> np.ndarray:\n \"\"\"Create matrix with label_ids encoded in rows as bag of words. 
If the features are already computed, fetch\n them from the message object else compute a one hot encoding for the label as the feature vector\n Find a training example for each label and get the encoded features from the corresponding Message object\"\"\"\n\n labels_example = []\n\n # Collect one example for each label\n for label_name, idx in label_id_dict.items():\n label_example = self._find_example_for_label(\n label_name, training_data.intent_examples, attribute\n )\n labels_example.append((idx, label_example))\n\n # Collect features, precomputed if they exist, else compute on the fly\n if self._check_labels_features_exist(labels_example, attribute_feature_name):\n encoded_id_labels = self._extract_labels_precomputed_features(\n labels_example, attribute_feature_name\n )\n else:\n encoded_id_labels = self._compute_default_label_features(labels_example)\n\n return encoded_id_labels\n\n # noinspection PyPep8Naming\n def _create_session_data(\n self,\n training_data: \"TrainingData\",\n label_id_dict: Dict[Text, int],\n attribute: Text,\n ) -> \"train_utils.SessionData\":\n \"\"\"Prepare data for training and create a SessionData object\"\"\"\n\n X = []\n label_ids = []\n Y = []\n\n for e in training_data.intent_examples:\n if e.get(attribute):\n X.append(e.get(MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]))\n label_ids.append(label_id_dict[e.get(attribute)])\n\n X = np.array(X)\n label_ids = np.array(label_ids)\n\n for label_id_idx in label_ids:\n Y.append(self._encoded_all_label_ids[label_id_idx])\n\n Y = np.array(Y)\n\n return train_utils.SessionData(X=X, Y=Y, label_ids=label_ids)\n\n # tf helpers:\n def _create_tf_embed_fnn(\n self,\n x_in: \"tf.Tensor\",\n layer_sizes: List[int],\n fnn_name: Text,\n embed_name: Text,\n ) -> \"tf.Tensor\":\n \"\"\"Create nn with hidden layers and name\"\"\"\n\n x = train_utils.create_tf_fnn(\n x_in,\n layer_sizes,\n self.droprate,\n self.C2,\n self._is_training,\n layer_name_suffix=fnn_name,\n )\n return 
train_utils.create_tf_embed(\n x,\n self.embed_dim,\n self.C2,\n self.similarity_type,\n layer_name_suffix=embed_name,\n )\n\n def _build_tf_train_graph(self) -> Tuple[\"tf.Tensor\", \"tf.Tensor\"]:\n self.a_in, self.b_in = self._iterator.get_next()\n\n all_label_ids = tf.constant(\n self._encoded_all_label_ids, dtype=tf.float32, name=\"all_label_ids\"\n )\n\n self.message_embed = self._create_tf_embed_fnn(\n self.a_in,\n self.hidden_layer_sizes[\"a\"],\n fnn_name=\"a_b\" if self.share_hidden_layers else \"a\",\n embed_name=\"a\",\n )\n\n self.label_embed = self._create_tf_embed_fnn(\n self.b_in,\n self.hidden_layer_sizes[\"b\"],\n fnn_name=\"a_b\" if self.share_hidden_layers else \"b\",\n embed_name=\"b\",\n )\n self.all_labels_embed = self._create_tf_embed_fnn(\n all_label_ids,\n self.hidden_layer_sizes[\"b\"],\n fnn_name=\"a_b\" if self.share_hidden_layers else \"b\",\n embed_name=\"b\",\n )\n\n return train_utils.calculate_loss_acc(\n self.message_embed,\n self.label_embed,\n self.b_in,\n self.all_labels_embed,\n all_label_ids,\n self.num_neg,\n None,\n self.loss_type,\n self.mu_pos,\n self.mu_neg,\n self.use_max_sim_neg,\n self.C_emb,\n self.scale_loss,\n )\n\n def _build_tf_pred_graph(\n self, session_data: \"train_utils.SessionData\"\n ) -> \"tf.Tensor\":\n self.a_in = tf.placeholder(\n tf.float32, (None, session_data.X.shape[-1]), name=\"a\"\n )\n self.b_in = tf.placeholder(\n tf.float32, (None, None, session_data.Y.shape[-1]), name=\"b\"\n )\n\n self.message_embed = self._create_tf_embed_fnn(\n self.a_in,\n self.hidden_layer_sizes[\"a\"],\n fnn_name=\"a_b\" if self.share_hidden_layers else \"a\",\n embed_name=\"a\",\n )\n\n self.sim_all = train_utils.tf_raw_sim(\n self.message_embed[:, tf.newaxis, :],\n self.all_labels_embed[tf.newaxis, :, :],\n None,\n )\n\n self.label_embed = self._create_tf_embed_fnn(\n self.b_in,\n self.hidden_layer_sizes[\"b\"],\n fnn_name=\"a_b\" if self.share_hidden_layers else \"b\",\n embed_name=\"b\",\n )\n\n self.sim = 
train_utils.tf_raw_sim(\n self.message_embed[:, tf.newaxis, :], self.label_embed, None\n )\n\n return train_utils.confidence_from_sim(self.sim_all, self.similarity_type)\n\n def check_input_dimension_consistency(self, session_data):\n\n if self.share_hidden_layers:\n if session_data.X[0].shape[-1] != session_data.Y[0].shape[-1]:\n raise ValueError(\n \"If embeddings are shared \"\n \"text features and label features \"\n \"must coincide. Check the output dimensions of previous components.\"\n )\n\n def preprocess_train_data(self, training_data):\n \"\"\"Performs sanity checks on training data, extracts encodings for labels and prepares data for training\"\"\"\n\n label_id_dict = self._create_label_id_dict(\n training_data, attribute=MESSAGE_INTENT_ATTRIBUTE\n )\n\n self.inverted_label_dict = {v: k for k, v in label_id_dict.items()}\n self._encoded_all_label_ids = self._create_encoded_label_ids(\n training_data,\n label_id_dict,\n attribute=MESSAGE_INTENT_ATTRIBUTE,\n attribute_feature_name=MESSAGE_VECTOR_FEATURE_NAMES[\n MESSAGE_INTENT_ATTRIBUTE\n ],\n )\n\n # check if number of negatives is less than number of label_ids\n logger.debug(\n \"Check if num_neg {} is smaller than \"\n \"number of label_ids {}, \"\n \"else set num_neg to the number of label_ids - 1\"\n \"\".format(self.num_neg, self._encoded_all_label_ids.shape[0])\n )\n # noinspection PyAttributeOutsideInit\n self.num_neg = min(self.num_neg, self._encoded_all_label_ids.shape[0] - 1)\n\n session_data = self._create_session_data(\n training_data, label_id_dict, attribute=MESSAGE_INTENT_ATTRIBUTE\n )\n\n self.check_input_dimension_consistency(session_data)\n\n return session_data\n\n def _check_enough_labels(self, session_data) -> bool:\n\n return len(np.unique(session_data.label_ids)) >= 2\n\n def train(\n self,\n training_data: \"TrainingData\",\n cfg: Optional[\"RasaNLUModelConfig\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"Train the embedding label classifier on a data set.\"\"\"\n\n 
logger.debug(\"Started training embedding classifier.\")\n\n # set numpy random seed\n np.random.seed(self.random_seed)\n\n session_data = self.preprocess_train_data(training_data)\n\n possible_to_train = self._check_enough_labels(session_data)\n\n if not possible_to_train:\n logger.error(\n \"Can not train a classifier. \"\n \"Need at least 2 different classes. \"\n \"Skipping training of classifier.\"\n )\n return\n\n if self.evaluate_on_num_examples:\n session_data, eval_session_data = train_utils.train_val_split(\n session_data, self.evaluate_on_num_examples, self.random_seed\n )\n else:\n eval_session_data = None\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n # set random seed\n tf.set_random_seed(self.random_seed)\n\n # allows increasing batch size\n batch_size_in = tf.placeholder(tf.int64)\n\n (\n self._iterator,\n train_init_op,\n eval_init_op,\n ) = train_utils.create_iterator_init_datasets(\n session_data, eval_session_data, batch_size_in, self.batch_strategy\n )\n\n self._is_training = tf.placeholder_with_default(False, shape=())\n\n loss, acc = self._build_tf_train_graph()\n\n # define which optimizer to use\n self._train_op = tf.train.AdamOptimizer().minimize(loss)\n\n # train tensorflow graph\n self.session = tf.Session(config=self._tf_config)\n train_utils.train_tf_dataset(\n train_init_op,\n eval_init_op,\n batch_size_in,\n loss,\n acc,\n self._train_op,\n self.session,\n self._is_training,\n self.epochs,\n self.batch_size,\n self.evaluate_on_num_examples,\n self.evaluate_every_num_epochs,\n )\n\n # rebuild the graph for prediction\n self.pred_confidence = self._build_tf_pred_graph(session_data)\n\n # process helpers\n # noinspection PyPep8Naming\n def _calculate_message_sim(self, X: np.ndarray) -> Tuple[np.ndarray, List[float]]:\n \"\"\"Calculate message similarities\"\"\"\n\n message_sim = self.session.run(self.pred_confidence, feed_dict={self.a_in: X})\n\n message_sim = message_sim.flatten() # sim is a matrix\n\n label_ids = 
message_sim.argsort()[::-1]\n message_sim[::-1].sort()\n\n # transform sim to python list for JSON serializing\n return label_ids, message_sim.tolist()\n\n def predict_label(self, message):\n\n label = {\"name\": None, \"confidence\": 0.0}\n label_ranking = []\n if self.session is None:\n logger.error(\n \"There is no trained tf.session: \"\n \"component is either not trained or \"\n \"didn't receive enough training data\"\n )\n\n else:\n # get features (bag of words) for a message\n # noinspection PyPep8Naming\n X = message.get(\n MESSAGE_VECTOR_FEATURE_NAMES[MESSAGE_TEXT_ATTRIBUTE]\n ).reshape(1, -1)\n\n # load tf graph and session\n label_ids, message_sim = self._calculate_message_sim(X)\n\n # if X contains all zeros do not predict some label\n if X.any() and label_ids.size > 0:\n label = {\n \"name\": self.inverted_label_dict[label_ids[0]],\n \"confidence\": message_sim[0],\n }\n\n ranking = list(zip(list(label_ids), message_sim))\n ranking = ranking[:LABEL_RANKING_LENGTH]\n label_ranking = [\n {\"name\": self.inverted_label_dict[label_idx], \"confidence\": score}\n for label_idx, score in ranking\n ]\n return label, label_ranking\n\n def process(self, message: \"Message\", **kwargs: Any) -> None:\n \"\"\"Return the most likely label and its similarity to the input.\"\"\"\n\n label, label_ranking = self.predict_label(message)\n\n message.set(\"intent\", label, add_to_output=True)\n message.set(\"intent_ranking\", label_ranking, add_to_output=True)\n\n def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]:\n \"\"\"Persist this model into the passed directory.\n\n Return the metadata necessary to load the model again.\n \"\"\"\n\n if self.session is None:\n return {\"file\": None}\n\n checkpoint = os.path.join(model_dir, file_name + \".ckpt\")\n\n try:\n os.makedirs(os.path.dirname(checkpoint))\n except OSError as e:\n # be happy if someone already created the path\n import errno\n\n if e.errno != errno.EEXIST:\n raise\n with 
self.graph.as_default():\n train_utils.persist_tensor(\"message_placeholder\", self.a_in, self.graph)\n train_utils.persist_tensor(\"label_placeholder\", self.b_in, self.graph)\n\n train_utils.persist_tensor(\"similarity_all\", self.sim_all, self.graph)\n train_utils.persist_tensor(\n \"pred_confidence\", self.pred_confidence, self.graph\n )\n train_utils.persist_tensor(\"similarity\", self.sim, self.graph)\n\n train_utils.persist_tensor(\"message_embed\", self.message_embed, self.graph)\n train_utils.persist_tensor(\"label_embed\", self.label_embed, self.graph)\n train_utils.persist_tensor(\n \"all_labels_embed\", self.all_labels_embed, self.graph\n )\n\n saver = tf.train.Saver()\n saver.save(self.session, checkpoint)\n\n with open(\n os.path.join(model_dir, file_name + \".inv_label_dict.pkl\"), \"wb\"\n ) as f:\n pickle.dump(self.inverted_label_dict, f)\n\n with open(os.path.join(model_dir, file_name + \".tf_config.pkl\"), \"wb\") as f:\n pickle.dump(self._tf_config, f)\n\n return {\"file\": file_name}\n\n @classmethod\n def load(\n cls,\n meta: Dict[Text, Any],\n model_dir: Text = None,\n model_metadata: \"Metadata\" = None,\n cached_component: Optional[\"EmbeddingIntentClassifier\"] = None,\n **kwargs: Any\n ) -> \"EmbeddingIntentClassifier\":\n\n if model_dir and meta.get(\"file\"):\n file_name = meta.get(\"file\")\n checkpoint = os.path.join(model_dir, file_name + \".ckpt\")\n\n with open(os.path.join(model_dir, file_name + \".tf_config.pkl\"), \"rb\") as f:\n _tf_config = pickle.load(f)\n\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session(config=_tf_config)\n saver = tf.train.import_meta_graph(checkpoint + \".meta\")\n\n saver.restore(session, checkpoint)\n\n a_in = train_utils.load_tensor(\"message_placeholder\")\n b_in = train_utils.load_tensor(\"label_placeholder\")\n\n sim_all = train_utils.load_tensor(\"similarity_all\")\n pred_confidence = train_utils.load_tensor(\"pred_confidence\")\n sim = 
train_utils.load_tensor(\"similarity\")\n\n message_embed = train_utils.load_tensor(\"message_embed\")\n label_embed = train_utils.load_tensor(\"label_embed\")\n all_labels_embed = train_utils.load_tensor(\"all_labels_embed\")\n\n with open(\n os.path.join(model_dir, file_name + \".inv_label_dict.pkl\"), \"rb\"\n ) as f:\n inv_label_dict = pickle.load(f)\n\n return cls(\n component_config=meta,\n inverted_label_dict=inv_label_dict,\n session=session,\n graph=graph,\n message_placeholder=a_in,\n label_placeholder=b_in,\n similarity_all=sim_all,\n pred_confidence=pred_confidence,\n similarity=sim,\n message_embed=message_embed,\n label_embed=label_embed,\n all_labels_embed=all_labels_embed,\n )\n\n else:\n logger.warning(\n \"Failed to load nlu model. Maybe path {} \"\n \"doesn't exist\"\n \"\".format(os.path.abspath(model_dir))\n )\n return cls(component_config=meta)\n","sub_path":"rasa/nlu/classifiers/embedding_intent_classifier.py","file_name":"embedding_intent_classifier.py","file_ext":"py","file_size_in_byte":27321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"493441156","text":"from __future__ import unicode_literals\n\nimport json\nfrom hashlib import md5\n\nimport requests\n\nfrom scrapi import settings\nfrom scrapi.processing.osf.hashing import REPORT_HASH_FUNCTIONS\nfrom scrapi.processing.osf.hashing import RESOURCE_HASH_FUNCTIONS\n\n\ndef already_processed(raw_doc):\n _md5 = md5(raw_doc['doc']).hexdigest()\n\n _filter = {\n 'term': {\n 'docHash': _md5\n }\n }\n\n return _search(_filter), _md5\n\n\ndef detect_collisions(hashlist, is_resource=False):\n if is_resource:\n _filter = {\n 'and': [\n {\n 'terms': {\n 'uids': hashlist\n }\n },\n {\n 'term': {\n 'isResource': True\n }\n }\n ]\n }\n else:\n _filter = {\n 'and': [\n {\n 'missing': {\n 'field': 'isResource',\n 'existence': True,\n 'null_value': True\n }\n },\n {\n 'terms': {\n 'uids': hashlist\n }\n }\n ]\n }\n found = _search(_filter)\n\n if found:\n return found\n\n return None\n\n\ndef generate_hash_list(normalized, hashes):\n hashlist = []\n\n for hashfunc in hashes:\n hashlist.append(hashfunc(normalized))\n\n return hashlist\n\n\ndef generate_resource_hash_list(normalized):\n return generate_hash_list(normalized, RESOURCE_HASH_FUNCTIONS)\n\n\ndef generate_report_hash_list(normalized):\n return generate_hash_list(normalized, REPORT_HASH_FUNCTIONS)\n\n\ndef _search(_filter):\n query = {\n 'query': {\n 'filtered': {\n 'filter': _filter\n }\n }\n }\n\n kwargs = {\n 'auth': settings.OSF_AUTH,\n 'verify': settings.VERIFY_SSL,\n 'data': json.dumps(query),\n 'headers': {\n 'Content-Type': 'application/json'\n }\n }\n\n ret = requests.post(settings.OSF_APP_URL, **kwargs).json()\n\n if ret['count'] > 0:\n return ret['results'][0]\n\n return None\n","sub_path":"scrapi/processing/osf/collision.py","file_name":"collision.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"492005758","text":"import importlib\n\nclass SubResourceFactory():\n subresource_map = {\"acl\":\"BucketAcl\", \"cors\":\"BucketCors\", \"lifecycle\":\"BucketLifecycle\", \"logging\":\"BucketLogging\", \"policy\":\"BucketPolicy\", \"tagging\":\"BucketTagging\"}\n\n @classmethod\n def create(cls, bucket, subresources):\n subresource_list = []\n module = importlib.import_module(\"s3_sat.bucket_subresource\")\n for subresource_name, should_create in subresources.items():\n if should_create:\n subresource = getattr(module, cls.subresource_map[subresource_name])\n subresource_list.append({subresource_name:subresource(bucket).get_content()})\n\n return subresource_list\n","sub_path":"s3_sat/subresource_factory.py","file_name":"subresource_factory.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"481929879","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 15 18:40:30 2023\n\n@author: DELL\n\n\nReadMe_: Aqui solo se encontrara la funcion y los paquetes a ser importados\n\"\"\"\n#%%\nimport plotly.io as pio\npio.renderers.default = \"browser\"\nimport plotly.express as px\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nimport matplotlib.pyplot as plt\n\n\n\n\"\"\" ### CAMBIAR LA EJECUCION DEL CURSOR EN EL ARCHIVO DE CONEXION A LA BASE DE DATOS DE APPLEWOOD A GOODYEAR ### \"\"\"\nimport sys \nsys.path.append(\"/Docs_python/conexion_sql_datebase\")\nfrom conexion_sql_datebase import goodyear\ndf_applewood = pd.DataFrame(goodyear)\ndel goodyear\n\n#%%\n\n\"\"\"## FUNCION QUE RESUELVE TABLA DE DISTRIBUCION DE FRECUENCIAS ABSOLUTAS,\n RELATIVAS Y ACUMULATIVAS DE VARIABLES CUALITATIVAS ## \"\"\"\n \ndef tabfreq_cual(dataframe):\n llaves=input('Insert name to group by:')\n df_pre_group = dataframe.groupby(f'{llaves}').size().to_frame(name='count')\n df_group = df_pre_group.assign(relative_count=(lambda x: np.round(x['count'] / np.sum(df_pre_group['count']),3)),\n percent_count=(lambda x: x['relative_count'] *100))\n \n## Diagrama de barras (Matplotlib)\n\n fig1, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1)\n \n ax1.bar(x=df_group.axes[0],\n height=df_group['count']\n )\n \n ## Grafica de pastel (Matplotlib)\n \n ax2.pie(df_group['count'],\n labels=df_group.index,\n explode=(0, 0, 0.06, 0))\n ax2.legend(bbox_to_anchor=(1.8, 0.5, 0.2, 0.3),\n title=f'{llaves}')\n \n ## Poligono de frecuencias (matplotlib)\n ax3.plot(df_group.index, df_group['count'])\n \n return df_group\n\n\n","sub_path":"function_tab_freq_cual.py","file_name":"function_tab_freq_cual.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"459772655","text":"import re\n\nimport hashlib\nimport logging\nimport requests\nfrom PIL import Image\nfrom selenium import webdriver\nimport time\nfrom ocr import Ocr\nfrom selenium.webdriver.chrome.options import Options\n\n\nclass Login():\n '''拿到验证码登录'''\n\n def __init__(self, url, account, password, title, cont,logger, big_num=20):\n\n self.url = url\n self.account = account\n self.password = password\n self.big_num = big_num\n self.title = title\n self.cont = cont\n self.log = logger\n\n def automatic_login(self, driver, url, account, password):\n '''自动输入登录'''\n driver.get(self.url + '/login.asp')\n\n n = 0\n while True:\n print('第{}次请求验证码'.format(n))\n time.sleep(0.2)\n driver.save_screenshot(r\"D:\\Work_python\\bulider\\img\\full.png\")\n result = self.get_code(driver)\n\n if result:\n if not driver.find_elements_by_xpath(\"//div[@id='login']\"):\n print('已经登录')\n return ''\n print('验证码得出分析结果...正在试图提交验证码', result)\n driver.find_element_by_xpath(\"//dl/dd/input[2]\").click()\n driver.find_element_by_id(\"t0\").send_keys(account)\n print('*账号已输入\\n')\n driver.find_element_by_id(\"t1\").send_keys(password)\n print('*密码已输入\\n')\n driver.find_element_by_xpath(\"//dl/dt[3]/input\").send_keys(result)\n driver.find_element_by_xpath(\"//dl/dd/input[1]\").click()\n print('输入完一次密码,进行登录结果判断..')\n time.sleep(0.2)\n print('休息完毕')\n if n > self.big_num:\n return 'no'\n try:\n time.sleep(0.1)\n driver.find_elements_by_xpath(\"//dl/dt[3]/img\")[0].click()\n except Exception:\n print('已经成功登录')\n return\n n += 1\n\n def get_code(self, driver):\n '''获取验证码'''\n try:\n location = driver.find_elements_by_xpath(\"//dl/dt[3]/img\")[0].location\n size = driver.find_elements_by_xpath(\"//dl/dt[3]/img\")[0].size\n left = location['x']\n top = location['y']\n right = location['x'] + size['width']\n bottom = location['y'] + size['height']\n picture = Image.open(r\"D:\\Work_python\\bulider\\img\\full.png\")\n picture = picture.crop((left, top, right, bottom))\n 
picture.save(r\"D:\\Work_python\\bulider\\img\\imgcode.png\")\n except Exception:\n return\n ocr = Ocr()\n # 写images下面的imagecode的绝对路径\n result = ocr.exec(img_path=r\"D:\\Work_python\\bulider\\img\\imgcode.png\")\n try:\n if int(result.strip()):\n return result.strip()\n except Exception:\n print('验证码匹配失败')\n return None\n\n def get_number(self, driver, end_cookie, href_id):\n # 点击内容\n driver.switch_to.default_content() # 切回主文档\n driver.switch_to.frame(driver.find_elements_by_tag_name(\"iframe\")[0])\n driver.switch_to.frame(driver.find_elements_by_tag_name(\"frame\")[0])\n driver.find_element_by_xpath(\"//div//dl[@id='menu']//dt[4]\").click()\n # 新闻资讯\n driver.switch_to.default_content() # 切回主文档\n driver.switch_to.frame(driver.find_elements_by_tag_name(\"iframe\")[0])\n driver.switch_to.frame(driver.find_element_by_id(\"leftFrame\"))\n try:\n driver.find_element_by_link_text('新闻资讯').click()\n except Exception:\n driver.find_element_by_link_text('新闻中心').click()\n # 获取id\n driver.switch_to.default_content() # 切回主文档\n driver.switch_to.frame(driver.find_elements_by_tag_name(\"iframe\")[0])\n driver.switch_to.frame(driver.find_element_by_id(\"mainFrame\"))\n top_id = driver.find_element_by_xpath(\"//table[@id='table']/tbody/tr[1]/td[1]/input\").get_attribute('value')\n\n print('成功获取到id..{}'.format(top_id))\n md_code = self.md_encode()\n c_time = time.time()\n with open(r'D:\\Work_python\\bulider\\cookies\\\\' + md_code + '.py', 'w') as f:\n f.write('\"' + end_cookie + '\"' + ',' + '\"' + str(href_id) + '\"' + ',' + str(c_time)+','+ str(top_id) + str(00))\n\n return top_id\n\n def get_fa_href(self, driver):\n # 点击内容\n driver.switch_to.frame(driver.find_element_by_xpath(\"//iframe[@src='sd_frame.asp']\"))\n driver.switch_to.frame(driver.find_element_by_tag_name(\"frame\"))\n driver.find_element_by_xpath(\"//div//dl[@id='menu']//dt[4]\").click()\n # 新闻资讯\n driver.switch_to.default_content() # 切回主文档\n driver.switch_to.frame(driver.find_elements_by_tag_name(\"iframe\")[0])\n 
driver.switch_to.frame(driver.find_element_by_id(\"leftFrame\"))\n try:\n fa_href = driver.find_element_by_xpath(\"//a[@title='新闻资讯']\").get_attribute('href')\n except Exception:\n fa_href = driver.find_element_by_xpath(\"//a[@title='新闻中心']\").get_attribute('href')\n href_id = fa_href.split('=')[1]\n return href_id\n\n def md_encode(self):\n\n cont = self.url\n md_code = hashlib.md5(cont.encode(encoding='UTF-8')).hexdigest()\n return 'a' + md_code\n\n def get_cookie(self, cook):\n '''拼接cookie'''\n item = ''\n for ck in cook:\n ck['name'] += '='\n ck['name'] += ck['value']\n ck['name'] += '; '\n item += ck['name']\n\n return item\n\n def fa_bu(self, cook, href_id):\n print(href_id,'....1')\n url = self.url + '/sd_model_news.asp?act=adddb&classid={}'.format(href_id)\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36\",\n \"Cookie\": cook}\n\n data = {\n 't0': self.title,\n 't1': '',\n 't2': self.cont,\n 't4': '',\n 't5': '',\n 't6': '',\n 't7': 'xiaofan',\n 't8': '',\n 't9': 0,\n 't10': 1,\n 't11': 0,\n 't12': 0,\n 't13': 1,\n 't14': time.strftime(\"%Y / %m / %d\", time.localtime()),\n '21': time.strftime(\"%H:%M:%S\", time.localtime()),\n 't15': 0,\n 't16': 0,\n 'up': 0,\n 'frist': 0,\n 's0': '',\n 's1': '',\n 's2': '',\n }\n print('....', href_id)\n resp = requests.post(url=url, headers=headers, data=data)\n if str(resp.content.decode()) == '1':\n print('发布成功')\n else:\n print('发布失败', resp.content.decode())\n return 2\n\n def generate(self, cook, p_id):\n '''生成'''\n print('生成id :',p_id)\n url = self.url + '/html/show.asp?id={}'.format(p_id)\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36\",\n \"Cookie\": cook}\n # print('---', url)\n # print('---', cook)\n resp = requests.get(url, headers=headers)\n winner_code = resp.content.decode()\n if str(winner_code) == '11' or 
str(winner_code) == '0系统未开启静态模式':\n print('生成完毕',winner_code)\n else:\n print('生成失败--内容: ', winner_code)\n return 3\n\n def out_msg(self, driver):\n '''退出登录'''\n\n # 点击退出\n driver.switch_to.default_content() # 切回主文档\n driver.switch_to.frame(driver.find_elements_by_tag_name(\"iframe\")[0])\n driver.switch_to.frame(driver.find_element_by_id(\"topFrame\"))\n driver.find_element_by_class_name(\"out\").click()\n # 确定\n driver.switch_to.default_content() # 切回主文档\n driver.find_element_by_xpath('//button[1]').click()\n print('已退出登录')\n\n def direct_publish(self, d_code):\n # from cookies.cccc import c\n '''判断cookie是否失效和存在'''\n\n try:\n with open(r'D:\\Work_python\\bulider\\cookies\\\\' + d_code + '.py', 'r') as f:\n old_time = f.read()\n old_msg = list(eval(old_time))\n\n new_id = old_msg[3] + 10\n print('===',new_id,old_time)\n with open(r'D:\\Work_python\\bulider\\cookies\\\\' + d_code + '.py', 'w') as f:\n new_msg = re.sub(str(old_msg[3]),str(new_id) ,old_time)\n f.write(new_msg)\n print('locl-------------', d_code)\n old_time = eval(str(old_msg[2]))\n print('从cookie池中获得cookie')\n if (time.time() - old_time) > 1800:\n print('保存的cookie 已失效')\n return 0\n return old_msg\n except Exception as e:\n print(e)\n print('cookie池中没有当前网站cookie..')\n return 0\n\n def quick_to_submit(self, old_cook):\n '''从cookies带着cookie直接访问'''\n print('发布中...直接发布...')\n fa_code = self.fa_bu(old_cook[0], old_cook[1])\n print('生成中..')\n\n self.generate(old_cook[0], old_cook[3] // 10 + 1)\n return 11\n\n def run(self):\n '''启动..默认最多会执行两遍,两次后返回失败'''\n\n d_code = self.md_encode()\n old_cook = self.direct_publish(d_code)\n print('-----', old_cook)\n if old_cook == 0: # 没有这个cookie\n print('没这个cookie')\n pass\n else:\n fa_code = self.quick_to_submit(old_cook)\n if str(fa_code) == '11':\n print('任务已完成')\n return 1\n n = 2\n while n:\n try:\n\n driver = webdriver.Chrome() # 有界面\n\n # # 设置chrome浏览器��界面模式\n # chrome_options = Options()\n # chrome_options.add_argument('--headless')\n # driver = 
webdriver.Chrome(chrome_options=chrome_options)\n\n no = self.automatic_login(driver, self.url, self.account, self.password) # 获取验证码,实现登录\n if no == 'no':\n driver.quit()\n\n cook = driver.get_cookies()\n\n # 获取发布href\n print('获取发布href_id..')\n href_id = self.get_fa_href(driver)\n\n # driver\n print('重新获得cookie..拼接 Cookie..')\n end_cookie = self.get_cookie(cook) # end_cookie = 最后需要的cookie\n\n print('发布中...')\n fa_code = self.fa_bu(end_cookie, href_id)\n if fa_code == 2:\n return 2\n\n print('获取发布的id..')\n top_id = self.get_number(driver, end_cookie, href_id)\n\n print('生成中..')\n win_code = self.generate(end_cookie, top_id)\n return\n if win_code:\n return win_code\n\n driver.delete_all_cookies() # 清理缓存\n print('已清理Cookie')\n # self.out_msg(driver) # 退出登录\n\n except Exception:\n driver.quit()\n print('浏览器已关闭..重新登录')\n n -= 1\n if n == 0:\n return 0\n\n\nif __name__ == '__main__':\n # params = sys.argv[1]\n # content = json.loads(params)\n # url = content['url']\n # account = content['username']\n # password = content['password']\n # title = content['title']\n # cont = content['content']\n\n # 创建一个logger\n '---------- -------log-----------'\n logger = logging.getLogger('mylogger')\n logger.setLevel(logging.DEBUG)\n fh = logging.FileHandler('test.log') # 创建log文件\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n logger.debug('begin ..')\n '--------- --------log-----------'\n\n url = 'http://www.blgsbz.com/admin'\n account = 'sdcms'\n password = 'blgsbz'\n title = '1223'\n cont = 'ddds'\n Login(url, account, password, title, cont, logger).run()\n\n","sub_path":"log_in.py","file_name":"log_in.py","file_ext":"py","file_size_in_byte":12597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"7016954","text":"from datetime import datetime\n\nfrom django.shortcuts import render\nfrom django.views import View\n\nfrom jogo.models import Jogo, Noticia\n\nclass Home(View):\n\n def get(self, *args, **kwargs):\n data = {}\n now = datetime.now()\n jogos_anteriores = Jogo.objects.order_by('-data').filter(data__lt=now)\n proximos_jogos = Jogo.objects.order_by('data').filter(data__gte=now)\n noticias = Noticia.objects.order_by('-data_inclusao')\n string = ''\n if proximos_jogos.count() > 0:\n te = Jogo.objects.filter(adversario=proximos_jogos[0].adversario).order_by('-data')\n\n if te.count() > 1:\n if te[1].placar_real > te[1].placar_adversario:\n string = f'No Último confronto entre as duas equipes vitória do Realmatismo pelo placar de: {te[1].placar_real} x {te[1].placar_adversario}'\n elif te[1].placar_real < te[1].placar_adversario:\n string = f'No Último confronto entre as duas equipes derrota do Realmatismo pelo placar de: {te[1].placar_real} x {te[1].placar_adversario}'\n elif te[1].placar_real == te[1].placar_adversario:\n string = f'No Último confronto entre as duas equipes empate pelo placar de: {te[1].placar_real} x {te[1].placar_adversario}'\n else:\n string= 'Primeiro jogo entre as duas equipes'\n print(Jogo.objects.sequencia())\n data ={\n 'detalhes_ultimo': string,\n 'jogos_anteriores': jogos_anteriores[:1],\n 'proximos_jogos': proximos_jogos[:1],\n 'noticias': noticias[:1],\n 'performace': Jogo.objects.perfomace(),\n 'teste': Jogo.objects.perfomace2(),\n 'sequencia': Jogo.objects.sequencia()\n }\n\n return render(self.request, 'jogo/home.html', data)","sub_path":"jogo/view/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"249503518","text":"from django.shortcuts import render\n\n\n# Create your views here.\n#from mysite.forms import ContactForm,ListForm,UserForm\nfrom django.http import HttpResponse\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.contrib.auth import authenticate, login,logout\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import get_object_or_404, render,redirect\nfrom TaxApp.forms import ProductForm\nfrom TaxApp.models import Product,Code,Result,Taxation\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\n\n\n'''\n\nif request.user.is_authenticated():\n currentuser = request.user\n name=currentuser.first_name\n items = Borrower.objects.filter(borrower__icontains=name)\n'''\n\n\n\n\n#if request.user.is_authenticated():\n@login_required\ndef bill(request):\n\n if request.user.is_authenticated():\n currentuser = request.user\n identity=currentuser.first_name\n bill= Result.objects.filter(identity__icontains=identity)\n print(bill)\n return render(request, 'bill.html',\n {'bill': bill})\n\n\n\n\n\n\n\n@login_required\ndef map(request):\n return render(request,'maps.html')\n\n@login_required\ndef index(request):\n print(\"hi\")\n return render(request,'index.html')\n\n\ndef about(request):\n return render(request,'about.html')\n\ndef gst_logout(request):\n if request.user.is_authenticated():\n logout(request)\n return render(request,'logout.html') \n else:\n return HttpResponseRedirect('/login')\n\n\ndef gst_register(request):\n if request.method == 'POST':\n state = request.POST.get('state')\n username = request.POST.get('email')\n password = request.POST.get('password')\n name = request.POST.get('name')\n user = User.objects.create(\n first_name = name,\n last_name = state,\n username = username,\n )\n user.set_password(password)\n user.save()\n\n user = authenticate(username = username, password = password)\n 
login(request, user)\n return redirect('/inventory/')\n else:\n return render(request,'loginwithcss.html') \n\ndef gst_login(request):\n if request.method == 'POST':\n username = request.POST.get('email')\n password = request.POST.get('password')\n user = authenticate(username = username, password = password)\n if user :\n if user.is_active:\n login(request,user)\n return redirect('/inventory/')\n else:\n return HttpResponse('Disabled Account')\n else:\n return HttpResponse(\"Invalid Login details.Are you trying to Sign up?\")\n else:\n return render(request,'loginwithcss.html')\n\n\n\n\n\n@login_required\ndef index2(request):\n print(\"hello\")\n print(request.user.is_authenticated())\n\n \n errors = []\n if 'q' and 'states' and 'count' in request.GET:\n\n q = request.GET['q']\n states=request.GET['states']\n count=request.GET['count']\n print(states)\n if not q:\n errors.append('Enter a valid barcode')\n print(\"helllo\")\n \n else:\n \n if request.user.is_authenticated():\n currentuser = request.user\n print(currentuser)\n identity=currentuser.first_name\n email=currentuser.username\n state=currentuser.last_name\n #print (q)\n #print (email)\n text = q.split()\n scan=text[1]\n #print(scan)\n items = Product.objects.filter(email__icontains=email, product_name__icontains=scan)\n #print (items)\n #print(len(items))\n \n item=items[0]\n\n \n cp=item.cost_price\n sp=item.selling_price\n quantity=item.quantity\n hsn=item.category\n\n\n taxlist = Taxation.objects.filter(code__icontains=hsn)\n taxes=taxlist[0]\n cgst=taxes.cgst\n igst=taxes.igst\n sgst=taxes.sgst\n #print(cgst,igst,sgst)\n profit=sp-cp\n if state==states:\n value=1\n else:\n value=0\n\n tax1=(cgst*sp)/100\n\n if value:\n tax3=0\n tax2=(sgst*sp)/100\n else:\n tax2=0\n tax3=(igst*sp)/100\n\n\n\n\n\n print(\"Before result\")\n\n Result.objects.create(identity=identity,product_name=scan,quantity=count,cost_price=cp,selling_price=sp,profit=profit,hsn=hsn,tax1=tax1,tax2=tax2,tax3=tax3)\n\n return 
render(request, 'index2.html')\n \n return render(request, 'index2.html',\n {'errors': errors})\n\n\n\n\n\n@login_required\ndef inventory(request):\n if request.method == 'POST':\n form = ProductForm(request.POST ,request.FILES or None )\n if form.is_valid():\n cd = form.cleaned_data\n if request.user.is_authenticated():\n currentuser = request.user\n email=currentuser.username\n\n\n Product.objects.create(product_name=cd['product_name'],quantity=cd['quantity'],cost_price=cd['cost_price'],selling_price=cd['selling_price'],email=email,category=cd['category'])\n \t\t\n \n form = ProductForm() \n return render(request,'inventorywithcss.html',{'form': form}) \n else:\n form = ProductForm()\n return render(request, 'inventorywithcss.html', {'form': form}) \n\n","sub_path":"Tax/Tax/TaxApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"573379849","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass AlipayCloudCloudrunStaticsiteDomaincertModifyModel(object):\n\n def __init__(self):\n self._assume_token = None\n self._cert_private_key = None\n self._certificate = None\n self._domain_name = None\n self._env = None\n\n @property\n def assume_token(self):\n return self._assume_token\n\n @assume_token.setter\n def assume_token(self, value):\n self._assume_token = value\n @property\n def cert_private_key(self):\n return self._cert_private_key\n\n @cert_private_key.setter\n def cert_private_key(self, value):\n self._cert_private_key = value\n @property\n def certificate(self):\n return self._certificate\n\n @certificate.setter\n def certificate(self, value):\n self._certificate = value\n @property\n def domain_name(self):\n return self._domain_name\n\n @domain_name.setter\n def domain_name(self, value):\n self._domain_name = value\n @property\n def env(self):\n return self._env\n\n @env.setter\n def env(self, value):\n self._env = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.assume_token:\n if hasattr(self.assume_token, 'to_alipay_dict'):\n params['assume_token'] = self.assume_token.to_alipay_dict()\n else:\n params['assume_token'] = self.assume_token\n if self.cert_private_key:\n if hasattr(self.cert_private_key, 'to_alipay_dict'):\n params['cert_private_key'] = self.cert_private_key.to_alipay_dict()\n else:\n params['cert_private_key'] = self.cert_private_key\n if self.certificate:\n if hasattr(self.certificate, 'to_alipay_dict'):\n params['certificate'] = self.certificate.to_alipay_dict()\n else:\n params['certificate'] = self.certificate\n if self.domain_name:\n if hasattr(self.domain_name, 'to_alipay_dict'):\n params['domain_name'] = self.domain_name.to_alipay_dict()\n else:\n params['domain_name'] = self.domain_name\n if self.env:\n if hasattr(self.env, 'to_alipay_dict'):\n 
params['env'] = self.env.to_alipay_dict()\n else:\n params['env'] = self.env\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = AlipayCloudCloudrunStaticsiteDomaincertModifyModel()\n if 'assume_token' in d:\n o.assume_token = d['assume_token']\n if 'cert_private_key' in d:\n o.cert_private_key = d['cert_private_key']\n if 'certificate' in d:\n o.certificate = d['certificate']\n if 'domain_name' in d:\n o.domain_name = d['domain_name']\n if 'env' in d:\n o.env = d['env']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/AlipayCloudCloudrunStaticsiteDomaincertModifyModel.py","file_name":"AlipayCloudCloudrunStaticsiteDomaincertModifyModel.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"619837603","text":"import pandas as pd\n\nfrom settings import SQL_PAYMENTS\nfrom .setup_tests import conn, mocks, check\n\n\ndef test_payments():\n payments = pd.read_sql(SQL_PAYMENTS, conn)\n payments_monthly = payments.groupby(['reporting_period', 'COMPANY']).sum()\n for mock in mocks:\n if 'payments' in mock.keys():\n for field in mock['payments'].keys():\n for period, amount in mock['payments'][field].items():\n yield check, payments_monthly, mock['company'], period, amount, field\n","sub_path":"tests/test_payments.py","file_name":"test_payments.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"633044353","text":"import os\nimport shutil\nimport tfnn\n\n\nclass Summarizer(object):\n def __init__(self, network, save_path='/tmp/log', ):\n self.save_path = save_path\n folder = save_path.split('/')[-1]\n name_len = len(folder)\n if folder in os.listdir(save_path[:-name_len]):\n shutil.rmtree(save_path)\n self.merged = tfnn.merge_all_summaries()\n self.train_writer = tfnn.train.SummaryWriter(save_path + '/train', network.sess.graph)\n self.validate_writer = tfnn.train.SummaryWriter(save_path + '/validate', )\n self._network = network\n\n def record_train(self, t_xs, t_ys, global_step, *args):\n if self._network.reg == 'dropout':\n if len(args) != 1:\n raise ValueError('Do not find keep_prob value.')\n keep_prob = args[0]\n feed_dict = {self._network.data_placeholder: t_xs,\n self._network.target_placeholder: t_ys,\n self._network.keep_prob_placeholder: keep_prob}\n elif self._network.reg == 'l2':\n if len(args) != 1:\n raise ValueError('Do not find l2_lambda value.')\n l2_lambda = args[0]\n feed_dict = {self._network.data_placeholder: t_xs,\n self._network.target_placeholder: t_ys,\n self._network.l2_placeholder: l2_lambda}\n else:\n feed_dict = {self._network.data_placeholder: t_xs,\n self._network.target_placeholder: t_ys}\n train_result = self._network.sess.run(self.merged, feed_dict)\n self.train_writer.add_summary(train_result, global_step)\n\n def record_validate(self, v_xs, v_ys, global_step):\n if self._network.reg == 'dropout':\n feed_dict = {self._network.data_placeholder: v_xs,\n self._network.target_placeholder: v_ys,\n self._network.keep_prob_placeholder: 1.}\n elif self._network.reg == 'l2':\n feed_dict = {self._network.data_placeholder: v_xs,\n self._network.target_placeholder: v_ys,\n self._network.l2_placeholder: 0.}\n else:\n feed_dict = {self._network.data_placeholder: v_xs,\n self._network.target_placeholder: v_ys}\n validate_result = self._network.sess.run(self.merged, feed_dict)\n 
self.validate_writer.add_summary(validate_result, global_step)\n\n def web_visualize(self):\n os.system('tensorboard --logdir=%s' % self.save_path)\n\n\n","sub_path":"tfnn/evaluating/summarizer.py","file_name":"summarizer.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"220225591","text":"import os.path\n\nDATA_ROOT_DIR = 'C:/AppData'\nAPP_DIR = os.getcwd()\nAPP_NAME = os.path.basename(APP_DIR)\nAPP_DATA_DIR = os.path.abspath(os.path.join(DATA_ROOT_DIR, APP_NAME))\n\nSUB_DIR_LIST = [\n 'Data',\n 'Logs'\n]\n\nSUB_DIRS = {} # container of dirs, will load by code below\n\nfor i in SUB_DIR_LIST:\n sub_dir = os.path.join(APP_DATA_DIR, i)\n SUB_DIRS[i] = sub_dir\n if not os.path.exists(sub_dir):\n os.makedirs(sub_dir)\n\n\ndef get_sub_dir(key):\n return SUB_DIRS[key]","sub_path":"py-app/paths/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"318379601","text":"# Copyright 2015 OpenStack Foundation\n# Copyright 2016 VMware Inc\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport netaddr\n\nfrom tempest import config\nfrom tempest.lib.common.utils import data_utils\nfrom tempest.lib import decorators\nfrom tempest import test\n\nfrom vmware_nsx_tempest.common import constants\nfrom vmware_nsx_tempest.lib import feature_manager\n\nCONF = config.CONF\n\nLOG = constants.log.getLogger(__name__)\n\n\nclass L2GatewayScenarioTest(feature_manager.FeatureManager):\n \"\"\"\n Test l2 gateway connection operations.\n \"\"\"\n\n @classmethod\n def skip_checks(cls):\n \"\"\"\n Skip running test if we do not meet criteria to run the tests.\n \"\"\"\n super(L2GatewayScenarioTest, cls).skip_checks()\n if not test.is_extension_enabled(\"l2-gateway\", \"network\"):\n raise cls.skipException(\"l2-gateway extension not enabled.\")\n\n @classmethod\n def resource_setup(cls):\n \"\"\"\n Setting up the resources for the test.\n \"\"\"\n super(L2GatewayScenarioTest, cls).resource_setup()\n # Create subnet on the network just created.\n cls.SUBNET_1_NETWORK_CIDR = CONF.l2gw.subnet_1_cidr\n # VLAN id used in setups\n cls.VLAN_1 = CONF.l2gw.vlan_1\n cls.VLAN_2 = CONF.l2gw.vlan_2\n # IPs of predeployed vms.\n cls.VM_ON_VDS_TZ1_VLAN16_IP = CONF.l2gw.vm_on_vds_tz1_vlan16_ip\n cls.VM1_ON_SWITCH_VLAN16 = CONF.l2gw.vm_on_switch_vlan16\n cls.VM1_ON_VDS_TZ2_VLAN16_IP = 
CONF.l2gw.vm_on_vds_tz2_vlan16_ip\n cls.VM1_ON_VDS_TZ2_VLAN17_IP = CONF.l2gw.vm_on_vds_tz2_vlan17_ip\n cls.SUBNET_1_MASK = cls.SUBNET_1_NETWORK_CIDR.split(\"/\")[1]\n cls.CIDR = netaddr.IPNetwork(cls.SUBNET_1_NETWORK_CIDR)\n\n @classmethod\n def resource_cleanup(cls):\n \"\"\"\n Clean all the resources used during the test.\n \"\"\"\n super(L2GatewayScenarioTest, cls).resource_cleanup()\n\n def deploy_l2gateway_topology(self):\n router_l2gateway = self.create_topology_router(\"router_l2gateway\")\n # L2gateway network with router\n network_l2gateway = self.create_topology_network(\"network_l2gateway\")\n # cidr must be presented & in IPNetwork structure.\n self.CIDR = netaddr.IPNetwork(self.SUBNET_1_NETWORK_CIDR)\n self.create_topology_subnet(\n \"subnet1_l2gateway\", network_l2gateway, cidr=self.CIDR,\n router_id=router_l2gateway[\"id\"],\n mask_bits=int(self.SUBNET_1_MASK))\n secgroup = self.create_topology_security_group()\n secgroups = [{'name': secgroup['name']}]\n self.create_topology_instance(\n \"server1_l2gateway\", [network_l2gateway],\n security_groups=secgroups)\n self.create_topology_instance(\n \"server2_l2gateway\", [network_l2gateway],\n security_groups=secgroups)\n\n def deploy_topology_and_create_l2gateway(self, vlan_id):\n self.deploy_l2gateway_topology()\n cluster_info = self.nsx_bridge_cluster_info()\n device_name, interface_name = cluster_info[0][0], cluster_info[0][1]\n l2gw_name = data_utils.rand_name(constants.L2GW)\n device_1 = {\"dname\": device_name, \"iname\": interface_name,\n \"vlans\": [vlan_id]}\n l2gw_param = [device_1]\n l2gw_rsp, _ = self.create_l2gw(l2gw_name, l2gw_param)\n l2gwc_param = {\"l2_gateway_id\": l2gw_rsp[constants.L2GW][\"id\"],\n \"network_id\":\n self.topology_networks[\"network_l2gateway\"][\"id\"]}\n l2gwc_rsp = self.create_l2gw_connection(l2gwc_param)\n # Assert if create fails.\n self.assertEqual(constants.EXPECTED_HTTP_RESPONSE_201,\n l2gwc_rsp.response[\"status\"],\n \"Response code is not %(code)s\" % {\n 
\"code\": constants.EXPECTED_HTTP_RESPONSE_201})\n self.assertEqual(l2gwc_param[\"l2_gateway_id\"],\n l2gwc_rsp[constants.L2GWC][\"l2_gateway_id\"],\n \"l2gw id is not same as expected in \"\n \"create l2gw connection response\")\n self.assertEqual(l2gwc_param[\"network_id\"],\n l2gwc_rsp[constants.L2GWC][\"network_id\"],\n \"network id is not same as expected in \"\n \"create l2gw connection response\")\n\n @decorators.attr(type=\"nsxv3\")\n @decorators.idempotent_id(\"b62a7452-f2c1-4f2b-9403-f121f5201516\")\n def test_l2_gateway_ping_servers_on_overlays(self):\n \"\"\"\n Create l2 gateway connection using one vlan. Vlan parameter is\n passed into L2GW create.\n \"\"\"\n LOG.info(\"Testing test_l2_gateway_ping_servers_on_overlays\")\n self.deploy_topology_and_create_l2gateway(self.VLAN_1)\n server1_floatingip = self.topology_servers[\"server1_l2gateway\"][\n \"floating_ip\"]\n server1 = self.topology_servers[\"server1_l2gateway\"]\n address_list = [server1_floatingip[\"fixed_ip_address\"]]\n address_list.append(self.topology_servers[\"server2_l2gateway\"][\n \"floating_ip\"][\"fixed_ip_address\"])\n self.check_server_internal_ips_using_floating_ip(\n server1_floatingip, server1, address_list)\n\n @decorators.attr(type=\"nsxv3\")\n @decorators.idempotent_id(\"74e67d5f-0319-45e8-9731-d2c245c05beb\")\n def test_l2_gateway_ping_servers_overlay_to_vds_with_same_tz(self):\n \"\"\"\n Create l2 gateway connection using one vlan. Vlan parameter is\n passed into L2GW create. 
ping from server on OS ls to NSX ls\n \"\"\"\n LOG.info(\"Testing test_l2_gateway_ping_servers_overlay_to_nsx_ls\")\n self.deploy_topology_and_create_l2gateway(self.VLAN_1)\n server1_floatingip = self.topology_servers[\"server1_l2gateway\"][\n \"floating_ip\"]\n server1 = self.topology_servers[\"server1_l2gateway\"]\n address_list = [server1_floatingip[\"fixed_ip_address\"]]\n address_list.append(self.VM_ON_VDS_TZ1_VLAN16_IP)\n self.check_server_internal_ips_using_floating_ip(\n server1_floatingip, server1, address_list)\n\n @decorators.attr(type=\"nsxv3\")\n @decorators.idempotent_id(\"4e66584f-f61b-465d-952c-795a285d7c55\")\n def test_l2_gateway_ping_servers_overlay_to_vds_with_diff_tz(self):\n \"\"\"\n Create l2 gateway connection using one vlan. Vlan parameter is\n passed into L2GW create. ping from server on OS ls to NSX ls\n \"\"\"\n LOG.info(\"Testing test_l2_gateway_ping_servers_overlay_to_nsx_ls\")\n self.deploy_topology_and_create_l2gateway(self.VLAN_1)\n server1_floatingip = self.topology_servers[\"server1_l2gateway\"][\n \"floating_ip\"]\n server1 = self.topology_servers[\"server1_l2gateway\"]\n address_list = [server1_floatingip[\"fixed_ip_address\"]]\n address_list.append(self.VM1_ON_VDS_TZ2_VLAN16_IP)\n self.check_server_internal_ips_using_floating_ip(\n server1_floatingip, server1, address_list)\n\n @decorators.attr(type=\"nsxv3\")\n @decorators.idempotent_id(\"aef2a142-0b49-48a9-8881-f47897c09745\")\n def test_l2_gateway_ping_servers_overlay_to_physical_vlan(self):\n \"\"\"\n Create l2 gateway connection using one vlan. Vlan parameter is\n passed into L2GW create. 
ping from server on OS ls to NSX ls\n \"\"\"\n LOG.info(\"Testing test_l2_gateway_ping_servers_overlay_to_nsx_ls\")\n self.deploy_topology_and_create_l2gateway(self.VLAN_1)\n server1_floatingip = self.topology_servers[\"server1_l2gateway\"][\n \"floating_ip\"]\n server1 = self.topology_servers[\"server1_l2gateway\"]\n address_list = [server1_floatingip[\"fixed_ip_address\"]]\n address_list.append(self.VM1_ON_SWITCH_VLAN16)\n self.check_server_internal_ips_using_floating_ip(\n server1_floatingip, server1, address_list)\n\n @decorators.attr(type=\"nsxv3\")\n @decorators.idempotent_id(\"00036e1d-69e0-4faf-a62f-602600bc5631\")\n def test_l2_gateway_reconfig_ping_servers_overlay_to_vds_with_diff_tz(\n self):\n \"\"\"\n Create l2 gateway connection using one vlan. Vlan parameter is\n passed into L2GW create. ping from server on OS ls to NSX ls\n \"\"\"\n LOG.info(\n \"Testing test_l2_gateway_reconfig_ping_servers_overlay_to_vds_\"\n \"with_diff_tz\")\n self.deploy_topology_and_create_l2gateway(self.VLAN_2)\n server1_floatingip = self.topology_servers[\"server1_l2gateway\"][\n \"floating_ip\"]\n server1 = self.topology_servers[\"server1_l2gateway\"]\n address_list = [server1_floatingip[\"fixed_ip_address\"]]\n address_list.append(self.VM1_ON_VDS_TZ2_VLAN17_IP)\n self.check_server_internal_ips_using_floating_ip(\n server1_floatingip, server1, address_list)\n","sub_path":"vmware_nsx_tempest/tests/nsxv3/scenario/test_l2_gateway.py","file_name":"test_l2_gateway.py","file_ext":"py","file_size_in_byte":9368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"302101673","text":"'''\nC. Максимальний добуток\nСтепан останнім часом приділяв мало уваги програмуванню і як, результат, не здав залік. Тепер йому потрібно терміново вирішити наступну задачу:\nДано масив цілих чисел A1, A2, ..., AN, абсолютна величина елементів якого не перевищує 2. Потрібно знайти такий непорожній підвідрізок Al, Al+1, ..., Ar цього масиву (1 ≤ l ≤ r ≤ N), що добуток чисел Al * Al+1 * ... * Ar є максимально можливим.\nЗвісно, Степан просить у вас допомоги у вирішенні даної задачі.\nВхідні дані:\nВ першому рядку вхідного файлу знаходиться число N (1 ≤ N ≤ 200 000) — кiлькiсть елементів масиву. В другому рядку знаходиться N цiлих чисел Ai (-2 ≤ Ai ≤ 2) - елементи масиву. \nВихідні дані:\nЄдиний рядок вихідного файлу має містити два числа l і r - знайдені границі оптимального відрізка (1 ≤ l ≤ r ≤ N). Якщо iснує декiлька вiдповiдей, виведiть будь-яку з них.\nСистема оцінювання:\nВ даній задачі кожен тест оцінюється окремо.\n'''\n\nr = open(\"input.txt\", \"r\").read().split(\"\\n\")\nn = int(r[0])\ntemp = r[1].split(\" \")\nmass = [int(temp[i]) for i in range(n)]\nprint(mass)\nstart = 0\nend = 0\ndob = {}\nmax_dob = 1\n\nfor i in range(n):\n for j in range(i,n):\n max_dob *= mass[j]\n dob.update({max_dob:[i+1,n]})\n max_dob = 1\n\nprint(dob[max(dob)])\n \n \n\n","sub_path":"olimp/2017_oblast/1_3.py","file_name":"1_3.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"497278707","text":"# This script will map FDA upload data to items in Roger's Zotero bibliography\nfrom dotenv import load_dotenv\nload_dotenv()\n\nimport os\nfrom pyzotero import zotero\n\n\n# Get all items in the ISAW Zotero\nlibrary_id = os.getenv('LIBRARY_ID')\nlibrary_type = os.getenv('LIBRARY_TYPE')\napi_key = os.getenv('API_KEY')\n\nz = zotero.Zotero(library_id, library_type, api_key)\nvItems = z.everything(z.top(sort=\"dateModified\"))\n\nvItemTypes = []\nfor item in vItems:\n sItemType = item['data']['itemType']\n if not sItemType in vItemTypes:\n vItemTypes.append(sItemType)\n \nprint(vItemTypes)\n\n\n\n\n\n\n\n","sub_path":"get_all_itemTypes.py","file_name":"get_all_itemTypes.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"70384586","text":"# -*- coding: utf-8 -*-\n# Author: sunmengxin\n# time: 10/14/18\n# file: 二叉树的后序遍历序列.py\n# description:\n\n'''\n 给定一个序列,判断是否是二叉树搜索树的后序遍历\n'''\n\n# def judge(data):\n# if data == None or len(data) == 0:\n# return True\n# if len(data) == 1:\n# return True\n#\n# key = data[-1]\n#\n# i = 0\n# while(data[i] < key and i < len(data)-1):\n# i += 1\n#\n# left = data[:i]\n# right = data[i:len(data)-1]\n#\n# for each in left:\n# if each > key:\n# return False\n#\n# for each in right:\n# if each < key:\n# return False\n#\n# return judge(left) and judge(right)\n#\n\n\nclass Solution:\n # 这种方法不能完全判断子串也是二叉排序树的后序遍历,上面的第一种方法才能判断\n def VerifySquenceOfBST(self, sequence):\n # write code here\n key = sequence[-1]\n index = -1\n index1 = -1\n for i in range(len(sequence)):\n if sequence[i] < key:\n index = i\n elif index1 == -1 and sequence[i] >= key:\n index1 = i\n\n if index1 > index:\n return True\n else:\n return False\n\nif __name__ == '__main__':\n\n\n # data = []\n\n # data = [5,7,6,9,11,10,8]\n #\n data = [7,4,6,5]\n data = [4,8,6,12,16,14,10]\n\n so = Solution()\n\n print(so.VerifySquenceOfBST(data))\n\n","sub_path":"target_offer/二叉树+链表/二叉树的后序遍历序列.py","file_name":"二叉树的后序遍历序列.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"447476897","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.forms.formsets import BaseFormSet\nfrom django.core.exceptions import NON_FIELD_ERRORS\nfrom django.forms import BaseModelFormSet\n#from django.contrib.auth.models import User\n#import os\n\nfrom .models import Post, Comment, Document\n\nclass PostForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = ('grop_0', 'grop_1', 'grop_2', 'grop_3', 'grop_4', 'grop_5', 'title', 'text',)\n labels = {\n 'title': ('Tytuł'),\n 'Text': ('Tekst'),\n 'grop_0': ('Główna'),\n 'grop_1': ('Grupa 1'),\n 'grop_2': ('Grupa 2'),\n 'grop_3': ('Grupa 3'),\n 'grop_4': ('Grupa 4'),\n 'grop_5': ('Grupa 5'),\n }\n error_messages = {\n NON_FIELD_ERRORS: {\n 'unique_together': \"%(model_name)s's %(field_labels)s are not unique.\",\n }\n }\n\nclass PostFormSet(BaseFormSet):\n def __init__(self, *args, **kwargs ):\n super(BaseFormSet, self).__init__(*args, **kwargs)\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ('text',)\n\nclass CommentFormSet(BaseFormSet):\n def __init__(self, *args, **kwargs ):\n super(BaseFormSet, self).__init__(*args, **kwargs)\n\nclass DocumentForm(forms.ModelForm):\n class Meta:\n model = Document\n fields = ('docfile', 'title',)\n","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"423647046","text":"import logging\r\nimport numpy as np\r\nfrom matplotlib.backends.backend_pdf import PdfPages\r\n\r\nfrom pybar.fei4_run_base import Fei4RunBase\r\nfrom pybar.fei4.register_utils import scan_loop, make_pixel_mask\r\nfrom pybar.run_manager import RunManager\r\nfrom pybar.daq.readout_utils import convert_data_array, is_data_record, data_array_from_data_iterable, get_col_row_array_from_data_record_array\r\nfrom pybar.analysis.plotting.plotting import plotThreeWay\r\n\r\n\r\nclass GdacTuning(Fei4RunBase):\r\n '''Global Threshold Tuning\r\n\r\n Tuning the global threshold to target threshold value (threshold is given in units of PlsrDAC).\r\n The tuning uses a binary search algorithm.\r\n\r\n Note:\r\n Use pybar.scans.tune_fei4 for full FE-I4 tuning.\r\n '''\r\n _default_run_conf = {\r\n \"scan_parameters\": [('GDAC', None)],\r\n \"target_threshold\": 50, # target threshold in PlsrDAC to tune to\r\n \"gdac_tune_bits\": range(7, -1, -1), # GDAC bits to change during tuning\r\n \"n_injections_gdac\": 50, # number of injections per GDAC bit setting\r\n \"max_delta_threshold\": 2, # minimum difference to the target_threshold to abort the tuning\r\n \"mask_steps_gdac\": 3, # mask\r\n \"enable_mask_steps_gdac\": [0], # mask steps to do per GDAC setting\r\n \"plot_intermediate_steps\": False, # plot intermediate steps (takes time)\r\n \"plots_filename\": None, # file name to store the plot to, if None show on screen\r\n \"enable_shift_masks\": [\"Enable\", \"C_High\", \"C_Low\"], # enable masks shifted during scan\r\n \"disable_shift_masks\": [], # disable masks shifted during scan\r\n \"pulser_dac_correction\": False # PlsrDAC correction for each double column\r\n }\r\n\r\n def configure(self):\r\n commands = []\r\n commands.extend(self.register.get_commands(\"ConfMode\"))\r\n # C_Low\r\n if \"C_Low\".lower() in map(lambda x: x.lower(), self.enable_shift_masks):\r\n self.register.set_pixel_register_value('C_Low', 1)\r\n 
commands.extend(self.register.get_commands(\"WrFrontEnd\", same_mask_for_all_dc=True, name='C_Low'))\r\n else:\r\n self.register.set_pixel_register_value('C_Low', 0)\r\n commands.extend(self.register.get_commands(\"WrFrontEnd\", same_mask_for_all_dc=True, name='C_Low'))\r\n # C_High\r\n if \"C_High\".lower() in map(lambda x: x.lower(), self.enable_shift_masks):\r\n self.register.set_pixel_register_value('C_High', 1)\r\n commands.extend(self.register.get_commands(\"WrFrontEnd\", same_mask_for_all_dc=True, name='C_High'))\r\n else:\r\n self.register.set_pixel_register_value('C_High', 0)\r\n commands.extend(self.register.get_commands(\"WrFrontEnd\", same_mask_for_all_dc=True, name='C_High'))\r\n commands.extend(self.register.get_commands(\"RunMode\"))\r\n self.register_utils.send_commands(commands)\r\n\r\n def scan(self):\r\n if not self.plots_filename:\r\n self.plots_filename = PdfPages(self.output_filename + '.pdf')\r\n self.close_plots = True\r\n else:\r\n self.close_plots = False\r\n cal_lvl1_command = self.register.get_commands(\"CAL\")[0] + self.register.get_commands(\"zeros\", length=40)[0] + self.register.get_commands(\"LV1\")[0] + self.register.get_commands(\"zeros\", mask_steps=self.mask_steps_gdac)[0]\r\n\r\n self.write_target_threshold()\r\n for gdac_bit in self.gdac_tune_bits: # reset all GDAC bits\r\n self.set_gdac_bit(gdac_bit, bit_value=0)\r\n\r\n additional_scan = True\r\n last_bit_result = self.n_injections_gdac\r\n decreased_threshold = False # needed to determine if the FE is noisy\r\n all_bits_zero = True\r\n\r\n def bits_set(int_type):\r\n int_type = int(int_type)\r\n count = 0\r\n position = 0\r\n bits_set = []\r\n while(int_type):\r\n if(int_type & 1):\r\n bits_set.append(position)\r\n position += 1\r\n int_type = int_type >> 1\r\n count += 1\r\n return bits_set\r\n\r\n # calculate selected pixels from the mask and the disabled columns\r\n select_mask_array = np.zeros(shape=(80, 336), dtype=np.uint8)\r\n if not self.enable_mask_steps_gdac:\r\n 
self.enable_mask_steps_gdac = range(self.mask_steps_gdac)\r\n for mask_step in self.enable_mask_steps_gdac:\r\n select_mask_array += make_pixel_mask(steps=self.mask_steps_gdac, shift=mask_step)\r\n for column in bits_set(self.register.get_global_register_value(\"DisableColumnCnfg\")):\r\n logging.info('Deselect double column %d' % column)\r\n select_mask_array[column, :] = 0\r\n\r\n occupancy_best = 0\r\n vthin_af_best = self.register.get_global_register_value(\"Vthin_AltFine\")\r\n vthin_ac_best = self.register.get_global_register_value(\"Vthin_AltCoarse\")\r\n for gdac_bit in self.gdac_tune_bits:\r\n\r\n if additional_scan:\r\n self.set_gdac_bit(gdac_bit)\r\n scan_parameter_value = (self.register.get_global_register_value(\"Vthin_AltCoarse\") << 8) + self.register.get_global_register_value(\"Vthin_AltFine\")\r\n logging.info('GDAC setting: %d, bit %d = 1' % (scan_parameter_value, gdac_bit))\r\n else:\r\n self.set_gdac_bit(gdac_bit, bit_value=0)\r\n scan_parameter_value = (self.register.get_global_register_value(\"Vthin_AltCoarse\") << 8) + self.register.get_global_register_value(\"Vthin_AltFine\")\r\n logging.info('GDAC setting: %d, bit %d = 0' % (scan_parameter_value, gdac_bit))\r\n\r\n with self.readout(GDAC=scan_parameter_value, reset_sram_fifo=True, fill_buffer=True, clear_buffer=True, callback=self.handle_data):\r\n scan_loop(self, cal_lvl1_command, repeat_command=self.n_injections_gdac, mask_steps=self.mask_steps_gdac, enable_mask_steps=self.enable_mask_steps_gdac, enable_double_columns=None, same_mask_for_all_dc=True, eol_function=None, digital_injection=False, enable_shift_masks=self.enable_shift_masks, disable_shift_masks=self.disable_shift_masks, restore_shift_masks=True, mask=None, double_column_correction=self.pulser_dac_correction)\r\n\r\n occupancy_array, _, _ = np.histogram2d(*convert_data_array(data_array_from_data_iterable(self.fifo_readout.data), filter_func=is_data_record, converter_func=get_col_row_array_from_data_record_array), bins=(80, 
336), range=[[1, 80], [1, 336]])\r\n self.occ_array_sel_pixel = np.ma.array(occupancy_array, mask=np.logical_not(np.ma.make_mask(select_mask_array))) # take only selected pixel into account by creating a mask\r\n median_occupancy = np.ma.median(self.occ_array_sel_pixel)\r\n if abs(median_occupancy - self.n_injections_gdac / 2) < abs(occupancy_best - self.n_injections_gdac / 2):\r\n occupancy_best = median_occupancy\r\n vthin_af_best = self.register.get_global_register_value(\"Vthin_AltFine\")\r\n vthin_ac_best = self.register.get_global_register_value(\"Vthin_AltCoarse\")\r\n\r\n if self.plot_intermediate_steps:\r\n plotThreeWay(self.occ_array_sel_pixel.transpose(), title=\"Occupancy (GDAC \" + str(scan_parameter_value) + \" with tuning bit \" + str(gdac_bit) + \")\", x_axis_title='Occupancy', filename=self.plots_filename, maximum=self.n_injections_gdac)\r\n\r\n if abs(median_occupancy - self.n_injections_gdac / 2) < self.max_delta_threshold and gdac_bit > 0: # abort if good value already found to save time\r\n logging.info('Median = %f, good result already achieved (median - Ninj/2 < %f), skipping not varied bits' % (median_occupancy, self.max_delta_threshold))\r\n break\r\n\r\n if median_occupancy == 0 and decreased_threshold and all_bits_zero:\r\n logging.info('FE noisy?')\r\n\r\n if gdac_bit > 0:\r\n if (median_occupancy < self.n_injections_gdac / 2): # set GDAC bit to 0 if the occupancy is too lowm, thus decrease threshold\r\n logging.info('Median = %f < %f, set bit %d = 0' % (median_occupancy, self.n_injections_gdac / 2, gdac_bit))\r\n self.set_gdac_bit(gdac_bit, bit_value=0)\r\n decreased_threshold = True\r\n else: # set GDAC bit to 1 if the occupancy is too high, thus increase threshold\r\n logging.info('Median = %f > %f, leave bit %d = 1' % (median_occupancy, self.n_injections_gdac / 2, gdac_bit))\r\n decreased_threshold = False\r\n all_bits_zero = False\r\n\r\n if gdac_bit == 0:\r\n if additional_scan: # scan bit = 0 with the correct value again\r\n 
additional_scan = False\r\n last_bit_result = self.occ_array_sel_pixel.copy()\r\n self.gdac_tune_bits.append(0) # bit 0 has to be scanned twice\r\n else:\r\n last_bit_result_median = np.median(last_bit_result[select_mask_array > 0])\r\n logging.info('Scanned bit 0 = 0 with %f instead of %f' % (median_occupancy, last_bit_result_median))\r\n if abs(median_occupancy - self.n_injections_gdac / 2) > abs(last_bit_result_median - self.n_injections_gdac / 2): # if bit 0 = 0 is worse than bit 0 = 1, so go back\r\n self.set_gdac_bit(gdac_bit, bit_value=1)\r\n logging.info('Set bit 0 = 1')\r\n self.occ_array_sel_pixel = last_bit_result\r\n median_occupancy = np.ma.median(self.occ_array_sel_pixel)\r\n else:\r\n logging.info('Set bit 0 = 0')\r\n if abs(occupancy_best - self.n_injections_gdac / 2) < abs(median_occupancy - self.n_injections_gdac / 2):\r\n logging.info(\"Binary search converged to non optimal value, take best measured value instead\")\r\n median_occupancy = occupancy_best\r\n self.register.set_global_register_value(\"Vthin_AltFine\", vthin_af_best)\r\n self.register.set_global_register_value(\"Vthin_AltCoarse\", vthin_ac_best)\r\n\r\n if (self.register.get_global_register_value(\"Vthin_AltFine\") == 0 and self.register.get_global_register_value(\"Vthin_AltCoarse\") == 0) or self.register.get_global_register_value(\"Vthin_AltFine\") == 254:\r\n logging.warning('GDAC reached minimum/maximum value')\r\n\r\n if abs(median_occupancy - self.n_injections_gdac / 2) > 2 * self.max_delta_threshold:\r\n logging.warning('Global threshold tuning failed. Delta threshold = %f > %f. 
Vthin_AltCoarse / Vthin_AltFine = %d / %d' % (abs(median_occupancy - self.n_injections_gdac / 2), self.max_delta_threshold, self.register.get_global_register_value(\"Vthin_AltCoarse\"), self.register.get_global_register_value(\"Vthin_AltFine\")))\r\n else:\r\n logging.info('Tuned GDAC to Vthin_AltCoarse / Vthin_AltFine = %d / %d' % (self.register.get_global_register_value(\"Vthin_AltCoarse\"), self.register.get_global_register_value(\"Vthin_AltFine\")))\r\n\r\n self.vthin_altfine_best = self.register.get_global_register_value(\"Vthin_AltFine\")\r\n self.vthin_altcoarse_best = self.register.get_global_register_value(\"Vthin_AltCoarse\")\r\n\r\n def analyze(self):\r\n self.register.set_global_register_value(\"Vthin_AltFine\", self.vthin_altfine_best)\r\n self.register.set_global_register_value(\"Vthin_AltCoarse\", self.vthin_altcoarse_best)\r\n\r\n plotThreeWay(self.occ_array_sel_pixel.transpose(), title=\"Occupancy after GDAC tuning (GDAC \" + str(self.scan_parameters.GDAC) + \")\", x_axis_title='Occupancy', filename=self.plots_filename, maximum=self.n_injections_gdac)\r\n if self.close_plots:\r\n self.plots_filename.close()\r\n\r\n def write_target_threshold(self):\r\n commands = []\r\n commands.extend(self.register.get_commands(\"ConfMode\"))\r\n self.register.set_global_register_value(\"PlsrDAC\", self.target_threshold)\r\n commands.extend(self.register.get_commands(\"WrRegister\", name=\"PlsrDAC\"))\r\n self.register_utils.send_commands(commands)\r\n\r\n def set_gdac_bit(self, bit_position, bit_value=1):\r\n commands = []\r\n commands.extend(self.register.get_commands(\"ConfMode\"))\r\n if(bit_position < 8):\r\n if(bit_value == 1):\r\n self.register.set_global_register_value(\"Vthin_AltFine\", self.register.get_global_register_value(\"Vthin_AltFine\") | (1 << bit_position))\r\n else:\r\n self.register.set_global_register_value(\"Vthin_AltFine\", self.register.get_global_register_value(\"Vthin_AltFine\") & ~(1 << bit_position))\r\n else:\r\n if(bit_value == 
1):\r\n self.register.set_global_register_value(\"Vthin_AltCoarse\", self.register.get_global_register_value(\"Vthin_AltCoarse\") | (1 << (bit_position - 8)))\r\n else:\r\n self.register.set_global_register_value(\"Vthin_AltCoarse\", self.register.get_global_register_value(\"Vthin_AltCoarse\") & ~(1 << bit_position))\r\n commands.extend(self.register.get_commands(\"WrRegister\", name=[\"Vthin_AltFine\", \"Vthin_AltCoarse\"]))\r\n self.register_utils.send_commands(commands)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n RunManager('../configuration.yaml').run_run(GdacTuning)\r\n","sub_path":"host/pybar/scans/tune_gdac.py","file_name":"tune_gdac.py","file_ext":"py","file_size_in_byte":13377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"341229331","text":"vegan_nonos = ['eggs','meat','milk','fish','figs']\npie_ingredients = ['flour','apples','sugar','eggs','salt','fish']\nallergic = ['fish','peanuts']\n\nfor food in pie_ingredients:\n if food in allergic:\n print(f\"dont eat that {food} you gonna die\")\n if food in vegan_nonos:\n print(f'dis {food} not vegan friendly lul')\n else:\n print(f\"dis {food} going in da pie\")","sub_path":"python-syntax/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"88810380","text":"#!/usr/bin/env python\n\nimport argparse\nimport pandas as pd\nimport glob\nimport os \n\n#Denne her er med de nye gener brugt til den nye mlsa\n\ndef ReverseComplement1(seq):\n seq_dict = {'A':'T','T':'A','G':'C','C':'G','N':'N'}\n return \"\".join([seq_dict[base] for base in reversed(seq)])\n\ndef import_genome_names():\n\tfiles = glob.glob('*.fasta')\n\treturn files\n\ndef gene_length_query(query):\n '''takes a fasta file and returns a dictionary with genes as keys and gene length as values'''\n fasta = open('%s' %(query), 'r')\n fasta_read = fasta.readlines()\n gene_length = {}\n for line in fasta_read:\n if line[0]=='>':\n length = ''\n gene = line[1:]\n gene = gene.rstrip()\n gene_length[gene] = None \n else:\n value = line\n value = value.rstrip()\n length +=value\n gene_length[gene] = len(length)\n return gene_length\n\ndef number_of_genes(query):\n '''takes a fastefile and returns the number of genes in the fastafile'''\n fasta = open('%s' %(query), 'r')\n fasta_read = fasta.readlines()\n no_of_genes = 0\n for line in fasta_read:\n if line[0]=='>':\n no_of_genes = no_of_genes + 1\n return no_of_genes\n\ndef blasting(genome, query):\n '''takes a genome and make a blast search against the query'''\n #First making a blast database\n db_genome = '%s' %(genome)\n out_folder = 'blast_db/%s_db' %(genome)\n command_db = 'makeblastdb -in %s -out %s -dbtype nucl' %(db_genome, out_folder)\n os.system(command_db)\n #then i make the blast command and saves it in the folder blat_results\n db = 'blast_db/%s_db' %(genome)\n command = 'blastn -db %s -query %s -out blast_result/%s.txt -outfmt 7 -task blastn-short' %(db, query, genome)\n os.system(command)\n\ndef import_blast_result(genome, no_of_genes):\n #print(genome)\n header=['query_id', 'subject_id', 'perc_identity', 'alignment length', 'mismatches', 'gap_opens', 'q_start', 'q_end', 's_start', 's_end','evalue', 'bit_score', 'sequence']\n blast_df = pd.read_csv('blast_result/%s.txt' 
%(genome), sep='\\t', names=header, comment='#')\n blast_df = blast_df.drop_duplicates(subset='query_id', keep=\"first\")\n #Denne her har jeg lige ændret så de fjerner alle < 200 bit_Score\n #blast_df = blast_df.drop(blast_df.loc[blast_df['bit_score']<200].index)\n if len(blast_df)!=no_of_genes:\n print('%s has a number of gens different from expected' %(genome))\n return blast_df\n \ndef fasta_to_dict(genome):\n '''functions opens fasta/genome and returns a dictionery with headers as keys and sequences as value'''\n fasta = open('%s' %(genome), 'r')\n fasta_read = fasta.readlines()\n dict_fasta = {}\n \n for line in fasta_read:\n if line[0]=='>':\n line = line[1:]\n line = line.split(' ')\n key = line[0]\n key = key.rstrip()\n #key = line[1:]\n #key = key.split(' ')\n #print(key)\n #key= key[1]\n dict_fasta[key]= ''\n else:\n value = line\n value = value.rstrip()\n dict_fasta[key] +=value\n \n return dict_fasta\n\ndef extract_gene_sequences_with_concatenated(dict_fasta, blast_result, gene_length):\n '''Same as \"extract_gene_sequences\" (see above) but it also makes a concatenated fasta with all sequences from each\n strain concatenated'''\n rows = range(0,len(blast_result))\n dict_genes = {}\n dict_genes['concatenated'] = ''\n for n in rows:\n contig = blast_result.iloc[n]['subject_id']\n start = blast_result.iloc[n]['s_start']\n stop = blast_result.iloc[n]['s_end']\n start_query = blast_result.iloc[n]['q_start']\n stop_query = blast_result.iloc[n]['q_end']\n gene = blast_result.iloc[n]['query_id']\n #-1 fordi der mangler en base ved alle gener... uvist hvorfor.\n #Først ordner jeg de hits der er i \"rigtig\" rækkefølge, stop>start \n start = int(start)\n #start_minus = start -1\n start_extended_right = start - start_query\n stop_extended_right = stop + (gene_length[gene]-stop_query)\n #stop_gene_length = start_minus + gene_length[gene]\n #Så ordner jeg dem der er i forkert rækkefølge, dvs. 
start>stop\n stop = blast_result.iloc[n]['s_end']\n stop = int(stop)\n stop_extended_wrong = stop + (blast_result.iloc[n]['q_end']-gene_length[gene]-1)\n start_extended_wrong = start + (blast_result.iloc[n]['q_start']-1)\n #stop_minus = stop - blast_result.iloc[n]['q_start']\n #reverse_start = stop_minus + gene_length[gene]\n #stop_minus = stop -1\n #Jeg går lige en base længere tilbage da der ved alignment mangler en base i forhold til reference\n #Jeg skal lige tjekke om jeg skal vende de gener om som den ikke finder\n if stop > start:\n sequence = dict_fasta[contig]\n #print(start_extended_right)\n #print(stop_extended_right)\n if start_extended_right < 0:\n start_extended_right = 0\n sequence = sequence[start_extended_right:stop_extended_right]\n dict_genes[gene] = sequence\n dict_genes['concatenated'] +=sequence\n elif stop < start:\n sequence = dict_fasta[contig]\n #print(stop_extended_wrong)\n #print(start_extended_wrong)\n if stop_extended_wrong < 0:\n stop_extended_wrong = 0\n sequence = sequence[stop_extended_wrong:start_extended_wrong]\n #sequence = sequence[stop_minus:start]\n sequence = ReverseComplement1(sequence)\n dict_genes[gene] = sequence\n dict_genes['concatenated'] +=sequence\n return dict_genes\n\ndef append_genes_to_fasta(gene_sequences, genome):\n for gene in list(gene_sequences.keys()):\n sequence = gene_sequences[gene]\n file = open('output/%s.txt' %(gene), 'a')\n file.write(\">%s\" '\\n' '%s' '\\n' %(genome, sequence)) \n return None\n\ndef main(args):\n list_of_genomes = import_genome_names()\n for genome in list_of_genomes:\n gene_length = gene_length_query(args.q)\n no_of_genes = number_of_genes(args.q)\n blasting(genome, args.q)\n blast_result = import_blast_result(genome, no_of_genes)\n dict_fasta_file = fasta_to_dict(genome)\n gene_sequences = extract_gene_sequences_with_concatenated(dict_fasta_file, blast_result, gene_length)\n append_genes_to_fasta(gene_sequences, genome)\n\nif __name__ == '__main__':\n parser = 
argparse.ArgumentParser(description='Extracts MLSA gene sequences')\n parser.add_argument('-q', help='query file containing genes in MLSA scheme', required=True)\n\n args = parser.parse_args()\n\n main(args)\n\n \n\n","sub_path":"mlsa_script.py","file_name":"mlsa_script.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"174586647","text":"#encoding: UTF-8\n# Autor: Edgar Eduardo Alvarado Duran, A01371424\n# Problema 4\n\nhombres= int(input(\"¿Cuantos hombres estan inscritos?\"))\nmujeres= int(input(\"¿Cuantas mujeres estan inscritas?\"))\ninscritos= hombres+mujeres\nh= hombres*100\nporcientoh= h/inscritos\nm= mujeres*100\nporcientom= m/inscritos\nprint (\"El total de inscritos son \", inscritos)\nprint (\"El porcentaje de hombres es de \", porcientoh, \"%\")\nprint (\"El porcentaje de mujeres es de \", porcientom, \"%\")\n","sub_path":"porcentajes.py","file_name":"porcentajes.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"551970279","text":"\"\"\"\ntraining\n\"\"\"\nimport os\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom sklearn.metrics import precision_recall_fscore_support as score\n\nfrom config import Config\nfrom model import Model\nfrom utils import PDTB, sent_to_tensor\n\n\ndef train(config):\n choise = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n print(choise + \" is available\")\n device = torch.device(choise)\n\n print(\"Training from scratch!\")\n model = Model(config.model.vocab_size, \n config.model.embedd_size,\n config.model.hidden_size,\n config.model.max_seq_len,\n config.model.n_layers)\n \n pdtb = PDTB(config)\n train_arg1_sents, train_arg2_sents, train_labels = pdtb.load_PDTB(\"train\")\n dev_arg1_sents, dev_arg2_sents, dev_labels = pdtb.load_PDTB(\"dev\")\n word_to_id = pdtb.build_vocab()\n model.to(device)\n \n start = time.time()\n model.load_pretrained_embedding(config.training.fix_embed, config.resourses.glove_path, word_to_id)\n print(\"Loading embedding taking %.3f s\" % (time.time() - start))\n\n \n \n batch_size = config.training.batch_size\n max_seq_len = config.model.max_seq_len\n \n loss_func = nn.CrossEntropyLoss()\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=config.training.lr, \n weight_decay=config.training.weight_decay) # L2\n\n print(\"Start training!\")\n best_f1 = 0.0\n for epoch in range(config.training.epochs):\n total_loss = 0.0\n start = time.time()\n\n result = []\n # train\n for i in range(0, len(train_arg1_sents), batch_size):\n optimizer.zero_grad()\n\n arg1 = train_arg1_sents[i: i + batch_size]\n arg2 = train_arg2_sents[i: i + batch_size]\n label = train_labels[i: i + batch_size]\n \n arg1 = sent_to_tensor(arg1, word_to_id, max_seq_len).to(device)\n arg2 = sent_to_tensor(arg2, word_to_id, max_seq_len).to(device)\n label = torch.LongTensor(label).to(device)\n\n output = model(arg1, arg2)\n 
result.extend(list(torch.max(output, 1)[1].cpu().numpy())) \n\n loss = loss_func(output, label)\n loss.backward()\n optimizer.step()\n\n total_loss += loss.item()\n \n precision, recall, f1, _ = score(train_labels, result, average='binary')\n print(\"Epoch %d: train f1 score: %.2f precision: %.2f recall: %.2f\" % (epoch, 100 * f1, \n 100 * precision, 100 * recall))\n print(\"Epoch %d train loss: %.3f time: %.3f s\" % (epoch, total_loss / len(train_arg1_sents), time.time() - start))\n\n # dev\n with torch.no_grad():\n result = []\n for i in range(0, len(dev_arg1_sents), batch_size):\n arg1 = dev_arg1_sents[i: i + batch_size]\n arg2 = dev_arg2_sents[i: i + batch_size]\n label = dev_labels[i: i + batch_size]\n \n arg1 = sent_to_tensor(arg1, word_to_id, max_seq_len).to(device)\n arg2 = sent_to_tensor(arg2, word_to_id, max_seq_len).to(device)\n label = torch.LongTensor(label).to(device)\n\n output = model(arg1, arg2)\n result.extend(list(torch.max(output, 1)[1].cpu().numpy())) \n\n # F1 score\n precision, recall, f1, _ = score(dev_labels, result, average='binary')\n print(\"Epoch %d: dev f1 score: %.2f precision: %.2f recall: %.2f\" % (epoch, 100 * f1, \n 100 * precision, 100 * recall))\n if f1 > best_f1:\n best_f1 = f1\n torch.save(model, config.resourses.model_path + config.type + \"_\" + \n config.resourses.model_name)\n print(\"Model saved!\")\n","sub_path":"baseline/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"30502498","text":"\"\"\"\n4. Write a Python program to get a string from a given string where all\noccurrences of its first char have been changed to '$', except the first\nchar itself.\n\nSample String : 'restart'\nExpected Result : 'resta$t'\n\n4 min\n\n\"\"\"\n\n\"\"\"\nreplace()\n\"\"\"\n\nstr1 = 'restart restart'\nfchar = str1[0]\nresult = fchar + str1[1:].replace(fchar,'$')\nprint(result)\n\n","sub_path":"sj200625_python2/day09_py200820/string_q4.py","file_name":"string_q4.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"284653249","text":"#from os import listdir\nimport operator\nfrom sklearn import svm\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.corpus import stopwords, reuters\nimport re\nimport math\ncachedStopWords = stopwords.words(\"english\")\nmin_length = 3\n\nclass corpus:\n def __init__(self):\n self.tfidf_representer = tf_idf()\n self.documents = []\n self.categories = reuters.categories()\n self.cat_dict = {}\n iterator = 0\n for category in self.categories:\n iterator = iterator + 1\n self.cat_dict[iterator] = category\n for docid in reuters.fileids(category):\n doc_class = iterator\n if docid.startswith(\"train\"):\n train = 1\n elif docid.startswith(\"test\"):\n train = 0\n else:\n raise\n text = reuters.raw(docid)\n doc = document(text, doc_class, train)\n self.add_document(doc)\n self.initialize_vocabulary()\n def add_document(self, document):\n self.documents.append(document)\n self.tfidf_representer.add_document(document)\n\n def get_train_documents(self):\n train = []\n for doc in self.documents:\n if doc.train == 1:\n train.append(doc.text)\n return train\n\n def initialize_vocabulary(self):\n vocabulary = {}\n inverse_vocabulary = {}\n self.vocabulary = {}\n self.inverse_vocabulary = {}\n vocabulary_sizes = {}\n i = 0\n for doc in self.documents:\n for word in doc.get_unique_words():\n if word not in inverse_vocabulary:\n vocabulary_sizes[i] = 0\n vocabulary[i] = word\n inverse_vocabulary[word] = i\n i += 1\n else:\n vocabulary_sizes[inverse_vocabulary[word]] += 1\n\n sorted_sizes = sorted(vocabulary_sizes.items(), key=operator.itemgetter(1), reverse=True)\n # print(sorted_sizes)\n\n keys_sorted = sorted_sizes[:300]\n # print(keys_sorted)\n keys_sorted2 = []\n for x in keys_sorted:\n keys_sorted2.append(x[0])\n\n iterator = 0\n for i in range(len(vocabulary)):\n if i in keys_sorted2:\n self.vocabulary[iterator] = vocabulary[i]\n self.inverse_vocabulary[vocabulary[i]] = iterator\n iterator += 1\n\n def get_svm_vectors(self,Train = 0, 
Test = 0):\n Xs = []\n ys = []\n for doc in self.documents:\n if Train == 1 and doc.train == 0:\n continue\n if Test == 1 and doc.train == 1:\n continue\n x = doc.get_vector(self.inverse_vocabulary, self.tfidf_representer)\n #x = doc.get_vector(self.inverse_vocabulary, self.tfidf_representer)\n y = doc.doc_class\n Xs.append(x)\n ys.append(y)\n return (Xs,ys)\n\nclass document:\n def __init__(self, text, doc_class = 1, train = 1):\n self.doc_class = doc_class\n self.train = train\n self.text = text\n self.preprocessed_text = []\n def preprocessing(self,raw_tokens):\n no_stopwords = [token for token in raw_tokens if token not in cachedStopWords]\n stemmed_tokens = []\n stemmer = PorterStemmer()\n for token in no_stopwords:\n stemmed_tokens.append(stemmer.stem(token))\n p = re.compile('[a-zA-Z]+')\n pattern_checked = []\n for stem in stemmed_tokens:\n if p.match(stem) and len(stem) >= min_length:\n pattern_checked.append(stem)\n return pattern_checked\n\n def get_preprocessed_tokens(self):\n if len(self.preprocessed_text) == 0:\n self.preprocessed_text = self.preprocessing(self.text.split())\n else:\n return self.preprocessed_text\n\n return self.preprocessed_text\n\n def get_unique_words(self):\n word_list = []\n\n for word in self.preprocessing(self.text.split()):\n if not word in word_list:\n word_list.append(word)\n return word_list\n\n\n def get_vector(self,inverse_vocabulary, representer):\n lng = len(inverse_vocabulary)\n vector = [0 for i in range(300)]\n for word in self.preprocessing(self.text.split()):\n if word in inverse_vocabulary.keys():\n vector[inverse_vocabulary[word]] = representer.tfidf(word, self)\n #poprzednio:\n #vector[inverse_vocabulary[word]] = 1\n return vector\n\n# def get_vector(self,inverse_vocabulary):\n# lng = len(inverse_vocabulary)\n# vector = [0 for i in range(300)]\n# for word in self.preprocessing(self.text.split()):\n# if word in inverse_vocabulary.keys():\n# vector[inverse_vocabulary[word]] = 1\n# return vector\n\n\nclass 
tf_idf:\n\n def __init__(self):\n self.D = 0.0\n self.df = {}\n def add_document(self, document):\n self.D += 1.0\n for token in document.get_unique_words():\n if token not in self.df:\n self.df[token] = 1.0\n else:\n self.df[token] += 1.0\n def idf(self,token):\n return math.log(self.D/self.df[token])\n def tf(self,token,document):\n liczba_wystapien_tokenu = 0.0\n liczba_tokenow = 0.0\n for t in document.get_preprocessed_tokens():\n liczba_tokenow += 1.0\n if t == token:\n liczba_wystapien_tokenu += 1.0\n return liczba_wystapien_tokenu/liczba_tokenow\n def tfidf(self,token, document):\n return self.tf(token,document) * self.idf(token)\n\n\nklasyfikator = svm.SVC(kernel=\"linear\")\ncrp = corpus()\n(X,y) = crp.get_svm_vectors(Train = 1)\nprint(\"starting fitting procedure\")\nklasyfikator.fit(X,y)\n(XT,yt) = crp.get_svm_vectors(Test = 1)\npozytywne = 0\nwszystkie = 0\nfor i,x in enumerate(XT):\n wszystkie += 1\n klasa = klasyfikator.predict(x)\n if klasa == yt[i]:\n pozytywne = pozytywne + 1\n\nprint(pozytywne)\nprint(wszystkie)\n\n#testy:\nprint(\"dlugosc X:\")\nprint(len(X))\nprint(\"dlugosc y:\")\nprint(len(y))\nprint(\"dlugosc XT:\")\nprint(len(XT))\nprint(\"dlugosc yt:\")\nprint(len(yt))\nprint(\"dlugosc vectora w XT:\")\nprint(len(XT[0]))\nprint(len(XT[1]))\nprint (XT[1])","sub_path":"zadanie1.py","file_name":"zadanie1.py","file_ext":"py","file_size_in_byte":6353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"176350373","text":"import cv2\nimport numpy as np\n\ndef combine(video1, video2, video_output):\n capture1 = cv2.VideoCapture(video1)\n capture2 = cv2.VideoCapture(video2)\n\n frame_count1 = int(capture1.get(cv2.CAP_PROP_FRAME_COUNT))\n width1 = int(capture1.get(cv2.CAP_PROP_FRAME_WIDTH))\n height1 = int(capture1.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps1 = int(capture1.get(cv2.CAP_PROP_FPS))\n\n frame_count2 = int(capture2.get(cv2.CAP_PROP_FRAME_COUNT))\n width2 = int(capture2.get(cv2.CAP_PROP_FRAME_WIDTH))\n height2 = int(capture2.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps2 = int(capture2.get(cv2.CAP_PROP_FPS))\n\n # setup output\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n writer = cv2.VideoWriter(video_output, fourcc, fps1, (2 * width1, height1), 1)\n\n i = 0\n print('\\nCombining: %s' %video_output)\n while capture1.isOpened() and capture2.isOpened():\n ret1, frame1 = capture1.read()\n ret2, frame2 = capture2.read()\n \n if not ret1 or not ret2:\n break\n\n new_frame = np.concatenate((frame1, frame2), axis=1)\n writer.write(new_frame)\n i = i + 1\n print('%d' %i, end = ' * ')\n\n print('\\nCompleted')\n\n","sub_path":"combine_video.py","file_name":"combine_video.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"69775804","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nsample_1 = np.random.multivariate_normal([0, 0, 0, 0, 0], np.diag([1, 1, 1, 1, 1]), size=100) # from a standard bivariate normal\nsample_2 = np.random.multivariate_normal([0, 0, 0, 0, 0], np.diag([5, 5, 5, 5, 5]), size=100) # from a different 2d normal\nquery_point = np.random.multivariate_normal([0, 0, 0, 0, 0], np.diag([5, 5, 5, 5, 5]), size=1) # from second distribution\n\n\ndef distance(x, y): # must be arrays\n if max(np.shape(x)) != max(np.shape(y)):\n return print(\"error, arrays not same length\")\n else:\n z = x - y\n z = z.flatten()\n return np.sqrt(sum(z ** 2))\n\n\ndef knn(training_data, training_labels, new_point, k=3): # a must be a list\n distances = []\n n = training_data.shape[0]\n for i in range(n):\n distances.append(distance(x=training_data[i,:], y=new_point))\n training_data = np.concatenate((training_data, np.reshape(training_labels, (n, 1)), np.reshape(distances, (n, 1))),\n axis=1)\n td_col = training_data.shape[1]\n order = training_data[:, td_col - 1].argsort() # this will return an array where the numbers in the array give\n # the indexes such that if the training data were sorted by this index then the distance column would be in order\n # (low to high)\n training_data = training_data[order, :] # this then re-orders the matrix\n ans = list(training_data[0:k, td_col - 2]) # returns a list of the k nearest labels\n return max(set(ans)) # returns the dominating labels from the k nearest\n\n\n# plt.scatter(x=sample_1[:, 0], y=sample_1[:, 1])\n# plt.scatter(x=sample_2[:, 0], y=sample_2[:, 1])\n# plt.scatter(x=query_point[0, 0], y=query_point[0, 1])\n\nprint(knn(training_data=np.concatenate((sample_1, sample_2), axis=0), training_labels=[0] * 100 + [1] * 100,\n new_point=query_point, 
k=3))\n","sub_path":"K_nearestNeighbour.py","file_name":"K_nearestNeighbour.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"152853921","text":"import sys\r\nimport random\r\nimport time\r\n\r\nROWS = int(sys.argv[1])\r\nCOLS = int(sys.argv[2])\r\npercentage = float(sys.argv[3])\r\ndef createGrid(rows, cols):\r\n grid = []\r\n for row in range(0, rows):\r\n grid.append([])\r\n for col in range(0, cols):\r\n value = bool(random.random() < percentage)\r\n grid[row].append(value)\r\n return grid\r\n\r\ndef allDead(grid):\r\n for row in grid:\r\n for cell in row:\r\n if cell == True:\r\n return False\r\n return True\r\n\r\ndef getNeighbours(grid, x, y):\r\n neighbours = []\r\n max_i = len(grid)\r\n max_j = len(grid[0])\r\n for i in range(x - 1, x + 2):\r\n for j in range(y - 1, y + 2):\r\n if (i != x or j != y) and (i > -1 and j > -1) and (i < max_i and j < max_j):\r\n neighbours.append(grid[i][j])\r\n return neighbours\r\n\r\ndef evolve(grid):\r\n rowCount = 0\r\n for row in grid:\r\n cellCount = 0\r\n for cell in row:\r\n applyRules(rowCount, cellCount, cell, grid)\r\n cellCount += 1\r\n rowCount += 1\r\n\r\ndef applyRules(x, y, cell, grid):\r\n neighbours = getNeighbours(grid, x, y)\r\n count = 0\r\n for neighbour in neighbours:\r\n if(neighbour):\r\n count += 1\r\n if cell:\r\n if count < 2:\r\n cell = False\r\n elif count > 3:\r\n cell = False\r\n else:\r\n if count == 3:\r\n cell = True\r\n\r\n grid[x][y] = cell\r\n\r\ngrid = createGrid(COLS, ROWS)\r\nwhile not allDead(grid):\r\n evolve(grid)\r\n print(grid)\r\n sys.stdout.flush()\r\n time.sleep(.05)\r\n","sub_path":"python_scripts/gameOfLife.py","file_name":"gameOfLife.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"52800516","text":"# coding: utf-8\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\" An example of predicting CAPTCHA image data with a LSTM network pre-trained with a CTC loss\"\"\"\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport time\nimport glob\nimport logging\nimport argparse\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom cnocr import CnOcr\nfrom cnocr.utils import set_logger\n\n\nlogger = set_logger(log_level=logging.INFO)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--model_name\", help=\"model name\", type=str, default='conv-lite-fc'\n )\n parser.add_argument(\"--model_epoch\", type=int, default=None, help=\"model epoch\")\n parser.add_argument(\n \"--context\",\n help=\"使用cpu还是gpu运行代码。默认为cpu\",\n type=str,\n choices=['cpu', 'gpu'],\n default='cpu',\n )\n parser.add_argument(\"-f\", \"--file\", help=\"Path to the image file or dir\")\n parser.add_argument(\n \"-s\",\n \"--single-line\",\n default=False,\n help=\"Whether the image only includes one-line characters\",\n )\n args = parser.parse_args()\n\n ocr = CnOcr(\n model_name=args.model_name, model_epoch=args.model_epoch, context=args.context\n )\n 
ocr_func = ocr.ocr_for_single_line if args.single_line else ocr.ocr\n fp_list = []\n if os.path.isfile(args.file):\n fp_list.append(args.file)\n elif os.path.isdir(args.file):\n fn_list = glob.glob1(args.file, '*g')\n fp_list = [os.path.join(args.file, fn) for fn in fn_list]\n\n for fp in fp_list:\n start_time = time.time()\n res = ocr_func(fp)\n logger.info('\\n' + '=' * 10 + fp + '=' * 10)\n if not args.single_line:\n res = '\\n'.join([''.join(line_p) for line_p in res])\n else:\n res = ''.join(res)\n logger.info('\\n' + res)\n logger.info('time cost: %f' % (time.time() - start_time))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/cnocr_predict.py","file_name":"cnocr_predict.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"508489784","text":"from dataclasses import dataclass\nfrom pathlib import Path\n\nfrom scripts_ import tikz\nMETHODS = ['mopoe', 'mopgfm', 'mogfm']\n\nmethod_footnote_mapping = {'mopoe': r'mopoe \\footnote{mixture-of-product-of-expert}',\n 'mopgfm': r'mopgfm \\footnote{mixture-of-parameter-generalized-$f$-mean}',\n 'mogfm': r'mogfm \\footnote{mixture-of-generalized-$f$-mean}'\n }\n\ndef make_graph(with_red_circle: bool = False):\n cond_samples_path = Path('data/pgfm/cond_gen_examples')\n input_samples_dir = cond_samples_path / 'input_samples'\n\n @dataclass\n class Nodes:\n input_m1: str = f'\\\\includegraphics[width=2cm]{{{str(input_samples_dir / \"m2.png\")}}}'\n input_m0: str = f'\\\\includegraphics[width=2cm]{{{str(input_samples_dir / \"m1.png\")}}}'\n output__m1m2_m2: str = f'\\\\includegraphics[width=2cm]{{{str(cond_samples_path / \"m1_m2\" / \"m2.png\")}}}'\n output__m1m2_m1: str = f'\\\\includegraphics[width=2cm]{{{str(cond_samples_path / \"m1_m2\" / \"m1.png\")}}}'\n output__m2_m1: str = f'\\\\includegraphics[width=2cm]{{{str(cond_samples_path / \"m2\" / \"m1.png\")}}}'\n output__m1_m2: str = f'\\\\includegraphics[width=2cm]{{{str(cond_samples_path / \"m1\" / \"m2.png\")}}}'\n q1: str = r'$q_{\\phi_1}$'\n q2: str = r'$q_{\\phi_2}$'\n q1_tilde: str = r'$\\tilde{q}_{\\phi_1}$'\n q2_tilde: str = r'$\\tilde{q}_{\\phi_2}$'\n q12_tilde: str = r'$\\tilde{q}_{\\phi_{12}}$'\n gfm: str = r'$f$-Mean'\n points: str = r'\\ldots'\n\n nodes = Nodes()\n pic = tikz.Picture(\n 'gfm/.style={rectangle, draw=red!60, fill=red!5, very thick, minimum size=10mm},'\n # 'lr/.style={ellipse, draw=blue!60, fill=blue!5, very thick, minimum size=15mm},'\n 'm0/.style={regular polygon,regular polygon sides=4, draw=green!60, fill=green!5, very thick, minimum size=28mm},'\n 'm0_dis/.style={circle, draw=green!60, fill=green!5, very thick, minimum size=10mm},'\n 'm1/.style={regular polygon,regular polygon sides=4, draw=orange!60, fill=orange!5, very thick, minimum size=28mm},'\n 
'm1_distr/.style={circle, draw=orange!60, fill=orange!5, very thick, minimum size=10mm},'\n 'lr/.style={circle, draw=gray!60, fill=gray!5, very thick, minimum size=15mm},'\n 'subset/.style={circle, draw=gray!60, fill=gray!5, very thick, minimum size=5mm},'\n )\n\n pic.set_node(text=nodes.input_m0, name='input_m0')\n pic.set_node(text=nodes.q1, options='m0_dis, right of=input_m0, xshift=1.5cm', name='q1')\n pic.set_node(text=nodes.input_m1, options='below of=input_m0, yshift=-2cm', name='input_m1')\n pic.set_node(text=nodes.q2, options='m1_distr, right of=input_m1, xshift=1.5cm', name='q2')\n\n pic.set_node(text=nodes.gfm, options='gfm, right of=q1, xshift=1cm,yshift=-1.5cm, align=center', name='gfm')\n\n pic.set_node(text=nodes.q12_tilde, options='lr, right of=gfm, xshift=1.5cm', name='q12_tilde')\n\n\n\n pic.set_node(text=nodes.output__m1m2_m2, options='right of=q12_tilde, xshift=2cm,yshift=-1.5cm', name='output__m1m2_m2')\n pic.set_node(text=nodes.output__m1_m2, options='right of=output__m1m2_m2, xshift=1.5cm', name='output__m1_m2')\n\n pic.set_node(text=nodes.output__m1m2_m1, options='above of=output__m1m2_m2, yshift=2cm', name='output__m1m2_m1')\n pic.set_node(text=nodes.output__m2_m1, options='right of=output__m1m2_m1, xshift=1.5cm', name='output__m2_m1')\n\n if with_red_circle:\n pic.set_node(\n options='right of=output__m1m2_m2, xshift=1.5cm, yshift=1.5cm, ellipse, draw=red!100, line width=2pt, minimum height=70mm, minimum width=30mm')\n\n pic.set_line('input_m0', 'q1', label=r'$enc_1$', label_pos='south')\n pic.set_line('input_m1', 'q2', label=r'$enc_2$', label_pos='south')\n\n pic.set_line('q1', 'gfm', label=r'\\textcolor{green}{$\\mu_1$}', edge_options='bend right=-10', label_pos='south')\n pic.set_line('q1', 'gfm', label=r'\\textcolor{green}{$\\sigma_1$}', edge_options='bend right=10', label_pos='north')\n\n pic.set_line('q2', 'gfm', label=r'\\textcolor{orange}{$\\mu_2$}', edge_options='bend right=10', label_pos='north')\n pic.set_line('q2', 'gfm', 
label=r'\\textcolor{orange}{$\\sigma_2$}', edge_options='bend right=-10', label_pos='south')\n\n pic.set_line('gfm', 'q12_tilde')\n\n pic.set_line('q12_tilde', 'output__m1m2_m2', label=r'$dec_2$', label_pos='north', edge_options='bend right=30')\n # pic.set_line('z', 'output__m1_m2', label=r'$dec_2$', label_pos='north, rotate=-45', edge_options='bend right=50')\n\n pic.set_line('q12_tilde', 'output__m1m2_m1', label=r'$dec_1$\\ ', label_pos='south, rotate=10', edge_options='bend left=30')\n # pic.set_line('z', 'output__m2_m1', label=r'$dec_1$\\ ', label_pos='south, rotate=45', edge_options='bend left=50')\n\n output = pic.make()\n print(output)","sub_path":"midterm_presentation/scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"120051246","text":"\"\"\"Connections to ADODB data sources from Kukur.\n\nThis requires an installation of pywin32 (LGPL).\n\"\"\"\n\n# SPDX-FileCopyrightText: 2021 Timeseer.AI\n# SPDX-License-Identifier: Apache-2.0\n\ntry:\n import adodbapi\n\n HAS_ADODB = True\nexcept ImportError:\n HAS_ADODB = False\n\nfrom kukur.source.metadata import MetadataValueMapper\nfrom kukur.source.sql import BaseSQLSource, SQLConfig\n\n\nclass ADODBNotInstalledError(Exception):\n \"\"\"Raised when the adodbapi module of pywin32 is not available.\"\"\"\n\n def __init__(self):\n Exception.__init__(\n self, \"the adodbapi modules is not available. Install pywin32.\"\n )\n\n\ndef from_config(data, metadata_value_mapper: MetadataValueMapper):\n \"\"\"Create a new ADODB data source from a configuration dict.\n\n Raises ADODBNotInstalledError when the adodbapi module is not available.\"\"\"\n if not HAS_ADODB:\n raise ADODBNotInstalledError()\n\n config = SQLConfig.from_dict(data)\n\n return ADODBSource(config, metadata_value_mapper)\n\n\nclass ADODBSource(BaseSQLSource):\n \"\"\"An ADODB data source.\"\"\"\n\n def __init__(self, config: SQLConfig, metadata_value_mapper: MetadataValueMapper):\n super().__init__(config, metadata_value_mapper)\n if not HAS_ADODB:\n raise ADODBNotInstalledError()\n\n def connect(self):\n return adodbapi.connect(self._config.connection_string)\n","sub_path":"kukur/source/adodb/adodb.py","file_name":"adodb.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"350466944","text":"# https://atcoder.jp/contests/abc096/tasks/abc096_a\n\na, b = map(int, input().split())\nans = 0\nflg = False\nfor i in range(1, 13):\n for j in range(1, 32):\n if i == j:\n ans += 1\n if i == a and j == b:\n flg = True\n break\n if flg:\n break\nprint(ans)\n","sub_path":"beginner_contests/096/A/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"59892254","text":"import codey\r\nfrom makeblock import ledmatrix\r\nimport time\r\nimport random\r\nimport rocky\r\nimport math\r\n\r\n# note table\r\nloop_times = 1\r\n\r\nnote_table = \\\r\n(\r\n [60, 0.25],\r\n [62, 0.25],\r\n [64, 0.25],\r\n [60, 0.25],\r\n [60, 0.25],\r\n [62, 0.25],\r\n [64, 0.25],\r\n [60, 0.25],\r\n [64, 0.25],\r\n [65, 0.25],\r\n [67, 0.5],\r\n [64, 0.25],\r\n [65, 0.25],\r\n [67, 0.5],\r\n [67, 0.375],\r\n [69, 0.125],\r\n [67, 0.375],\r\n [65, 0.125],\r\n [64, 0.25],\r\n [60, 0.25],\r\n [67, 0.375],\r\n [69, 0.125],\r\n [67, 0.375],\r\n [65, 0.125],\r\n [64, 0.25],\r\n [60, 0.25],\r\n [62, 0.25],\r\n [55, 0.25],\r\n [60, 0.5],\r\n [62, 0.25],\r\n [55, 0.25],\r\n [60, 0.5],\r\n)\r\nnote_play_index = 0\r\nsingle_note_show_over = True\r\nsingle_note_show_row_num = 1\r\nsingle_note_show_current_row_num = 1\r\nsingle_bote_play_over = True\r\nnote_interval_current_row = 0\r\nupdate_row_num = 0\r\n# note table\r\n\r\nGAME_READY = 0\r\nGAME_PLAYING = 1\r\nGAME_OVER = 2\r\nGAME_PASS = 3\r\nSTART_FACE = [0] * 16\r\n\r\nNOTE_LEFT_COLUMN = 2\r\nNOTE_MIDDLE_COLUMN = 7\r\nNOTE_RIGHT_COLUMN = 12\r\nNOTE_COLUMN_NUM = 3\r\nNOTE_INTERVAL_ROW = 2\r\n\r\nNOTE_LEFT_MAX = 63\r\nNOTE_MIDDLE_MAX = 100\r\nNOTE_RIGHT_MAX = 66\r\n\r\n\r\ngame_status = 0\r\nrun_speed = 20\r\nuser_score = 0\r\n\r\ncurrent_backgroup = START_FACE\r\n\r\ndef game_init(): \r\n global user_score\r\n global current_backgroup\r\n global note_play_index\r\n global single_note_show_over\r\n global single_bote_play_over\r\n global single_note_show_row_num\r\n global note_interval_current_row\r\n global single_note_show_current_row_num\r\n global update_row_num\r\n user_score = 0\r\n current_backgroup = [0] * 16\r\n note_play_index = 0\r\n single_note_show_over = True\r\n single_note_show_row_num = 1\r\n single_note_show_current_row_num = 1\r\n single_bote_play_over = True\r\n note_interval_current_row = 0\r\n update_row_num = 0\r\n\r\ndef speed_control():\r\n global run_speed\r\n 
run_speed = 20 + int(codey.dail())\r\n\r\ndef note_show_row_num_cal(note_t):\r\n row_num = round(note_t * 4)\r\n if row_num == 0:\r\n \t\trow_num += 1\r\n #return row_num\r\n return 1\r\n\r\ndef update_background():\r\n global current_backgroup\r\n global note_play_index\r\n global single_note_show_over\r\n global single_bote_play_over\r\n global single_note_show_row_num\r\n global note_interval_current_row\r\n global single_note_show_current_row_num\r\n global update_row_num\r\n\r\n if update_row_num >= len(note_table) * 3 + 8:\r\n return False\r\n elif note_play_index < len(note_table):\r\n if single_note_show_over:\r\n single_note_show_row_num = note_show_row_num_cal(note_table[note_play_index][1])\r\n single_note_show_current_row_num = 1\r\n update_value = 0x80\r\n single_note_show_over = False\r\n else:\r\n if single_note_show_current_row_num >= single_note_show_row_num:\r\n update_value = 0\r\n note_interval_current_row += 1\r\n if note_interval_current_row >= NOTE_INTERVAL_ROW:\r\n single_note_show_over = True\r\n single_note_show_current_row_num = 0\r\n note_play_index = note_play_index + 1\r\n note_interval_current_row = 0\r\n else:\r\n single_note_show_current_row_num += 1\r\n update_value = 0x80\r\n update_row_num += 1\r\n for i in range(16):\r\n current_backgroup[i] = (current_backgroup[i] >> 1) & 0xff\r\n\r\n if note_play_index >= len(note_table):\r\n show_face = current_backgroup.copy()\r\n for i in range(16):\r\n show_face[i] = show_face[i] | 0x01 \r\n ledmatrix().faceplate_show(0, 0, *show_face)\r\n return True\r\n\r\n if note_table[note_play_index][0] < NOTE_LEFT_MAX:\r\n for i in range(NOTE_LEFT_COLUMN, NOTE_LEFT_COLUMN + NOTE_COLUMN_NUM):\r\n current_backgroup[i] |= update_value\r\n elif note_table[note_play_index][0] < NOTE_RIGHT_MAX:\r\n for i in range(NOTE_RIGHT_COLUMN, NOTE_RIGHT_COLUMN + NOTE_COLUMN_NUM):\r\n current_backgroup[i] |= update_value\r\n else:\r\n for i in range(NOTE_MIDDLE_COLUMN, NOTE_MIDDLE_COLUMN + NOTE_COLUMN_NUM):\r\n 
current_backgroup[i] |= update_value\r\n print(current_backgroup)\r\n show_face = current_backgroup.copy()\r\n for i in range(16):\r\n show_face[i] = show_face[i] | 0x01 \r\n ledmatrix().faceplate_show(0, 0, *show_face)\r\n codey.color_off()\r\n return True\r\n\r\ndef game_over_check():\r\n global current_backgroup\r\n\r\ndef on_start_callback():\r\n global game_status\r\n global run_speed\r\n global user_score\r\n count = 0\r\n while True:\r\n speed_control()\r\n if game_status == GAME_READY:\r\n if codey.is_button(\"C\"):\r\n game_status = GAME_PLAYING\r\n\r\n elif game_status == GAME_PLAYING:\r\n if count % run_speed == 0:\r\n if update_background():\r\n \t pass\r\n else:\r\n game_status = GAME_OVER\r\n\r\n elif game_status == GAME_OVER:\r\n codey.show(user_score)\r\n game_status = GAME_READY\r\n game_init()\r\n\r\n elif game_status == GAME_PASS:\r\n game_status = GAME_PLAYING\r\n\r\n game_over_check()\r\n time.sleep(0.01)\r\n count += 1\r\ncodey.on_start(on_start_callback)\r\n\r\ndef on_button_callback():\r\n global current_backgroup\r\n global note_play_index\r\n global update_row_num\r\n global game_status\r\n global user_score\r\n if game_status != GAME_PLAYING:\r\n pass\r\n else:\r\n index = update_row_num // 3 - 2\r\n print('A index is', index)\r\n if current_backgroup[NOTE_LEFT_COLUMN] & 0x01:\r\n codey.green(50)\r\n user_score += 1\r\n codey.play_note(note_table[index][0], note_table[index][1])\r\n\r\ncodey.on_button('A', on_button_callback)\r\n\r\ndef on_button1_callback():\r\n global current_backgroup\r\n global note_play_index\r\n global update_row_num\r\n global user_score\r\n\r\n if game_status != GAME_PLAYING:\r\n pass\r\n else:\r\n index = update_row_num // 3 - 2\r\n print('B index is', index)\r\n if current_backgroup[NOTE_RIGHT_COLUMN] & 0x01:\r\n codey.green(50)\r\n user_score += 1\r\n codey.play_note(note_table[index][0], note_table[index][1])\r\n\r\ncodey.on_button('B', on_button1_callback)\r\n\r\ndef on_button2_callback():\r\n global 
current_backgroup\r\n global note_play_index\r\n global update_row_num\r\n global user_score\r\n if game_status != GAME_PLAYING:\r\n pass\r\n else:\r\n index = update_row_num // 3 - 2\r\n print('C index is', index)\r\n if current_backgroup[NOTE_MIDDLE_COLUMN] & 0x01:\r\n codey.green(50)\r\n user_score += 1\r\n codey.play_note(note_table[index][0], note_table[index][1])\r\n\r\ncodey.on_button('C', on_button2_callback)","sub_path":"others/old_game/music_game/rhythm.py","file_name":"rhythm.py","file_ext":"py","file_size_in_byte":6857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"117478191","text":"from datetime import datetime, timedelta\r\n\r\nPDX_time = datetime.now()\r\nLND_time = PDX_time + timedelta(hours = 9)\r\nNYC_time = PDX_time + timedelta(hours = 3)\r\n\r\nBranchTime = {\r\n 'Portland' : PDX_time,\r\n 'London' : LND_time,\r\n 'New York City' : NYC_time\r\n }\r\n\r\ndef bizstat(branches):\r\n for branch in branches:\r\n if(branches[branch].hour >= 9 and branches[branch].hour < 21):\r\n print(\"{} Open\".format(branch))\r\n else:\r\n print(\"{} Closed\".format(branch))\r\n\r\ndef main():\r\n bizstat(BranchTime)\r\n\r\nif __name__=='__main__':main()\r\n\r\n##\r\n##def bizstat():\r\n## \r\n## if(NYC_time.hour >= 9 and NYC_time.hour < 21):\r\n## print(\"New York City: Open\")\r\n## else:\r\n## print(\"New York City: Closed\")\r\n##\r\n## if(LND_time.hour >= 9 and LND_time.hour < 21):\r\n## print(\"London: Open\")\r\n## else:\r\n## print(\"London: Closed\")\r\n##\r\n##\r\n## if(PDX_time.hour >= 9 and PDX_time.hour < 21):\r\n## print(\"Portland: Open\")\r\n## else:\r\n## print(\"Portland: Closed\")\r\n\r\n\r\n","sub_path":"Datetime_drill-Refactored.py","file_name":"Datetime_drill-Refactored.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"134464413","text":"# vim: sw=4:ts=4:et\n\n#\n# remediation routines for EWS\n#\n\nimport logging\nimport json\n\nimport saq\nfrom saq.error import report_exception\nfrom saq.remediation import RemediationSystem\nfrom saq.remediation.constants import *\nfrom saq.util import PreInitCustomSSLAdapter\n\nimport exchangelib\nfrom exchangelib.errors import DoesNotExist, ErrorNonExistentMailbox\nimport requests\n\n\ndef check_message_id_format(message_id):\n \"\"\"Returns message id with < and > prepended and appended respectively\n\n Required format for exchangelib filter.\"\"\"\n message_id = message_id.strip()\n if not message_id.startswith(\"<\"):\n message_id = f\"<{message_id}\"\n if not message_id.endswith(\">\"):\n message_id = f\"{message_id}>\"\n return message_id\n\n\ndef get_messages_from_folder(folder, message_id, **kwargs):\n \"\"\"Return list of messages matching message id in the given folder.\"\"\"\n _logger = kwargs.get(\"logger\") or logging\n message_id = check_message_id_format(message_id)\n\n # We want to use filter WITHOUT conditional QuerySet queries... we want an EXACT match\n # on the message id. 
Important to note this because if we did some sort of filter like\n # message_id__contains, then we could accidentally pass (for example) a single letter\n # which would cause collateral removals or restorations.\n try:\n return [message for message in folder.filter(message_id=message_id)]\n # XXX - Not sure if this is needed since we're using .filter instead of .get\n except exchangelib.errors.DoesNotExist:\n _logger.info(f\"{folder.absolute} does not contain message id {message_id}\")\n return []\n\ndef get_exchange_build(version=\"Exchange2016\", **kwargs):\n \"\"\"Return a valid exchangelib.Build object based on the api version.\"\"\"\n _version = version.upper()\n _module = kwargs.get(\"version_module\") or exchangelib.version\n if not _version.startswith(\"EXCHANGE\"):\n raise ValueError(\"exchange version invalid\")\n _version = f'EXCHANGE_{_version[8:]}'\n\n try:\n return getattr(_module, _version)\n except AttributeError:\n raise AttributeError(\"exchange version not found\")\n\nclass EWSRemediator:\n \"\"\"Helper class to remediate and restore emails.\"\"\"\n\n def __init__(self, user, password, server=\"outlook.office365.com\", version=\"Exchange2016\",\n auth_type=exchangelib.BASIC, access_type=exchangelib.DELEGATE, adapter=None, **kwargs):\n\n self.credentials = exchangelib.Credentials(user, password)\n self.server = server\n _build = get_exchange_build(version)\n _version = exchangelib.Version(_build)\n self.config = exchangelib.Configuration(credentials=self.credentials, server=server, auth_type=auth_type, version=_version)\n self.access_type = access_type\n self.account = kwargs.get(\"account\", None)\n self.mailbox_found = False\n if adapter is not None:\n exchangelib.protocol.BaseProtocol.HTTP_ADAPTER_CLS = adapter\n\n def remediate(self, action, email_address, message_id):\n if action == 'remove':\n return self.remove(email_address, message_id)\n return self.restore(email_address, message_id)\n\n\n def get_account(self, email_address, 
**kwargs):\n \"\"\"Return the existing account if appropriate. Return a new one.\"\"\"\n\n _account_class = kwargs.get(\"account_class\") or exchangelib.Account\n _logger = kwargs.get(\"logger\") or logging\n\n if self.account is not None:\n if email_address.strip().lower() == self.account.primary_smtp_address.lower():\n return self.account\n\n self.account = _account_class(\n email_address, access_type=self.access_type, credentials=self.credentials, config=self.config\n )\n\n _logger.debug(f\"setup account object for {email_address} using {self.access_type}\")\n return self.account\n\n def remove(self, email_address, message_id, **kwargs):\n \"\"\"Soft delete messages with a specific message id.\n\n Soft delete == recoverable\"\"\"\n\n _account = kwargs.get(\"account\") or self.get_account(email_address)\n _logger = kwargs.get(\"logger\") or logging\n\n all_items = _account.root / \"AllItems\"\n\n try:\n messages = get_messages_from_folder(all_items, message_id)\n except ErrorNonExistentMailbox:\n self.mailbox_found = False\n return RemediationResult(email_address, message_id, 'unknown', 'remove', success=False,\n message='account does not have mailbox')\n else:\n self.mailbox_found = True\n\n if not messages:\n _logger.warning(f'inbox {email_address} did not contain message id {message_id} during remediation')\n return RemediationResult(email_address, message_id, 'mailbox', 'remove', success=False, message=\"no messages found\")\n\n for message in messages:\n message.soft_delete()\n _logger.info(\n f\"removed message id {message.message_id} item id {message.id} \"\n f\"changekey {message.changekey} for user {email_address}\"\n )\n\n return RemediationResult(email_address, message_id, 'mailbox',\n 'remove', success=True, message='removed')\n\n def restore(self, email_address, message_id, **kwargs):\n \"\"\"Restore a soft deleted--but recoverable--message to the user's inbox.\"\"\"\n _account = kwargs.get(\"account\") or self.get_account(email_address)\n _logger = 
kwargs.get(\"logger\") or logging\n\n recoverable_items = _account.root / 'Recoverable Items' / 'Deletions'\n\n try:\n messages = get_messages_from_folder(recoverable_items, message_id)\n except ErrorNonExistentMailbox:\n self.mailbox_found = False\n return RemediationResult(email_address, message_id, 'unknown', 'restore', success=False,\n message='account does not have mailbox')\n else:\n self.mailbox_found = True\n\n if not messages:\n _logger.warning(f'inbox {email_address} did not contain message id {message_id} during remediation')\n return RemediationResult(email_address, message_id, 'mailbox', 'restore', success=False,\n message=\"no messages found\")\n\n for message in messages:\n message.move(_account.inbox)\n _logger.info(\n f\"move message id {message.message_id} item id {message.id} changekey \"\n f\"{message.changekey} to the inbox of {email_address}\"\n )\n\n return RemediationResult(email_address, message_id, 'mailbox', 'restore', success=True,\n message='restored')\n\n\ndef get_remediator(section, timezone=None):\n _timezone = timezone or saq.CONFIG[\"DEFAULT\"].get(\"timezone\", \"UTC\")\n \"\"\"Return EWSRemediator object\"\"\"\n certificate = section.get(\"certificate\", None)\n use_proxy = section.getboolean(\"use_proxy\", True)\n server = section.get('server', 'outlook.office365.com')\n auth_type = section.get('auth_type', exchangelib.BASIC)\n\n if auth_type.upper() == exchangelib.NTLM:\n auth_type = exchangelib.NTLM\n\n adapter = PreInitCustomSSLAdapter\n\n if certificate:\n adapter.add_cert(server, certificate)\n\n if not use_proxy:\n adapter.PROXIES = {}\n\n return EWSRemediator(\n user=section['user'],\n password=section['pass'],\n server=server,\n auth_type=auth_type,\n access_type=section.get('access_type', exchangelib.DELEGATE),\n version=section.get('version', \"Exchange2016\"),\n adapter=adapter,\n )\n\n\nclass EWSRemediationSystem(RemediationSystem):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # 
Create a remediator for each account\n self.remediators = []\n\n _config = kwargs.get('test_config') or saq.CONFIG\n\n sections = [_config[section] for section in _config.sections() if section.startswith('ews_remediation_account_')]\n\n logging.debug(f'found {len(sections)} ews remediation account sections')\n\n for section in sections:\n\n logging.debug(f'loading section {section.name}')\n\n if not section.get('pass'):\n logging.error(f'ews remediation section {section.name} for EWSRemdiationSystem is missing a password '\n f'and will not be used.')\n\n remediator = get_remediator(section)\n\n logging.debug(f'loaded remediator account section {section.name}')\n\n self.remediators.append(remediator)\n\n logging.info(\n f'loaded EWSRemediator with EWS account user {remediator.credentials.username} server {remediator.server} '\n f'version {remediator.config.version.api_version} auth_type {remediator.config.auth_type}'\n )\n\n logging.debug(f'acquired {len(self.remediators)} remediator accounts')\n\n def execute_request(self, remediation):\n logging.info(f\"execution remediation {remediation}\")\n message_id, recipient = remediation.key.split(':', 1)\n\n # TODO should we use our email address parsing utilities for this instead?\n if recipient.startswith('<'):\n recipient = recipient[1:]\n if recipient.endswith('>'):\n recipient = recipient[:-1]\n\n logging.debug(f\"got message_id {message_id} recipient {recipient} from key {remediation.key}\")\n\n # found_recipient = False\n\n for remediator in self.remediators:\n\n if saq.UNIT_TESTING:\n pf_result = RemediationResult(recipient, message_id, 'mailbox',\n remediation.action, success=True, message='removed')\n else:\n pf_result = remediator.remediate(remediation.action, recipient, message_id)\n\n\n logging.info(f\"got result {pf_result} for message-id {message_id} for {recipient}\")\n\n remediation.result = pf_result.message\n remediation.successful = pf_result.success and pf_result.message in ['removed', 'restored']\n 
remediation.status = REMEDIATION_STATUS_COMPLETED\n\n # this returns a dict of the following structure\n # pf_result[email_address] = phishfry.RemediationResult\n # with any number of email_address keys depending on what kind of mailbox it found\n # and how many forwards it found\n\n # use results from whichever account succesfully resolved the mailbox\n #if pf_result[recipient].mailbox_type != \"Unknown\": # TODO remove hcc\n # found_recipient = True\n # messages = []\n # for pf_recipient in pf_result.keys():\n # if pf_recipient == recipient:\n # continue\n\n # if pf_recipient in pf_result[recipient].forwards:\n # discovery_method = \"forwarded to\"\n # elif pf_recipient in pf_result[recipient].members:\n # discovery_method = \"list membership\"\n # elif pf_result[recipient].owner:\n # discovery_method = \"owner\"\n # else:\n # discovery_method = \"UNKNOWN DISCOVERY METHOD\"\n\n # messages.append('({}) success {} disc method {} recipient {} (message {})'.format(\n # 200 if pf_result[pf_recipient].success and pf_result[pf_recipient].message in ['removed',\n # 'restored'] else 500,\n # pf_result[pf_recipient].success,\n # discovery_method,\n # pf_recipient,\n # pf_result[pf_recipient].message))\n\n # message = pf_result[pf_recipient].message\n # if message is None:\n # message = ''\n # if messages:\n # message += '\\n' + '\\n'.join(messages)\n\n # remediation.result = message\n # remediation.successful = pf_result[pf_recipient].success and pf_result[pf_recipient].message in [\n # 'removed', 'restored']\n # remediation.status = REMEDIATION_STATUS_COMPLETED\n\n # we found the recipient in this EWS acount so we don't need to keep looking in any others ones\n # break\n\n # did we find it?\n #if not found_recipient:\n # remediation.result = \"cannot find mailbox\"\n # remediation.successful = False\n # remediation.status = REMEDIATION_STATUS_COMPLETED\n # logging.warning(f\"could not find message-id {message_id} sent to {recipient}\")\n \n logging.info(f\"completed 
remediation request {remediation}\")\n return remediation\n\n\nclass RemediationResult(object):\n def __init__(self, address, message_id, mailbox_type, action, success=True, message=None):\n self.address = address\n self.message_id = message_id\n self.mailbox_type = mailbox_type\n self.success = success\n self.message = message\n self.owner = None\n self.members = []\n self.forwards = []\n self.action = action\n\n def result(self, message, success=False):\n logging.info(message)\n self.success = success\n self.message = message\n\n def __eq__(self, other):\n attributes = [\n 'address', 'message_id', 'mailbox_type', 'success',\n 'message', 'owner', 'members', 'forwards', 'action',\n ]\n for attr in attributes:\n if getattr(self, attr) != getattr(other, attr):\n return False\n return True\n\n\n\n#######################\n## LEGACY CODE BELOW ##\n#######################\n\ndef _remediate_email_o365_EWS(emails):\n \"\"\"Remediates the given emails specified by a list of tuples of (message-id, recipient email address).\"\"\"\n assert emails\n assert all([len(e) == 2 for e in emails])\n\n result = [] # tuple(message_id, recipient, result_code, result_text)\n\n # get the hostname and port for our EWS proxy system\n # this system receives requests for remediation and restorations and submits them to EWS on our behalf\n ews_host = saq.CONFIG['remediation']['ews_host']\n ews_port = saq.CONFIG['remediation'].getint('ews_port')\n\n # the format of each request is a POST to\n # https://host:port/delete\n # with JSON as the POST data content\n\n # note that we make a separate request for each one\n url = 'https://{}:{}/delete'.format(saq.CONFIG['remediation']['ews_host'], saq.CONFIG['remediation']['ews_port'])\n session = requests.Session()\n data = {'recipient': None, 'message_id': None}\n headers = {'Content-Type': 'application/json'}\n\n for message_id, recipient in emails:\n try:\n\n if recipient is None:\n continue\n\n if recipient.startswith('<'):\n recipient = 
recipient[1:]\n if recipient.endswith('>'):\n recipient = recipient[:-1]\n\n data['recipient'] = recipient\n data['message_id'] = message_id\n json_data = json.dumps(data)\n\n logging.info(\"remediating message_id {} to {}\".format(message_id, recipient))\n r = session.post(url, headers=headers, data=json_data, verify=False)\n logging.info(\n \"got result {} text {} for message_id {} to {}\".format(r.status_code, r.text, message_id, recipient))\n result.append((message_id, recipient, r.status_code, r.text))\n except Exception as e:\n error_message = 'unable to remediate message_id {} to {}: {}'.format(message_id, recipient, str(e))\n logging.error(error_message)\n report_exception()\n result.append((message_id, recipient, 'N/A', str(e)))\n\n return result\n\n\ndef _unremediate_email_o365_EWS(emails):\n \"\"\"Remediates the given emails specified by a list of tuples of (message-id, recipient email address).\"\"\"\n assert emails\n assert all([len(e) == 2 for e in emails])\n\n result = [] # tuple(message_id, recipient, result_code, result_text)\n\n # get the hostname and port for our EWS proxy system\n # this system receives requests for remediation and restorations and submits them to EWS on our behalf\n ews_host = saq.CONFIG['remediation']['ews_host']\n ews_port = saq.CONFIG['remediation'].getint('ews_port')\n\n # the format of each request is a POST to\n # https://host:port/delete\n # with JSON as the POST data content\n\n # note that we make a separate request for each one\n url = 'https://{}:{}/restore'.format(saq.CONFIG['remediation']['ews_host'], saq.CONFIG['remediation']['ews_port'])\n session = requests.Session()\n data = {'recipient': None, 'message_id': None}\n headers = {'Content-Type': 'application/json'}\n\n for message_id, recipient in emails:\n\n try:\n if recipient.startswith('<'):\n recipient = recipient[1:]\n if recipient.endswith('>'):\n recipient = recipient[:-1]\n\n data['recipient'] = recipient\n data['message_id'] = message_id\n json_data = 
json.dumps(data)\n\n logging.info(\"restoring message_id {} to {}\".format(message_id, recipient))\n r = session.post(url, headers=headers, data=json_data, verify=False)\n logging.info(\n \"got result {} text {} for message_id {} to {}\".format(r.status_code, r.text, message_id, recipient))\n result.append((message_id, recipient, r.status_code, r.text))\n except Exception as e:\n error_message = 'unable to restore message_id {} to {}: {}'.format(message_id, recipient, str(e))\n logging.error(error_message)\n report_exception()\n result.append((message_id, recipient, 'N/A', str(e)))\n\n return result\n\n\n#\n# XXX are these next functions even used any more?\n#\n\ndef remediate_phish(alerts):\n \"\"\"Attempts to remediate the given Alert objects. Returns a tuple of (success_count, total)\"\"\"\n # make sure we can load all of the alerts\n for alert in alerts:\n if not alert.load():\n raise RuntimeError(\"unable to load alert {}\".format(str(alert)))\n\n # hard coded type\n # XXX would like to map types to remediation functions to call in aggregate\n if alert.alert_type != 'brotex - smtp - v2' and alert.alert_type != 'mailbox':\n raise RuntimeError(\"alert {} is not a support alert type of phishing remediation\".format(str(alert)))\n\n emails = [] # list of dicts returned by _create_remediation_email\n brotex_alert_count = 0 # keep track of how many brotex alerts we're remediating\n\n #\n # Office365 EWS Proxy Remediation\n #\n\n from saq.modules.email import EmailAnalysis, KEY_MESSAGE_ID, KEY_ENV_RCPT_TO, KEY_TO\n targets = [] # of tuple(message_id, recipient)\n results = {} # key = alert.uuid, value = str\n\n for alert in alerts:\n email_file = None\n for o in alert.observables:\n if o.type == F_FILE and (o.has_directive(DIRECTIVE_ORIGINAL_EMAIL) or o.value.endswith('email.rfc822')):\n email_file = o\n break\n\n if email_file is None:\n logging.warning(\"expected a single file observable in the alert for email remediation, \"\n \"but got 
{}\".format(len(email_file)))\n results[alert.uuid] = 'unexpected F_FILE type observables in main alert'\n continue\n\n # then get the EmailAnalysis for this email\n analysis = email_file.get_analysis(EmailAnalysis)\n if not analysis:\n loggging.warning(\"cannot get EmailAnalysis for {} in {}\".format(email_file, alert))\n results[alert.uuid] = 'cannot find email analysis'\n continue\n\n message_id = None\n env_rcpt_to = None\n mail_to = None\n recipient = None\n\n if KEY_MESSAGE_ID in analysis.email:\n message_id = analysis.email[KEY_MESSAGE_ID]\n\n if KEY_ENV_RCPT_TO in analysis.email:\n env_rcpt_to = analysis.email[KEY_ENV_RCPT_TO]\n # if we didn't find it there then look in the main alert\n # XXX I really don't how all this information is all over the place\n elif 'envelope rcpt to' in alert.details:\n env_rcpt_to = alert.details['envelope rcpt to']\n if isinstance(env_rcpt_to, str):\n env_rcpt_to = [env_rcpt_to]\n\n if KEY_TO in analysis.email:\n mail_to = analysis.email[KEY_TO]\n\n if not message_id:\n logging.error(\"cannot find Message-ID for {} in {}\".format(email_file, alert))\n results[alert.uuid] = 'cannot find Message-ID'\n continue\n\n if env_rcpt_to:\n recipient = env_rcpt_to[0] # there should only be one\n logging.debug(\"using env_rcpt_to value {} as recipient for {} in {}\".format(recipient, email_file, alert))\n elif mail_to:\n recipient = mail_to[\n 0] # XXX I need to look at all of them and pull out the one that matches a domain we own\n logging.debug(\"using mail_to value {} as recipient for {} in {}\".format(recipient, email_file, alert))\n\n if not recipient:\n logging.error(\"cannot determine recipient for {} in {}\".format(email_file, alert))\n results[alert.uuid] = 'cannot determine recipient'\n continue\n\n targets.append((message_id, recipient))\n\n result = _remediate_email_o365_EWS(targets)\n success_count = 0\n messages = [] # of str\n for message_id, recipient, result_code, result_text in result:\n if result_code == 200:\n 
success_count += 1\n\n # on 1/9/2017 we changed the format of the output\n # the result_text is now a JSON array [ {\"address\": EMAIL_ADDRESS, \"code\": CODE, \"message\": MESSAGE }, ... ]\n decoded_result_text = json.loads(result_text)\n for entry in decoded_result_text:\n messages.append('message-id {} to {} error code {} message {}'.format(\n message_id, entry['address'], entry['code'], entry['message']))\n else:\n messages.append(\n 'message-id {} to {} error code {} message {}'.format(message_id, recipient, result_code, result_text))\n\n messages.insert(0, 'remediated {} out of {} emails from office365'.format(success_count, len(alerts)))\n return messages\n\n\ndef unremediate_phish(alerts):\n # make sure we can load all of the alerts\n for alert in alerts:\n if not alert.load():\n raise RuntimeError(\"unable to load alert {}\".format(str(alert)))\n\n # hard coded type\n # XXX would like to map types to remediation functions to call in aggregate\n if alert.alert_type != 'brotex - smtp - v2' and alert.alert_type != 'mailbox':\n raise RuntimeError(\"alert {} is not a support alert type of phishing remediation\".format(str(alert)))\n\n #\n # Office365 EWS Proxy Remediation\n #\n\n from saq.modules.email import EmailAnalysis, KEY_MESSAGE_ID, KEY_ENV_RCPT_TO, KEY_TO\n targets = [] # of tuple(message_id, recipient)\n results = {} # key = alert.uuid, value = str\n\n for alert in alerts:\n # the two types of alerts that support this will have a single F_FILE observable in the Alert itself\n email_file = [o for o in alert.observables if o.type == F_FILE]\n if len(email_file) != 1:\n logging.warning(\"expected a single file observable in the alert for email remediation, \"\n \"but got {}\".format(len(email_file)))\n results[alert.uuid] = 'unexpected F_FILE type observables in main alert'\n continue\n\n email_file = email_file[0]\n # then get the EmailAnalysis for this email\n analysis = email_file.get_analysis(EmailAnalysis)\n if not analysis:\n 
loggging.warning(\"cannot get EmailAnalysis for {} in {}\".format(email_file, alert))\n results[alert.uuid] = 'cannot find email analysis'\n continue\n\n message_id = None\n env_rcpt_to = None\n mail_to = None\n recipient = None\n\n if KEY_MESSAGE_ID in analysis.email:\n message_id = analysis.email[KEY_MESSAGE_ID]\n\n if KEY_ENV_RCPT_TO in analysis.email:\n env_rcpt_to = analysis.email[KEY_ENV_RCPT_TO]\n # if we didn't find it there then look in the main alert\n # XXX I really don't how all this information is all over the place\n elif 'envelope rcpt to' in alert.details:\n env_rcpt_to = alert.details['envelope rcpt to']\n if isinstance(env_rcpt_to, str):\n env_rcpt_to = [env_rcpt_to]\n\n if KEY_TO in analysis.email:\n mail_to = analysis.email[KEY_TO]\n\n if not message_id:\n logging.error(\"cannot find Message-ID for {} in {}\".format(email_file, alert))\n results[alert.uuid] = 'cannot find Message-ID'\n continue\n\n if env_rcpt_to:\n recipient = env_rcpt_to[0] # there should only be one\n logging.debug(\"using env_rcpt_to value {} as recipient for {} in {}\".format(recipient, email_file, alert))\n elif mail_to:\n recipient = mail_to[0]\n logging.debug(\"using mail_to value {} as recipient for {} in {}\".format(recipient, email_file, alert))\n\n if not recipient:\n logging.error(\"cannot determine recipient for {} in {}\".format(email_file, alert))\n results[alert.uuid] = 'cannot determine recipient'\n continue\n\n targets.append((message_id, recipient))\n\n result = _unremediate_email_o365_EWS(targets)\n success_count = 0\n messages = [] # of str\n for message_id, recipient, result_code, result_text in result:\n if result_code == 200:\n success_count += 1\n\n messages.append(\n 'message-id {} to {} error code {} message {}'.format(message_id, recipient, result_code, result_text))\n\n messages.insert(0, 'restored {} out of {} emails from office365'.format(success_count, len(alerts)))\n return 
messages\n","sub_path":"lib/saq/remediation/ews/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":26343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"71271449","text":"# Top settings file for development\nfrom .settings import *\n\n\nDEBUG = True\nALLOWED_HOSTS = [\n 'localhost',\n '0.0.0.0',\n]\nTEMPLATES[0]['OPTIONS']['debug'] = DEBUG\nINSTALLED_APPS += (\n 'debug_toolbar',\n 'django_extensions',\n)\n\n# Disable sending mail\nEMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n\n# Including a default secret key since this is just for development\nSECRET_KEY = environ('SECRET_KEY', u'dipps!+sq49#e2k#5^@4*^qn#8s83$kawqqxn&_-*xo7twru*8')\n\nMIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)\n\nDEBUG_TOOLBAR_CONFIG = {\n 'DISABLE_PANELS': [\n 'debug_toolbar.panels.redirects.RedirectsPanel',\n # 'debug_toolbar.panels.staticfiles.StaticFilesPanel',\n ],\n 'SHOW_TEMPLATE_CONTEXT': True,\n}\n\n# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n","sub_path":"config/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"7219114","text":"from django.shortcuts import render\nfrom rdflib import Graph\nimport rdflib\n\ng = Graph()\ng.parse(\"index/Immunization.owl\")\n\n\ndef strip(query_string):\n index = query_string.index('#') + 1\n c = query_string[index:]\n return c\n\n\ndef home(request):\n return render(request, 'search/search.html')\n\n\ndef search(request):\n userValue = request.GET.get('s')\n userstr = str(userValue).lower()\n query = g.query(\n \"\"\"\n PREFIX rdf: \n PREFIX owl: \n PREFIX rdfs: \n PREFIX xsd: \n PREFIX moyin: \n\n SELECT ?value ?superclass ?superlabel\n WHERE {?subject moyin:diseasePrevention ?value.\n ?subject rdf:type ?subclass.\n ?subclass rdfs:subClassOf ?superclass.\n ?superclass rdfs:label ?superlabel} \n \"\"\")\n result = []\n for row in query:\n result.append({\n 'value': row['value'],\n 'superclass': strip(row['superclass']),\n 'superlabel': row['superlabel'],\n })\n return render(request, 'search/result.html', {'result': result, 'userstr': userstr})","sub_path":"Immunization/search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"620201989","text":"class Student:\n\n\t#establish Student class with relevant attributes\n\tdef __init__(self, name, year, gpa, current_classes):\n\n\t\tself.name = name\n\t\tself.year = year\n\t\tself.gpa = gpa\n\t\tself.current_classes = current_classes\n\n\t#use method to print current year and how long until graduation\n\tdef addYear(self): \n\n\t\tif (self.year < 4):\n\n\t\t\tA = 4 - self.year\n\n\t\t\tprint (\"Current year:\", self.year)\n\t\t\tprint (\"Years until graduation:\", A)\n\n\t\telse: \n\n\t\t\tprint (\"Current year:\", self.year)\n\t\t\tprint (\"You are graduating this year.\")\n\n\t#allow user to input desired gpa and display user input in relation to current gpa\n\tdef setGPA(self):\n\n\t\tgoal_gpa = float(input(\"Enter desired GPA: \"))\n\n\t\tprint(\"Current GPA:\", self.gpa, \"vs. Desired GPA:\", goal_gpa)\n\n\t\tif (self.gpa < goal_gpa):\n\n\t\t\tprint(\"Go study to increase your GPA.\")\n\n\t#allow user to change course load\n\tdef addClass(self):\n\n\t\tprint(\"Current course load:\", self.current_classes)\n\n\t\tclass_number = int(input(\"Enter number of classes to add: \"))\n\n\t\tfor i in range (0, class_number):\n\n\t\t\tnew_class = input(\"Enter class: \")\n\n\t\t\tnew_class_credit = int(input(\"Enter credits: \"))\n\n\t\t\tself.current_classes.update ({new_class : new_class_credit})\n\n\t\tprint(\"New course load:\", self.current_classes)\n\n#create athlete subclass with relavent attributes\nclass Athlete(Student):\n\n\tdef __init__(self, name, year, gpa, current_classes, sport, years_of_experience, onScholarship, starter):\n\n\t\t#instatiate attributes from student class and for athlete subclass\n\t\tsuper().__init__(name, year, gpa, current_classes)\n\t\tself.sport = sport\n\t\tself.years_of_experience = years_of_experience\n\t\tself.onScholarship = onScholarship\n\t\tself.starter = starter\n\ndef main():\n\n\t#create instance of athlete subclass with relavant attributes\n\tstudent1 = Athlete(\n\n\t\t\tname = \"Steve\", 
\n\t\t\tyear = 3, \n\t\t\tgpa = 3.9,\n\t\t\tcurrent_classes = {\"Calc I\" : 3, \"Jewish Studies\" : 4},\n\t\t\tsport = \"Tennis\", \n\t\t\tyears_of_experience = 10, \n\t\t\tonScholarship = True,\n\t\t\tstarter = True\n\n\t\t\t)\n\n\t#print name, sport, years of experience\n\tprint(\"Name:\", student1.name)\n\tprint(\"Sport:\", student1.sport)\n\tprint(\"Years of experience:\", student1.years_of_experience)\n\n\t#determine whether or not student is on scholarship or is a starter\n\tprint(\"On scholarship:\", student1.onScholarship)\n\tprint(\"Starter:\", student1.starter)\n\n\t#run methods established in student class \n\tstudent1.addYear()\n\tstudent1.setGPA()\n\tstudent1.addClass()\n\tprint(\"Hello world\")\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"lab_5_3.py","file_name":"lab_5_3.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"255586582","text":"#!/usr/bin/env python3\n\"\"\"\nScript to sync Sentinel-2 data from NCI to AWS S3 bucket\n\"\"\"\n\nimport logging\nimport re\nimport subprocess\nimport sys\nfrom concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures._base import as_completed\nfrom pathlib import Path\n\nimport click\nimport yaml\nfrom odc.aws import s3_dump, s3_client\nfrom odc.index import odc_uuid\nfrom tqdm import tqdm\n\nNCI_DIR = '/g/data/if87/datacube/002/S2_MSI_ARD/packaged'\nS3_PATH = 'L2/sentinel-2-nbar/S2MSIARD_NBAR'\nS3_BUCKET = 'dea-public-data'\n\nS3 = None\n\n_LOG = logging.getLogger()\n\n\ndef setup_logging():\n \"\"\"Log to stdout (via TQDM if running interactively) as well as into a file.\"\"\"\n _LOG.setLevel(logging.INFO)\n if sys.stdout.isatty():\n c_handler = TqdmLoggingHandler()\n else:\n c_handler = logging.StreamHandler()\n f_handler = logging.FileHandler('s3_uploads.log')\n c_handler.setLevel(logging.INFO)\n f_handler.setLevel(logging.INFO)\n\n # Create formatters and add it to handlers\n # c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n c_handler.setFormatter(formatter)\n f_handler.setFormatter(formatter)\n\n # Add handlers to the logger\n _LOG.addHandler(f_handler)\n _LOG.addHandler(c_handler)\n\n\n@click.command()\n@click.argument('s3_urls', type=click.File('r'))\n@click.option('--workers', type=int, default=10)\ndef main(s3_urls, workers):\n \"\"\"\n Script to sync Sentinel-2 data from NCI to AWS S3 bucket\n\n Pass in a file containing destination S3 urls that need to be uploaded.\n\n \"\"\"\n setup_logging()\n\n global S3\n S3 = s3_client()\n urls_to_upload = [url.strip() for url in s3_urls.readlines()]\n\n _LOG.info(f\"{len(urls_to_upload)} datasets to upload.\")\n with ThreadPoolExecutor(max_workers=workers) as executor:\n futures = [executor.submit(upload_dataset, s3_url) for s3_url in urls_to_upload]\n\n 
for future in tqdm(as_completed(futures), total=len(urls_to_upload), unit='datasets', disable=None):\n _LOG.info(f\"Completed uploaded: {future.result()}\")\n\n\ndef upload_dataset(s3_url):\n granule_id = s3_url_to_granule_id(s3_url)\n\n upload_dataset_without_yaml(granule_id, S3_BUCKET)\n\n local_path = Path(NCI_DIR) / granule_id\n upload_dataset_doc(local_path / 'ARD-METADATA.yaml', s3_url)\n return s3_url\n\n\ndef s3_url_to_granule_id(s3_url):\n match = re.search(r'/(\\d\\d\\d\\d-\\d\\d-\\d\\d/.*)/', s3_url)\n if match:\n return match.group(1)\n else:\n raise ValueError(f'Unable to extract granule id from {s3_url}')\n\n\ndef upload_dataset_without_yaml(granule_id, _s3_bucket):\n \"\"\"\n Run AWS sync command to sync granules to S3 bucket\n :param granule_id: name of the granule\n :param _s3_bucket: name of the s3 bucket\n \"\"\"\n local_path = Path(NCI_DIR) / granule_id\n s3_path = f\"s3://{_s3_bucket}/{S3_PATH}/{granule_id}\"\n\n # Remove any data that shouldn't be there and exclude the metadata and NBART\n command = f\"aws s3 sync {local_path} {s3_path} \" \\\n \"--only-show-errors \" \\\n \"--delete \" \\\n \"--exclude NBART/* \" \\\n \"--exclude ARD-METADATA.yaml\"\n\n try:\n subprocess.run(command, shell=True, check=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n except subprocess.CalledProcessError as e:\n _LOG.info(f\"Upload failed, stdout: {e.stdout}, stderr: {e.stderr}\")\n raise e\n\n\ndef upload_dataset_doc(src_yaml, s3_url):\n \"\"\"\n Replace metadata with additional info\n :param src_yaml: metadata file in NCI\n :param s3_url: path to upload metadata to in s3\n \"\"\"\n with open(src_yaml) as fin:\n nci_dataset = yaml.safe_load(fin)\n\n metadata_to_upload = munge_metadata(nci_dataset)\n\n s3_dump(yaml.safe_dump(metadata_to_upload, default_flow_style=False), s3_url, S3)\n\n\ndef munge_metadata(nci_dataset):\n del nci_dataset['image']['bands']['nbart_blue']\n del nci_dataset['image']['bands']['nbart_coastal_aerosol']\n del 
nci_dataset['image']['bands']['nbart_contiguity']\n del nci_dataset['image']['bands']['nbart_green']\n del nci_dataset['image']['bands']['nbart_nir_1']\n del nci_dataset['image']['bands']['nbart_nir_2']\n del nci_dataset['image']['bands']['nbart_red']\n del nci_dataset['image']['bands']['nbart_red_edge_1']\n del nci_dataset['image']['bands']['nbart_red_edge_2']\n del nci_dataset['image']['bands']['nbart_red_edge_3']\n del nci_dataset['image']['bands']['nbart_swir_2']\n del nci_dataset['image']['bands']['nbart_swir_3']\n del nci_dataset['lineage']\n nci_dataset['creation_dt'] = nci_dataset['extent']['center_dt'] # FIXME: WTF\n nci_dataset['product_type'] = 'S2MSIARD_NBAR'\n nci_dataset['original_id'] = nci_dataset['id']\n nci_dataset['software_versions'].update({\n 's2_to_s3_rolling': { # FIXME: Update\n 'repo': 'https://github.com/GeoscienceAustralia/dea-airflow/',\n 'version': '1.0.0'}\n })\n\n # Create a deterministic dataset ID based on these inputs\n nci_dataset['id'] = str(odc_uuid(\"s2_to_s3_rolling\", \"1.0.0\", [nci_dataset['id']]))\n return nci_dataset\n\n\nclass TqdmLoggingHandler(logging.Handler):\n def __init__(self, level=logging.NOTSET):\n super().__init__(level)\n\n def emit(self, record):\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/upload_s2.py","file_name":"upload_s2.py","file_ext":"py","file_size_in_byte":5603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"339428098","text":"def test_wav(h, f):\n import wave\n # 'RIFF' 'WAVE' 'fmt ' \n if not h.startswith(b'RIFF') or h[8:12] != b'WAVE' or h[12:16] != b'fmt ':\n return None\n f.seek(0)\n try:\n w = wave.openfp(f, 'r')\n except (EOFError, wave.Error):\n return None\n return ('wav', w.getframerate(), w.getnchannels(),\n w.getnframes(), 8*w.getsampwidth())\n\n","sub_path":"_4.python/__code/python-master/Lib/sndhdr.py","file_name":"sndhdr.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"446740836","text":"# Copyright 2015 Ufora Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nExecutor\n\nResponsible for sending python computations to a Ufora cluster and returns\ntheir result\n\"\"\"\n\n\nimport pyfora.Future as Future\nimport pyfora.Exceptions as Exceptions\nimport pyfora.RemotePythonObject as RemotePythonObject\nimport pyfora.PythonObjectRehydrator as PythonObjectRehydrator\nimport pyfora.ObjectRegistry as ObjectRegistry\nimport pyfora.ObjectVisitors as ObjectVisitors\nimport pyfora.WithBlockExecutor as WithBlockExecutor\nimport pyfora.DefaultPureImplementationMappings as DefaultPureImplementationMappings\nimport traceback\nimport logging\nimport threading\n\n\nclass Executor(object):\n \"\"\"Main executor for Pyfora code. This is the component responible for sending\n computations to the Ufora cluster and returning the result as a RemotePythonObject\n future.\n\n Python objects are sent to the server using the define() method. A Future that \n resolves to a RemotePythonObject corresponding to the submitted object is returned.\n\n Similarly, functions and their arguments can be submitted using the submit method which \n returns a Future that resolves to a RemotePythonObject of the evaluated expression or\n thrown exception. 
\n \"\"\"\n def __init__(self, connection, pureImplementationMappings=None):\n \"\"\"Initialize a Pyfora executor.\n\n connection - a pyfora.Connection.Connection, or something with similar interface.\n \"\"\"\n self.connection = connection\n self.stayOpenOnExit = False\n self.pureImplementationMappings = pureImplementationMappings or DefaultPureImplementationMappings.getMappings()\n self.objectRegistry = ObjectRegistry.ObjectRegistry()\n self.objectRehydrator = PythonObjectRehydrator.PythonObjectRehydrator(self.pureImplementationMappings)\n self.futures = {}\n self.lock = threading.Lock()\n\n def importS3Dataset(self, bucketname, keyname):\n \"\"\"Takes an S3 bucket and key and returns a RemotePythonObject representing the s3\n dataset on the server\"\"\"\n def importS3Dataset():\n builtins = bucketname.__pyfora_builtins__\n return builtins.loadS3Dataset(bucketname, keyname)\n\n return self.submit(importS3Dataset)\n\n def exportS3Dataset(self, valueAsString, bucketname, keyname):\n \"\"\"Write a ComputedRemotePythonObject representing a pyfora string to s3 and return a\n Future representing the completion of the object.\n\n The future will resolve either to None (success) or to a Exceptions.PyforaException.\n \"\"\"\n assert isinstance(valueAsString, RemotePythonObject.ComputedRemotePythonObject)\n future = Future.Future()\n\n def onCompleted(result):\n if isinstance(result, Exception):\n future.set_exception(result)\n else:\n future.set_result(None)\n\n self.connection.triggerS3DatasetExport(valueAsString, bucketname, keyname, onCompleted)\n\n return future\n\n def define(self, obj):\n \"\"\"Send 'obj' to the server and return a Future that resolves to a RemotePythonObject\n representing the object on the server.\n\n Returns:\n A Future object representing the PyFora object on the server\n \"\"\"\n\n self._raiseIfClosed()\n objectId = ObjectVisitors.walkPythonObject(\n obj,\n self.objectRegistry,\n self.pureImplementationMappings\n )\n\n future = 
Future.Future()\n\n def onConverted(result):\n if isinstance(result, Exception):\n future.set_exception(result)\n else:\n future.set_result(\n RemotePythonObject.DefinedRemotePythonObject(\n objectId,\n self\n )\n )\n\n self.connection.convertObject(objectId, self.objectRegistry, onConverted)\n return future\n\n def submit(self, fn, *args, **kwargs):\n \"\"\"Submits a callable to be executed on the server with the provided arguments 'args'.\n kwargs are not currently supported.\n\n Returns:\n A Future representing the given call. The future will eventually resolve to a\n RemotePythonObject instance.\n \"\"\"\n self._raiseIfClosed()\n if len(kwargs) > 0:\n raise Exceptions.PyforaNotImplementedError(\"Keyword arguments not supported yet\")\n\n # TODO: make this truly async\n # don't block on the 'define' calls\n futures = [self.define(fn)] + [self.define(arg) for arg in args]\n results = [f.result() for f in futures]\n return results[0](*results[1:])\n\n\n def close(self):\n if not self.isClosed():\n self.connection.close()\n self.connection = None\n\n @property\n def remotely(self):\n \"\"\"\n 'with fora.remotely:' syntax allows you to automatically submit an entire block of \n python code for remote execution. All the code nested in the remotely with block is \n submitted.\n\n Returns:\n A WithBlockExecutor that will extract python code from a with block and submit \n that code to the ufora cluster for remote execution. 
Results of the remote execution\n are returned as RemotePythonObject and are automatically reasigned to their\n corresponding local variables in the with block.\n \"\"\"\n return WithBlockExecutor.WithBlockExecutor(self)\n\n def isClosed(self):\n return self.connection is None\n\n\n def __enter__(self):\n return self\n\n\n def __exit__(self, excType, excValue, trace):\n if not self.stayOpenOnExit:\n self.close()\n\n\n def _raiseIfClosed(self):\n if self.connection is None:\n raise Exceptions.PyforaError('Attempted operation on a closed executor')\n\n\n def _resolveFutureToComputedObject(self, future):\n future.set_result(\n RemotePythonObject.ComputedRemotePythonObject(\n future._executorState,\n self\n )\n )\n\n\n def _downloadComputedValueResult(self, computation, maxBytecount):\n future = Future.Future()\n\n def onResultCallback(jsonResult):\n try:\n if isinstance(jsonResult, Exception):\n future.set_exception(jsonResult)\n return\n\n if 'foraToPythonConversionError' in jsonResult:\n future.set_exception(\n Exceptions.ForaToPythonConversionError(\n str(jsonResult['foraToPythonConversionError'])\n )\n )\n return\n if not jsonResult['isException']:\n if 'maxBytesExceeded' in jsonResult:\n future.set_exception(Exceptions.ResultExceededBytecountThreshold())\n else:\n result = self.objectRehydrator.convertJsonResultToPythonObject(jsonResult['result'])\n future.set_result(result)\n else:\n result = self.objectRehydrator.convertJsonResultToPythonObject(jsonResult['result'])\n future.set_exception(Exceptions.ComputationError(result, jsonResult['trace']))\n except Exception as e:\n # TODO need a better way of wrapping exceptions.\n # Alexandros has some ideas here, but this is\n # better than the experience without the wrapping\n # (which is hanging)\n logging.error(\n \"Rehydration failed: %s\\nResult was %s of type %s\", \n traceback.format_exc(), \n jsonResult, \n type(jsonResult)\n )\n \n future.set_exception(\n Exceptions.ForaToPythonConversionError(\n e\n )\n )\n\n 
self.connection.downloadComputation(computation, onResultCallback, maxBytecount)\n\n return future\n\n def _expandComputedValueToDictOfAssignedVarsToProxyValues(self, computedValue):\n future = Future.Future()\n\n def onExpanded(jsonResult):\n if isinstance(jsonResult, Exception):\n future.set_exception(jsonResult)\n return\n\n if jsonResult['isException']:\n result = self.objectRehydrator.convertJsonResultToPythonObject(\n jsonResult['result']\n )\n future.set_exception(\n Exceptions.ComputationError(\n result,\n jsonResult['trace']\n )\n )\n return\n\n assert isinstance(jsonResult['dictOfProxies'], dict)\n\n dictOfProxies = {}\n for k, v in jsonResult['dictOfProxies'].iteritems():\n dictOfProxies[k] = RemotePythonObject.ComputedRemotePythonObject(v, self)\n\n future.set_result(dictOfProxies)\n\n self.connection.expandComputedValueToDictOfAssignedVarsToProxyValues(\n computedValue,\n onExpanded\n )\n\n return future\n\n\n def _expandComputedValueToTupleOfProxies(self, computedValue):\n future = Future.Future()\n\n def onExpanded(jsonResult):\n if isinstance(jsonResult, Exception):\n future.set_exception(jsonResult)\n return\n\n if jsonResult['isException']:\n result = self.objectRehydrator.convertJsonResultToPythonObject(jsonResult['result'])\n future.set_exception(Exceptions.ComputationError(result, jsonResult['trace']))\n return\n\n assert isinstance(jsonResult['tupleOfComputedValues'], tuple)\n\n tupleOfProxies = \\\n tuple([\n RemotePythonObject.ComputedRemotePythonObject(val, self) \\\n for val in jsonResult['tupleOfComputedValues']\n ])\n\n future.set_result(tupleOfProxies)\n\n self.connection.expandComputedValueToTupleOfProxies(computedValue, onExpanded)\n\n return future \n \n\n def _downloadDefinedObject(self, objectId):\n future = Future.Future()\n def onRetrieved(value):\n future.set_result(value)\n self.connection.retrieveConvertedObject(objectId, onRetrieved)\n return future\n\n\n def _callRemoteObject(self, fnHandle, argHandles):\n future = 
Future.Future(onCancel=self._cancelComputation)\n def onComputationCreated(result):\n if isinstance(result, Exception):\n future.set_exception(result)\n return\n computation = result\n future.setExecutorState(computation)\n with self.lock:\n self.futures[computation] = future\n self._prioritizeComputation(future)\n\n self.connection.createComputation(fnHandle, argHandles, onComputationCreated)\n return future\n\n def _prioritizeComputation(self, future):\n computation = future._executorState\n def onPrioritized(result):\n if isinstance(result, Exception):\n future.set_exception(result)\n else:\n future.set_running_or_notify_cancel()\n\n def onComputationCompleted(shouldBeNone):\n self._resolveFutureToComputedObject(future)\n\n def onComputationFailed(exception):\n assert isinstance(exception, Exceptions.PyforaError)\n future.set_exception(exception)\n\n self.connection.prioritizeComputation(\n computation,\n onPrioritized,\n onComputationCompleted,\n onComputationFailed\n )\n\n def _cancelComputation(self, computationId):\n with self.lock:\n future = self.futures.get(computationId)\n if future is None:\n # the computation has already completed\n return False\n del self.futures[computationId]\n self.connection.cancelComputation(computationId)\n return True\n\n\n","sub_path":"packages/python/pyfora/Executor.py","file_name":"Executor.py","file_ext":"py","file_size_in_byte":12794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"356937728","text":"import numpy as np\nimport h5py\nfrom matplotlib import pyplot as plt\nfrom testCase import *\nfrom dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward\n\nnp.random.seed(1)\n\n\ndef initialize_parameters(n_x, n_h, n_y):\n np.random.seed(1)\n\n W1 = np.random.randn(n_h, n_x) * 0.01\n b1 = np.zeros([n_h, 1])\n W2 = np.random.randn(n_y, n_h) * 0.01\n b2 = np.zeros([n_y, 1])\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n\n return parameters\n\n# test\n# parameters = initialize_parameters(2, 2, 1)\n# print(\"W1 = \" + str(parameters[\"W1\"]))\n# print(\"b1 = \" + str(parameters[\"b1\"]))\n# print(\"W2 = \" + str(parameters[\"W2\"]))\n# print(\"b2 = \" + str(parameters[\"b2\"]))\n\n\ndef initialize_parameters_deep(layer_dims):\n np.random.seed(3)\n parameters = {}\n L = len(layer_dims)\n\n for l in range(1, L):\n # the w value matrix is layer_dims[l] * layer_dims[l-1b]\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01\n parameters['b' + str(l)] = np.zeros([layer_dims[l], 1])\n\n return parameters\n\n# test\n# parameters = initialize_parameters_deep([5, 4, 3])\n# print(\"W1 = \" + str(parameters[\"W1\"]))\n# print(\"b1 = \" + str(parameters[\"b1\"]))\n# print(\"W2 = \" + str(parameters[\"W2\"]))\n# print(\"b2 = \" + str(parameters[\"b2\"]))\n\n\ndef linear_forward(A, W, b):\n Z = np.dot(W, A) + b\n cache = (A, W, b)\n\n return Z, cache\n\n\n# test\n# A, W, b = linear_forward_test_case()\n# Z, linear_cache = linear_forward(A, W, b)\n# print(\"Z = \" + str(Z))\n\ndef linear_activation_forward(A_prev, W, b, activation):\n if activation == \"sigmoid\":\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = sigmoid(Z)\n\n elif activation == \"relu\":\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = relu(Z)\n\n cache = (linear_cache, activation_cache)\n\n return A, cache\n\n\n# test\n# A_prev, W, b = 
linear_activation_forward_test_case()\n# A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation=\"sigmoid\")\n# print(\"with sigmoid: A = \" + str(A))\n# A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation=\"relu\")\n# print(\"with relu: A = \" + str(A))\n\n\ndef L_model_forward(X, parameters):\n caches = []\n A = X\n # parameters contain W and b , each group count a layer\n L = len(parameters) // 2\n\n for l in range(1, L):\n A_prev = A\n A, caches = linear_activation_forward(A_prev,\n parameters['W' + str(l)],\n parameters['b' + str(l)],\n activation='relu')\n caches.append(caches)\n","sub_path":"deeplearning/1_Neural_Networks_and_Deep_Learning/building_your_Deep_Neural_Network_Step_by_Step/building_your_Deep_Neural_Network_Step_by_Step.py","file_name":"building_your_Deep_Neural_Network_Step_by_Step.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"36131088","text":"from Russian_Flash_Cards import *\nimport mysql.connector\nfrom mysql.connector import errorcode\n\nRU = True\ntable = \"\"\n\nif not RU:\n pass # TO DO\nelse:\n table = \"ru_words\"\n\n\ndef update(rid, trouble=False): # Using this instead of the regular in main init because of a lot of differences\n global table\n try:\n conn = mysql.connector.connect(**config)\n cur = conn.cursor()\n if trouble:\n query = \"UPDATE \" + str(table) + \" SET active='1', trouble_word='1' WHERE id='\" + str(rid) + \"'\"\n else:\n query = \"UPDATE \" + str(table) + \" SET active='1' WHERE id='\" + str(rid) + \"'\"\n cur.execute(query)\n conn.commit()\n cur.close()\n conn.close()\n print(\"Successfully updated \" + str(rid) + \" to enabled.\\n\")\n except errorcode as e:\n print(\"Error enabling \" + rid + \"\\n\" + e + \"\\n\")\n\nwhile True:\n a = input(str(\"ID: \"))\n a = a.strip()\n if \" -t\" in a:\n a = a.replace(\" -t\", \"\")\n update(a, True)\n else:\n update(a)\n\n","sub_path":"tools/QuickEnable.py","file_name":"QuickEnable.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"421296517","text":"\"\"\"\nNAME: OSUNTOLU PAUL ADEDIRAN\nEMAIL: neptunecody@gmail.com\nPHONE: 09025111684\n\nQuestion B :Store data scrapped from webpage as a dictionary, using colors as key\nand frequency as values\n\"\"\"\n\n#import regular expression, urllib library\nimport re\nimport urllib, urllib2\n\n\n#open webpage file using urllib.urlopen and read using the .read()\nfile = urllib.urlopen(\"python_class_test.html\")\nfile = file.read()\n\n#define a function for the regular expression matched\ndef expression():\n\n #create a pattern to match the color\n pattern = re.compile(r'([A-Z]+, \\w.*?) | ')\n\n #find all matched pattern\n matches = re.findall(pattern, file)\n\n #iterate through the patterns matched\n for match in matches:\n\n #splt items in matched\n items = match.split()\n\n\"\"\"\n #record items matched in a dictionary\n diction = {x: items.count(x) for x in items}\n print diction\n\n count = 0.0\n sum = 0.0\n for key in diction:\n count += 1\n sum += diction[key]\n print (\"The mean is: \",sum/count)\n\"\"\"\nexpression()\n","sub_path":"questionB2.py","file_name":"questionB2.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"575272827","text":"#!/usr/bin/python\n\"\"\"\nThere are N gas stations along a circular route, where the amount of gas at station i is gas[i].\nYou have a car with an unlimited gas tank and it costs cost[i] of gas to travel\nfrom station i to its next station (i+1). You begin the journey with an empty tank at one of the\ngas stations.\n\nReturn the starting gas station's index if you can travel around the circuit once,\notherwise return -1.\n\n#134\nREDDO: need to find the pattern. I don't like this question that much. need to solve with no hints\n\"\"\"\n\ndef calculategs(g, c):\n tnk = 0\n total = 0\n\n for i in range(len(g)):\n tmp = g[i] - c[i]\n\n tnk += tmp\n total += tmp\n\n if tnk < 0:\n tnk = 0\n start = i+1\n\n return start if total >=0 else -1\n\ndef test1():\n g = [1,2,3,4,5]\n c = [3,4,5,1,2]\n print(calculategs(g, c))\n\nif __name__ == '__main__':\n test1()\n\n","sub_path":"array/gasStation.py","file_name":"gasStation.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"61472184","text":"import random\nprint('ce jeu consiste à deviner un nombre entre 0 et 1000.\\npour abandonner taper 1001')\nn = random.randrange(0,1001)\nc = 1\nx = int(input('essai 1:'))\nwhile x!=n and x!=1001 :\n if x < n :\n print('trop petit')\n else :\n print('trop grand')\n c = c+1\n x= int(input('essai' + str(c) + ':'))\nif x == n : \n print('vous avez réussi en',c,'essais');\nelse :\n print(\"C'était n=\",n);\n\n \n \n","sub_path":"jeu.py","file_name":"jeu.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"295076940","text":"\ndef lcs_ries(x, y):\n \n def lcs_tab(x, y):\n n, m = len(x), len(y)\n tab = [[0]*(m+1) for i in range(n+1)]\n for i in range(n):\n for j in range(m):\n if x[i] == y[j]:\n tab[i+1][j+1] = tab[i][j] + 1\n else:\n tab[i+1][j+1] = max(tab[i][j+1], tab[i+1][j])\n return tab\n\n tab = lcs_tab(x, y)\n print(tab)\n ries = ''\n i, j = len(x), len(y)\n while tab[i][j] > 0:\n if x[i-1] == y[j-1]:\n ries = x[i-1] + ries\n i -= 1\n j -= 1\n elif tab[i-1][j] >= tab[i][j-1]:\n i -= 1\n else:\n j -= 1\n return ries\n\nprint(lcs_ries('programovanie', 'krasokorculovanie'))\nprint('\\n')\n\nimport random\ndef generate_dna(n):\n return ''.join([random.choice(['A', 'C', 'G', 'T']) for i in range(n)])\n\ndef get_text_seq():\n with open('text3.txt') as l:\n seq1 = l.read()[100:1101]\n with open('text4.txt') as l:\n seq2 = l.read()[100:1101]\n return seq1, seq2\n\n##print(lcs_ries(generate_dna(100), generate_dna(100)))\n##\n##print(lcs_ries(get_text_seq()[0], get_text_seq()[1]))\n\nprint(lcs_ries('cbdbab', 'abbccd'))\n","sub_path":"2015-2016/adscv10.py","file_name":"adscv10.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"97692727","text":"from nn.network import Network\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport tensorflow as tf\nfrom sklearn import metrics\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\nx_train = np.reshape(x_train, (x_train.shape[0], 28 * 28))\nx_test = np.reshape(x_test, (x_test.shape[0], 28 * 28))\ny_train = tf.keras.utils.to_categorical(y_train)\nx_train = (x_train/255).astype('float32')\nx_test = (x_test/255).astype('float32')\n\nnet = Network()\n\nnet.init(input_dimension=784, loss_function=\"cross entropy\", layers=[\n {\"units\": 128, \"activation\": \"relu\", \"type\":\"dense\"},\n {\"units\": 64, \"activation\": \"relu\", \"type\":\"dense\"},\n {\"units\": 10, \"activation\": \"softmax\", \"type\":\"dense\"}\n])\n\nnet.fit(x_train, y_train, epochs=10)\n\ny_pred = net.predict(x_test)\n\ny_pred = np.argmax(y_pred, axis=1)\n\ncmatrix = confusion_matrix(y_test, y_pred)\nprint(cmatrix)\nprint(f\"Accuracy score: {metrics.accuracy_score(y_test, y_pred):10.5}\")","sub_path":"terminal_mnist.py","file_name":"terminal_mnist.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"624329679","text":"# '''\n# Linked List hash table key/value pair\n# '''\nclass LinkedPair:\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None\n\nclass HashTable:\n '''\n A hash table that with `capacity` buckets\n that accepts string keys\n '''\n def __init__(self, capacity):\n self.capacity = capacity # Number of buckets in the hash table\n self.storage = [None] * capacity\n self.count = 0\n\n\n def _hash(self, key):\n '''\n Hash an arbitrary key and return an integer.\n\n You may replace the Python hash with DJB2 as a stretch goal.\n '''\n return self._hash_djb2(key)\n\n\n def _hash_djb2(self, key):\n '''\n Hash an arbitrary key using DJB2 hash\n\n OPTIONAL STRETCH: Research and implement DJB2\n '''\n hash_value = 5381\n\n for char in key:\n hash_value = (hash_value << 5) + hash_value + ord(char)\n \n return hash_value\n\n\n\n def _hash_mod(self, key):\n '''\n Take an arbitrary key and return a valid integer index\n within the storage capacity of the hash table.\n '''\n return self._hash(key) % self.capacity\n\n\n def insert(self, key, value):\n '''\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Fill this in.\n '''\n \n # Create new node\n if self.count >= self.capacity:\n self.resize()\n new_node = LinkedPair(key,value)\n # Get current position\n position = self._hash_mod(key)\n # Get the current node at the position\n current_node = self.storage[position]\n if current_node is None:\n self.storage[position] = new_node\n self.count += 1\n return\n\n elif current_node:\n # Traverse the list\n while current_node:\n # Check if the key already exists and update\n if (self._hash(key) == self._hash(current_node.key)):\n if ( value == current_node.value):\n return\n else:\n current_node.value = value\n return \n elif current_node.next is None:\n current_node.next = new_node\n return\n current_node = current_node.next\n \n\n\n def remove(self, key):\n '''\n 
Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Fill this in.\n '''\n hashed_key = self._hash(key)\n # Get item position\n position = self._hash_mod(key)\n # Get item\n item = self.storage[position]\n if (item and item.next is None):\n self.count -= 1\n while item is not None:\n if self._hash(item.key) == hashed_key:\n self.storage[position] = item.next\n return key\n item = item.next\n \n print('Item not found!!!')\n return \n\n\n def retrieve(self, key):\n '''\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Fill this in.\n '''\n hashed_key = self._hash(key)\n for hash in self.storage:\n if hash is not None and self._hash(hash.key) == hashed_key:\n return hash.value\n if hash is not None and hash.next:\n next = hash.next\n while next:\n if self._hash(next.key) == hashed_key:\n return next.value\n next = next.next\n return None\n\n def resize(self):\n '''\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Fill this in.\n '''\n # Create new storage\n new_storage = [None]* self.capacity * 2\n # Iterate over the old storage\n # Copy items to the new storage\n for i, item in enumerate(self.storage):\n # Check if the item is not None\n if item is not None:\n\n # Get a position for the item\n postion = self._hash_mod(item.key)\n # Get the item already in the position\n current_item = new_storage[postion]\n\n # Check if there's no collision\n if current_item is None:\n # Insert the item in position\n new_storage[postion] = item\n # There's a collision, now traverse the list and insert the item at the right position\n else:\n while current_item:\n\n # Check if it is the right position to insert the item\n if current_item.next is None:\n current_item.next = item\n else:\n current_item = current_item.next\n # destroy the old storage memory\n self.storage = new_storage\n\n\n\nif __name__ == \"__main__\":\n ht = HashTable(2)\n\n ht.insert(\"line_1\", \"Tiny 
hash table\")\n ht.insert(\"line_2\", \"Filled beyond capacity\")\n ht.insert(\"line_3\", \"Linked list saves the day!\")\n\n print(ht._hash('ss'))\n print(\"\")\n\n # Test storing beyond capacity\n print(ht.retrieve(\"line_1\"))\n print(ht.retrieve(\"line_2\"))\n print(ht.retrieve(\"line_3\"))\n\n # Test resizing\n old_capacity = len(ht.storage)\n ht.resize()\n new_capacity = len(ht.storage)\n\n print(f\"\\nResized from {old_capacity} to {new_capacity}.\\n\")\n\n # Test if data intact after resizing\n print(ht.retrieve(\"line_1\"))\n print(ht.retrieve(\"line_2\"))\n print(ht.retrieve(\"line_3\"))\n\n print(\"\")\n","sub_path":"src/hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":5765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"428949714","text":"import numpy as np\nimport copy\nimport matplotlib.pyplot as plt\nimport itertools\n\nfrom skimage.color import rgb2gray\nfrom skimage.filters import threshold_otsu\nfrom skimage.morphology import binary_erosion\nfrom imantics import Polygons\nfrom typing import List, Tuple\n\n\ndef plot_it(image_array: np.array) -> None:\n\t\"\"\"This function shows the plot of a given image (np.array([...]))\"\"\"\n\tplt.rcParams[\"figure.figsize\"] = (20, 15)\n\n\tfig, ax = plt.subplots(1)\n\tax.imshow(image_array)\n\t# plt.savefig(\"./image_\"+str(image_counter)+\".png\")\n\tplt.show()\n\treturn\n\n\ndef define_bounding_box_center(bounding_box: np.array) -> np.array:\n\t\"\"\"This function return the center np.array([x,y]) of a given bounding box np.array([[x1, y1], [x2, y2],...,[x_n,\n\ty_n]]) \"\"\"\n\tx_values = np.array([])\n\ty_values = np.array([])\n\tfor node in bounding_box:\n\t\tx_values = np.append(x_values, node[0])\n\t\ty_values = np.append(y_values, node[1])\n\treturn np.array([np.mean(x_values), np.mean(y_values)])\n\n\ndef define_edge_line(node_1: Tuple[int, int], node_2: Tuple[int, int]) -> Tuple[float, float]:\n\t\"\"\"This function returns slope and intercept of a linear function between two given points in a 2-dimensional space\"\"\"\n\tslope = (node_2[1]-node_1[1])/(node_2[0]-node_1[0])\n\tintercept = node_1[1] - slope * node_1[0]\n\treturn slope, intercept\n\n\ndef euklidian_distance(node_1: Tuple[int, int], node_2: Tuple[int, int]) -> float:\n\t\"\"\"This function returns the euklidian distance between two given points in a 2-dimensional space.\"\"\"\n\treturn np.sqrt((node_2[0]-node_1[0]) ** 2 + (node_2[1]-node_1[1])**2)\n\n\ndef set_x_diff_range(x_diff: float, eukl_distance: float, image_array: np.array) -> np.array:\n\t\"\"\"This function takes the amount of steps on an edge and returns the corresponding list of steps in random order\"\"\"\n\tblur_factor = int(image_array.shape[1]/500) if image_array.shape[1]/500 >= 1 else 
1\n\tif x_diff > 0:\n\t\tx_diff_range = np.arange(0, x_diff, blur_factor/eukl_distance)\n\telse:\n\t\tx_diff_range = np.arange(0, x_diff, -blur_factor/eukl_distance)\n\tnp.random.shuffle(x_diff_range)\n\treturn x_diff_range\n\n\ndef define_next_pixel_to_check(bounding_box: np.array, node_index: int, step: int, image_shape: Tuple[int, int, int]) -> Tuple[int, int]:\n\t\"\"\"This function returns the next pixel to check in the image (along the edge of the bounding box).\n\tIn the case that it tries to check a pixel outside of the image, this is corrected.\"\"\"\n\t# Define the edges of the bounding box\n\tslope, intercept = define_edge_line(bounding_box[node_index], bounding_box[node_index-1])\n\tx = int(bounding_box[node_index-1][0] + step)\n\ty = int((slope * (bounding_box[node_index-1][0] + step)) + intercept)\n\tif y >= image_shape[0]:\n\t\ty = image_shape[0]-1\n\tif y < 0:\n\t\ty = 0\n\tif x >= image_shape[1]:\n\t\tx = image_shape[1]-1\n\tif x < 0:\n\t\tx = 0\n\treturn x, y\n\n\ndef adapt_x_values(bounding_box: np.array, node_index: int, image_shape: Tuple[int, int, int]) -> np.array:\n\t\"\"\"If two nodes form a vertical edge the function that descripes that edge will in- or decrease infinitely with dx so we need to alter those nodes.\n\tThis function returns a bounding box where the nodes are altered depending on their relative position to the center of the bounding box.\n\tIf a node is at the border of the image, then it is not changed.\"\"\"\n\tbounding_box = copy.deepcopy(bounding_box)\n\tif bounding_box[node_index][0] != image_shape[1]:\n\t\tbounding_box_center = define_bounding_box_center(bounding_box)\n\t\tif bounding_box[node_index][0] < bounding_box_center[0]:\n\t\t\tbounding_box[node_index][0] = bounding_box[node_index][0] - 1\n\t\telse:\n\t\t\tbounding_box[node_index][0] = bounding_box[node_index][0] + 1\n\treturn bounding_box\n\n\ndef mask_2_polygons(mask_array: np.array) -> List:\n\t\"\"\"This function takes a mask array and returns a list of 
def mask_2_polygons(mask_array: np.array) -> list:
	"""Convert a binary mask into a list of polygon bounding boxes (node coordinates)."""
	# NOTE(review): the definition header was truncated in the chunk under
	# review; reconstructed from the visible body (takes the mask array and
	# returns the imantics polygon points) — confirm against the full file.
	return Polygons.from_mask(mask_array).points


def binarize_image(image_array: np.array, threshold="otsu") -> np.array:
	"""Binarize an RGB image array.

	If *threshold* is the string "otsu", the Otsu threshold of the grayscale
	image is computed and used; otherwise the given numeric threshold is
	applied directly to the grayscale values.
	"""
	grayscale = rgb2gray(image_array)
	if threshold == "otsu":
		threshold = threshold_otsu(grayscale)
	return grayscale > threshold


def define_relevant_polygons(polygon: np.array) -> np.array:
	"""Drop small irrelevant mask islands.

	The Mask R-CNN model sometimes produces one big mask plus small stray
	blobs. Given the imantics polygon list, keep only the box with the
	largest extent in y-direction and return it as a one-element list.
	"""
	if len(polygon) == 1:
		return polygon
	y_extents = []
	for box in polygon:
		y_values = np.array([node[1] for node in box])
		y_extents.append(y_values.max() - y_values.min())
	return [polygon[y_extents.index(max(y_extents))]]


def find_seeds_contours(image_array: np.array, bounding_box: np.array) -> List[Tuple[int, int]]:
	"""Walk the edges of *bounding_box* over *image_array* and collect the
	(x, y) coordinates of non-white pixels (grey value < 0.9) found there."""
	seed_pixels = []
	for node_index in range(len(bounding_box)):
		# Steps in x-direction needed to get from node n-1 to node n.
		x_diff = bounding_box[node_index][0] - bounding_box[node_index - 1][0]
		if x_diff == 0:
			# Vertical edge: nudge x-values so the edge can be walked.
			bounding_box = adapt_x_values(bounding_box=bounding_box, node_index=node_index, image_shape=image_array.shape)
		# Recompute: the bounding box may just have been adapted above.
		x_diff = bounding_box[node_index][0] - bounding_box[node_index - 1][0]
		x_diff_range = set_x_diff_range(x_diff, euklidian_distance(bounding_box[node_index], bounding_box[node_index - 1]), image_array)
		# Walk down the edge; record every pixel that is not white.
		for step in x_diff_range:
			x, y = define_next_pixel_to_check(bounding_box, node_index, step, image_shape=image_array.shape)
			if image_array[y, x] < 0.9:
				seed_pixels.append((x, y))
	return seed_pixels


def find_mask_center(mask_array: np.array) -> Tuple[int, int]:
	"""Return (x, y) indices of the centre of a binary mask.

	The bounding-box centre is returned if it lies on the mask; otherwise the
	y-centre is kept and the first masked x-coordinate in that row is used.
	"""
	y_coordinates, x_coordinates = np.nonzero(mask_array)
	x_center = int((x_coordinates.max() + x_coordinates.min()) / 2)
	y_center = int((y_coordinates.max() + y_coordinates.min()) / 2)
	if not mask_array[y_center, x_center]:
		# Centre falls outside the mask (e.g. crescent shapes): fall back to
		# the first masked pixel in the centre row.
		x_center = np.where(mask_array[y_center] == True)[0][0]
	return x_center, y_center


def find_seeds(image_array: np.array, mask_array: np.array) -> List[Tuple[int, int]]:
	"""Scan outward from the mask centre and return seed pixels.

	Starting at the mask centre, four axis-aligned rays are walked until they
	leave the mask or hit a non-white pixel; each first non-white pixel found
	is collected as a seed.
	"""
	x_center, y_center = find_mask_center(mask_array)
	seed_pixels = []
	scan_pos_x = scan_neg_x = scan_pos_y = scan_neg_y = True
	for n in range(1, 1000):
		if scan_pos_x and x_center + n < image_array.shape[1]:
			if not mask_array[y_center, x_center + n]:
				scan_pos_x = False
			if not image_array[y_center, x_center + n]:
				seed_pixels.append((x_center + n, y_center))
				scan_pos_x = False
		if scan_neg_x and x_center - n >= 0:
			if not mask_array[y_center, x_center - n]:
				scan_neg_x = False
			if not image_array[y_center, x_center - n]:
				seed_pixels.append((x_center - n, y_center))
				scan_neg_x = False
		if scan_pos_y and y_center + n < image_array.shape[0]:
			if not mask_array[y_center + n, x_center]:
				scan_pos_y = False
			if not image_array[y_center + n, x_center]:
				seed_pixels.append((x_center, y_center + n))
				scan_pos_y = False
		if scan_neg_y and y_center - n >= 0:
			if not mask_array[y_center - n, x_center]:
				scan_neg_y = False
			if not image_array[y_center - n, x_center]:
				seed_pixels.append((x_center, y_center - n))
				scan_neg_y = False
	return seed_pixels


def determine_neighbour_pixels(seed_pixel: Tuple[int, int], image_shape: Tuple[int, int, int]) -> List[Tuple[int, int]]:
	"""Return the in-bounds 8-neighbourhood of *seed_pixel* as (x, y) tuples."""
	x, y = seed_pixel
	neighbour_pixels = []
	for new_x in range(x - 1, x + 2):
		if 0 <= new_x < image_shape[1]:
			for new_y in range(y - 1, y + 2):
				if 0 <= new_y < image_shape[0] and (new_x, new_y) != (x, y):
					neighbour_pixels.append((new_x, new_y))
	return neighbour_pixels


def expand_masks(image_array: np.array, seed_pixels: List[Tuple[int, int]], mask_array: np.array, contour_expansion=False) -> np.array:
	"""Flood-fill the mask outward from *seed_pixels*.

	image_array -- float image array; zero-valued pixels count as object
	seed_pixels -- (x, y) worklist of starting points (mutated in place)
	mask_array  -- binary mask to expand; zeroed first unless
	               *contour_expansion* is set (reconstruction mode)
	Returns the mask grown to cover the connected object in the image.
	"""
	if not contour_expansion:
		mask_array = np.zeros(mask_array.shape)
	# Deliberate worklist pattern: appending while iterating extends the loop.
	for seed_pixel in seed_pixels:
		for x, y in determine_neighbour_pixels(seed_pixel, image_array.shape):
			if not mask_array[y, x] and not image_array[y, x]:
				mask_array[y, x] = True
				seed_pixels.append((x, y))
	return mask_array


def expansion_coordination(mask_array: np.array, image_array: np.array) -> np.array:
	"""Coordinate the expansion of a single mask over *image_array*.

	Wraps the whole expansion procedure so it can be driven by map().
	"""
	seed_pixels = find_seeds(image_array, mask_array)
	if seed_pixels:
		return expand_masks(image_array, seed_pixels, mask_array)
	# Seed detection inside the mask failed: fall back to seeds found on the
	# mask contours and expand from there.
	polygon = mask_2_polygons(mask_array)
	polygon = define_relevant_polygons(polygon)
	seed_pixels = find_seeds_contours(image_array=image_array, bounding_box=polygon[0])
	return expand_masks(image_array, seed_pixels, mask_array, contour_expansion=True)


def complete_structure_mask(image_array: np.array, mask_array: np.array, debug=False) -> np.array:
	"""Expand every mask until it frames its complete object.

	image_array -- the image as an array
	mask_array  -- masks with shape (y, x, n) where n is the number of masks
	Returns the expanded mask array (unchanged if *mask_array* is empty).
	"""
	if mask_array.size == 0:
		print("No masks found.")
		return mask_array

	# Binarize the input image with a fixed threshold.
	binarized_image_array = binarize_image(image_array, threshold=0.85)
	if debug:
		plot_it(binarized_image_array)

	# Resolution-dependent kernel size, but never smaller than 2.
	blur_factor = int(image_array.shape[1] / 185) if image_array.shape[1] / 185 >= 2 else 2
	kernel = np.ones((blur_factor, blur_factor))
	blurred_image_array = binary_erosion(binarized_image_array, selem=kernel)
	if debug:
		plot_it(blurred_image_array)

	# Slice the stack into single masks, expand each, and re-stack.
	split_mask_arrays = np.array([mask_array[:, :, index] for index in range(mask_array.shape[2])])
	image_repeat = itertools.repeat(blurred_image_array, mask_array.shape[2])
	expanded_split_mask_arrays = map(expansion_coordination, split_mask_arrays, image_repeat)
	return np.stack(expanded_split_mask_arrays, -1)
+{"seq_id":"529783113","text":"import json, os, datetime\nfrom random import randint\n\nimport requests\n\nfrom werkzeug import secure_filename\n\nfrom flask import Flask, render_template, session, redirect, url_for, request, flash\nfrom flask_login import LoginManager, login_user, logout_user, current_user, login_required\nfrom flask.ext.bcrypt import generate_password_hash\n\nfrom models import *\nfrom pagination import Pagination\n\nimport forms\n\n\nDEBUG = True\nPORT = ''\nHOST = 'androunditgoes.pythonanywhere.com'\n#HOST = 'localhost'\nUPLOAD_FOLDER = '/home/androunditgoes/mysite/static/img/'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\nPER_PAGE = 5\n\napp = Flask(__name__)\napp.secret_key = 'mlskdjfisfwe[e20220i42mf2fra/aioh30mowgf0924mo2=gmvdVsv72v5v2vwvs5f3wef3wf83gf3v5esf1f3fw4vwvopw3mviosvmwpvp3ma31334ivlkwvog'\napp.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n@login_manager.user_loader\ndef load_user(userid):\n\treturn User.get(User.id==userid)\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\ndef send_sms(phone_number, message):\n\tif type(message) == dict:\n\t\tmessage = message['response']\n\n\treturn '{}: {}'.format(phone_number, message)\n\ndef stock_item_exists(order_no):\n\t'''This function checks if the stock item translated to by the order number exists.\n\t If it does, the stock item is returned, if it doesn\\'t, False is returned'''\n\t# check if order_no is valid and stock item exists\n\ttry:\n\t\tcatalogue_entry = Catalogue.get(Catalogue.short_code==order_no)\n\n\t\t# attempt to get stock item from catalogue entry\n\t\t# parameters holds the values needed to identify ordered stock item\n\t\tpars = catalogue_entry.long_code.split('R')\n\n\t\tstock_item = Stock.get(\n\t\t\tStock.product==int(pars[0]),\n\t\t\tStock.first_description==int(pars[1]),\n\t\t\tStock.unit==int(pars[2]),\n\t\t\tStock.brand==int(pars[3]),\n\t\t\tStock.quantity==int(pars[4]),\n\t\t\tStock.supplier==int(pars[5])\n\t\t\t)\n\n\t\t# return stock item\n\t\treturn stock_item\n\texcept DoesNotExist:\n\t\treturn False\n\ndef process_order(quantity, stock):\n\t'''this function processes all incoming orders and returns a success or error response'''\n\t# current refers to the current number of orders\n\t# needed refers to the number of orders required to meet the target\n\t# target refers to the numbers of orders we have to reach before we can make an actual sale\n\tquantity = int(quantity)\n\n\ttry:\n\t\tcurrent = Order.select(fn.sum(Order.quantity)).where(Order.stock==stock.id).scalar() #Get the current number of orders\n\n\t\tif current == None: #I don't want a TypeError below\n\t\t\tcurrent = 0\n\n\t\tneeded = stock.minimum_quantity - current\n\n\t\t# verify if quantity is less than or equal to the needed orders\n\t\tif quantity <= needed :\n\t\t\t# verify if quantity is greater the MOQ\n\t\t\tif quantity >= stock.moq or quantity == needed:\n\t\t\t\t# calculate the amount the buyer has to 
pay\n\t\t\t\tprice = quantity * stock.price\n\n\t\t\t\torder = Order.make_order(buyer=current_user.id, stock=stock, quantity=quantity, price=price)\n\t\t\t\tresponse = {\n\t\t\t\t\t'response': 'Order successfully placed. Order No: {}'.format(order.id),\n\t\t\t\t\t'category': 'green'\n\t\t\t\t}\n\n\t\t\t\t# check if the target has been met\n\t\t\t\tif stock.minimum_quantity == Order.select(fn.sum(Order.quantity)).where(Order.stock==stock.id).scalar():\n\t\t\t\t\t#update stock item\n\t\t\t\t\tstock.bought = True\n\t\t\t\t\tstock.save()\n\n\t\t\t\t\torders = Stock.get(Stock.id==stock.id).orders\n\n\t\t\t\t\tfor order in orders:\n\t\t\t\t\t\torder.ready = True\n\t\t\t\t\t\torder.save()\n\n\t\t\telse:\n\t\t\t\tresponse = {\n\t\t\t\t\t'response': 'Your order is less than the MOQ of {}'.format(stock.moq),\n\t\t\t\t\t'category': 'red'\n\t\t\t\t}\n\t\telse:\n\t\t\tresponse = {\n\t\t\t\t'response': 'Only {} units left for ordering.'.format(needed),\n\t\t\t\t'category': 'red'\n\t\t\t}\n\n\n\texcept TypeError:\n\t\tresponse = {\n\t\t\t\t'response': 'Invalid quantity',\n\t\t\t\t'category': 'red'\n\t\t\t}\n\n\treturn response\n\n@login_required\n@app.route('/monitoring')\ndef monitoring():\n\tif current_user.is_authenticated:\n\t\tif current_user.account_type == 'supplier':\n\t\t\treturn render_template('monitoring.html', HOST=HOST, PORT=PORT)\n\t\telse:\n\t\t\treturn render_template('404.html', Order=Order), 404\n\telse:\n\t\treturn render_template('404.html'), 404\n\n@app.route('/monitoring/stocks', methods=['POST', 'GET'])\ndef monitoring_stocks():\n json_response = [\n\t\t['Status', 'Current', 'Needed', 'Canceled', {'role': 'annotation'}]\n ]\n\n stocks = Stock.select().where(Stock.bought==False)\n\n for stock in stocks:\n canceled = DeletedOrder.select(fn.sum(DeletedOrder.quantity)).where(DeletedOrder.stock==stock.id).scalar()\n\n if canceled == None:\n canceled = 0\n\n response = enquire(stock.id)\n response = response.split('R')\n json_response.append(['{}{} {} 
{}'.format(stock.quantity, stock.unit.short_name, stock.brand.name, stock.product.name), int(response[1]), int(response[2]), -1 * canceled, ''])\n\n return json.JSONEncoder().encode(json_response)\n\n@app.route('/enquiry', methods=['POST', 'GET'])\ndef enquire(id=None):\n\t'''This function is for enquiring the status of a stock item. Returns the target\n\t, current numbers of orders, needed orders to meet target and the stock item\\'s moq'''\n\n\ttry:\n\t\t# id passed as arg to function overides stock_id passed as arg to url\n\t\tif id != None:\n\t\t\tstock_id = id\n\t\telse:\n\t\t\tstock_id = request.args.get('stock_id', None)\n\n\t\tstock = Stock.get(Stock.id==stock_id)\n\n\t\ttarget = stock.minimum_quantity\n\t\tmoq = stock.moq\n\t\tcurrent = Order.select(fn.sum(Order.quantity)).where(Order.stock==stock.id).scalar()\n\n\t\tif current == None:\n\t\t\tcurrent = 0\n\n\t\tneeded = target - current\n\n\t\tresponse = '{}R{}R{}R{}'.format(target, current, needed, moq)\n\n\t\treturn response\n\texcept DoesNotExist:\n\t\treturn 'Error'\n\n@app.route('/catalogue', methods=['POST', 'GET'])\ndef catalogue():\n\tstocks = Stock.select().where(Stock.bought==False)\n\n\toutput = ''\n\n\tflash('Oops. 
Sorry this page took long to load.', 'red')\n\n\tfor stock in stocks:\n\t\tproduct = stock.product\n\t\tdescription = stock.first_description\n\t\tunit = stock.unit\n\t\tbrand = stock.brand\n\t\tquantity = stock.quantity\n\t\tsupplier = stock.supplier\n\n\t\tlong_code = '{}R{}R{}R{}R{}R{}'.format(product.id, description.id, unit.id, brand.id, quantity, supplier.id)\n\t\ttry:\n\t\t\tcatalogue_entry = Catalogue.create_entry(long_code=long_code, short_code=None, available=True)\n\n\t\t\t# generate short_code from created catalogue entry and assign it to the new catalogue entry\n\t\t\tshort_code = 'RS{}{}'.format('0' * (4 - len(str(catalogue_entry.id))), catalogue_entry.id)\n\t\t\tcatalogue_entry.short_code = short_code\n\n\t\t\t# update the newly created catalogue entry with the short code\n\t\t\tcatalogue_entry.save()\n\t\texcept IntegrityError: # catalogue entry already exists\n\t\t\t# check if it has a short code assigned to it\n\t\t\tcatalogue_entry = Catalogue.get(Catalogue.long_code==long_code)\n\n\t\t\tif catalogue_entry.short_code != None:\n\t\t\t\t# generate short_code from created catalogue entry and assign it to the new catalogue entry\n\t\t\t\tshort_code = 'RS{}{}'.format('0' * (4 - len(str(catalogue_entry.id))), catalogue_entry.id)\n\t\t\t\tcatalogue_entry.short_code = short_code\n\n\t\t\t\t# update the catalogue entry with the short code\n\t\t\t\tcatalogue_entry.save()\n\n\treturn render_template('catalogue.html', stocks=stocks, list=list,\n\t\tCatalogue=Catalogue, Order=Order)\n\n@app.route('/sms_order', methods=['POST', 'GET'])\ndef sms_order():\n\t'''This function handles orders made via SMS '''\n\t# get the sender's phone number and message from the url\n\tfrom_ = request.args.get('from', None)\n\tbody = request.args.get('body', None)\n\n\t# check if user is registered\n\tif not User.select().where(User.phone==from_, User.account_type=='buyer').exists():\n\t\tresponse = 'Phone number is not registered'\n\t\treturn send_sms(phone_number=from_, 
message=response)\n\n\tif body == None:\n\t\tresponse = 'Message received but not processed'\n\n\t# split the message body to keywords and provide appropriate response\n\tkeywords = body.split('*')\n\n\t# if the message has two words, then first keyword should either be enquiry or cancel else error\n\tif len(keywords) == 2:\n\t\t# process enquiry operation\n\t\tif keywords[0].lower() == 'enquiry' or keywords[0].lower() == 'inquiry':\n\t\t\torder_no = keywords[1].upper()\n\n\t\t\t# check if stock item exists\n\t\t\tif stock_item_exists(order_no) == False:\n\t\t\t\tresponse = 'Stock item not found.'\n\t\t\telse:\n\t\t\t\tstock_item = stock_item_exists(order_no)\n\n\t\t\t\tstatus = enquire(stock_item.id).split('R')\n\n\t\t\t\tresponse = 'Target: {} Current: {} Needed: {} MOQ: {}'.format(status[0], status[1], status[2], status[3] )\n\n\t\t# process cancel order operation\n\t\telif keywords[0].lower() == 'cancel':\n\t\t\torder_no = int(keywords[1].upper().lstrip('RS'))\n\t\t\t#import pdb; pdb.set_trace()\n\t\t\t# check if order exists for this buyer\n\t\t\tif Order.select().where((Order.id==order_no) & (Order.buyer==User.get(User.phone==int(from_)).id)).exists():\n\t\t\t\torder = Order.get(Order.id==order_no)\n\t\t\t\t# set datetime cancelled\n\t\t\t\torder.date_cancelled = datetime.datetime.now()\n\t\t\t\torder.save()\n\n\t\t\t\t# save cancelled order in another table before deleting\n\t\t\t\tDeletedOrder.add_new(**order.__dict__['_data'])\n\t\t\t\tOrder.delete_instance(order)\n\n\t\t\t\tresponse = 'Order RS{} has been cancelled'.format(order_no)\n\t\t\telse:\n\t\t\t\tresponse = 'Order RS{} does not exist'.format(order_no)\n\t\t# return error\n\t\telse:\n\t\t\tresponse = 'Invalid operation'\n\n\t# if the message has 3 keywords then it's an order\n\telif len(keywords) == 3:\n\t\tif keywords[0].lower() == 'order':\n\n\t\t\torder_no = keywords[1].upper()\n\t\t\tquantity = keywords[2]\n\n\t\t\t# check if stock item exists\n\t\t\tif stock_item_exists(order_no) == 
False:\n\t\t\t\tresponse = 'Stock item not found.'\n\t\t\telse:\n\t\t\t\tstock_item = stock_item_exists(order_no)\n\n\t\t\t\t# check if item is available, if yes make order if not abort order\n\t\t\t\tif not stock_item.bought:\n\t\t\t\t\tresponse = process_order(quantity=quantity, stock=stock_item)\n\t\t\t\telse:\n\t\t\t\t\tresponse = 'Stock item not found.'\n\t\telse:\n\t\t\t# return error\n\t\t\tresponse = 'invalid input'\n\telse:\n\t\tresponse = 'invalid operation'\n\n\treturn send_sms(phone_number=from_, message=response)\n\n@app.route('/register', methods=['POST', 'GET'])\ndef register():\n\tregister_form = forms.RegisterForm()\n\n\tif register_form.validate_on_submit():\n\t\tUser.create_user(\n\t\t\tusername='unknown',\n\t\t\temail=register_form.email.data,\n\t\t\tpassword=register_form.password.data,\n\t\t\taddress=register_form.address.data,\n\t\t\taccount_type='buyer',\n\t\t\tphone=register_form.phone.data)\n\n\t\tuser = User.get(User.email==register_form.email.data)\n\n\t\tlogin_user(user)\n\n\t\tif user.account_type == 'buyer':\n\t\t\treturn redirect(url_for('buyer'))\n\t\telif user.account_type == 'supplier':\n\t\t\treturn redirect(url_for('supplier'))\n\t\telse:\n\t\t\treturn redirect(url_for('shipping'))\n\n\treturn render_template('register.html', form=register_form)\n\n@app.route('/supplier-register', methods=['POST', 'GET'])\ndef supplier_register():\n\ttry:\n\t\temail = dict(request.form.items())['email']\n\t\tpassword = dict(request.form.items())['password']\n\t\taddress = dict(request.form.items())['address']\n\t\taccount_type = dict(request.form.items())['account_type']\n\t\tcode = dict(request.form.items())['code']\n\t\tphone = dict(request.form.items())['phone']\n\n\t\tphone = int(str(code) + str(phone))\n\n\t\tUser.create_user(username='unknown', email=email, password=password, address=address,\n\t\t\taccount_type=account_type, phone=phone)\n\n\t\tuser = User.get(User.email==email)\n\n\t\tlogin_user(user)\n\n\t\tif user.account_type == 
'buyer':\n\t\t\treturn redirect(url_for('buyer'))\n\t\telif user.account_type == 'supplier':\n\t\t\treturn redirect(url_for('supplier'))\n\t\telse:\n\t\t\tredirect(url_for('shipping'))\n\n\texcept KeyError:\n\t\tflash('Please fill all fields')\n\t\treturn render_template('supplier-register.html')\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n\tproducts = Product.select().limit(10)\n\n\t#return render_template('index.html', products=products)\n\treturn redirect(url_for('buyer_feed'))\n\n@app.route('/login', methods=['POST', 'GET'])\ndef login():\n\tlogin_form = forms.LoginForm()\n\n\tif login_form.validate_on_submit():\n\t\ttry:\n\t\t\tuser = User.get(User.email == login_form.email.data)\n\n\t\t\tlogin_user(user)\n\n\t\t\tif user.account_type == 'buyer':\n\t\t\t\treturn redirect(url_for('buyer'))\n\t\t\telif user.account_type == 'supplier':\n\t\t\t\treturn redirect(url_for('seller_feed'))\n\t\t\telse:\n\t\t\t\tredirect(url_for('shipping'))\n\t\texcept DoesNotExist:\n\t\t\tpass\n\n\treturn render_template('login.html', form=login_form)\n\n@app.route('/logout', methods=['POST', 'GET'])\ndef logout():\n\tlogout_user()\n\treturn redirect(url_for('login'))\n\n@app.route('/buyer')\n@login_required\ndef buyer():\n\tstocks = Stock.select().where(Stock.bought==False)\n\tmy_orders = current_user.orders.order_by(Order.date_ordered.desc())\n\n\treturn render_template('buyer-dashboard.html', stocks=stocks,\n\t\tOrder=Order, Stock=Stock, fn=fn, my_orders=my_orders, list=list)\n\n@app.route('/buyer/notifications')\n@login_required\ndef buyer_notifications():\n\tmy_orders = current_user.orders.where(Order.ready==True).order_by(Order.date_ordered.desc())\n\n\treturn render_template('buyer-notifications.html',\n\t\tOrder=Order, Stock=Stock, fn=fn, my_orders=my_orders, list=list)\n\n@app.route('/buyer/cart')\n@login_required\ndef buyer_orders():\n\tmy_orders = current_user.orders.where(Order.ready==False).order_by(Order.date_ordered.desc())\n\n\treturn 
render_template('buyer-orders.html',\n\t\tOrder=Order, Stock=Stock, fn=fn, my_orders=my_orders, list=list)\n\n@app.route('/buyer/feed/', defaults={'page': 1}, methods=['POST', 'GET'])\n@app.route('/buyer/feed//')\n@app.route('/buyer/feed/page/')\n@login_required\ndef buyer_feed(page, product=None):\n\n\tif product == None:\n\t\tstocks = Stock.select().where(Stock.bought==False).paginate(int(page), PER_PAGE)\n\t\tcount = Stock.select().where(Stock.bought==False).count()\n\telse:\n\t\tstocks = Product.get(Product.name==product).in_stock.where(Stock.bought==False).paginate(int(page), PER_PAGE)\n\t\tcount = Product.get(Product.name==product).in_stock.where(Stock.bought==False).count()\n\n\tstocks = [{'id': json.dumps(str(stock.id)) , 'stock': stock, 'form': forms.OrderForm() } for stock in stocks] # this will be used for adding listings to the homepage\n\n\tstock_ids = [stock['id'] for stock in stocks ]\n\n\tpagination = Pagination(page, PER_PAGE, count)\n\n\treturn render_template('buyer-feed.html', stocks=stocks, current_user=current_user,\n\t\tOrder=Order, Stock=Stock, fn=fn, int=int, pagination=pagination, page=page, os=os,\n\t\tHOST=HOST, PORT=PORT, UPLOAD_FOLDER=UPLOAD_FOLDER)\n\n@app.route('/seller/feed/', defaults={'page': 1}, methods=['POST', 'GET'])\n@app.route('/seller/feed//')\n@app.route('/seller/feed/page/')\n@login_required\ndef seller_feed(page, product=None):\n\n\tif product == None:\n\t\tstocks = Stock.select().where(Stock.bought==False).paginate(int(page), 100)\n\t\tcount = Stock.select().where(Stock.bought==False).count()\n\telse:\n\t\tstocks = Product.get(Product.name==product).in_stock.where(Stock.bought==False).paginate(int(page), 100)\n\t\tcount = Product.get(Product.name==product).in_stock.where(Stock.bought==False).count()\n\n\tstocks = [{'id': json.dumps(str(stock.id)) , 'stock': stock, 'form': forms.OrderForm() } for stock in stocks] # this will be used for adding listings to the homepage\n\n\tstock_ids = [stock['id'] for stock in stocks 
]\n\n\tpagination = Pagination(page, 100, count)\n\n\treturn render_template('admin-feed.html', stocks=stocks, current_user=current_user,\n\t\tOrder=Order, Stock=Stock, fn=fn, int=int, pagination=pagination, page=page, os=os,\n\t\tlist=list, HOST=HOST)\n\n@app.route('/buyer/how-it-works', methods=['POST', 'GET'])\n@login_required\ndef buyer_how_it_works():\n\tstocks = Stock.select().where(Stock.bought==False)\n\tstocks = [{'id': json.dumps(str(stock.id)) , 'stock': stock } for stock in stocks] # this will be used for adding listings to the homepage\n\tunits = Unit.select()\n\n\treturn render_template('buyer-how-it-works.html', stocks=stocks, Order=Order,\n\t\t\tcurrent_user=current_user, Stock=Stock, fn=fn, units=units)\n\n\n@app.route('/buyer/make-suggestion', methods=['POST', 'GET'])\n@login_required\ndef buyer_make_suggestion():\n\tstocks = Stock.select().where(Stock.bought==False)\n\tstocks = [{'id': json.dumps(str(stock.id)) , 'stock': stock } for stock in stocks] # this will be used for adding listings to the homepage\n\tunits = Unit.select()\n\n\ttry:\n\t\tsuggestion = dict(request.form.items())['suggestion']\n\t\t#scale = int(dict(request.form.items())['scale']) removed for now\n\n\t\tSuggestion.make_suggestion(suggestion=suggestion, scale=0, user=current_user.id)\n\t\tflash('Suggestion submitted. Thank you!', 'success')\n\t\treturn redirect(url_for('buyer_make_suggestion'))\n\texcept KeyError: #some required fields blanks\n\t\treturn render_template('buyer-make-suggestion.html', stocks=stocks,\n\t\t\tcurrent_user=current_user, Order=Order, Stock=Stock, fn=fn, units=units\n\t\t)\n\texcept ValueError: #scale was text not number\n\t\tflash('Invalid entry. 
Please try again', 'error')\n\t\treturn redirect(url_for('buyer_make_suggestion'))\n\n@app.route('/seller')\n@login_required\ndef supplier():\n\tstocks = Stock.select().where(Stock.bought==False)\n\tstocks = [{'id': json.dumps(str(stock.id)) , 'stock': stock } for stock in stocks] # this will be used for adding listings to the homepage\n\tmy_stocks = current_user.products\n\n\ttags = [product.name for product in Product.select()]\n\ttags += [descriptor.description for descriptor in Descriptor.select()]\n\ttags += [brand.name for brand in Brand.select()]\n\ttags.sort()\n\n\treturn render_template('supplier_dashboard.html', stocks=stocks, current_user=current_user,\n\t\tOrder=Order, Stock=Stock, tags=tags, fn=fn, my_stocks=my_stocks, list=list)\n\n@app.route('/seller/add-product', methods=['POST', 'GET'])\n@login_required\ndef seller_add_product():\n\tadd_product_form = forms.AddProductForm()\n\tadd_product_form.units.choices = getUnits()\n\n\t# there vars provide data for the datalists in the form\n\tstocks = Stock.select().where(Stock.bought==False)\n\tstocks = [{'id': json.dumps(str(stock.id)) , 'stock': stock } for stock in stocks] # this will be used for adding listings to the homepage\n\tunits = Unit.select()\n\tbrands = Brand.select()\n\tdescriptions = Descriptor.select()\n\tproducts = Product.select()\n\tprices = Stock.select(Stock.price)\n\n\tif add_product_form.validate_on_submit():\n\t\tproduct = add_product_form.product.data\n\t\tbrand = add_product_form.brand.data\n\t\tdescription = add_product_form.description.data\n\t\tquantity = add_product_form.quantity.data\n\t\tprice = add_product_form.price.data\n\t\tunit = add_product_form.units.data\n\t\ttarget = add_product_form.target.data\n\t\tmoq = add_product_form.moq.data\n\n\t\tproduct, created = Product.create_or_get(name=product)\n\t\tbrand, created = Brand.create_or_get(name=brand)\n\t\tdescription, created = Descriptor.create_or_get(description=description)\n\t\tunit = 
Unit.get(Unit.id==unit)\n\n\t\tfilename = '{}{} {} {} {} {}.png'.format(quantity, unit.short_name, brand.name,\n\t\t\tdescription.description, product.name, randint(0, 9999999999))\n\t\t#filename = secure_filename(add_product_form.image.data.filename)\n\t\tadd_product_form.image.data.save(app.config['UPLOAD_FOLDER'] + filename)\n\n\t\t#create stock\n\t\tstock, created = Stock.get_or_create(product=product, brand=brand, first_description=description,\n\t\t\t\tquantity=quantity, price=price, unit=unit, supplier=current_user.id,\n\t\t\t\tminimum_quantity=target, moq=moq, image=filename)\n\n\t\tif created == False:\n\t\t\tflash('Stock item already exists', 'red')\n\t\telse:\n\t\t\tflash('Stock item successfully added.', 'green')\n\n\treturn render_template('seller-add-product.html', stocks=stocks,\n\t\t\tcurrent_user=current_user, brands=brands, Order=Order,\n\t\t\tStock=Stock, fn=fn, descriptions=descriptions, units=units,\n\t\t\tproducts=products, prices=prices, form=add_product_form)\n\n@app.route('/seller/make-suggestion', methods=['POST', 'GET'])\n@login_required\ndef seller_make_suggestion():\n\tstocks = Stock.select().where(Stock.bought==False)\n\tstocks = [{'id': json.dumps(str(stock.id)) , 'stock': stock } for stock in stocks] # this will be used for adding listings to the homepage\n\tunits = Unit.select()\n\n\ttry:\n\t\tsuggestion = dict(request.form.items())['suggestion']\n\t\t#scale = int(dict(request.form.items())['scale']) removed for now\n\n\t\tSuggestion.make_suggestion(suggestion=suggestion, scale=0, user=current_user.id)\n\t\tflash('Suggestion submitted. Thank you!', 'success')\n\t\treturn redirect(url_for('seller_make_suggestion'))\n\texcept KeyError: #some required fields blanks\n\t\treturn render_template('seller-make-suggestion.html', stocks=stocks,\n\t\t\tcurrent_user=current_user, Order=Order, Stock=Stock, fn=fn, units=units\n\t\t)\n\texcept ValueError: #scale was text not number\n\t\tflash('Invalid entry. 
Please try again', 'error')\n\t\treturn redirect(url_for('seller_make_suggestion'))\n\n@app.route('/shipping')\ndef shipping():\n\n\ttags = [product.name for product in Product.select()]\n\ttags += [descriptor.description for descriptor in Descriptor.select()]\n\ttags += [brand.name for brand in Brand.select()]\n\ttags.sort()\n\n\treturn render_template('shipper_dashboard.html', Order=Order, Stock=Stock,\n\t\ttags=tags, fn=fn, current_user=current_user)\n\n@app.route('/supplier/submit-product')\n@login_required\ndef submit_product():\n\treturn render_template('supplier_submit_product.html', current_user=current_user)\n\n@app.route('/order', methods=['POST'])\n@login_required\ndef order():\n\tquantity = int(dict(request.form.items())['quantity'])\n\tstock_id = dict(request.form.items())['stock_id']\n\tpage = dict(request.form.items())['page']\n\tstock = Stock.get(Stock.id==stock_id)\n\n\t# have to make sure that the person making an order is a buyer\n\tif current_user.account_type == 'buyer':\n\t\tresponse = process_order(quantity=quantity, stock=stock)\n\t\tflash(response['response'], response['category'])\n\telse:\n\t\tflash('You need a buyer account to participate in group buying.', 'red')\n\n\treturn redirect('{}page/{}'.format(url_for('buyer_feed'), page))\n\ndef getUnits():\n\tunits = Unit.select()\n\n\toptimised_units = [(unit.id, unit.short_name) for unit in units]\n\n\treturn optimised_units\n\ndef url_for_other_page(page):\n args = request.view_args.copy()\n args['page'] = page\n return url_for(request.endpoint, **args)\napp.jinja_env.globals['url_for_other_page'] = url_for_other_page\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html', Order=Order), 404\n\n@app.errorhandler(500)\ndef internal_server_errror(e):\n return render_template('500.html'), 500\n\n@app.route('/about')\ndef about():\n\treturn render_template('about-roundshopper.html')\n\n\nif __name__ == '__main__':\n\tapp.run(debug=DEBUG, port=PORT, 
host=HOST)\n\t#app.run(debug=DEBUG, host=HOST, port=PORT)\n","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":22695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"431616655","text":"import pandas as pd\r\n\r\ndata = {\"Box\": [\"Box1\", \"Box1\", \"Box1\", \"Box2\", \"Box2\", \"Box2\"],\r\n \"Dimension\": [ \"Length\", \"Width\", \"Height\", \"Length\", \"Width\", \"Height\"], \r\n 'Value': [6, 4, 2, 5, 3, 4]}\r\nmessy = pd.DataFrame(data, columns = [\"Box\",\"Dimension\", \"Value\"])\r\ntidy = messy.pivot(index = \"Box\" ,columns = \"Dimension\", values = \"Value\").reset_index()\r\n\r\nV = tidy[\"Length\"] * tidy[\"Width\"] * tidy[\"Height\"]\r\ntidy[\"Volume\"] = V\r\n\r\nswap= list(tidy)\r\nswap[1], swap[2]= swap[2], swap[1]\r\nswap[2],swap[3] = swap[3], swap[2]\r\ntidy = tidy.reindex(columns = swap)\r\nprint(tidy)","sub_path":"Lab_09_Problem(2).py","file_name":"Lab_09_Problem(2).py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"67015203","text":"class Person:\n\n def __init__(self, age):\n self.age = age\n\n def compare(self, other):\n if self.age == other.age:\n return True\n else:\n return False\n\n\nage1 = Person(33)\nage2 = Person(32)\n\nif age1.compare(age2):\n print(\"Age is same\")\nelse:\n print(\"Age is different\")\n","sub_path":"comparing.py","file_name":"comparing.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"343613658","text":"import sys, os\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom UcsBase import ManagedObject\nsys.path.remove(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nclass EquipmentFex(ManagedObject):\n\tdef __init__(self):\n\t\tManagedObject.__init__(self,\"EquipmentFex\")\n\n\t@staticmethod\n\tdef ClassId():\n\t\treturn \"equipmentFex\"\n\n\tADMIN_POWER_STATE = \"AdminPowerState\"\n\tADMIN_STATE = \"AdminState\"\n\tCONFIG_STATE = \"ConfigState\"\n\tDN = \"Dn\"\n\tID = \"Id\"\n\tLIC_GP = \"LicGP\"\n\tLIC_STATE = \"LicState\"\n\tMODEL = \"Model\"\n\tOPER_QUALIFIER = \"OperQualifier\"\n\tOPER_QUALIFIER_REASON = \"OperQualifierReason\"\n\tOPER_STATE = \"OperState\"\n\tOPERABILITY = \"Operability\"\n\tPOWER = \"Power\"\n\tPRESENCE = \"Presence\"\n\tREVISION = \"Revision\"\n\tRN = \"Rn\"\n\tSERIAL = \"Serial\"\n\tSTATUS = \"Status\"\n\tSWITCH_ID = \"SwitchId\"\n\tTHERMAL = \"Thermal\"\n\tUSR_LBL = \"UsrLbl\"\n\tVENDOR = \"Vendor\"\n\tVOLTAGE = \"Voltage\"\n\n\tCONST_ADMIN_POWER_STATE_CYCLE_IMMEDIATE = \"cycle-immediate\"\n\tCONST_ADMIN_POWER_STATE_CYCLE_WAIT = \"cycle-wait\"\n\tCONST_ADMIN_POWER_STATE_POLICY = \"policy\"\n\tCONST_ADMIN_STATE_ACKNOWLEDGED = \"acknowledged\"\n\tCONST_ADMIN_STATE_AUTO_ACKNOWLEDGE = \"auto-acknowledge\"\n\tCONST_ADMIN_STATE_DECOMMISSION = \"decommission\"\n\tCONST_ADMIN_STATE_DISABLE_PORT_CHANNEL = \"disable-port-channel\"\n\tCONST_ADMIN_STATE_ENABLE_PORT_CHANNEL = \"enable-port-channel\"\n\tCONST_ADMIN_STATE_RE_ACKNOWLEDGE = \"re-acknowledge\"\n\tCONST_ADMIN_STATE_REMOVE = \"remove\"\n\tCONST_CONFIG_STATE_ACK_IN_PROGRESS = \"ack-in-progress\"\n\tCONST_CONFIG_STATE_ACKNOWLEDGED = \"acknowledged\"\n\tCONST_CONFIG_STATE_AUTO_ACK = \"auto-ack\"\n\tCONST_CONFIG_STATE_EVALUATION = \"evaluation\"\n\tCONST_CONFIG_STATE_OK = \"ok\"\n\tCONST_CONFIG_STATE_REMOVING = \"removing\"\n\tCONST_CONFIG_STATE_UN_ACKNOWLEDGED = \"un-acknowledged\"\n\tCONST_CONFIG_STATE_UN_INITIALIZED 
= \"un-initialized\"\n\tCONST_CONFIG_STATE_UNSUPPORTED_CONNECTIVITY = \"unsupported-connectivity\"\n\tCONST_FSM_PREV_REMOVE_FEX_BEGIN = \"RemoveFexBegin\"\n\tCONST_FSM_PREV_REMOVE_FEX_CLEANUP_ENTRIES = \"RemoveFexCleanupEntries\"\n\tCONST_FSM_PREV_REMOVE_FEX_DECOMISSION = \"RemoveFexDecomission\"\n\tCONST_FSM_PREV_REMOVE_FEX_FAIL = \"RemoveFexFail\"\n\tCONST_FSM_PREV_REMOVE_FEX_SUCCESS = \"RemoveFexSuccess\"\n\tCONST_FSM_PREV_REMOVE_FEX_UN_IDENTIFY_LOCAL = \"RemoveFexUnIdentifyLocal\"\n\tCONST_FSM_PREV_REMOVE_FEX_WAIT = \"RemoveFexWait\"\n\tCONST_FSM_PREV_NOP = \"nop\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_2FA_AUTH_RETRY = \"ERR-2fa-auth-retry\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_FAILED = \"ERR-ACTIVATE-failed\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = \"ERR-ACTIVATE-in-progress\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_RETRY = \"ERR-ACTIVATE-retry\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = \"ERR-BIOS-TOKENS-OLD-BIOS\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = \"ERR-BIOS-TOKENS-OLD-CIMC\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = \"ERR-BIOS-network-boot-order-not-found\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = \"ERR-BOARDCTRLUPDATE-ignore\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = \"ERR-DIAG-cancelled\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = \"ERR-DIAG-fsm-restarted\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = \"ERR-DIAG-test-failed\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = \"ERR-DNLD-authentication-failure\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = \"ERR-DNLD-hostkey-mismatch\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = \"ERR-DNLD-invalid-image\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = \"ERR-DNLD-no-file\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = \"ERR-DNLD-no-space\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = 
\"ERR-DNS-delete-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = \"ERR-DNS-get-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = \"ERR-DNS-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = \"ERR-Diagnostics-in-progress\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = \"ERR-Diagnostics-memtest-in-progress\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = \"ERR-Diagnostics-network-in-progress\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = \"ERR-FILTER-illegal-format\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = \"ERR-FSM-no-such-state\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = \"ERR-HOST-fru-identity-mismatch\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = \"ERR-HTTP-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = \"ERR-HTTPS-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = \"ERR-IBMC-analyze-results\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECT_ERROR = \"ERR-IBMC-connect-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = \"ERR-IBMC-connector-info-retrieval-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = \"ERR-IBMC-fru-retrieval-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = \"ERR-IBMC-invalid-end-point-config\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = \"ERR-IBMC-results-not-ready\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = \"ERR-MAX-subscriptions-allowed-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = \"ERR-MO-CONFIG-child-object-cant-be-configured\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = \"ERR-MO-META-no-such-object-class\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = \"ERR-MO-PROPERTY-no-such-property\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = 
\"ERR-MO-PROPERTY-value-out-of-range\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = \"ERR-MO-access-denied\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = \"ERR-MO-deletion-rule-violation\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = \"ERR-MO-duplicate-object\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = \"ERR-MO-illegal-containment\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = \"ERR-MO-illegal-creation\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = \"ERR-MO-illegal-iterator-state\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = \"ERR-MO-illegal-object-lifecycle-transition\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = \"ERR-MO-naming-rule-violation\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = \"ERR-MO-object-not-found\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = \"ERR-MO-resource-allocation\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = \"ERR-NTP-delete-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = \"ERR-NTP-get-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = \"ERR-NTP-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = \"ERR-POWER-CAP-UNSUPPORTED\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_SERVER_MIS_CONNECT = \"ERR-SERVER-mis-connect\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = \"ERR-SWITCH-invalid-if-config\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = \"ERR-TOKEN-request-denied\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = \"ERR-UNABLE-TO-FETCH-BIOS-SETTINGS\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_UPDATE_FAILED = \"ERR-UPDATE-failed\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_UPDATE_IN_PROGRESS = \"ERR-UPDATE-in-progress\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_UPDATE_RETRY = \"ERR-UPDATE-retry\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = 
\"ERR-aaa-config-modify-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = \"ERR-acct-realm-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = \"ERR-admin-passwd-set\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_AUTH_ISSUE = \"ERR-auth-issue\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = \"ERR-auth-realm-get-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = \"ERR-auth-realm-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = \"ERR-authentication\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = \"ERR-authorization-required\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = \"ERR-cli-session-limit-reached\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = \"ERR-create-keyring\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = \"ERR-create-locale\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = \"ERR-create-role\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = \"ERR-create-user\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = \"ERR-delete-locale\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = \"ERR-delete-role\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = \"ERR-delete-session\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = \"ERR-delete-user\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = \"ERR-efi-Diagnostics--in-progress\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_ENABLE_MGMT_CONN = \"ERR-enable-mgmt-conn\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_EP_SET_ERROR = \"ERR-ep-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = \"ERR-get-max-http-user-sessions\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = \"ERR-http-initializing\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = \"ERR-insufficiently-equipped\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = \"ERR-internal-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = \"ERR-ldap-delete-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = 
\"ERR-ldap-get-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = \"ERR-ldap-group-modify-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = \"ERR-ldap-group-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = \"ERR-ldap-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = \"ERR-locale-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = \"ERR-max-userid-sessions-reached\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MISSING_METHOD = \"ERR-missing-method\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = \"ERR-modify-locale\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = \"ERR-modify-role\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = \"ERR-modify-user\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = \"ERR-modify-user-locale\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = \"ERR-modify-user-role\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = \"ERR-provider-group-modify-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = \"ERR-provider-group-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GET_ERROR = \"ERR-radius-get-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = \"ERR-radius-global-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = \"ERR-radius-group-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = \"ERR-radius-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_REQUEST_TIMEOUT = \"ERR-request-timeout\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_RESET_ADAPTER = \"ERR-reset-adapter\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = \"ERR-role-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_SECONDARY_NODE = \"ERR-secondary-node\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = \"ERR-service-not-ready\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = \"ERR-session-cache-full\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = 
\"ERR-session-not-found\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_SET_NETWORK = \"ERR-set-network\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = \"ERR-set-password-strength-check\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_SET_PORT_CHANNEL = \"ERR-set-port-channel\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = \"ERR-store-pre-login-banner-msg\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = \"ERR-tacacs-enable-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = \"ERR-tacacs-global-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = \"ERR-tacacs-group-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = \"ERR-tacacs-plus-get-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = \"ERR-tacacs-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_1 = \"ERR-test-error-1\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_2 = \"ERR-test-error-2\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = \"ERR-timezone-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = \"ERR-user-account-expired\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = \"ERR-user-set-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_ERR_XML_PARSE_ERROR = \"ERR-xml-parse-error\"\n\tCONST_FSM_RMT_INV_ERR_CODE_NONE = \"none\"\n\tCONST_FSM_STAMP_NEVER = \"never\"\n\tCONST_FSM_STATUS_REMOVE_FEX_BEGIN = \"RemoveFexBegin\"\n\tCONST_FSM_STATUS_REMOVE_FEX_CLEANUP_ENTRIES = \"RemoveFexCleanupEntries\"\n\tCONST_FSM_STATUS_REMOVE_FEX_DECOMISSION = \"RemoveFexDecomission\"\n\tCONST_FSM_STATUS_REMOVE_FEX_FAIL = \"RemoveFexFail\"\n\tCONST_FSM_STATUS_REMOVE_FEX_SUCCESS = \"RemoveFexSuccess\"\n\tCONST_FSM_STATUS_REMOVE_FEX_UN_IDENTIFY_LOCAL = \"RemoveFexUnIdentifyLocal\"\n\tCONST_FSM_STATUS_REMOVE_FEX_WAIT = \"RemoveFexWait\"\n\tCONST_FSM_STATUS_NOP = \"nop\"\n\tCONST_LIC_STATE_LICENSE_EXPIRED = \"license-expired\"\n\tCONST_LIC_STATE_LICENSE_GRACEPERIOD = \"license-graceperiod\"\n\tCONST_LIC_STATE_LICENSE_INSUFFICIENT = 
\"license-insufficient\"\n\tCONST_LIC_STATE_LICENSE_OK = \"license-ok\"\n\tCONST_LIC_STATE_NOT_APPLICABLE = \"not-applicable\"\n\tCONST_LIC_STATE_UNKNOWN = \"unknown\"\n\tCONST_OPER_STATE_ACCESSIBILITY_PROBLEM = \"accessibility-problem\"\n\tCONST_OPER_STATE_AUTO_UPGRADE = \"auto-upgrade\"\n\tCONST_OPER_STATE_BIOS_POST_TIMEOUT = \"bios-post-timeout\"\n\tCONST_OPER_STATE_CHASSIS_LIMIT_EXCEEDED = \"chassis-limit-exceeded\"\n\tCONST_OPER_STATE_CONFIG = \"config\"\n\tCONST_OPER_STATE_DECOMISSIONING = \"decomissioning\"\n\tCONST_OPER_STATE_DEGRADED = \"degraded\"\n\tCONST_OPER_STATE_DISABLED = \"disabled\"\n\tCONST_OPER_STATE_DISCOVERY = \"discovery\"\n\tCONST_OPER_STATE_DISCOVERY_FAILED = \"discovery-failed\"\n\tCONST_OPER_STATE_EQUIPMENT_PROBLEM = \"equipment-problem\"\n\tCONST_OPER_STATE_FABRIC_CONN_PROBLEM = \"fabric-conn-problem\"\n\tCONST_OPER_STATE_FABRIC_UNSUPPORTED_CONN = \"fabric-unsupported-conn\"\n\tCONST_OPER_STATE_IDENTIFY = \"identify\"\n\tCONST_OPER_STATE_IDENTITY_UNESTABLISHABLE = \"identity-unestablishable\"\n\tCONST_OPER_STATE_INOPERABLE = \"inoperable\"\n\tCONST_OPER_STATE_LINK_ACTIVATE_BLOCKED = \"link-activate-blocked\"\n\tCONST_OPER_STATE_MALFORMED_FRU = \"malformed-fru\"\n\tCONST_OPER_STATE_NOT_SUPPORTED = \"not-supported\"\n\tCONST_OPER_STATE_OPERABLE = \"operable\"\n\tCONST_OPER_STATE_PEER_COMM_PROBLEM = \"peer-comm-problem\"\n\tCONST_OPER_STATE_PERFORMANCE_PROBLEM = \"performance-problem\"\n\tCONST_OPER_STATE_POST_FAILURE = \"post-failure\"\n\tCONST_OPER_STATE_POWER_PROBLEM = \"power-problem\"\n\tCONST_OPER_STATE_POWERED_OFF = \"powered-off\"\n\tCONST_OPER_STATE_REMOVED = \"removed\"\n\tCONST_OPER_STATE_THERMAL_PROBLEM = \"thermal-problem\"\n\tCONST_OPER_STATE_UNKNOWN = \"unknown\"\n\tCONST_OPER_STATE_UPGRADE_PROBLEM = \"upgrade-problem\"\n\tCONST_OPER_STATE_VOLTAGE_PROBLEM = \"voltage-problem\"\n\tCONST_OPERABILITY_ACCESSIBILITY_PROBLEM = \"accessibility-problem\"\n\tCONST_OPERABILITY_AUTO_UPGRADE = 
\"auto-upgrade\"\n\tCONST_OPERABILITY_BIOS_POST_TIMEOUT = \"bios-post-timeout\"\n\tCONST_OPERABILITY_CHASSIS_LIMIT_EXCEEDED = \"chassis-limit-exceeded\"\n\tCONST_OPERABILITY_CONFIG = \"config\"\n\tCONST_OPERABILITY_DECOMISSIONING = \"decomissioning\"\n\tCONST_OPERABILITY_DEGRADED = \"degraded\"\n\tCONST_OPERABILITY_DISABLED = \"disabled\"\n\tCONST_OPERABILITY_DISCOVERY = \"discovery\"\n\tCONST_OPERABILITY_DISCOVERY_FAILED = \"discovery-failed\"\n\tCONST_OPERABILITY_EQUIPMENT_PROBLEM = \"equipment-problem\"\n\tCONST_OPERABILITY_FABRIC_CONN_PROBLEM = \"fabric-conn-problem\"\n\tCONST_OPERABILITY_FABRIC_UNSUPPORTED_CONN = \"fabric-unsupported-conn\"\n\tCONST_OPERABILITY_IDENTIFY = \"identify\"\n\tCONST_OPERABILITY_IDENTITY_UNESTABLISHABLE = \"identity-unestablishable\"\n\tCONST_OPERABILITY_INOPERABLE = \"inoperable\"\n\tCONST_OPERABILITY_LINK_ACTIVATE_BLOCKED = \"link-activate-blocked\"\n\tCONST_OPERABILITY_MALFORMED_FRU = \"malformed-fru\"\n\tCONST_OPERABILITY_NOT_SUPPORTED = \"not-supported\"\n\tCONST_OPERABILITY_OPERABLE = \"operable\"\n\tCONST_OPERABILITY_PEER_COMM_PROBLEM = \"peer-comm-problem\"\n\tCONST_OPERABILITY_PERFORMANCE_PROBLEM = \"performance-problem\"\n\tCONST_OPERABILITY_POST_FAILURE = \"post-failure\"\n\tCONST_OPERABILITY_POWER_PROBLEM = \"power-problem\"\n\tCONST_OPERABILITY_POWERED_OFF = \"powered-off\"\n\tCONST_OPERABILITY_REMOVED = \"removed\"\n\tCONST_OPERABILITY_THERMAL_PROBLEM = \"thermal-problem\"\n\tCONST_OPERABILITY_UNKNOWN = \"unknown\"\n\tCONST_OPERABILITY_UPGRADE_PROBLEM = \"upgrade-problem\"\n\tCONST_OPERABILITY_VOLTAGE_PROBLEM = \"voltage-problem\"\n\tCONST_POWER_DEGRADED = \"degraded\"\n\tCONST_POWER_ERROR = \"error\"\n\tCONST_POWER_FAILED = \"failed\"\n\tCONST_POWER_NOT_SUPPORTED = \"not-supported\"\n\tCONST_POWER_OFF = \"off\"\n\tCONST_POWER_OFFDUTY = \"offduty\"\n\tCONST_POWER_OFFLINE = \"offline\"\n\tCONST_POWER_OK = \"ok\"\n\tCONST_POWER_ON = \"on\"\n\tCONST_POWER_ONLINE = \"online\"\n\tCONST_POWER_POWER_SAVE = 
\"power-save\"\n\tCONST_POWER_TEST = \"test\"\n\tCONST_POWER_UNKNOWN = \"unknown\"\n\tCONST_PRESENCE_EMPTY = \"empty\"\n\tCONST_PRESENCE_EQUIPPED = \"equipped\"\n\tCONST_PRESENCE_EQUIPPED_IDENTITY_UNESTABLISHABLE = \"equipped-identity-unestablishable\"\n\tCONST_PRESENCE_EQUIPPED_NOT_PRIMARY = \"equipped-not-primary\"\n\tCONST_PRESENCE_EQUIPPED_SLAVE = \"equipped-slave\"\n\tCONST_PRESENCE_EQUIPPED_WITH_MALFORMED_FRU = \"equipped-with-malformed-fru\"\n\tCONST_PRESENCE_INACCESSIBLE = \"inaccessible\"\n\tCONST_PRESENCE_MISMATCH = \"mismatch\"\n\tCONST_PRESENCE_MISMATCH_IDENTITY_UNESTABLISHABLE = \"mismatch-identity-unestablishable\"\n\tCONST_PRESENCE_MISMATCH_SLAVE = \"mismatch-slave\"\n\tCONST_PRESENCE_MISSING = \"missing\"\n\tCONST_PRESENCE_MISSING_SLAVE = \"missing-slave\"\n\tCONST_PRESENCE_NOT_SUPPORTED = \"not-supported\"\n\tCONST_PRESENCE_UNAUTHORIZED = \"unauthorized\"\n\tCONST_PRESENCE_UNKNOWN = \"unknown\"\n\tCONST_SWITCH_ID_A = \"A\"\n\tCONST_SWITCH_ID_B = \"B\"\n\tCONST_SWITCH_ID_NONE = \"NONE\"\n\tCONST_THERMAL_LOWER_CRITICAL = \"lower-critical\"\n\tCONST_THERMAL_LOWER_NON_CRITICAL = \"lower-non-critical\"\n\tCONST_THERMAL_LOWER_NON_RECOVERABLE = \"lower-non-recoverable\"\n\tCONST_THERMAL_NOT_SUPPORTED = \"not-supported\"\n\tCONST_THERMAL_OK = \"ok\"\n\tCONST_THERMAL_UNKNOWN = \"unknown\"\n\tCONST_THERMAL_UPPER_CRITICAL = \"upper-critical\"\n\tCONST_THERMAL_UPPER_NON_CRITICAL = \"upper-non-critical\"\n\tCONST_THERMAL_UPPER_NON_RECOVERABLE = \"upper-non-recoverable\"\n\tCONST_VOLTAGE_LOWER_CRITICAL = \"lower-critical\"\n\tCONST_VOLTAGE_LOWER_NON_CRITICAL = \"lower-non-critical\"\n\tCONST_VOLTAGE_LOWER_NON_RECOVERABLE = \"lower-non-recoverable\"\n\tCONST_VOLTAGE_NOT_SUPPORTED = \"not-supported\"\n\tCONST_VOLTAGE_OK = \"ok\"\n\tCONST_VOLTAGE_UNKNOWN = \"unknown\"\n\tCONST_VOLTAGE_UPPER_CRITICAL = \"upper-critical\"\n\tCONST_VOLTAGE_UPPER_NON_CRITICAL = \"upper-non-critical\"\n\tCONST_VOLTAGE_UPPER_NON_RECOVERABLE = 
\"upper-non-recoverable\"\n","sub_path":"UcsSdk-0.8.3/src/UcsSdk/MoMeta/EquipmentFex.py","file_name":"EquipmentFex.py","file_ext":"py","file_size_in_byte":18432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"193733025","text":"# Copyright 2020 Stanford University, Los Alamos National Laboratory\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom flexflow.core.flexflow_type import ActiMode, AggrMode, PoolType, DataType, LossType, MetricsType, OpType, enum_to_int, int_to_enum\n\nclass PyTorchModel(object):\n def __init__(self, filename):\n self.tensor_dict = {}\n self.filename = filename\n \n def apply(self, ffmodel, input_tensors):\n in_file = open(self.filename, \"r\")\n output_tensors = []\n lines = in_file.readlines()\n input_idx = 0\n for line in lines:\n items = line.strip().split(\",\")\n assert len(items) >= 3, \"wrong format\"\n items = [i.strip() for i in items]\n print(items)\n\n #get op name\n op_name = items[0]\n\n #get previous ops' name\n prev_ops_list = items[1].split(\":\")\n prev_ops_list = [i.strip() for i in prev_ops_list]\n for i in prev_ops_list:\n if i == \"\":\n prev_ops_list.remove(i)\n\n #get op type\n op_type = int_to_enum(OpType, int(items[2]))\n \n if op_type == OpType.INPUT:\n assert len(prev_ops_list) == 0, \"wrong format\"\n self.tensor_dict[op_name] = input_tensors[input_idx]\n input_idx += 1\n\n elif op_type == OpType.LINEAR:\n assert len(items) == 6, \"wrong format\"\n assert len(prev_ops_list) == 1, \"wrong format\"\n input_tensor = self.tensor_dict[prev_ops_list[0]]\n od = int(items[3])\n activ = int_to_enum(ActiMode, int(items[4]))\n bias = bool(int(items[5]))\n self.tensor_dict[op_name] = 
ffmodel.dense(input=input_tensor, out_dim=od, activation=activ, use_bias=bias, name=op_name)\n\n elif op_type == OpType.CONV2D:\n assert len(items) == 13, \"wrong format\"\n assert len(prev_ops_list) == 1, \"wrong format\"\n input_tensor = self.tensor_dict[prev_ops_list[0]]\n oc = int(items[3])\n kh = int(items[4])\n kw = int(items[5])\n sh = int(items[6])\n sw = int(items[7])\n ph = int(items[8])\n pw = int(items[9])\n activ = int_to_enum(ActiMode, int(items[10]))\n group = int(items[11])\n bias = bool(int(items[12]))\n self.tensor_dict[op_name] = ffmodel.conv2d(input=input_tensor, out_channels=oc, kernel_h=kh, kernel_w=kw, stride_h=sh, stride_w=sw, padding_h=ph, padding_w=pw, activation=activ, groups=group, use_bias=bias, name=op_name)\n\n elif op_type == OpType.POOL2D:\n assert len(items) == 8, \"wrong format\"\n assert len(prev_ops_list) == 1, \"wrong format\"\n input_tensor = self.tensor_dict[prev_ops_list[0]]\n kh = int(items[3])\n sh = int(items[4])\n ph = int(items[5])\n pt = int_to_enum(PoolType, int(items[6]))\n activ = int_to_enum(ActiMode, int(items[7]))\n self.tensor_dict[op_name] = ffmodel.pool2d(input=input_tensor, kernel_h=kh, kernel_w=kh, stride_h=sh, stride_w=sh, padding_h=ph, padding_w=ph, pool_type=pt, activation=activ, name=op_name)\n\n elif op_type == OpType.DROPOUT:\n assert len(items) == 4, \"wrong format\"\n assert len(prev_ops_list) == 1, \"wrong format\"\n input_tensor = self.tensor_dict[prev_ops_list[0]]\n r = int(item[3])\n self.tensor_dict[op_name] = ffmodel.dropout(input=input_tensor, rate=r, seed=0, name=op_name)\n\n elif op_type == OpType.FLAT:\n assert len(items) == 3, \"wrong format\"\n assert len(prev_ops_list) == 1, \"wrong format\"\n input_tensor = self.tensor_dict[prev_ops_list[0]]\n self.tensor_dict[op_name] = ffmodel.flat(input=input_tensor, name=op_name)\n\n elif op_type == OpType.RELU:\n assert len(items) == 3, \"wrong format\"\n assert len(prev_ops_list) == 1, \"wrong format\"\n input_tensor = 
self.tensor_dict[prev_ops_list[0]]\n self.tensor_dict[op_name] = ffmodel.relu(input=input_tensor, name=op_name)\n\n elif op_type == OpType.SIGMOID:\n assert len(items) == 3, \"wrong format\"\n assert len(prev_ops_list) == 1, \"wrong format\"\n input_tensor = self.tensor_dict[prev_ops_list[0]]\n self.tensor_dict[op_name] = ffmodel.sigmoid(input=input_tensor, name=op_name)\n\n elif op_type == OpType.TANH:\n assert len(items) == 3, \"wrong format\"\n assert len(prev_ops_list) == 1, \"wrong format\"\n input_tensor = self.tensor_dict[prev_ops_list[0]]\n self.tensor_dict[op_name] = ffmodel.tanh(input=input_tensor, name=op_name)\n\n elif op_type == OpType.ELU:\n assert len(items) == 3, \"wrong format\"\n assert len(prev_ops_list) == 1, \"wrong format\"\n input_tensor = self.tensor_dict[prev_ops_list[0]]\n self.tensor_dict[op_name] = ffmodel.elu(input=input_tensor, name=op_name)\n \n elif op_type == OpType.SOFTMAX:\n assert len(items) == 3, \"wrong format\"\n assert len(prev_ops_list) == 1, \"wrong format\"\n input_tensor = self.tensor_dict[prev_ops_list[0]]\n self.tensor_dict[op_name] = ffmodel.softmax(input=input_tensor, name=op_name)\n\n elif op_type == OpType.CONCAT:\n assert len(items) == 4, \"wrong format\"\n assert len(prev_ops_list) >= 2, \"wrong format\"\n input_tensors = []\n for i in prev_ops_list:\n input_tensors.append(self.tensor_dict[i])\n ax = int(items[3])\n self.tensor_dict[op_name] = ffmodel.concat(tensors=input_tensors, axis=ax, name=op_name)\n\n elif op_type == OpType.OUTPUT:\n self.tensor_dict[op_name] = []\n for i in prev_ops_list:\n self.tensor_dict[op_name].append(self.tensor_dict[i])\n output_tensors = self.tensor_dict[op_name]\n #print(output_tensors[1].handle.impl)\n\n else:\n assert 0, \"unknown op\"\n \n in_file.close()\n return output_tensors\n","sub_path":"python/flexflow/torch/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"382400914","text":"# coding=utf-8\nimport subprocess\nfrom subprocess import PIPE, Popen\nimport sys\nimport time\n\n\n'''\n使用subprocess交互输入三个yes,用于mnha切换\n'''\n\n\ncmd = \"masterha_master_switch \\\n --conf=/mhaadmin/app/appcnfs/app_glpdbdisaster.cnf \\\n --master_state=alive \\\n --new_master_host=10.156.112.55 \\\n --new_master_port=55944 \\\n --orig_master_is_new_slave \\\n --running_updates_limit=0\"\n\np = Popen([cmd], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True, bufsize=-1)\n# p.stdin.write('yes')\n# p.stdin.flush()\n\nwhile True:\n try:\n p.stdout.readline()\n out = p.communicate(input='yes\\nyes\\nyes\\n')\n print(out)\n #print(err)\n except IOError:\n print(\"IOError\")\n else:\n break\n","sub_path":"subprocess_for_mha.py","file_name":"subprocess_for_mha.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"182834516","text":"from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7\nfrom KratosMultiphysics import *\nfrom KratosMultiphysics.TrilinosApplication import *\n\n\ndef MultilevelLinearSolver(tolerance, max_iterations):\n # settings for the iterative solver\n aztec_parameters = ParameterList()\n # conjugate gradient solver for symmetric positive-definite\n # systems\n aztec_parameters.set(\"AZ_solver\", \"AZ_cg\")\n aztec_parameters.set(\"AZ_output\", \"AZ_none\")\n # settings of the ML preconditioner\n MLList = ParameterList()\n default_settings = EpetraDefaultSetter()\n default_settings.SetDefaults(MLList, \"SA\")\n MLList.set(\"max levels\", 3)\n MLList.set(\"prec type\", \"MGW\")\n MLList.set(\"smoother: type\", \"Chebyshev\")\n MLList.set(\"smoother: sweeps\", 2);\n # create solver\n linear_solver = MultiLevelSolver(aztec_parameters, MLList, tolerance, max_iterations)\n # only form the ML preconditioner at the first solve.\n # this speeds up solution of linear problems.\n linear_solver.SetReformPrecAtEachStep(False)\n\n # don't left scale the system matrix. this would destroy\n # the symmetry needed by the conjugate gradient method.\n linear_solver.SetScalingType(MLSolverScalingType.NoScaling)\n\n return linear_solver\n","sub_path":"applications/trilinos_application/python_scripts/trilinos_linear_elastic_ml_solver.py","file_name":"trilinos_linear_elastic_ml_solver.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"243318664","text":"from mgmt import connection\nfrom DiscardSession import DiscardSessionLib\nimport json\nimport requests\n\n__all__ = [\n 'EditGroupLib'\n]\n\n\nclass EditGroupLib():\n def __init__(self):\n self.f = requests.Session()\n\n def run(self, mgmt_ip, sid, group_name, hostname):\n '''\n Function used to edit a group object then to add new host or network to it.\n :param mgmt_ip: IP address of the CheckPoint Management Server.\n :param sid: Session ID provided from the Login Action.\n :param group_name: Malicious (or Whitelist) group name that will be edit to add a host.\n :param hostname: the name of the host that will be added in the mentioned above group.\n :return: the http status code of the api request to edit the group.\n '''\n t = connection(mgmt_ip)\n url = t.create_api_url(\"set-group\")\n print(url)\n host = hostname\n action = \"add\"\n request_headers = {'Content-Type':'application/json', 'X-chkp-sid' : sid}\n my_data = {\"name\": group_name, \"members\": {action: host}}\n r = self.f.post(url, data=json.dumps(my_data), headers=request_headers, verify=False)\n try:\n '''\n #Un-hash the following 2 lines to see the full response of the API request\n #a = json.loads(r.content)\n #print(a)'''\n return_code = r.status_code\n if return_code == 200:\n print(\"Edit {} Group to add host {} is OK \".format(group_name, hostname))\n return return_code\n else:\n print(\"Edit {} Group to add host {} is NOK \".format(group_name, hostname))\n '''\n #Un-hash the following 2 lines, if you want to use embedded discard to discard the changes in case of errors instead of using the Stackstorm DiscardSession action in the workflow.\n y = DiscardSessionLib()\n print(y.run(mgmt_ip, sid))\n return return_code, a['errors']\n '''\n\n except Exception as e:\n print(\"Error is :\",e)\n '''\n #Un-hash the following 2 lines, if you want to use embedded discard to discard the changes in case of errors instead of using the Stackstorm DiscardSession action in the 
workflow.\n y = DiscardSessionLib()\n print(y.run(mgmt_ip, sid))\n return return_code, a['errors']\n '''\n","sub_path":"actions/lib/EditGroup.py","file_name":"EditGroup.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"241120202","text":"# 文字列の入力\n#s1 = input()\ns1 = 'korakunoaki'\n#print(\"{}\".format(s1))\n#print(s1[4])\n\n#find()検索した文字列の初めの位置を検索\nn = s1.find('noaki')\n#print(n)\n\nif n > 0:\n for i in range(n):\n print(s1[i],end=\"\")\n\nelif n <= 0:\n print(s1)","sub_path":"入力した文字列から検索する文字列が何番目にあるか検索する/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"113352278","text":"def gCd(x, y):\n if y==0:\n return x\n else:\n return gCd(y, x%y)\n \ndef main():\n x=eval(input(\"Pick a number.\"))\n y=eval(input(\"Pick another number.\"))\n print(\"GCD of\",x,\",\",y,\"=\",gCd(x,y))\n\nif __name__ == \"__main__\":\n main()","sub_path":"EuclidGCD.py","file_name":"EuclidGCD.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"607745652","text":"\nimport requests\nfrom lxml import etree\n\nimport json\nimport random\nbaseurl = 'http://www.xicidaili.com/nn/'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',\n}\n\nhttp_list = []\n\ndef get_IP():\n print('-----IP爬取进度-----')\n for i in range(1,5):\n print('------第' + str(i) + '页开始爬取------')\n url = baseurl + str(i)\n raw_html = requests.get(url, headers=headers).text\n # print(raw_html)\n selector = etree.HTML(raw_html)\n # td[index]中index从1开始\n ip = selector.xpath('//tr[@class=\"odd\"]//td[2]/text()')\n port = selector.xpath('//tr[@class=\"odd\"]//td[3]/text()')\n httptype = selector.xpath('//tr[@class=\"odd\"]//td[6]/text()')\n for eachip,eachport,eachtype in zip(ip,port,httptype):\n http_dict = {}\n http_dict[eachtype] = eachtype + '://' + eachip + ':' + eachport\n http_list.append(http_dict)\n print(http_list) # 两页总共的ip\n print('------第' + str(i) + '页爬取结束------')\n return http_list\n\nprint('------IP爬虫结束------')\n\n# 用于测试\nif __name__ == '__main__':\n http_list = get_IP()\n print(http_list)\n print(len(http_list))\n\n ouf = open(\"valid_ip.txt\", \"a+\")\n\n # 分行存储\n for each_proxies in http_list:\n ouf.write(str(each_proxies))\n ouf.write('\\n')\n\n a = random.randint(1, len(http_list))\n # 分行随机读取\n theline = open('valid_ip.txt', 'r').readlines()[a]\n print('------------------------')\n print(theline)\n\n theline = theline.replace(\"'\", '\"')\n theline = json.loads(theline)\n print(theline)\n print(type(theline))\n html = requests.get('http://cq.ganji.com/fang5/o5/', headers=headers, proxies=theline).text\n print(html)\n\n\n","sub_path":"get_xiciip.py","file_name":"get_xiciip.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"468787385","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n\nimport tensorflow as tf\nimport vocab \nimport graph_utils\n\nMAX_LENGTH = 100\n\ndef decode_value(data):\n split_tensor = tf.string_split([data], delimiter=' ')\n tokens = split_tensor.values\n target_length = tf.size(tokens)\n def trunc():\n return tokens[:MAX_LENGTH]\n def notrunc():\n return tokens\n tokens = tf.cond(tf.greater(target_length, MAX_LENGTH),true_fn=trunc, false_fn=notrunc)\n source = tf.concat([tokens, [\"SEQUENCE_END\"]], 0)\n target = tf.concat([tokens, [\"SEQUENCE_END\"]], 0)\n target = tf.concat([[\"SEQUENCE_START\"], target], 0)\n target_length = tf.size(target)\n\n return source, target, target_length\n \ndef read_my_format(filename_queue):\n reader = tf.TextLineReader()\n key, record_string = reader.read(filename_queue)\n s, t, t_len = decode_value(record_string)\n return s, t, t_len\n\ndef input_pipeline(filenames, batch_size, num_epochs=None):\n filename_queue = tf.train.string_input_producer(\n [filenames], num_epochs=num_epochs, shuffle=True)\n\n source , target , target_length= read_my_format(filename_queue)\n\n min_after_dequeue=50\n capacity = min_after_dequeue + 2 * batch_size\n\n data_batch = tf.train.batch(\n [source, target, target_length], batch_size=batch_size, capacity=capacity,\n dynamic_pad=True)\n return data_batch\n\ndef preprcess(data, vocab_file_path):\n\n # Create vocabulary lookup for source\n source_vocab_to_id, source_id_to_vocab, source_word_to_count, _ = \\\n vocab.create_vocabulary_lookup_table(vocab_file_path)\n\n # Create vocabulary look for target\n target_vocab_to_id, target_id_to_vocab, target_word_to_count, _ = \\\n vocab.create_vocabulary_lookup_table(vocab_file_path)\n\n source_batch = data[0]\n target_batch = data[1]\n target_length = data[2]\n\n # Look up the source ids in the vocabulary\n features = 
source_vocab_to_id.lookup(source_batch)\n\n # Look up the target ids in the vocabulary\n labels = target_vocab_to_id.lookup(target_batch)\n\n # Add vocab tables to graph colection so that we can access them in\n # other places.\n graph_utils.add_dict_to_collection({\n \"target_vocab_to_id\": target_vocab_to_id,\n \"target_id_to_vocab\": target_id_to_vocab,\n }, \"vocab_tables\")\n\n\n return features, labels, target_length\n\n\n\n\n\n\n","sub_path":"old/learn_or_test/studyae/input_pipe.py","file_name":"input_pipe.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"18392782","text":"\"\"\"\n{\n \"author\": \"Yucheng Huang\",\n \"difficulty\": \"medium\",\n \"link\": \"https://leetcode.com/problems/product-of-array-except-self/description/\",\n \"beats\": 0.4188,\n \"category\": [\"math\"],\n \"tags\": [],\n \"questions\": []\n}\n\"\"\"\n\n\"\"\"\n思路\n\t- 从左边和右边开始各自delay 1个数在遍历累乘一边\n\t- 从左边开始的遍历,到第i个数,就会使output[i]乘上它左边左右的数的乘积;反之亦然\n\"\"\"\n\nclass Solution:\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n L = len(nums)\n output = [1 for __ in range(L)]\n left = nums[0]\n for i in range(1, L):\n output[i] *= left\n left *= nums[i]\n right = nums[L-1]\n for i in range(L-2, -1, -1):\n output[i] *= right\n right *= nums[i]\n return output","sub_path":"solutions/238.py","file_name":"238.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"552274348","text":"import threading,queue,time,json,urllib.request\n \nq=queue.Queue()\nl=threading.Lock()\ntxt=open('proxy.txt','r',encoding='utf-8').read()\nproxylist = json.loads(txt,encoding='utf-8')\na=0\nclass MyThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.daemon=True\n self.timeout=10\n def run(self):\n while True:\n item=q.get()\n self.register(item)\n q.task_done()\n def register(self,i):\n global a\n url = 'http://www.7do.net/member.php?mod=register&inajax=1'\n headers={'Referer':'http://www.7do.net/member.php?mod=register&fromuser=qingda',\n 'Cookie':'6cb3_lastvisit=1405905911; 6cb3_plugin_userfrom=1_1; 6cb3_promotion=666886; 6cb3_sendmail=1; 6cb3_seccodeSJcu0sU0=9336O2uqYac%2Fqw6mEcIgNb7lh6Ob%2BlOMoDU5yESsGWK4gAuqu1iGO26XX%2F2LicoTDkdomabWzVBKCIcsarc; CNZZDATA3976001=cnzz_eid%3D2138891269-1405909565-%26ntime%3D1405909565; pgv_pvi=9632772158; pgv_info=ssi=s1322463744; Hm_lvt_a977edcda5a5b058fa5da310a960e7d7=1405909577; Hm_lpvt_a977edcda5a5b058fa5da310a960e7d7=1405909577; bdshare_firstime=1405909577897; logintip_stop=1; 6cb3_sid=Jcu0sU; 6cb3_lastact=1405909542%09misc.php%09seccode',\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36'}\n try:\n \n set_proxy=urllib.request.ProxyHandler({'http':i['Ip']+':'+str(i['Port'])})\n b=urllib.request.build_opener(set_proxy)\n urllib.request.install_opener(b)\n for kk in range(3):\n email = 'e7wrf02'+str(a)\n username = 's7fhe'+str(a)+'aa'\n data = {'regsubmit':'yes','regsubmit':'true',\"formhash\":'4772b46a',\"referer\":'http://www.7do.net/./','activationauth':'',\n 'seccodeverify':'fbhm','sechash':'SJcu0sU0','yRa7a7':'123qwe','tBYOcK':'123qwe','21tP45':str(email)+'@bq.com','JBH7dN':username}\n p=urllib.parse.urlencode(data).encode(encoding='gb2312')\n req = urllib.request.Request(url,p,headers)\n r= urllib.request.urlopen(req,timeout=self.timeout)\n result=r.read().decode('gb2312')\n if 
l.acquire(1):\n a+=1\n l.release()\n if('该用户名已被注册' in result):\n print('the username is exist ! ')\n continue\n if('regfloodctrl' in result):\n print('24h register > 3')\n break\n if('感谢您的注册' in result):\n print(username+' register success ! ')\n continue\n \n print(result)\n \n except Exception as ex :\n print(ex)\n print(i['Ip'],':',i['Port'])\n pass\n \n \nfor i in range(10):\n t=MyThread()\n t.start()\nfor i in proxylist:\n q.put(i)\nq.join()\n","sub_path":"test/7doThread.py","file_name":"7doThread.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"248641669","text":"__author__ = 'inbaravni'\n\nfrom JackTokenizer import *\nfrom CompilationEngine import *\nimport sys\nimport os\n\n\n\ndef main(argv):\n # file given\n if (os.path.isfile(argv[0])):\n try:\n file = open(argv[0].split('.')[-2] + '.xml','w') # Trying to create a new file\n CompilationEngine(argv[0], file)\n file.close()\n except:\n print('Can\\'t create xml file')\n\n\n\n\n # directory given\n else:\n #path = os.path.abspath(argv[0])+'/'\n path = argv[0]\n if path[-1] != '/':\n path = path+'/'\n name = path + path.split('/')[-2]\n for each_file in os.listdir(argv[0]):\n if each_file.endswith(\".jack\"):\n try:\n file = open((path+each_file).split('.')[-2] + '.xml','w') # Trying to create a new file\n CompilationEngine((path+each_file), file)\n file.close();\n except:\n print('Can\\'t create an xml file')\n\n\n \n\n\n\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n","sub_path":"project10/JackAnalyzer.py","file_name":"JackAnalyzer.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"616515350","text":"import json\nfrom nltk.tokenize import word_tokenize\nfrom nltk.probability import FreqDist\nfrom textblob import TextBlob\nimport pickle\n\n\ndef main():\n\tmodel = pickle.load(open('tonDreSent.pkl', 'rb'))\n\twith open('namedcoins.json') as f:\n\t\tcoins = json.loads(f.read())\n\twith open('newdata.json') as f:\n\t\tdata = json.loads(f.read())\n\tallNes = \" \"\n\tcoincounta = {}\n\tfor x in data:\n\t\ttweet = x[1]['attributes']['tweet']['S']\n\t\toursentiment = model.predict([tweet])\n\t\ta = tokneiza(tweet)\n\t\twords = list(a[0])\n\t\tfor word in words:\n\t\t\tfor coin, coinphrase in coins.items():\n\t\t\t\tif word in coinphrase:\n\t\t\t\t\tif coin in list(coincounta.keys()):\n\t\t\t\t\t\tcoincounta[coin][0] += 1\n\t\t\t\t\t\tcoincounta[coin][1].append(oursentiment[0])\n\t\t\t\t\telse:\n\t\t\t\t\t\tcoincounta[coin]= [1, [oursentiment[0]]]\n\t\tnouns = list(a[2])\n\t\tallNes += \" \".join(nouns)\n\tallNes.maketrans(\" \",\"$\")\n\tallNes.maketrans(\" \",\"#\")\n\tallNes.maketrans(\" \", \"@\")\n\tfd = FreqDist(word_tokenize(allNes))\n\tprint(fd.most_common(100))\n\tfor coin, mentions in coincounta.items():\n\t\tprint(coin+\" was mentioned: \"+str(mentions[0])+\" times, \"+str(sum(mentions[1]))+\" times positively\")\n\n\ndef split_into_tokens(message):\n if type(message) is float:\n message = str(message)\n return TextBlob(message).words# / .tags\n\ndef tokneiza(message):\n\tif type(message) is not str:\n\t\tmessage = str(message)\n\tmess = TextBlob(message)\n\treturn mess.words, mess.tags, mess.noun_phrases, mess.sentiment\n\n\t# tweetCorpus = \", \".join([x[1]['attributes']['tweet']['S'] for x in data])\n\t# fd = FreqDist(word_tokenize(tweetCorpus))\n\t# fd2 = FreqDist(tweetCorpus.split(' '))\n\t# print(fd.most_common(100))\n\t# print(fd2.most_common(100))\n\n\n\nif (__name__ == 
'__main__'):\n\tmain()\n","sub_path":"processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"137394959","text":"#!/usr/bin/python3\n# coding: utf-8\n# encoding: utf-8\nimport pymysql\nimport requests\nfrom urllib.parse import unquote\nimport time\nimport re\n\n# 打开数据库连接\ndb = pymysql.connect(host=\"47.97.186.203\", user=\"root\", password=\"Rk2018!@#\",\n database=\"cal_dev\", port=3306, charset='utf8')\n\nAPI_URL = \"https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?resource_id=28297&from_mid=1&&format=json&ie=utf-8&oe=utf-8&query=%E4%BA%8C%E5%8D%81%E5%9B%9B%E8%8A%82%E6%B0%94\"\n# 使用cursor()方法获取操作游标\ncursor = db.cursor()\ntry:\n aff = cursor.execute(\"DELETE FROM t_cal_solar_term\")\n resp = requests.request('GET', API_URL)\n d = resp.json()\n terms = d['data'][0]['result']\n for term in terms:\n print(term)\n sql = \"INSERT INTO t_cal_solar_term(name,date)VALUES (%s, %s)\"\n day = unquote(term['additional'])\n mr = re.match(r'(\\d+)月\\d+-(\\d+)', day)\n if mr:\n day = '{0}-{1}'.format(mr.group(1), mr.group(2))\n print(day)\n day = '2018{}'.format(time.strftime(\n '%m%d', time.strptime(day, '%m-%d')))\n aff = cursor.execute(sql, (unquote(term['ename']), day))\n print(\"insert \", aff)\n\n db.commit()\nexcept BaseException as ex:\n # 如果发生错误则回滚\n db.rollback()\n print(ex)\n\n# 关闭数据库连接\ndb.close()\n\n# https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?query=2018%E5%B9%B44%E6%9C%88&co=&resource_id=6018&ie=utf8&format=json\n\n# https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?resource_id=28297&from_mid=1&&format=json&ie=utf-8&oe=utf-8&query=%E4%BA%8C%E5%8D%81%E5%9B%9B%E8%8A%82%E6%B0%94\n","sub_path":"python/insert_term.py","file_name":"insert_term.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"578900894","text":"# Caesar Cipher\n# http://inventwithpython.com/hacking (BSD Licensed)\n\n# import pyperclip, necessary to run this script\nimport pyperclip\n\n# message to be encrypted\nmessage = input(\"Enter you message to encrypt:\\n\")\n\n# encryption/decryption key used\nkey = 13\n\n# tell the computer whether to encrypt or decrypt\nmode = 'encrypt'\n\n# every possible symbol that can be encrypted\nLETTERS = ' !\"#$%&\\'()*+,-./0123456789:;<=>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~'\n\n# store the encrypted message\ntranslated = ''\n\n# capitalize the string before encryption\n# message = message.upper()\n\n# run the encryption code on each symbol in the message string\nfor symbol in message:\n if symbol in LETTERS:\n # get the encrypted/decrypted number for this symbol\n num = LETTERS.find(symbol) # get the number of the symbol\n if mode == 'encrypt':\n num = num + key\n elif mode == 'decrypt':\n num = num - key\n # handle the wrap-around if num is > the length of LETTERS or < 0\n if num >= len(LETTERS):\n num = num - len(LETTERS)\n elif num < 0:\n num = num + len(LETTERS)\n # add encrypted/decrypted number's symbol at the end of translated\n translated = translated + LETTERS[num]\n else:\n # just add the symbol without encrypting/decrypting\n translated = translated + symbol\n# print out the encrypted/decrypted message to the screen\nprint(translated)\n\n# copy the string to clipboard\npyperclip.copy(translated)\n","sub_path":"CaesarCipher.py","file_name":"CaesarCipher.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"142236629","text":"import os\nfrom shutil import copyfile\nimport re\nimport requests\n\nfrom dvc.command.base import CmdBase, DvcLock\nfrom dvc.data_cloud import sizeof_fmt\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\nfrom dvc.runtime import Runtime\nfrom dvc.state_file import StateFile\nfrom dvc.system import System\n\n\nclass ImportFileError(DvcException):\n def __init__(self, msg):\n DvcException.__init__(self, 'Import file: {}'.format(msg))\n\n\nclass CmdImportFile(CmdBase):\n def __init__(self, settings):\n super(CmdImportFile, self).__init__(settings)\n\n def define_args(self, parser):\n self.set_no_git_actions(parser)\n\n self.add_string_arg(parser, 'input', 'Input file.')\n self.add_string_arg(parser, 'output', 'Output file.')\n pass\n\n def run(self):\n with DvcLock(self.is_locker, self.git):\n return self.import_and_commit_if_needed(self.parsed_args.input,\n self.parsed_args.output,\n self.parsed_args.lock)\n pass\n\n def import_and_commit_if_needed(self, input, output, lock=False, check_if_ready=True):\n if check_if_ready and not self.no_git_actions and not self.git.is_ready_to_go():\n return 1\n\n self.import_file(input, output, lock)\n\n message = 'DVC import file: {} {}'.format(input, output)\n return self.commit_if_needed(message)\n\n def import_file(self, input, output, lock=False):\n if not CmdImportFile.is_url(input):\n if not os.path.exists(input):\n raise ImportFileError('Input file \"{}\" does not exist'.format(input))\n if not os.path.isfile(input):\n raise ImportFileError('Input file \"{}\" has to be a regular file'.format(input))\n\n if os.path.isdir(output):\n output = os.path.join(output, os.path.basename(input))\n\n data_item = self.settings.path_factory.data_item(output)\n\n if os.path.exists(data_item.data.relative):\n raise ImportFileError('Output file \"{}\" already exists'.format(data_item.data.relative))\n if not os.path.isdir(os.path.dirname(data_item.data.relative)):\n raise 
ImportFileError('Output file directory \"{}\" does not exists'.format(\n os.path.dirname(data_item.data.relative)))\n\n cache_dir = os.path.dirname(data_item.cache.relative)\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n if CmdImportFile.is_url(input):\n Logger.debug('Downloading file {} ...'.format(input))\n self.download_file(input, data_item.cache.relative)\n Logger.debug('Input file \"{}\" was downloaded to cache \"{}\"'.format(\n input, data_item.cache.relative))\n else:\n copyfile(input, data_item.cache.relative)\n Logger.debug('Input file \"{}\" was copied to cache \"{}\"'.format(\n input, data_item.cache.relative))\n\n Logger.debug('Creating symlink {} --> {}'.format(data_item.symlink_file, data_item.data.relative))\n System.symlink(data_item.symlink_file, data_item.data.relative)\n\n state_file = StateFile(StateFile.COMMAND_IMPORT_FILE,\n data_item.state.relative,\n self.settings,\n argv=[input, output],\n input_files=[],\n output_files=[output],\n lock=lock)\n state_file.save()\n Logger.debug('State file \"{}\" was created'.format(data_item.state.relative))\n pass\n\n URL_REGEX = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' # domain...\n r'localhost|' # localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' 
# optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n @staticmethod\n def is_url(url):\n return CmdImportFile.URL_REGEX.match(url) is not None\n\n @staticmethod\n def download_file(from_url, to_file):\n r = requests.get(from_url, stream=True)\n\n chunk_size = 1024 * 100\n downloaded = 0\n last_reported = 0\n report_bucket = 100*1024*1024\n with open(to_file, 'wb') as f:\n for chunk in r.iter_content(chunk_size=chunk_size):\n if chunk: # filter out keep-alive new chunks\n downloaded += chunk_size\n last_reported += chunk_size\n if last_reported >= report_bucket:\n last_reported = 0\n Logger.debug('Downloaded {}'.format(sizeof_fmt(downloaded)))\n f.write(chunk)\n return\n\nif __name__ == '__main__':\n Runtime.run(CmdDataImport)\n","sub_path":"dvc/command/import_file.py","file_name":"import_file.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"124007837","text":"import ply.yacc as yacc\nimport lexer\n\nprecedence = (\n ('left','PLUS','MINUS'),\n ('left','TIMES','DIV')\n\n)\n\ntokens = lexer.tokens\nst = lexer.symbolTable\n\nclass Node:\n def __init__(self, type, children=None, leaf=None):\n self.type = type\n if children:\n self.children = children\n self.terminal = False\n else:\n self.children = []\n self.terminal = True\n self.leaf = leaf\n\n\n def print_t(self, level=0):\n string = ' ' * level + repr(self.type)\n #if self.leaf:\n #string = ' ' * level + repr(self.type) + ' => ' + repr(self.leaf)\n\n print(string)\n for child in self.children:\n if isinstance(child, Node):\n child.print_t(level+1)\n elif isinstance(child, str) : # Is a leaf\n print (' ' * (level + 1), child, ' (leaf)')\n\noutErrors = \"\"\nerrList = []\n\nparams_list = []\n\ndef get_params(node_params):\n for child in node_params:\n if not isinstance(child, str):\n get_params(child.children)\n if isinstance(child, str):\n if child != \",\":\n params_list.append(str(child))\n\nparams = []\ndef stErrors(st, list):\n errors = [k for k,v in st.items() if v == {}]\n all_errors = (set(errors).union(set(list)))\n for error in all_errors:\n print( error + \" is not defined\")\n if(not(all_errors)):\n return False\n else:\n return True\n\n\ndef p_error(p):\n global outErrors\n if p:\n line = p.lineno - 7\n outErrors += (\"Syntax error at \" + str(p.value) + \" in line \" + str(line) + \"\\n\")\n else:\n outErrors += \"Syntax error\\n\"\n\ndef p_program(p):\n \"\"\"\n program : element program\n | element\n \"\"\"\n if len(p) == 2:\n p[0] = Node(\"program\", [p[1]], None)\n else:\n p[0] = Node(\"program\", [p[1], p[2]], None)\n\ndef p_element(p):\n \"\"\"\n element : FUNCT ID OP params CP statement\n | FUNCT ID OP CP statement\n | statementList\n | classStatement\n \"\"\"\n if len(p) == 2:\n p[0] = Node(\"element\", [p[1]], None)\n elif len(p) == 6:\n p[0] = Node(\"element\", [p[1], p[2], p[3], p[4],p[5]], None)\n elif len(p) == 
7:\n p[0] = Node(\"element\", [p[1], p[2], p[3], p[4], p[5], p[6]], None)\n st[p[2]] = \"defined\"\n\n\ndef p_classStatement(p):\n \"\"\"\n classStatement : CLASS ID OB constructorStmt element CB\n \"\"\"\n p[0] = Node('classStatement', [p[1], p[2], p[3], p[4], p[5], p[6]], None)\n\ndef p_constructorStmt(p):\n \"\"\"\n constructorStmt : CONSTRUCT OP params CP statement\n \"\"\"\n p[0] = Node('constructorStmt', [p[1], p[2], p[3], p[4], p[5]], None)\n\ndef p_params(p):\n \"\"\"\n params : ID\n | STRING\n | NUMBER\n | boolean\n | ID COM params\n | STRING COM params\n | NUMBER COM params\n | boolean COM params\n \"\"\"\n if len(p) == 2:\n if p[1].isupper() :\n p[0] = Node(\"params\", [p[1]], None)\n\n else:\n p[0] = Node(\"params\", [p[1]], None)\n if(st[p[1]] != \"defined\"):\n st[p[1]] = \"undefined\"\n elif (len(p) == 4):\n if p[1].isupper():\n p[0] = Node('params', [p[1], p[2], p[3]], None)\n\n else:\n p[0] = Node(\"params\", [p[1], p[2], p[3]], None)\n if(st[p[1]] != \"defined\"):\n st[p[1]] = \"undefined\"\n\ndef p_statementList(p):\n \"\"\"\n statementList : statement\n | statement statementList\n \"\"\"\n if len(p) == 2:\n p[0] = Node(\"statementList\", [p[1]], None)\n else:\n p[0] = Node(\"statementList\", [p[1], p[2]], None)\n\n\ndef p_statement(p):\n \"\"\"\n statement : OB statementList CB\n | expressionStmt\n | conditionStmt\n | iterationStmt\n | returnStmt\n | breakStmt\n | continueStmt\n | ID OP params CP\n | ID OP CP\n \"\"\"\n if len(p) == 2:\n p[0] = Node(\"statement\", [p[1]], None)\n elif len(p) == 4:\n if p[1] == \"ID\" and p[2] == \"OP\" and p[3] == \"CP\":\n p[0] = Node(\"statement\", [p[1], p[2], p[3]], None)\n else:\n p[0] = Node(\"statement\", [p[1], p[2], p[3]], None)\n elif len(p) == 5:\n p[0] = Node(\"statement\", [p[1], p[2], p[3], p[4]], None)\n\n get_params(p[3].children)\n if(st[p[1]] == {}):\n st[p[1]] == \"undefinded\"\n errList.append(str(p[1]))\n for param in params_list:\n if(st[param] == \"undefined\"):\n errList.append(param)\n\n 
params_list.clear()\n\n\n\n\ndef p_expressionStmt(p):\n \"\"\"\n expressionStmt : expression SC\n | SC\n \"\"\"\n if len(p) == 2:\n p[0] = Node(\"expressionStmt\", [p[1]], None)\n else:\n p[0] = Node(\"expressionStmt\", [p[1], p[2]], None)\n\ndef p_conditionStmt(p):\n \"\"\"\n conditionStmt : IF condition statement ELSE statement\n | IF condition statement\n \"\"\"\n if len(p) == 4:\n p[0] = Node(\"conditionStmt\", [p[1], p[2], p[3]], [])\n else:\n p[0] = Node(\"conditionStmt\", [p[1], p[2], p[3], p[4], p[5]], [])\n\ndef p_iterationStmt(p):\n \"\"\"\n iterationStmt : WHILE condition statement\n | DO statement WHILE condition\n | FOR OP expression IN expression CP statement\n | FOR OP expression SC expression SC expression CP statement\n \"\"\"\n if len(p) == 4:\n p[0] = Node(\"iterationStmt\", [p[1], p[2], p[3]], [])\n elif len(p) == 5:\n p[0] = Node(\"iterationStmt\", [p[1], p[2], p[3], p[4]], [])\n elif len(p) == 8:\n p[0] = Node(\"iterationStmt\", [p[1], p[2], p[3], p[4], p[5], p[6], p[7]], [])\n elif len(p) == 10:\n p[0] = Node(\"iterationStmt\", [p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9]], [])\n\ndef p_breakStmt(p):\n \"\"\"\n breakStmt : BREAK SC\n \"\"\"\n p[0] = Node(\"breakStmt\", [p[1], p[2]], [])\n\ndef p_returnStmt(p):\n \"\"\"\n returnStmt : RTRN SC\n | RTRN expression SC\n \"\"\"\n if len(p) == 3:\n p[0] = Node(\"returnStmt\", [p[1], p[2]], [])\n else:\n p[0] = Node(\"returnStmt\", [p[1], p[2], p[3]], [])\n\ndef p_continueStmt(p):\n \"\"\"\n continueStmt : CONTI SC\n \"\"\"\n p[0] = Node(\"continueStmt\", [p[1], p[2]], [])\n\ndef p_condition(p):\n \"\"\"\n condition : OP expression CP\n \"\"\"\n p[0] = Node(\"condition\", [p[1], p[2], p[3]], [])\n\ndef p_expression(p):\n \"\"\"\n expression : operatorStmt\n | comparisonStmt\n | logicalStmt\n | ID\n | NUMBER\n | boolean\n | STRING\n | NULL\n | OSB expression CSB\n | variableDefinition\n | multipleExpression\n | consoleExpression\n | documentExpression\n | promptExpression\n | methodCall\n 
\"\"\"\n if len(p) == 2:\n if p[1] == \"ID\" or p[1] == \"NUMBER\" or p[1] == \"STRING\" or p[1] == \"NULL\":\n p[0] = Node(\"expression\", [p[1]], [])\n else:\n p[0] = Node(\"expression\", [p[1]], None)\n else:\n p[0] = Node(\"expression\", [p[1], p[2], p[3]], [])\n\ndef p_variableDefinition(p):\n \"\"\"\n variableDefinition : VAR expression\n | CONST expression\n | VAR ID COM expression\n | CONST ID COM expression\n \"\"\"\n if len(p) == 3:\n p[0] = Node(\"variableDefinition\", [p[1], p[2]], [])\n else:\n p[0] = Node(\"variableDefinition\", [p[1], p[2], p[3], p[4]], [])\n\ndef p_multipleExpression(p):\n \"\"\"\n multipleExpression : STRING COM expression\n | ID COM expression\n | NUMBER COM expression\n \"\"\"\n p[0] = Node(\"multipleExpression\", [p[1], p[2], p[3]], [])\n\ndef p_operatorStmt(p):\n \"\"\"\n operatorStmt : expression PLUS expression\n | expression MINUS expression\n | expression TIMES expression\n | expression DIV expression\n | expression MOD expression\n | expression PPLUS\n | expression MMINUS\n | expression EXPON expression\n | expression PLUEQ expression\n | expression MINEQ expression\n | expression TIMEQ expression\n | expression EXPEQ expression\n | expression DIVEQ expression\n | expression MODEQ expression\n | expression ASSIG expression\n | THIS DOT ID ASSIG expression\n | expression ASSIG NEW ID OP params CP\n \"\"\"\n if len(p) == 3:\n p[0] = Node('operatorStmt', [p[1], p[2]], [])\n elif len(p) == 4:\n p[0] = Node('operatorStmt', [p[1], p[2], p[3]], [])\n\n if(p[2] == \"=\"):\n print(p[1].children[0])\n st[p[1].children[0]] = \"defined\"\n elif len(p) == 6:\n p[0] = Node('operatorStmt', [p[1], p[2], p[3], p[4], p[5]], [])\n else:\n p[0] = Node('operatorStmt', [p[1], p[2], p[3], p[4], p[5], p[6], p[7]], [])\n\ndef p_comparisonStmt(p):\n \"\"\"\n comparisonStmt : expression EQ expression\n | expression GT expression\n | expression LT expression\n | expression EEQ expression\n | expression NTEQ expression\n | expression NTEEQ 
expression\n | expression GTEQ expression\n | expression LTEQ expression\n \"\"\"\n p[0] = Node('comparisonStmt', [p[1], p[2], p[3]])\n\ndef p_logicalStmt(p):\n \"\"\"\n logicalStmt : NEL expression\n | expression AND expression\n | expression OR expression\n \"\"\"\n if len(p) == 3:\n p[0] = Node('logicalStmt', [p[1], p[2]], [])\n else:\n p[0] = Node('logicalStmt', [p[1], p[2], p[3]], [])\n\ndef p_consoleExpression(p):\n \"\"\"\n consoleExpression : CONS DOT LOG OP expression CP\n \"\"\"\n p[0] = Node('consoleExpression', [p[1], p[2], p[3], p[4], p[5], p[6]], [])\n\ndef p_documentExpression(p):\n \"\"\"\n documentExpression : DOC DOT WRITE OP expression CP\n \"\"\"\n p[0] = Node('documentExpression', [p[1], p[2], p[3], p[4], p[5], p[6]], [])\n\ndef p_promptExpression(p):\n \"\"\"\n promptExpression : PROMPT OP expression CP\n \"\"\"\n p[0] = Node('promptExpression', [p[1], p[2], p[3], p[4]], [])\n\ndef p_methodCall(p):\n \"\"\"\n methodCall : ID DOT ID OP CP\n | ID DOT ID OP params CP\n \"\"\"\n if len(p) == 6:\n p[0] = Node('methodCall', [p[1], p[2], p[3], p[4], p[5]], [])\n else:\n p[0] = Node('methodCall', [p[1], p[2], p[3], p[4], p[6], p[5]], [])\n\ndef p_boolean(p):\n \"\"\"\n boolean : TRUE\n | FALSE\n \"\"\"\n p[0] = Node('boolean', [p[1]], [])\n\n\ndef run_test(file_name, symbolTable):\n with open(file_name, 'r') as file:\n s = file.read()\n\n tree = yacc.yacc().parse(s, debug=False)\n #print(\"\\t------- SymbolTable ---------\")\n st = symbolTable\n #print(st)\n print(\"\\t------ Parser ------\")\n #print(outErrors)\n if (not outErrors):\n print(\"SUCCESS\")\n print(\"\\t------ Semantic ------\")\n if(not stErrors(st, errList)):\n print(\"SUCCESS\")\n\n #print(\"\\t------ TREE ------\")\n #tree.print_t()\n return tree\n else:\n return 1\n else:\n return 1\n","sub_path":"grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":10660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"312006008","text":"###########\n# Imports\n###########\nimport os\nimport src.file_processing as my_fp\n\n########\n# Parameter definitions\n########\n\n# Define file paths.\ninput_base_dir = os.path.abspath('../outputs/vec_text')\noutput_base_dir = os.path.abspath('../outputs/vec_text_balanced')\n\n# Specify processed directories.\n#dir_names = ['fb_com_10pd', 'fb_post', 'twitter_4', 'article']\ndir_names = ['xtest']\n\n########\n# EXECUTION\n########\n\n# Test\n#print my_fp._get_class_counts_from_stat_file('../outputs/vec_text/xtest/tweet_44-202-233-300_adjclose_1_4_tf-idf-no.stat.txt')\n\n# Process all specified directories.\nfor dirname in dir_names:\n input_dir = os.path.join(input_base_dir, dirname)\n output_dir = my_fp.get_or_create_directory(os.path.join(output_base_dir, dirname))\n my_fp.balance_files(input_dir, output_dir)\n","sub_path":"libs/AnalPipeline/balance_files.py","file_name":"balance_files.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"642615495","text":"def solution(A):\n # write your code in Python 2.7\n seen = {}\n for a in A:\n seen[a] = seen.get(a, 0) + 1\n for a, v in seen.items():\n if v % 2 == 1:\n return a\n\n\nfor item in (\n [9, 3, 9, 3, 9, 7, 9],\n [9, 3, 9, 3, 1, 7, 9, 3, 1],\n):\n print(solution(item))\n","sub_path":"python/lesson2/array_pair.py","file_name":"array_pair.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"445309817","text":"import copy\nimport datetime\nimport shutil\nimport subprocess\nimport sys\nfrom os import chdir\nfrom pathlib import Path\n\nimport pytest\nfrom flexmock import flexmock\nfrom ogr.abstract import PullRequest, PRStatus\nfrom ogr.services.github import GithubService\nfrom ogr.services.pagure import PagureProject, PagureService\nfrom rebasehelper.specfile import SpecFile\n\nfrom packit.api import PackitAPI\nfrom packit.bot_api import PackitBotAPI\nfrom packit.config import Config, get_local_package_config\nfrom packit.distgit import DistGit\nfrom packit.fed_mes_consume import Consumerino\nfrom packit.utils import FedPKG\nfrom tests.spellbook import git_set_user_email\n\nTHIS_DIR = Path(__file__).parent\nTESTS_DIR = THIS_DIR.parent\nDATA_DIR = TESTS_DIR / \"data\"\nUPSTREAM = DATA_DIR / \"upstream_git\"\nDISTGIT = DATA_DIR / \"dist_git\"\nTARBALL_NAME = \"beerware-0.1.0.tar.gz\"\n\n\ndef get_test_config():\n conf = Config()\n conf._pagure_user_token = \"test\"\n conf._pagure_fork_token = \"test\"\n conf._github_token = \"test\"\n return conf\n\n\ndef git_add_n_commit(directory, tag=None):\n subprocess.check_call([\"git\", \"init\", \".\"], cwd=directory)\n git_set_user_email(directory)\n subprocess.check_call([\"git\", \"add\", \".\"], cwd=directory)\n subprocess.check_call([\"git\", \"commit\", \"-m\", \"initial commit\"], cwd=directory)\n if tag:\n subprocess.check_call([\"git\", \"tag\", tag], cwd=directory)\n subprocess.check_call([\"git\", \"remote\", \"add\", \"origin\", \"https://lol.wat\"], cwd=directory)\n\n\n@pytest.fixture()\ndef github_release_fedmsg():\n return {\n \"msg_id\": \"2019-a5034b55-339d-4fa5-a72b-db74579aeb5a\",\n \"topic\": \"org.fedoraproject.prod.github.release\",\n \"msg\": {\n \"repository\": {\n \"full_name\": \"brewery/beer\",\n \"owner\": {\n \"login\": \"brewery\",\n },\n \"name\": \"beer\",\n \"html_url\": \"https://github.com/brewery/beer\",\n },\n \"release\": {\n \"body\": \"Changelog content will be 
here\",\n \"tag_name\": \"0.1.0\",\n \"created_at\": \"2019-02-28T18:48:27Z\",\n \"published_at\": \"2019-02-28T18:51:10Z\",\n \"draft\": False,\n \"prerelease\": False,\n \"name\": \"Beer 0.1.0 is gooooood\"\n },\n \"action\": \"published\",\n }\n }\n\n\n@pytest.fixture()\ndef upstream_n_distgit(tmpdir):\n t = Path(str(tmpdir))\n\n u = t / \"upstream_git\"\n shutil.copytree(UPSTREAM, u)\n git_add_n_commit(u, tag=\"0.1.0\")\n\n d = t / \"dist_git\"\n shutil.copytree(DISTGIT, d)\n git_add_n_commit(d)\n\n return u, d\n\n\n@pytest.fixture()\ndef mock_update_workflow(upstream_n_distgit):\n u, d = upstream_n_distgit\n\n def mocked_pr_create(*args, **kwargs):\n return PullRequest(\n title=\"\",\n id=42,\n status=PRStatus.open,\n url=\"\",\n description=\"\",\n author=\"\",\n source_branch=\"\",\n target_branch=\"\",\n created=datetime.datetime(1969, 11, 11, 11, 11, 11, 11)\n )\n flexmock(\n PagureProject,\n get_git_urls=lambda: {\"git\": str(d)},\n fork_create=lambda: None,\n get_fork=lambda: PagureProject(\n \"\", \"\", PagureService()\n ),\n pr_create=mocked_pr_create\n )\n flexmock(\n GithubService,\n get_project=None\n )\n\n def mock_download_remote_sources():\n \"\"\" mock download of the remote archive and place it into dist-git repo \"\"\"\n tarball_path = d / TARBALL_NAME\n hops_filename = \"hops\"\n hops_path = d / hops_filename\n hops_path.write_text(\"Cascade\\n\")\n subprocess.check_call([\"tar\", \"-cf\", str(tarball_path), hops_filename], cwd=d)\n\n flexmock(SpecFile, download_remote_sources=mock_download_remote_sources)\n\n flexmock(DistGit, push_to_fork=lambda *args, **kwargs: None)\n\n def mocked_new_sources(sources=None):\n if not Path(sources).is_file():\n raise RuntimeError(\"archive does not exist\")\n flexmock(FedPKG, init_ticket=lambda x=None: None, new_sources=mocked_new_sources)\n\n pc = get_local_package_config(str(u))\n pc.downstream_project_url = str(d)\n pc.upstream_project_url = str(u)\n # 
https://stackoverflow.com/questions/45580215/using-flexmock-on-python-modules\n flexmock(sys.modules[\"packit.bot_api\"]).should_receive(\"get_packit_config_from_repo\").and_return(pc)\n return u, d\n\n\ndef test_basic_local_update(upstream_n_distgit, mock_update_workflow):\n \"\"\" basic propose-update test: mock remote API, use local upstream and dist-git \"\"\"\n u, d = upstream_n_distgit\n\n chdir(u)\n c = get_test_config()\n\n pc = get_local_package_config(str(u))\n pc.upstream_project_url = str(u)\n pc.downstream_project_url = str(d)\n api = PackitAPI(c, pc)\n api.sync_release(\"master\", \"0.1.0\")\n\n assert (d / TARBALL_NAME).is_file()\n spec = SpecFile(str(d / \"beer.spec\"), None)\n assert spec.get_full_version() == \"0.1.0\"\n\n\ndef test_single_message(github_release_fedmsg, mock_update_workflow):\n u, d = mock_update_workflow\n\n conf = get_test_config()\n api = PackitBotAPI(conf)\n api.sync_upstream_release_with_fedmsg(github_release_fedmsg)\n assert (d / TARBALL_NAME).is_file()\n spec = SpecFile(str(d / \"beer.spec\"), None)\n assert spec.get_full_version() == \"0.1.0\"\n\n\ndef test_loop(mock_update_workflow, github_release_fedmsg):\n def mocked_iter_releases():\n msg = copy.deepcopy(github_release_fedmsg)\n yield msg[\"topic\"], msg\n flexmock(Consumerino, iterate_releases=mocked_iter_releases)\n conf = get_test_config()\n api = PackitBotAPI(conf)\n api.watch_upstream_release()\n","sub_path":"tests/integration/test_update.py","file_name":"test_update.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"377561758","text":"from functools import wraps\n\nfrom flask import Blueprint, render_template, request, session, flash, redirect, url_for\n\nteacherController = Blueprint(\"teacherController\", __name__)\n\nfrom models.model import teacher as teacher_model, rooms as siniflar, enrolled_rooms, assignments, students, \\\n submitted_assignments\n\n\n# Bu fonksiyon sayfaya ulaşılması için login olunmasını gerekli kılmaktadır\ndef teacher_login_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if not session.get(\n \"teacher_name\") is None: # Session da teacher_name bilgisinin varlığını kontrol ederek yapmaktadır.\n return f(*args, **kwargs)\n else:\n flash(\"Lütfen sisteme giriş yapınız\")\n return redirect(url_for(\"index\"))\n\n return wrap\n\n\n@teacherController.route('/tlogin', methods=[\"POST\", \"GET\"])\ndef tlogin():\n if request.method == \"POST\":\n email = request.form[\"email\"]\n logging_teacher = teacher_model.getTeacher(email)\n if logging_teacher != \"Kullanıcı Bulunamadı\":\n if logging_teacher.password == request.form[\"password\"]:\n ogretmen_adi = logging_teacher.name\n session[\"teacher_name\"] = ogretmen_adi\n session[\"teacher_email\"] = logging_teacher.email\n session[\"teacher_password\"] = logging_teacher.password\n\n return redirect(url_for(\"teacherController.teacher\"))\n else:\n flash(\"Kullanıcı adı veya parola hatalı\")\n return render_template(\"teacherView/tlogin.html\")\n else:\n flash(\"Kullanıcı adı veya parola hatalı\")\n return render_template(\"teacherView/tlogin.html\")\n else:\n return render_template(\"teacherView/tlogin.html\")\n\n\n@teacherController.route('/tsignup', methods=[\"POST\", \"GET\"])\ndef tsignup():\n if request.method == \"POST\":\n logging_teacher = teacher_model.getTeacher(request.form[\"email\"])\n if isinstance(logging_teacher, teacher_model):\n flash(\"Bu e-posta kullanılmaktadır\")\n return redirect(url_for(\"teacherController.tsignup\"))\n else:\n if request.form[\"password\"] == 
request.form[\"password_check\"]:\n logging_teacher = teacher_model(request.form[\"email\"], request.form[\"isim\"], request.form[\"password\"])\n logging_teacher.setTeacher()\n flash(\"Kayıt başarıyla gerçekleştirilmiştir\")\n return redirect(url_for(\"teacherController.tlogin\"))\n else:\n flash(\"Parolalar uyuşmamaktadır.\")\n return redirect(url_for(\"teacherController.tsignup\"))\n else:\n return render_template(\"teacherView/tsignup.html\")\n\n\n@teacherController.route('/teacher', methods=[\"POST\", \"GET\"])\n@teacher_login_required\ndef teacher():\n if request.method == \"POST\": # POST mothodunda öğretmen yeni sınıf oluşturulmakta\n sinif_adi = request.form[\"sinif_adi\"]\n ogretmenin_maili = session[\"teacher_email\"]\n yeni_sinif = siniflar()\n yeni_sinif.teacher_mail = ogretmenin_maili\n yeni_sinif.room_name = sinif_adi\n yeni_sinif.set_room()\n return redirect(url_for(\"teacherController.teacher\"))\n\n else:\n ogretmen_maili = session[\"teacher_email\"]\n ogretmen_rooms = siniflar.get_rooms(ogretmen_maili)\n tablo=[]\n if ogretmen_rooms!=\"Öğretmenin kayıtlı sınıfı bulunmamaktadır.\":\n for room in ogretmen_rooms:\n siniftaki_ogrenci_sayisi=enrolled_rooms.get_students_id_from_room_id(room.room_id)\n odevler=assignments.get_assignments(room.room_id)\n sutun={\n \"sinif_adi\":room.room_name,\n \"sinif_id\":room.room_id,\n \"ogrenci_sayisi\":len(siniftaki_ogrenci_sayisi),\n \"odev_sayisi\":len(odevler),\n }\n tablo.append(sutun)\n return render_template(\"teacherView/teacher.html\", tablo=tablo)\n\n\n@teacherController.route('/room/')\n@teacher_login_required\ndef room(room_id):\n session.pop(\"bulunan_oda\", None)\n session[\"bulunan_oda\"] = room_id\n gelen_sinif = siniflar.get_room(room_id)\n if gelen_sinif != \"Sınıf bulunamadı\": # sınıfın varlığının kontrolü\n if session.get(\n \"teacher_email\") == gelen_sinif.teacher_mail: # öğretmenin tanımladığı sınf olup olmadığının kontrolü\n siniftaki_ogrencilerin_idler = 
enrolled_rooms.get_students_id_from_room_id(room_id)\n sinifin_odevleri = assignments.get_assignments(room_id)\n x = 0\n y = 0\n tablo = [] # öğrencilerin ödev durumlarının listeleneceği tablo\n for student in siniftaki_ogrencilerin_idler: # her satırın ilk hücresi öğrenci adı, id'si ve giriş kodu bilgilerini bulunduran bir dic\n ogr = students.get_student_by_id(student)\n ogr_dic = {\"ogrno\": str(ogr.student_id),\n \"ogradi\": ogr.name,\n \"ogrpass\": str(ogr.password)\n }\n column = [ogr_dic]\n for odev in sinifin_odevleri:\n ogrencinin_odevi = submitted_assignments.get_submitted_assignment_by_assignment_and_student(\n odev.assignment_id, ogr.student_id)\n if ogrencinin_odevi != \"Öğrenci ödevi teslim etmemiştir.\":\n ogrencinin_odev_durumu = True\n else:\n ogrencinin_odev_durumu = False\n\n odev_dic = {\"ödevno\": str(odev.assignment_id),\n \"teslim\": str(ogrencinin_odev_durumu),\n }\n column.append(odev_dic)\n tablo.append(column)\n\n return render_template(\"teacherView/room.html\", tablo=tablo, gelen_sinif=gelen_sinif)\n\n return redirect(url_for(\"teacherController.teacher\"))\n\n\n@teacherController.route('/addstudent', methods=[\"POST\"])\n@teacher_login_required\ndef add_student():\n yeni_ogrenci = students()\n yeni_ogrenci.name = request.form[\"odevadi\"]\n yeni_ogrenci.set_student(session.get(\"bulunan_oda\"))\n\n return redirect(url_for('teacherController.room', room_id=session.get(\"bulunan_oda\")))\n\n\n@teacherController.route('/updatestudent/', methods=[\"POST\"])\n@teacher_login_required\ndef change_student(student_id):\n changed_student = students.get_student(student_id)\n if changed_student != \"Öğrenci bulunamadı\":\n changed_student.name = request.form[\"adi\"]\n changed_student.update_student()\n return redirect(url_for('teacherController.room', room_id=session.get(\"bulunan_oda\")))\n\n\n@teacherController.route('/createassignment', methods=['POST', \"GET\"])\n@teacher_login_required\ndef createassignment():\n if request.method == 
\"POST\":\n yeni_odev = assignments()\n yeni_odev.room_id = session.get(\"bulunan_oda\")\n yeni_odev.name = request.form[\"odevadi\"]\n yeni_odev.message = request.form[\"konu\"]\n yeni_odev.create_assignment()\n return redirect(url_for('teacherController.room', room_id=yeni_odev.room_id))\n\n\n else:\n return redirect(url_for('teacherController.room', room_id=session.get(\"bulunan_oda\")))\n\n\n@teacherController.route('/checkassignment//')\n@teacher_login_required\ndef checkassignment(student_id, assignment_id):\n ogr = students.get_student_by_id(student_id)\n odev = assignments.get_assignment(assignment_id)\n teslim_edilen_odev = submitted_assignments.get_submitted_assignment_by_assignment_and_student(assignment_id,\n student_id)\n formatli_tarih = teslim_edilen_odev.delivery_date\n sayfada_gorulecek_veriler = dict([\n (\"ogrenci_adi\", ogr.name),\n (\"odev_adi\", odev.name),\n (\"gorsel_path\", teslim_edilen_odev.path_of_image),\n (\"teslim_tarihi\", formatli_tarih.strftime(\"%m/%d/%Y, %H:%M:%S\"))\n ])\n\n return render_template(\"teacherView/checkassignment.html\", sayfada_gorulecek_veriler=sayfada_gorulecek_veriler)\n\n\n@teacherController.route('/editassignment', methods=[\"POST\", \"GET\"])\n@teacher_login_required\ndef editassignment():\n if request.method == \"POST\":\n gelen_odev = assignments()\n gelen_odev.assignment_id = request.form[\"id\"]\n gelen_odev.name = request.form[\"odevadi\"]\n gelen_odev.message = request.form[\"odevkonusu\"]\n gelen_odev.update_assignment()\n return redirect(url_for('teacherController.editassignment'))\n else:\n rooms_assignments = assignments.get_assignments(session.get(\"bulunan_oda\"))\n return render_template(\"teacherView/editassignment.html\", rooms_assignments=rooms_assignments)\n\n\n@teacherController.route('/deleteassignment/', methods=[\"GET\"])\n@teacher_login_required\ndef deleteassignment(assignment_id):\n silinecek_odev = assignments.get_assignment(assignment_id)\n silinecek_odev.delete_assignment()\n 
return redirect(url_for(\"teacherController.editassignment\"))\n\n\n@teacherController.route('/accinfo', methods=['POST', \"GET\"])\n@teacher_login_required\ndef accinfo():\n if request.method == \"POST\":\n parola = request.form[\"inputPassword\"]\n eposta = session[\"teacher_email\"]\n isim = request.form[\"inputName\"]\n\n\n\n if request.form[\"inputPassword\"] != request.form[\"inputPasswordCheck\"]:\n flash(\"Parola bilgileri aynı değil!\")\n\n else:\n parlosi_yeni_ogretmen = teacher_model(eposta, isim, parola)\n parlosi_yeni_ogretmen.updateTeacher()\n session[\"teacher_name\"]= isim\n flash(\"Hesap başarıyla güncellenmiştir.\")\n return redirect(url_for(\"teacherController.accinfo\"))\n else:\n return render_template(\"teacherView/accinfo.html\")\n\n\n@teacherController.route('/logout')\n@teacher_login_required\ndef logout():\n session.clear()\n return redirect(url_for(\"index\"))\n","sub_path":"teacherController.py","file_name":"teacherController.py","file_ext":"py","file_size_in_byte":10139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"258529929","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n# Filename : done_downimg.py\n\nimport requests\nimport urllib\nimport json\n\ndef getSogouImag(category,length,path):\n n = length\n cate = category\n imgs = (requests.get('http://pic.sogou.com/pics/channel/getAllRecomPicByTag.jsp?category='+ cate +'&tag=%E5%85%A8%E9%83%A8&start=0&len='+ str(n)))\n jd = json.loads(imgs.text)\n jd = jd['all_items']\n imgs_url = []\n for j in jd :\n imgs_url.append(j['bthumbUrl'])\n m = 1\n for img_url in imgs_url:\n print('*****' + cate + str(m) +'.jpg *****' + 'Downloading...')\n urllib.request.urlretrieve(img_url,path+ cate + str(m) + '.jpg')\n m = m + 1\n print('Download complete!')\ngetSogouImag('汽车',6,'/Users/luyankai/python/downimg/')","sub_path":"done_downimg.py","file_name":"done_downimg.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"492550283","text":"# -*- coding: utf-8 -*-\nimport requests\nimport telebot\nimport config\nimport ssl\nfrom aiohttp import web\n\n# Server settings\nWEBHOOK_HOST = '00.00.00.00' # IP address of your bot's server\nWEBHOOK_PORT = 443 # 443, 80, 88 or 8443 - port number\nWEBHOOK_LISTEN = '00.00.00.00' # IP address of your bot's server\nWEBHOOK_SSL_CERT = './webhook_cert.pem' # certificate path\nWEBHOOK_SSL_PRIV = './webhook_pkey.pem' # private key path\nWEBHOOK_URL_BASE = 'https://{}:{}'.format(WEBHOOK_HOST, WEBHOOK_PORT)\nWEBHOOK_URL_PATH = '/{}/'.format(config.token)\n\n# class instances of Telegram bot and server\nbot = telebot.TeleBot(config.token)\napp = web.Application()\n\n\n# Configure server\nasync def handle(request):\n if request.match_info.get('token') == config.token:\n request_body_dict = await request.json()\n update = telebot.types.Update.de_json(request_body_dict)\n bot.process_new_updates([update])\n return web.Response()\n else:\n return web.Response(status=403)\n\napp.router.add_post('/{token}/', handle)\n\n\n# Bot's reaction for typical messages\ndef typical_reactions(message):\n if message.content_type == 'sticker':\n sticker_response(message)\n elif message.text == '/start':\n start(message)\n elif message.text == '/help':\n help_cmd(message)\n elif message.text == '/next_player':\n next_player(message)\n\n\ndef complete_the_search(message):\n markup = telebot.types.ReplyKeyboardRemove(selective=False)\n sent = bot.send_message(message.chat.id, 'Отменяю поиск...')\n bot.send_message(message.chat.id, 'Поиск завершен', reply_markup=markup)\n bot.delete_message(sent.chat.id, sent.message_id)\n\n\n# Bot's reaction to the sticker\n@bot.message_handler(content_types=['sticker'])\ndef sticker_response(message):\n sticker = open('./Granata.webp', 'rb') # path to bot's sticker\n markup = telebot.types.ReplyKeyboardRemove(selective=False)\n bot.send_sticker(message.chat.id, sticker, reply_markup=markup)\n bot.send_message(message.chat.id, 
'''Это, конечно, забавно, но я не умею искать по стикерам.\nПопробуй еще раз:\n/next_player''')\n\n\n# Bot's reaction to the /help command\n@bot.message_handler(commands=['help'])\ndef help_cmd(message):\n markup = telebot.types.ReplyKeyboardRemove(selective=False)\n bot.send_message(message.chat.id, '''Что умеет этот бот? Он показывает насколько крут тот или иной игрок в Battlefield.\nБот умеет показывать статистику игроков в Battlefield 4 и Battlefield Hardline.\nЧтобы начать работать с ботом введи команду /start.\nЕсли ты уже что-то спрашивал у бота, можешь использовать команду /next_player''', reply_markup=markup)\n\n\n# Bot's reaction to the /next_player command\n@bot.message_handler(commands=['next_player'])\ndef next_player(message):\n markup = telebot.types.ReplyKeyboardMarkup()\n markup.row('Battlefield 4', 'Battlefield Hardline')\n markup.row('Завершить поиск')\n sent = bot.send_message(message.chat.id,\n 'Хочешь посмотреть статистику другого игрока в BF? Выбери игру:',\n reply_markup=markup)\n bot.register_next_step_handler(sent, game)\n\n\n# Bot's reaction to the /start command\n@bot.message_handler(commands=['start'])\ndef start(message):\n markup = telebot.types.ReplyKeyboardMarkup()\n markup.row('Battlefield 4', 'Battlefield Hardline')\n markup.row('Завершить поиск')\n sent = bot.send_message(message.chat.id, '''Привет, я бот, который показывает статистику игроков в играх серии Battlefield.\nДля начала выбери игру:''', reply_markup=markup)\n bot.register_next_step_handler(sent, game)\n\n\n# Game choice\ndef game(message):\n global api_url\n if message.text in ('Battlefield 4', 'Battlefield Hardline'):\n api_url = config.api_urls[message.text]\n markup = telebot.types.ReplyKeyboardRemove(selective=False)\n sent = bot.send_message(message.chat.id, 'Теперь введи ник игрока Battlefield.', reply_markup=markup)\n bot.register_next_step_handler(sent, platform_name)\n elif message.text == 'Завершить поиск':\n complete_the_search(message)\n else:\n 
typical_reactions(message)\n\n\n# Platform choice\ndef platform_name(message):\n if message.content_type == 'sticker' or message.text in ('/start', '/help', '/next_player'):\n typical_reactions(message)\n else:\n config.params.setdefault('name', message.text)\n markup = telebot.types.ReplyKeyboardMarkup()\n markup.row('PC', 'PlayStation4', 'Xbox One')\n markup.row('PlayStation3', 'Xbox 360')\n markup.row('Завершить поиск')\n sent = bot.send_message(message.chat.id, 'Выбери платформу:', reply_markup=markup)\n bot.register_next_step_handler(sent, show_stats)\n\n\n# This function shows players stats\ndef show_stats(message):\n global res\n if message.content_type == 'sticker' or message.text in ('/start', '/help', '/next_player'):\n typical_reactions(message)\n elif message.text == 'Завершить поиск':\n complete_the_search(message)\n else:\n markup = telebot.types.ReplyKeyboardRemove(selective=False)\n bot_message = bot.send_message(message.chat.id, 'Вывожу статистику...', reply_markup=markup)\n try:\n config.params.setdefault('plat', config.platforms[message.text])\n res = requests.get(api_url, params=config.params).json()\n name = config.params['name']\n kills = res['stats']['kills']\n deaths = res['stats']['deaths']\n time = round(int(res['stats']['timePlayed']) / 60 / 60, 1)\n score = res['player']['score']\n kd_ratio = str(res['stats']['extra']['kdr'])[:5]\n spm = str(res['stats']['extra']['spm'])[:7]\n num_wins = res['stats']['numWins']\n num_losses = res['stats']['numLosses']\n wlr = str(res['stats']['extra']['wlr'])[:4]\n markup = telebot.types.ReplyKeyboardMarkup()\n markup.row('Захват флагов', 'Счет по классам')\n markup.row('Классы оружия', 'Техника')\n markup.row('Следующий игрок', 'Завершить поиск')\n sent = bot.send_message(message.chat.id, '''Игрок {name} совершил {kills} убийств и был убит {deaths} раз.\nИтоговое соотношение K/D: {kd}.\nСчет в минуту: {spm}.\nОчков набрано: {score}.\nВремени проведено в игре: {time} часов.\nКоличество побед: 
{num_wins}.\nКоличество поражений: {num_losses}.\nСоотношение побед/поражений: {wlr}.\nЧтобы просмотреть более подробную статистику выбери интересующую тебя информацию.\nИли посмотри статистику другого игрока:'''.format(name=name,\n kills=kills,\n deaths=deaths,\n kd=kd_ratio,\n time=time,\n score=score,\n spm=spm,\n num_wins=num_wins,\n num_losses=num_losses,\n wlr=wlr), reply_markup=markup)\n config.params.pop('name')\n config.params.pop('plat')\n bot.register_next_step_handler(sent, more_stats)\n except (KeyError, TypeError):\n bot.send_message(message.chat.id, '''Упс. Кажется я не смог найти такого игрока.\nПроверь, правильно ли введен ник и повтори попытку.\nПовторить: /next_player''')\n try:\n # cleaning dict with search options\n config.params.pop('name')\n config.params.pop('plat')\n except KeyError:\n pass\n # delete message 'Вывожу статистику...'\n bot.delete_message(bot_message.chat.id, bot_message.message_id)\n\n\n# This function shows detailed stats\ndef more_stats(message):\n if message.content_type == 'sticker' or message.text in ('/start', '/help', '/next_player'):\n typical_reactions(message)\n elif message.text == 'Захват флагов':\n cf = res['stats']['flagCaptures']\n df = res['stats']['flagDefend']\n bot.send_message(message.chat.id, '''Захвачено флагов: {cf}.\nФлагов защищено: {df}.'''.format(cf=cf, df=df))\n bot.register_next_step_handler(message, more_stats)\n elif message.text == 'Счет по классам':\n string = ''''''\n for elem in res['stats']['kits']:\n score = res['stats']['kits'][elem]['score']\n elem = config.classes[elem]\n string += '''{elem}: {score}.\n'''.format(elem=elem, score=score)\n bot.send_message(message.chat.id, string)\n bot.register_next_step_handler(message, more_stats)\n elif message.text == 'Классы оружия':\n weapons = list(res['weaponCategory'])\n string_weapon = '''Статистика по оружию:\n\n'''\n if api_url == config.api_urls['Battlefield 4']:\n for weapon in weapons:\n kpm = str(weapon['extra']['kpm'])[:5]\n kills = 
weapon['stat']['kills']\n shots = weapon['stat']['shots']\n time = round(int(weapon['stat']['time']) / 60 / 60, 2)\n accuracy = str(weapon['extra']['accuracy'])[:5]\n name = config.dict_weapons_bf4[weapon['name']]\n string_weapon += '''{name}:\nУбийств совершено: {kills}.\nВыстрелов сделано: {shots}.\nВремя использования: {time} ч.\nУбийств в минуту: {kpm}.\nТочность: {accuracy} %.\n\n'''.format(kills=kills, shots=shots, time=time, accuracy=accuracy, name=name, kpm=kpm)\n elif api_url == config.api_urls['Battlefield Hardline']:\n for weapon in weapons:\n kpm = str(weapon['extra']['kpm'])[:5]\n kills = weapon['stat']['kills']\n shots = weapon['stat']['shots']\n time = round(int(weapon['stat']['time'])/ 60 / 60, 2)\n accuracy = str(weapon['extra']['accuracy'])[:5]\n name = config.dict_weapons_bfh[weapon['name']]\n string_weapon += '''{name}:\nУбийств совершено: {kills}.\nВыстрелов сделано: {shots}.\nВремя использования: {time} ч.\nУбийств в минуту: {kpm}.\nТочность: {accuracy} %.\n\n'''.format(kills=kills, shots=shots, time=time, accuracy=accuracy, name=name, kpm=kpm)\n bot.send_message(message.chat.id, string_weapon)\n bot.register_next_step_handler(message, more_stats)\n elif message.text == 'Техника':\n string_vehicle = '''Статистика по технике:\n\n'''\n vehicles = list(res['vehicleCategory'])\n if api_url == config.api_urls['Battlefield 4']:\n for vehicle in vehicles:\n if vehicle['name'] in config.dict_vehicles_bf4:\n name = config.dict_vehicles_bf4[vehicle['name']]\n else:\n continue\n destroys = vehicle['stat']['destroys']\n kpm = str(vehicle['extra']['kpm'])[:5]\n kills = vehicle['stat']['kills']\n time = round(int(vehicle['stat']['time']) / 60 / 60, 2)\n string_vehicle += '''{name}:\nУбийств совершено: {kills}.\nУничтожено единиц: {destroys}.\nВремя использования: {time} ч.\nУбийств в минуту: {kpm}.\n\n'''.format(destroys=destroys, kpm=kpm, kills=kills, time=time, name=name)\n elif api_url == config.api_urls['Battlefield Hardline']:\n for vehicle in 
vehicles:\n destroys = vehicle['stat']['destroys']\n kpm = str(vehicle['extra']['kpm'])[:5]\n kills = vehicle['stat']['kills']\n time = round(int(vehicle['stat']['time']) / 60 / 60, 2)\n name = config.dict_vehicles_bfh[vehicle['name']]\n string_vehicle += '''{name}:\nУбийств совершено: {kills}.\nУничтожено единиц: {destroys}.\nВремя использования: {time} ч.\nУбийств в минуту: {kpm}.\n\n'''.format(destroys=destroys, kpm=kpm, kills=kills, time=time, name=name)\n bot.send_message(message.chat.id, string_vehicle)\n bot.register_next_step_handler(message, more_stats)\n elif message.text == 'Завершить поиск':\n complete_the_search(message)\n elif message.text == 'Следующий игрок':\n next_player(message)\n elif message.content_type == 'text' and message.text not in ('/start', '/help', '/next_player'):\n text_reaction(message)\n\n\n# Bot's reaction to the text\n@bot.message_handler(content_types=['text'])\ndef text_reaction(message):\n markup = telebot.types.ReplyKeyboardRemove(selective=False)\n bot.send_message(message.chat.id, '''Я необщительный интроверт.\nЕсли хочешь просмотреть статистику игроков в Battlefield, используй команду: /start.\nЕсли хочешь пообщаться, найди какого-нибудь другого бота.''', reply_markup=markup)\n\n# Remove webhook, it fails sometimes the set if there is a previous webhook\nbot.remove_webhook()\n\n# Set webhook\nbot.set_webhook(url=WEBHOOK_URL_BASE+WEBHOOK_URL_PATH,\n certificate=open(WEBHOOK_SSL_CERT, 'r'))\n\n# Build ssl context\ncontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\ncontext.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)\n\n# Start aiohttp server\nweb.run_app(\n app,\n host=WEBHOOK_LISTEN,\n port=WEBHOOK_PORT,\n ssl_context=context,\n)\n\n# Uncomment follow option if you don't want to use webhooks:\n# if __name__ == '__main__':\n# 
bot.polling(none_stop=True)\n","sub_path":"BF4_BFH_Stats_WH.py","file_name":"BF4_BFH_Stats_WH.py","file_ext":"py","file_size_in_byte":14813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"295115823","text":"#! python3\n# PP4_linkVerification.py\n# Write a program that, given the URL of a web page, will attempt to download\n# every linked page on the page. The program should flag any pages\n# that have a 404 “Not Found” status code and print them out as broken links.\nimport requests, sys, bs4, random,os\nimport logging\n\nlogging.basicConfig(level=logging.ERROR, format=' %(asctime)s - %(levelname)s- %(message)s')\nlogging.debug('Start Program!')\nres = requests.get(sys.argv[1])\nres.raise_for_status()\nsession = requests.session()\nsoup = bs4.BeautifulSoup(res.text, \"html.parser\")\nurl_list = soup.select('a[href]')\nnew_dir = 'downloaded_links_from_'+sys.argv[1].replace('://', '_')\nos.mkdir(new_dir)\nos.chdir('.\\\\'+new_dir)\nfor link in url_list:\n if link.get('href').startswith('http'):\n url = link.get('href')\n res = requests.get(url)\n status = res.status_code\n if status in range(400, 500):\n logging.error(f'요청오류 : {status} url : {url}')\n break\n elif status in range(500,600):\n logging.error(f'서버 오류 : {status}')\n break\n link_file = open('from_'+str(random.randint(0, 1000))+'.html', 'wb+')\n for chunk in res.iter_content(100000):\n link_file.write(chunk)\n link_file.close()\n\nlogging.debug('END Program!')\n","sub_path":"Chapter11/PP4_linkVerification.py","file_name":"PP4_linkVerification.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"217697985","text":"#!/usr/bin/env python\n\nfrom models.utils import get_model\nimport torch\nimport torch.nn as nn\nfrom loaders import *\nfrom graph import *\nfrom utils import save_dipha\nfrom passers import Passer\nfrom config import SAVE_PATH, MAX_EPSILON, UPPER_DIM, NPROC\nfrom pathlib import Path\nfrom bettis import read_pd\nimport numpy as np\nimport pickle\nfrom pathlib import Path\nimport multiprocessing\n\n\ndef calc_adjacency_matrix(activations, verbose=False):\n \"\"\" Build matrix A of dimensions nxn where A_{ij} = pearson's correlation(a_i, a_j)\n \n Parameters\n ----------\n activations: list\n List of ndarray containing activation signals. Position in list\n corresponds to the layer (with activations, e.g. no pooling) in network. \n Each ndarray has shape (samples, depth, width, height). \n verbose: bool, optional\n Print verbose info (default: False).\n \n Returns\n -------\n ndarray\n a nxn matrix containng pearson's correlations between activations\n \"\"\"\n activations = signal_concat(activations)\n adj = adjacency(activations)\n\n if verbose:\n print('The dimension of the adjacency matrix is {}.'.format(adj.shape))\n print('Adj mean {}, min {}, max {}'.format(np.mean(adj), np.min(adj), np.max(adj)))\n\n return adj\n\ndef compute_persistent_homology(export_dir, adjacency_file, sparsemat_file, pershomology_file, \n max_epsilon, upper_dim, nproc, verbose=False):\n \"\"\" Filters adjacency matrix and computes persistence homology from it.\n\n Parameters\n ----------\n exportdir: str, Path\n Directory where temporary files are stored.\n adjacency_file: str\n Name of intermediate file where adjacency matrix is stored in.\n sparsemat_file: str\n Name of intermediate file where sparse adjacency matrix gets stored in.\n pershomology_file: str\n Name of intermediate file where persistent homology gets stored in.\n max_epsilon: float\n Upper limit for distance between nodes where they still can be connected\n to a simplex.\n upper_dim: int\n 
Highest dimension for which persistence homology is computed.\n nproc: int\n Number of CPU cores that can be used for processing.\n verbose: bool, optional\n Print verbose info (default: False).\n\n Returns\n -------\n \"\"\"\n export_dir = Path(export_dir)\n\n adjacency_export_file = export_dir / adjacency_file\n sparsemat_export_file = export_dir / sparsemat_file\n pershomology_export_file = export_dir / pershomology_file\n if verbose:\n print(f\"Adjacency matrix file: {adjacency_export_file.absolute()}\")\n print(f\"Sparse adjacency matrix file: {sparsemat_export_file.absolute()}\")\n print(f\"Persistent homology export file: {pershomology_export_file.absolute()}\")\n print(f\"Max Epsilon: {max_epsilon}\")\n print(f\"Upper Dimension: {upper_dim}\")\n print(f\"Number of used processor cores: {nproc}\")\n\n # create sparse matrix by cuttin off all values above 'max_epsilon' (?)\n os.system(\"./dipha/build/full_to_sparse_distance_matrix \"\n + str(max_epsilon) + \" \" + str(adjacency_export_file.absolute()) + \" \" + \n str(sparsemat_export_file.absolute()))\n\n os.system(\"mpiexec -n \" +str(nproc) + \" ./dipha/build/dipha --upper_dim \"\n + str(upper_dim) + \" --benchmark --dual \" + str(sparsemat_export_file.absolute()) + \" \"\n + str(pershomology_export_file.absolute()))\n\ndef calc_summary(export_dir, pershomology_file, dim, persistence, verbose=False):\n \"\"\" Calculates topological summary from given persistent homology\n\n Parameters\n ----------\n exportdir: str\n Directory where temporary files are stored in.\n pershomology_file: str\n Name of intermediate file where persistent homology is stored in.\n dim: int\n Dimension for which topological summary is returned\n persistence: float\n ?\n verbose: bool, optional\n Print verbose information (default: False).\n\n Returns\n -------\n (my, lambda): (float, float)\n Topological summary with 'my' being average life and 'lambda' being midlife \n of cavities in topology\n \"\"\"\n\n export_dir = 
Path(export_dir)\n pershomology_export_file = export_dir / pershomology_file\n\n birth, death = np.array(\n read_pd(pershomology_export_file, dimension=dim, persistence=persistence)\n )\n avg_life = np.mean(death - birth)\n midlife = np.mean((birth + death)/2)\n if verbose:\n print(\"Average Life: {}\".format(avg_life))\n print(\"Midlife: {}\".format(midlife))\n\n return (avg_life, midlife)\n\n\ndef compute_topological_summary(activations, export_dir=\"/tmp/\", adjacency_file='adj.bin', \n sparsemat_file='sparsemat.out', pershomology_file='ph.out',\n max_epsilon=0.3, upper_dim=1, nproc=None, dim=0, persistence=0.02,\n verbose=False):\n \"\"\" Compute topological summary for topology of network models\n functional space given by sample activations.\n\n Parameters\n ----------\n activations: list\n List of ndarray containing activation signals. Position in list\n corresponds to the layer (with activations, e.g. no pooling) in network. \n Each ndarray has shape (samples, depth, width, height). \n export_dir: str, optional\n Directory to store intermediate files in (default is '/tmp/').\n adjacency_file: str, optional\n Name of intermediate file to store adjacency matrix in. DIPHA algorithm\n later uses these files (default: 'adj.bin').\n sparsemat_file: str, optional\n Name of intermediate file where sparse adjacency matrix gets stored in \n (default: 'sparsemat.out').\n pershomology_file: str, optional\n Name of intermediate file where persistent homology gets stored in (default: 'ph.out').\n max_epsilon: float, optional\n upper limit for distance between nodes where they still can be connected\n to a simplex (default: 0.3).\n upper_dim: int, optional\n Highest dimension for which persistence homology is computed (default: 1).\n nproc: int, optinal\n Number of CPU cores that can be used for processing (default: all available, max: 16).\n dim: int, optional\n Dimension for which topological summary is returned (default: 0).\n persistence: float, optional\n ? 
(default: 0.02)\n\n Returns\n -------\n (my, lambda): (float, float)\n Topological summary with 'my' being average life and 'lambda' being midlife \n of cavities in topology.\n\n \"\"\"\n if(nproc is None):\n nproc = min(multiprocessing.cpu_count(), 16)\n \n export_dir = Path(export_dir)\n adjacency_export_path = export_dir / adjacency_file\n\n adj = calc_adjacency_matrix(activations=activations, verbose=verbose)\n\n # Write adjacency matrix to binary to use DIPHA \n # input for persistence homology.\n save_dipha(adjacency_export_path, 1-adj)\n compute_persistent_homology(export_dir=export_dir, adjacency_file=adjacency_file,\n sparsemat_file=sparsemat_file, pershomology_file=pershomology_file,\n max_epsilon=max_epsilon, upper_dim=upper_dim, nproc=nproc, verbose=verbose)\n\n return calc_summary(export_dir=export_dir, pershomology_file=pershomology_file, \n dim=dim, persistence=persistence, verbose=verbose)\n\nif __name__ == '__main__':\n with open('../../alexnet_activations.pkl', 'rb') as f:\n data = pickle.load(f)\n #activations = [activation.detach().numpy() for activation in data]\n activations = data\n compute_topological_summary(activations, verbose=True)\n","sub_path":"topological_summary.py","file_name":"topological_summary.py","file_ext":"py","file_size_in_byte":7629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"480739741","text":"# Given an integer n and an integer start.\n\n# Define an array nums where nums[i] = start + 2*i (0-indexed)\n# and n == nums.length.\n\n# Return the bitwise XOR of all elements of nums.\n\n \n\n# Example 1:\n\n# Input: n = 5, start = 0\n# Output: 8\n# Explanation: Array nums is equal to [0, 2, 4, 6, 8] \n# where (0 ^ 2 ^ 4 ^ 6 ^ 8) = 8.\n# Where \"^\" corresponds to bitwise XOR operator.\n\n#linear t const s\ndef xor(n, start):\n\tresult = 0\n\n\tfor i in range(n):\n\t\tresult ^= start + 2*i\n\n\treturn result\n\nn = 5\nstart = 0\nprint(xor(n, start))","sub_path":"leetcode_problems/easy/xor_arr.py","file_name":"xor_arr.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"194076412","text":"import sklearn.model_selection\nfrom torch.utils.data import Dataset\nimport wfdb\nimport torch\nimport numpy as np\nimport os\n\nfrom dbloader import *\n\n\nclass CpscDataset(Dataset):\n def __init__(self, root_dir, record_list, pre_processing):\n self.sampling_rate = 500\n self.cls_list = ['Normal', 'AF', 'I-AVB', 'LBBB', 'RBBB', 'PAC', 'PVC', 'STD', 'STE']\n self.root_dir = root_dir\n self.record_list = record_list\n self.pre_processing = pre_processing\n\n def __len__(self):\n return len(self.record_list)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n record_name = self.record_list[idx]\n record = np.load(f'{self.root_dir}/{record_name}.npy', allow_pickle=True).item()\n record['ecg'] = np.transpose(record['ecg'])\n label = np.zeros(len(self.cls_list))\n for beat in record['beats']:\n label[list(self.cls_list).index(beat)] = 1\n record['beats'] = label\n\n if self.pre_processing:\n record = self.pre_processing(record)\n\n return record['ecg'], record['beats'], record['id']\n\n def pre_pre_processing(self, root_dir, save_dir, base_sec=10, max_sec=20):\n \"\"\"\n 1. Selecting a record by limited duration\n 2. Normalization along all leads\n 3. 
Zero-padding\n :param root_dir: Directory where .hea and .mat files located\n :param save_dir:\n :param base_sec:\n :param max_sec:\n :return:\n \"\"\"\n raw_record_list = np.loadtxt(f'./RECORDS', delimiter=',', dtype=str)\n file_dir = root_dir\n os.makedirs(save_dir, exist_ok=True)\n\n pass_cnt = 0\n cls_cnt = [0] * len(self.cls_list)\n multi_label_cnt = 0\n record_list = []\n for record_name in raw_record_list:\n record = wfdb.rdrecord(f'{file_dir}/{record_name}')\n ecg = record.p_signal # [time, channel]\n beats = record.comments[2][4:].split(',')\n # Discard a record over [sec_limit]\n if len(ecg) > max_sec * self.sampling_rate:\n pass_cnt += 1\n else: # Selected record\n record_list.append(record_name)\n # Class count\n if len(beats) > 1:\n multi_label_cnt += 1\n for beat in beats:\n cls_cnt[list(self.cls_list).index(beat)] += 1\n # Pick center of record when it over base duration\n if len(ecg) > base_sec * self.sampling_rate:\n diff = len(ecg) - base_sec * self.sampling_rate\n gap = int(diff / 2)\n ecg = ecg[gap: len(ecg) - gap, :]\n if len(ecg) != base_sec * self.sampling_rate:\n ecg = ecg[1:, :]\n # Normalization by mean and std\n ecg = (ecg - np.mean(ecg)) / np.std(ecg)\n # Padding when record lower than base duration\n if len(ecg) < base_sec * self.sampling_rate:\n diff = base_sec * self.sampling_rate - len(ecg)\n zeros = np.zeros([diff, ecg.shape[1]])\n ecg = np.append(ecg, zeros, axis=0)\n # Save with signals and class information\n item = {'ecg': ecg, 'beats': beats, 'id': record_name.split('A')[-1]}\n np.save(f'{save_dir}/{record_name}', item)\n np.savetxt(f'{save_dir}/record', record_list, fmt='%s', delimiter=',')\n print(f'{len(raw_record_list) - pass_cnt} are selected, '\n f'{pass_cnt} records are discarded from {len(raw_record_list)} records')\n print(f'There are {multi_label_cnt} records containing multi-label')\n for cls_name, cnt in zip(self.cls_list, cls_cnt):\n print(f'{cls_name} : {cnt}')\n print(f'Total : {np.sum(cls_cnt)}')\n tr, te = 
sklearn.model_selection.train_test_split(record_list, test_size=.2, random_state=4)\n np.savetxt(f'{save_dir}/train', tr, fmt='%s', delimiter=',')\n np.savetxt(f'{save_dir}/val', te, fmt='%s', delimiter=',')\n print(f'# of train records={len(tr)}')\n print(f'# of val records={len(te)}')\n\n\nclass ToTensor(object):\n def __call__(self, sample):\n for key, value in sample.items():\n if key != 'id':\n sample[key] = torch.tensor(value, dtype=torch.float)\n else:\n sample[key] = torch.tensor(int(value), dtype=torch.int)\n\n return sample\n","sub_path":"cpsc_db.py","file_name":"cpsc_db.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"645739768","text":"import argparse\nfrom collections import Counter\nimport re\nfrom pprint import pprint\nimport os\n\n\ndef is_file_valid(filepath):\n \"\"\" Function checks whether file is OK or not (also verify UTF-8)\n\n Returns:\n filepath: Path to file.\n\n Notes:\n Path to file will be returned only if no errors occurred\n \"\"\"\n if not os.path.isfile(filepath):\n raise TypeError('Not file')\n if os.path.splitext(filepath)[-1].lower() != '.txt':\n raise TypeError('Not .txt file')\n try:\n with open(filepath, 'rb+') as file:\n file.read().decode('utf-8')\n except UnicodeDecodeError:\n return None\n return filepath\n\n\ndef parse_input_file(filepath):\n \"\"\" Function which extracts 10 most used hashtags from array of tweets and\n then finds 5 most used words for every hashtag from 10's list\n\n Args:\n filepath: path to file (*.txt)\n \"\"\"\n words = re.findall(r'[#][^\\s#]+', open(filepath).read())\n popular_tweets = {\n word: Counter() for word in dict(Counter(words).most_common(10)).keys()\n }\n\n #\n print(f'The most common hashtags are - '\n f'{\", \".join(list(popular_tweets.keys()))}')\n #\n print(f'Verification output - {Counter(words).most_common(10)}\\n')\n\n # Any character combination except \\n (new line)\n tweets = re.findall(r'.+', open(filepath).read())\n # List comp used here because of best readability (it's working slower btw)\n # There were no words about speed and accuracy, only about readability\n # In case of real project it is better to use map or generator\n [\n popular_tweets[word].update(\n Counter(\n re.findall(r'(?u)(?> 56) & 0xff)\r\n\t\t\to_byte.append((i_word[i] >> 48) & 0xff)\r\n\t\t\to_byte.append((i_word[i] >> 40) & 0xff)\r\n\t\t\to_byte.append((i_word[i] >> 32) & 0xff)\r\n\t\t\to_byte.append((i_word[i] >> 24) & 0xff)\r\n\t\t\to_byte.append((i_word[i] >> 16) & 0xff)\r\n\t\t\to_byte.append((i_word[i] >> 8) & 0xff)\r\n\t\t\to_byte.append((i_word[i] >> 0) & 0xff)\r\n\r\n\t\treturn o_byte\r\n\r\n\tdef 
__crop(self, size, data, right):\r\n\t\tlength = int((size + 7) / 8)\r\n\t\tremain = size % 8\r\n\r\n\t\tif right:\r\n\t\t\tdata = data[len(data) - length:]\r\n\t\telse:\r\n\t\t\tdata = data[:length]\r\n\r\n\t\tif remain > 0:\r\n\t\t\tdata[length - 1] &= (0xff << (8 - remain)) & 0xff\r\n\r\n\t\treturn data\r\n\r\n\tdef __hash(self, size, data, key, levels):\r\n\t\tb = 512\r\n\t\tc = 128\r\n\t\tn = 89\r\n\t\td = size\r\n\t\tM = data\r\n\r\n\t\tK = key[:64]\r\n\t\tk = len(K)\r\n\r\n\t\twhile len(K) < 64:\r\n\t\t\tK.append(0x00)\r\n\r\n\t\tK = self.__to_word(K)\r\n\r\n\t\tr = max(80 if k else 0, 40 + int(d / 4))\r\n\r\n\t\tL = levels\r\n\t\tell = 0\r\n\r\n\t\tS0 = 0x0123456789abcdef\r\n\t\tSm = 0x7311c2812425cfa0\r\n\r\n\t\tQ = [\r\n\t\t\t0x7311c2812425cfa0, 0x6432286434aac8e7, 0xb60450e9ef68b7c1,\r\n\t\t\t0xe8fb23908d9f06f1, 0xdd2e76cba691e5bf, 0x0cd0d63b2c30bc41,\r\n\t\t\t0x1f8ccf6823058f8a, 0x54e5ed5b88e3775d, 0x4ad12aae0a6d6031,\r\n\t\t\t0x3e7f16bb88222e0d, 0x8af8671d3fb50c2c, 0x995ad1178bd25c31,\r\n\t\t\t0xc878c1dd04c4b633, 0x3b72066c7a1552ac, 0x0d6f3522631effcb\r\n\t\t];\r\n\r\n\t\tt = [17, 18, 21, 31, 67, 89]\r\n\t\trs = [10, 5, 13, 10, 11, 12, 2, 7, 14, 15, 7, 13, 11, 7, 6, 12]\r\n\t\tls = [11, 24, 9, 16, 15, 9, 27, 15, 6, 2, 29, 8, 15, 5, 31, 9]\r\n\r\n\t\tdef f(N):\r\n\t\t\tS = S0\r\n\t\t\tA = list(N)\r\n\r\n\t\t\tj = 0\r\n\t\t\ti = n\r\n\r\n\t\t\twhile j < r:\r\n\t\t\t\tfor s in range(16):\r\n\t\t\t\t\tx = S\r\n\t\t\t\t\tx ^= A[i + s - t[5]]\r\n\t\t\t\t\tx ^= A[i + s - t[0]]\r\n\t\t\t\t\tx ^= A[i + s - t[1]] & A[i + s - t[2]]\r\n\t\t\t\t\tx ^= A[i + s - t[3]] & A[i + s - t[4]]\r\n\t\t\t\t\tx ^= x >> rs[s]\r\n\r\n\t\t\t\t\tif len(A) <= i + s:\r\n\t\t\t\t\t\twhile len(A) <= i + s:\r\n\t\t\t\t\t\t\tA.append(0x00)\r\n\r\n\t\t\t\t\tA[i + s] = x ^ ((x << ls[s]) & 0xffffffffffffffff)\r\n\r\n\t\t\t\tS = (((S << 1) & 0xffffffffffffffff) ^ (S >> 63)) ^ (S & Sm)\r\n\r\n\t\t\t\tj += 1\r\n\t\t\t\ti += 16\r\n\r\n\t\t\treturn A[(len(A) - 16):]\r\n\r\n\t\tdef mid(B, C, i, 
p, z):\r\n\t\t\tU = ((ell & 0xff) << 56) | i & 0xffffffffffffff\r\n\t\t\tV = ((r & 0xfff) << 48) | ((L & 0xff) << 40) | ((z & 0xf) << 36) | ((p & 0xffff) << 20) | ((k & 0xff) << 12) | (d & 0xfff)\r\n\r\n\t\t\treturn f(Q + K + [U, V] + C + B)\r\n\r\n\t\tdef par(M):\r\n\t\t\tP = 0\r\n\t\t\tB = []\r\n\t\t\tC = []\r\n\t\t\tz = 0 if len(M) > b else 1\r\n\r\n\t\t\twhile len(M) < 1 or (len(M) % b) > 0:\r\n\t\t\t\tM.append(0x00)\r\n\t\t\t\tP += 8\r\n\r\n\t\t\tM = self.__to_word(M)\r\n\r\n\t\t\twhile len(M) > 0:\r\n\t\t\t\tB.append(M[:int(b / 8)])\r\n\t\t\t\tM = M[int(b / 8):]\r\n\r\n\t\t\ti = 0\r\n\t\t\tp = 0\r\n\t\t\tl = len(B)\r\n\r\n\t\t\twhile i < l:\r\n\t\t\t\tp = P if i == (len(B) - 1) else 0\r\n\t\t\t\tC += mid(B[i], [], i, p, z)\r\n\r\n\t\t\t\ti += 1\r\n\t\t\t\tp = 0\r\n\r\n\t\t\treturn self.__from_word(C)\r\n\r\n\t\tdef seq(M):\r\n\t\t\tP = 0\r\n\t\t\tB = []\r\n\t\t\tC = [0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]\r\n\r\n\t\t\twhile len(M) < 1 or (len(M) % (b - c)) > 0:\r\n\t\t\t\tM.append(0x00)\r\n\t\t\t\tP += 8\r\n\r\n\t\t\tM = self.__to_word(M)\r\n\r\n\t\t\twhile len(M) > 0:\r\n\t\t\t\tB.append(M[:int((b - c) / 8)])\r\n\t\t\t\tM = M[int((b - c) / 8):]\r\n\r\n\t\t\ti = 0\r\n\t\t\tp = 0\r\n\t\t\tl = len(B)\r\n\r\n\t\t\twhile i < l:\r\n\t\t\t\tp = P if i == (len(B) - 1) else 0\r\n\t\t\t\tz = 1 if i == (len(B) - 1) else 0\r\n\t\t\t\tC = mid(B[i], C, i, p, z)\r\n\r\n\t\t\t\ti += 1\r\n\t\t\t\tp = 0\r\n\r\n\t\t\treturn self.__from_word(C)\r\n\r\n\t\twhile True:\r\n\t\t\tell += 1\r\n\t\t\tM = seq(M) if ell > L else par(M)\r\n\r\n\t\t\tif len(M) == c:\r\n\t\t\t\tbreak\r\n\r\n\t\treturn self.__crop(d, M, True)\r\n\r\n\tdef __bytes(self, data):\r\n\t\tdata = (data)\r\n\t\to_byte = list(data)\r\n\r\n\t\treturn o_byte\r\n\r\n\r\n\tdef __prehash(self, data, size, key, levels):\r\n\t\tdata = self.__bytes(data)\r\n\t\tkey = self.__bytes(key)\r\n\r\n\t\tif size <= 0:\r\n\t\t\tsize = 1\r\n\t\telif size > 512:\r\n\t\t\tsize = 
512\r\n\r\n\t\treturn self.__hash(size, data, key, levels)\r\n\r\n\tdef hex(self, data=\"\", size=512, key=\"\", levels=64):\r\n\t\tbyte = self.__prehash(data, size, key, levels)\r\n\t\thexstr = \"\"\r\n\r\n\t\tfor i in byte:\r\n\t\t\thexstr += \"%02x\" % i\r\n\r\n\t\treturn hexstr\r\n\r\n\tdef raw(self, data=\"\", size=512, key=\"\", levels=64):\r\n\t\tbyte = self.__prehash(data, size, key, levels)\r\n\t\tf = bytes(byte)\r\n\t\treturn(f)\r\n\r\n#Define HKDF for MD6\r\ndef hkdf_expand2(pseudo_random_key, info=b\"\", length=64, hash=md6hash):\r\n hash_len = 64\r\n length = int(length)\r\n t = b\"\"\r\n okm = b\"\"\r\n for i in range(ceil(length / hash_len)):\r\n t = bytearray(md6hash().raw(pseudo_random_key+(bytes([1 + i]))))\r\n okm += t\r\n return bytearray(okm[:length])\r\n\r\nclass MD6StreamCipher:\r\n def encrypt(plaintext, key):\r\n stream = bytearray(hkdf_expand2(key, info=b\"\", length=(len(plaintext)), hash=md6hash))\r\n return bytearray(x^y for x, y in zip(plaintext, stream))\r\n\r\n\r\nprint (\"Input plaintext:\")\r\nplaintext = bytearray(input().encode())\r\nprint (\"Input Password:\")\r\npassword = bytearray(input().encode())\r\nsalt = bytearray(secrets.token_bytes(32))\r\nkey = bytearray(hashlib.pbkdf2_hmac('sha512', password, salt, 500000, dklen=64))\r\nciphertext = bytearray(MD6StreamCipher.encrypt(plaintext, key))\r\nprint (\"Ciphertext:\", base64.b64encode(ciphertext+salt).decode('utf-8'))\r\n\r\n#Clear memory\r\nfor i in range(len(plaintext)):\r\n plaintext[i] = 0\r\nfor i in range(len(password)):\r\n password[i] = 0\r\nfor i in range(len(salt)):\r\n salt[i] = 0\r\nfor i in range(len(key)):\r\n key[i] = 0\r\nfor i in range(len(ciphertext)):\r\n ciphertext[i] = 0\r\ninput()\r\n","sub_path":"MD6 Stream Cipher - encrypt.py","file_name":"MD6 Stream Cipher - encrypt.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"208815196","text":"# \n# Copyright 2013-2015 University of Southern California\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA database introspection layer.\n\nAt present, the capabilities of this module are limited to introspection of an \nexisting database model. This module does not attempt to capture all of the \ndetails that could be found in an entity-relationship model or in the standard \ninformation_schema of a relational database. It represents the model as \nneeded by other modules of the ermrest project.\n\"\"\"\n\nimport web\n\nfrom .. 
import exception\nfrom ..util import table_exists, view_exists\nfrom .misc import frozendict, Model, Schema, annotatable_classes\nfrom .type import Type, ArrayType, canonicalize_column_type\nfrom .column import Column\nfrom .table import Table\nfrom .key import Unique, ForeignKey, KeyReference, PseudoUnique, PseudoKeyReference\n\ndef introspect(cur, config=None):\n \"\"\"Introspects a Catalog (i.e., a database).\n \n This function (currently) does not attempt to catch any database \n (or other) exceptions.\n \n The 'conn' parameter must be an open connection to a database.\n \n Returns the introspected Model instance.\n \"\"\"\n \n # this postgres-specific code borrows bits from its information_schema view definitions\n # but is trimmed down to be a cheaper query to execute\n\n # Select all schemas from database, excluding system schemas\n SELECT_SCHEMAS = '''\nSELECT\n current_database() AS catalog_name,\n nc.nspname AS schema_name,\n obj_description(nc.oid) AS schema_comment\nFROM \n pg_catalog.pg_namespace nc\nWHERE\n nc.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')\n AND NOT pg_is_other_temp_schema(nc.oid);\n '''\n\n # Select all column metadata from database, excluding system schemas\n SELECT_TABLES = '''\nSELECT\n current_database() AS table_catalog,\n nc.nspname AS table_schema,\n c.relname AS table_name,\n c.relkind AS table_kind,\n obj_description(c.oid) AS table_comment\nFROM pg_catalog.pg_class c\nJOIN pg_catalog.pg_namespace nc ON (c.relnamespace = nc.oid)\nLEFT JOIN pg_catalog.pg_attribute a ON (a.attrelid = c.oid)\nWHERE nc.nspname NOT IN ('information_schema', 'pg_catalog', 'pg_toast')\n AND NOT pg_is_other_temp_schema(nc.oid) \n AND (c.relkind = ANY (ARRAY['r'::\"char\", 'v'::\"char\", 'f'::\"char\", 'm'::\"char\"]))\n AND (pg_has_role(c.relowner, 'USAGE'::text) OR has_column_privilege(c.oid, a.attnum, 'SELECT, INSERT, UPDATE, REFERENCES'::text))\nGROUP BY nc.nspname, c.relname, c.relkind, c.oid\n '''\n \n SELECT_COLUMNS = 
'''\nSELECT\n current_database() AS table_catalog,\n nc.nspname AS table_schema,\n c.relname AS table_name,\n c.relkind AS table_kind,\n obj_description(c.oid) AS table_comment,\n array_agg(a.attname::text ORDER BY a.attnum) AS column_names,\n array_agg(pg_get_expr(ad.adbin, ad.adrelid)::text ORDER BY a.attnum) AS default_values,\n array_agg(\n CASE\n WHEN t.typtype = 'd'::\"char\" THEN\n CASE\n WHEN bt.typelem <> 0::oid AND bt.typlen = (-1) THEN 'ARRAY'::text\n WHEN nbt.nspname = 'pg_catalog'::name THEN format_type(t.typbasetype, NULL::integer)\n ELSE 'USER-DEFINED'::text\n END\n ELSE\n CASE\n WHEN t.typelem <> 0::oid AND t.typlen = (-1) THEN 'ARRAY'::text\n WHEN nt.nspname = 'pg_catalog'::name THEN format_type(a.atttypid, NULL::integer)\n ELSE 'USER-DEFINED'::text\n END\n END::text\n ORDER BY a.attnum) AS data_types,\n array_agg(\n CASE\n WHEN t.typtype = 'd'::\"char\" THEN\n CASE\n WHEN bt.typelem <> 0::oid AND bt.typlen = (-1) THEN format_type(bt.typelem, NULL::integer)\n WHEN nbt.nspname = 'pg_catalog'::name THEN NULL\n ELSE 'USER-DEFINED'::text\n END\n ELSE\n CASE\n WHEN t.typelem <> 0::oid AND t.typlen = (-1) THEN format_type(t.typelem, NULL::integer)\n WHEN nt.nspname = 'pg_catalog'::name THEN NULL\n ELSE 'USER-DEFINED'::text\n END\n END::text\n ORDER BY a.attnum) AS element_types,\n array_agg(\n a.attnotnull\n ORDER BY a.attnum) AS notnull,\n array_agg(\n col_description(c.oid, a.attnum)\n ORDER BY a.attnum) AS comments\nFROM pg_catalog.pg_attribute a\nJOIN pg_catalog.pg_class c ON (a.attrelid = c.oid)\nJOIN pg_catalog.pg_namespace nc ON (c.relnamespace = nc.oid)\nLEFT JOIN pg_catalog.pg_attrdef ad ON (a.attrelid = ad.adrelid AND a.attnum = ad.adnum)\nJOIN pg_catalog.pg_type t ON (t.oid = a.atttypid)\nJOIN pg_catalog.pg_namespace nt ON (t.typnamespace = nt.oid)\nLEFT JOIN pg_catalog.pg_type bt ON (t.typtype = 'd'::\"char\" AND t.typbasetype = bt.oid)\nLEFT JOIN pg_catalog.pg_namespace nbt ON (bt.typnamespace = nbt.oid)\nWHERE nc.nspname NOT IN 
('information_schema', 'pg_catalog', 'pg_toast')\n AND NOT pg_is_other_temp_schema(nc.oid) \n AND a.attnum > 0\n AND NOT a.attisdropped\n AND (c.relkind = ANY (ARRAY['r'::\"char\", 'v'::\"char\", 'f'::\"char\", 'm'::\"char\"]))\n AND (pg_has_role(c.relowner, 'USAGE'::text) OR has_column_privilege(c.oid, a.attnum, 'SELECT, INSERT, UPDATE, REFERENCES'::text))\nGROUP BY nc.nspname, c.relname, c.relkind, c.oid\n '''\n \n # Select the unique key reference columns\n PKEY_COLUMNS = '''\n SELECT\n ncon.nspname::information_schema.sql_identifier AS pk_constraint_schema,\n con.conname::information_schema.sql_identifier AS pk_constraint_name,\n npk.nspname::information_schema.sql_identifier AS pk_table_schema,\n pkcl.relname::information_schema.sql_identifier AS pk_table_name,\n (SELECT array_agg(pka.attname ORDER BY i.i)\n FROM generate_subscripts(con.conkey, 1) i\n JOIN pg_catalog.pg_attribute pka ON con.conrelid = pka.attrelid AND con.conkey[i.i] = pka.attnum\n ) AS pk_column_names,\n obj_description(con.oid) AS constraint_comment\n FROM pg_namespace ncon\n JOIN pg_constraint con ON ncon.oid = con.connamespace\n JOIN pg_class pkcl ON con.conrelid = pkcl.oid AND con.contype = ANY (ARRAY['u'::\"char\",'p'::\"char\"])\n JOIN pg_namespace npk ON pkcl.relnamespace = npk.oid\n WHERE has_table_privilege(pkcl.oid, 'INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER'::text) OR has_any_column_privilege(pkcl.oid, 'INSERT, UPDATE, REFERENCES'::text) \n ;\n'''\n\n PSEUDO_PKEY_COLUMNS = '''\nSELECT \n id AS pk_id,\n schema_name AS pk_table_schema,\n table_name AS pk_table_name,\n column_names AS pk_column_names,\n comment AS constraint_comment\nFROM _ermrest.model_pseudo_key ;\n'''\n \n # Select the foreign key reference columns\n FKEY_COLUMNS = '''\n SELECT\n ncon.nspname::information_schema.sql_identifier AS fk_constraint_schema,\n con.conname::information_schema.sql_identifier AS fk_constraint_name,\n nfk.nspname::information_schema.sql_identifier AS fk_table_schema,\n 
fkcl.relname::information_schema.sql_identifier AS fk_table_name,\n (SELECT array_agg(fka.attname ORDER BY i.i)\n FROM generate_subscripts(con.conkey, 1) i\n JOIN pg_catalog.pg_attribute fka ON con.conrelid = fka.attrelid AND con.conkey[i.i] = fka.attnum\n ) AS fk_column_names,\n nk.nspname::information_schema.sql_identifier AS uq_table_schema,\n kcl.relname::information_schema.sql_identifier AS uq_table_name,\n (SELECT array_agg(ka.attname ORDER BY i.i)\n FROM generate_subscripts(con.confkey, 1) i\n JOIN pg_catalog.pg_attribute ka ON con.confrelid = ka.attrelid AND con.confkey[i.i] = ka.attnum\n ) AS uq_column_names,\n CASE con.confdeltype\n WHEN 'c'::\"char\" THEN 'CASCADE'::text\n WHEN 'n'::\"char\" THEN 'SET NULL'::text\n WHEN 'd'::\"char\" THEN 'SET DEFAULT'::text\n WHEN 'r'::\"char\" THEN 'RESTRICT'::text\n WHEN 'a'::\"char\" THEN 'NO ACTION'::text\n ELSE NULL::text\n END::information_schema.character_data AS rc_delete_rule,\n CASE con.confupdtype\n WHEN 'c'::\"char\" THEN 'CASCADE'::text\n WHEN 'n'::\"char\" THEN 'SET NULL'::text\n WHEN 'd'::\"char\" THEN 'SET DEFAULT'::text\n WHEN 'r'::\"char\" THEN 'RESTRICT'::text\n WHEN 'a'::\"char\" THEN 'NO ACTION'::text\n ELSE NULL::text\n END::information_schema.character_data AS rc_update_rule,\n obj_description(con.oid) AS constraint_comment\n FROM pg_namespace ncon\n JOIN pg_constraint con ON ncon.oid = con.connamespace\n JOIN pg_class fkcl ON con.conrelid = fkcl.oid AND con.contype = 'f'::\"char\"\n JOIN pg_class kcl ON con.confrelid = kcl.oid AND con.contype = 'f'::\"char\"\n JOIN pg_namespace nfk ON fkcl.relnamespace = nfk.oid\n JOIN pg_namespace nk ON kcl.relnamespace = nk.oid\n WHERE (pg_has_role(kcl.relowner, 'USAGE'::text) \n OR has_table_privilege(kcl.oid, 'INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER'::text) OR has_any_column_privilege(kcl.oid, 'INSERT, UPDATE, REFERENCES'::text))\n AND (pg_has_role(fkcl.relowner, 'USAGE'::text) \n OR has_table_privilege(fkcl.oid, 'INSERT, UPDATE, DELETE, 
TRUNCATE, REFERENCES, TRIGGER'::text) OR has_any_column_privilege(fkcl.oid, 'INSERT, UPDATE, REFERENCES'::text))\n ;\n'''\n\n PSEUDO_FKEY_COLUMNS = '''\nSELECT\n id AS fk_id,\n from_schema_name AS fk_table_schema,\n from_table_name AS fk_table_name,\n from_column_names AS fk_column_names,\n to_schema_name AS uq_table_schema,\n to_table_name AS uq_table_name,\n to_column_names AS uq_column_names,\n comment AS constraint_comment\nFROM _ermrest.model_pseudo_keyref ;\n'''\n\n # PostgreSQL denotes array types with the string 'ARRAY'\n ARRAY_TYPE = 'ARRAY'\n \n # Dicts for quick lookup\n schemas = dict()\n tables = dict()\n columns = dict()\n pkeys = dict()\n fkeys = dict()\n fkeyrefs = dict()\n\n model = Model()\n \n #\n # Introspect schemas, tables, columns\n #\n \n # get schemas (including empty ones)\n cur.execute(SELECT_SCHEMAS);\n for dname, sname, scomment in cur:\n if (dname, sname) not in schemas:\n schemas[(dname, sname)] = Schema(model, sname, scomment)\n\n # get columns\n cur.execute(SELECT_COLUMNS)\n for dname, sname, tname, tkind, tcomment, cnames, default_values, data_types, element_types, notnull, comments in cur:\n\n cols = []\n for i in range(0, len(cnames)):\n # Determine base type\n is_array = (data_types[i] == ARRAY_TYPE)\n if is_array:\n base_type = ArrayType(Type(canonicalize_column_type(element_types[i], default_values[i], config, True)))\n else:\n base_type = Type(canonicalize_column_type(data_types[i], default_values[i], config, True))\n \n # Translate default_value\n try:\n default_value = base_type.default_value(default_values[i])\n except ValueError:\n # TODO: raise informative exception instead of masking error\n default_value = None\n\n col = Column(cnames[i].decode('utf8'), i, base_type, default_value, not notnull[i], comments[i])\n cols.append( col )\n columns[(dname, sname, tname, cnames[i])] = col\n \n # Build up the model as we go without redundancy\n if (dname, sname) not in schemas:\n schemas[(dname, sname)] = Schema(model, sname)\n 
assert (dname, sname, tname) not in tables\n tables[(dname, sname, tname)] = Table(schemas[(dname, sname)], tname, cols, tkind, tcomment)\n\n # also get empty tables\n cur.execute(SELECT_TABLES)\n for dname, sname, tname, tkind, tcomment in cur:\n if (dname, sname) not in schemas:\n schemas[(dname, sname)] = Schema(model, sname)\n if (dname, sname, tname) not in tables:\n tables[(dname, sname, tname)] = Table(schemas[(dname, sname)], tname, [], tkind, tcomment)\n\n #\n # Introspect uniques / primary key references, aggregated by constraint\n #\n def _introspect_pkey(pk_table_schema, pk_table_name, pk_column_names, pk_comment, pk_factory):\n try:\n pk_cols = [ columns[(dname, pk_table_schema, pk_table_name, pk_column_name)]\n for pk_column_name in pk_column_names ]\n except KeyError:\n return\n\n pk_colset = frozenset(pk_cols)\n\n # each constraint implies a pkey but might be duplicate\n pk = pk_factory(pk_colset)\n if pk_colset not in pkeys:\n pkeys[pk_colset] = pk\n else:\n pkeys[pk_colset].constraints.add(pk)\n if pk_comment:\n # save at least one comment in case multiple constraints have same key columns\n pkeys[pk_colset].comment = pk_comment\n \n cur.execute(PKEY_COLUMNS)\n for pk_schema, pk_name, pk_table_schema, pk_table_name, pk_column_names, pk_comment in cur:\n _introspect_pkey(\n pk_table_schema, pk_table_name, pk_column_names, pk_comment,\n lambda pk_colset: Unique(pk_colset, (pk_schema, pk_name), pk_comment)\n )\n\n cur.execute(PSEUDO_PKEY_COLUMNS)\n for pk_id, pk_table_schema, pk_table_name, pk_column_names, pk_comment in cur:\n _introspect_pkey(\n pk_table_schema, pk_table_name, pk_column_names, pk_comment,\n lambda pk_colset: PseudoUnique(pk_colset, pk_id, pk_comment)\n )\n \n #\n # Introspect foreign keys references, aggregated by reference constraint\n #\n def _introspect_fkr(\n fk_table_schema, fk_table_name, fk_column_names,\n uq_table_schema, uq_table_name, uq_column_names, fk_comment,\n fkr_factory\n ):\n try:\n fk_cols = [ columns[(dname, 
fk_table_schema, fk_table_name, fk_column_names[i])]\n for i in range(0, len(fk_column_names)) ]\n pk_cols = [ columns[(dname, uq_table_schema, uq_table_name, uq_column_names[i])]\n for i in range(0, len(uq_column_names)) ]\n except KeyError:\n return\n\n fk_colset = frozenset(fk_cols)\n pk_colset = frozenset(pk_cols)\n fk_ref_map = frozendict(dict([ (fk_cols[i], pk_cols[i]) for i in range(0, len(fk_cols)) ]))\n\n # each reference constraint implies a foreign key but might be duplicate\n if fk_colset not in fkeys:\n fkeys[fk_colset] = ForeignKey(fk_colset)\n\n fk = fkeys[fk_colset]\n pk = pkeys[pk_colset]\n\n # each reference constraint implies a foreign key reference but might be duplicate\n fkr = fkr_factory(fk, pk, fk_ref_map)\n if fk_ref_map not in fk.references:\n fk.references[fk_ref_map] = fkr\n else:\n fk.references[fk_ref_map].constraints.add(fkr)\n if fk_comment:\n # save at least one comment in case multiple csontraints have same key mapping\n fk.references[fk_ref_map].comment = fk_comment\n\n \n cur.execute(FKEY_COLUMNS)\n for fk_schema, fk_name, fk_table_schema, fk_table_name, fk_column_names, \\\n uq_table_schema, uq_table_name, uq_column_names, on_delete, on_update, fk_comment \\\n in cur:\n _introspect_fkr(\n fk_table_schema, fk_table_name, fk_column_names,\n uq_table_schema, uq_table_name, uq_column_names, fk_comment,\n lambda fk, pk, fk_ref_map: KeyReference(fk, pk, fk_ref_map, on_delete, on_update, (fk_schema, fk_name), comment=fk_comment)\n )\n \n cur.execute(PSEUDO_FKEY_COLUMNS)\n for fk_id, fk_table_schema, fk_table_name, fk_column_names, \\\n uq_table_schema, uq_table_name, uq_column_names, fk_comment \\\n in cur:\n _introspect_fkr(\n fk_table_schema, fk_table_name, fk_column_names,\n uq_table_schema, uq_table_name, uq_column_names, fk_comment,\n lambda fk, pk, fk_ref_map: PseudoKeyReference(fk, pk, fk_ref_map, fk_id, comment=fk_comment)\n )\n \n #\n # Introspect ERMrest model overlay annotations\n #\n for klass in annotatable_classes:\n if 
hasattr(klass, 'introspect_helper'):\n klass.introspect_helper(cur, model)\n\n # save our private schema in case we want to unhide it later...\n model.ermrest_schema = model.schemas['_ermrest']\n del model.schemas['_ermrest']\n \n if not table_exists(cur, '_ermrest', 'valuemap'):\n # rebuild missing table and add it to model manually since we already introspected\n web.debug('NOTICE: adding empty valuemap during model introspection')\n model.recreate_value_map(cur.connection, cur, empty=True)\n valuemap_columns = ['schema', 'table', 'column', 'value']\n for i in range(len(valuemap_columns)):\n valuemap_columns[i] = Column(valuemap_columns[i], i, Type(canonicalize_column_type('text', 'NULL', config, True)), None)\n model.ermrest_schema.tables['valuemap'] = Table(model.ermrest_schema, 'valuemap', valuemap_columns, 't')\n\n return model\n\n","sub_path":"ermrest/model/introspect.py","file_name":"introspect.py","file_ext":"py","file_size_in_byte":17199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"226854558","text":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file or at\n# https://developers.google.com/open-source/licenses/bsd\n\n\"\"\"A class to display details about each project member.\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport logging\nimport time\n\nfrom third_party import ezt\n\nfrom framework import exceptions\nfrom framework import framework_bizobj\nfrom framework import framework_helpers\nfrom framework import framework_views\nfrom framework import jsonfeed\nfrom framework import permissions\nfrom framework import servlet\nfrom framework import template_helpers\nfrom framework import urls\nfrom project import project_helpers\nfrom project import project_views\n\nCHECKBOX_PERMS = [\n permissions.VIEW,\n permissions.COMMIT,\n permissions.CREATE_ISSUE,\n permissions.ADD_ISSUE_COMMENT,\n permissions.EDIT_ISSUE,\n permissions.EDIT_ISSUE_OWNER,\n permissions.EDIT_ISSUE_SUMMARY,\n permissions.EDIT_ISSUE_STATUS,\n permissions.EDIT_ISSUE_CC,\n permissions.DELETE_ISSUE,\n permissions.DELETE_OWN,\n permissions.DELETE_ANY,\n permissions.EDIT_ANY_MEMBER_NOTES,\n permissions.MODERATE_SPAM,\n ]\n\n\nclass PeopleDetail(servlet.Servlet):\n \"\"\"People detail page documents one partipant's involvement in a project.\"\"\"\n\n _PAGE_TEMPLATE = 'project/people-detail-page.ezt'\n _MAIN_TAB_MODE = servlet.Servlet.MAIN_TAB_PEOPLE\n\n def AssertBasePermission(self, mr):\n \"\"\"Check that the user is allowed to access this servlet.\"\"\"\n super(PeopleDetail, self).AssertBasePermission(mr)\n member_id = self.ValidateMemberID(mr.cnxn, mr.specified_user_id, mr.project)\n # For now, contributors who cannot view other contributors are further\n # restricted from viewing any part of the member list or detail pages.\n if (not permissions.CanViewContributorList(mr, 
mr.project) and\n member_id != mr.auth.user_id):\n raise permissions.PermissionException(\n 'User is not allowed to view other people\\'s details')\n\n def GatherPageData(self, mr):\n \"\"\"Build up a dictionary of data values to use when rendering the page.\"\"\"\n\n member_id = self.ValidateMemberID(mr.cnxn, mr.specified_user_id, mr.project)\n group_ids = self.services.usergroup.DetermineWhichUserIDsAreGroups(\n mr.cnxn, [member_id])\n users_by_id = framework_views.MakeAllUserViews(\n mr.cnxn, self.services.user, [member_id])\n framework_views.RevealAllEmailsToMembers(mr.auth, mr.project, users_by_id)\n\n project_commitments = self.services.project.GetProjectCommitments(\n mr.cnxn, mr.project_id)\n (ac_exclusion_ids, no_expand_ids\n ) = self.services.project.GetProjectAutocompleteExclusion(\n mr.cnxn, mr.project_id)\n member_view = project_views.MemberView(\n mr.auth.user_id, member_id, users_by_id[member_id], mr.project,\n project_commitments,\n ac_exclusion=(member_id in ac_exclusion_ids),\n no_expand=(member_id in no_expand_ids),\n is_group=(member_id in group_ids))\n\n member_user = self.services.user.GetUser(mr.cnxn, member_id)\n # This ignores indirect memberships, which is ok because we are viewing\n # the page for a member directly involved in the project\n role_perms = permissions.GetPermissions(\n member_user, {member_id}, mr.project)\n\n # TODO(jrobbins): clarify in the UI which permissions are built-in to\n # the user's direct role, vs. which are granted via a group membership,\n # vs. 
which ones are extra_perms that have been added specifically for\n # this user.\n member_perms = template_helpers.EZTItem()\n for perm in CHECKBOX_PERMS:\n setattr(member_perms, perm,\n ezt.boolean(role_perms.HasPerm(perm, member_id, mr.project)))\n\n displayed_extra_perms = [perm for perm in member_view.extra_perms\n if perm not in CHECKBOX_PERMS]\n\n viewing_self = mr.auth.user_id == member_id\n warn_abandonment = (viewing_self and\n permissions.ShouldCheckForAbandonment(mr))\n\n return {\n 'subtab_mode': None,\n 'member': member_view,\n 'role_perms': role_perms,\n 'member_perms': member_perms,\n 'displayed_extra_perms': displayed_extra_perms,\n 'offer_edit_perms': ezt.boolean(self.CanEditPerms(mr)),\n 'offer_edit_member_notes': ezt.boolean(\n self.CanEditMemberNotes(mr, member_id)),\n 'offer_remove_role': ezt.boolean(self.CanRemoveRole(mr, member_id)),\n 'expand_perms': ezt.boolean(mr.auth.user_pb.keep_people_perms_open),\n 'warn_abandonment': ezt.boolean(warn_abandonment),\n 'total_num_owners': len(mr.project.owner_ids),\n }\n\n def ValidateMemberID(self, cnxn, member_id, project):\n \"\"\"Lookup a project member by user_id.\n\n Args:\n cnxn: connection to SQL database.\n member_id: int user_id, same format as user profile page.\n project: the current Project PB.\n\n Returns:\n The user ID of the project member. 
Raises an exception if the username\n cannot be looked up, or if that user is not in the project.\n \"\"\"\n if not member_id:\n self.abort(404, 'project member not specified')\n\n member_username = None\n try:\n member_username = self.services.user.LookupUserEmail(cnxn, member_id)\n except exceptions.NoSuchUserException:\n logging.info('user_id %s not found', member_id)\n\n if not member_username:\n logging.info('There is no such user id %r', member_id)\n self.abort(404, 'project member not found')\n\n if not framework_bizobj.UserIsInProject(project, {member_id}):\n logging.info('User %r is not a member of %r',\n member_username, project.project_name)\n self.abort(404, 'project member not found')\n\n return member_id\n\n def ProcessFormData(self, mr, post_data):\n \"\"\"Process the posted form.\"\"\"\n # 1. Parse and validate user input.\n user_id, role, extra_perms, notes, ac_exclusion, no_expand = (\n self.ParsePersonData(mr, post_data))\n member_id = self.ValidateMemberID(mr.cnxn, user_id, mr.project)\n\n # 2. Call services layer to save changes.\n if 'remove' in post_data:\n self.ProcessRemove(mr, member_id)\n else:\n self.ProcessSave(\n mr, role, extra_perms, notes, member_id, ac_exclusion, no_expand)\n\n # 3. 
Determine the next page in the UI flow.\n if 'remove' in post_data:\n return framework_helpers.FormatAbsoluteURL(\n mr, urls.PEOPLE_LIST, saved=1, ts=int(time.time()))\n else:\n return framework_helpers.FormatAbsoluteURL(\n mr, urls.PEOPLE_DETAIL, u=user_id, saved=1, ts=int(time.time()))\n\n def ProcessRemove(self, mr, member_id):\n \"\"\"Process the posted form when the user pressed 'Remove'.\"\"\"\n if not self.CanRemoveRole(mr, member_id):\n raise permissions.PermissionException(\n 'User is not allowed to remove this member from the project')\n\n self.RemoveRole(mr.cnxn, mr.project, member_id)\n\n def ProcessSave(\n self, mr, role, extra_perms, notes, member_id, ac_exclusion,\n no_expand):\n \"\"\"Process the posted form when the user pressed 'Save'.\"\"\"\n if (not self.CanEditPerms(mr) and\n not self.CanEditMemberNotes(mr, member_id)):\n raise permissions.PermissionException(\n 'User is not allowed to edit people in this project')\n\n if self.CanEditPerms(mr):\n self.services.project.UpdateExtraPerms(\n mr.cnxn, mr.project_id, member_id, extra_perms)\n self.UpdateRole(mr.cnxn, mr.project, role, member_id)\n\n if self.CanEditMemberNotes(mr, member_id):\n self.services.project.UpdateCommitments(\n mr.cnxn, mr.project_id, member_id, notes)\n\n if self.CanEditPerms(mr):\n self.services.project.UpdateProjectAutocompleteExclusion(\n mr.cnxn, mr.project_id, member_id, ac_exclusion, no_expand)\n\n def CanEditMemberNotes(self, mr, member_id):\n \"\"\"Return true if the logged in user can edit the current user's notes.\"\"\"\n return (self.CheckPerm(mr, permissions.EDIT_ANY_MEMBER_NOTES) or\n member_id == mr.auth.user_id)\n\n def CanEditPerms(self, mr):\n \"\"\"Return true if the logged in user can edit the current user's perms.\"\"\"\n return self.CheckPerm(mr, permissions.EDIT_PROJECT)\n\n def CanRemoveRole(self, mr, member_id):\n \"\"\"Return true if the logged in user can remove the current user's role.\"\"\"\n return (self.CheckPerm(mr, permissions.EDIT_PROJECT) 
or\n member_id == mr.auth.user_id)\n\n def ParsePersonData(self, mr, post_data):\n \"\"\"Parse the POST data for a project member.\n\n Args:\n mr: common information parsed from the user's request.\n post_data: dictionary of lists of values for each HTML\n form field.\n\n Returns:\n A tuple with user_id, role, extra_perms, and notes.\n \"\"\"\n if not mr.specified_user_id:\n raise exceptions.InputException('Field user_id is missing')\n\n role = post_data.get('role', '').lower()\n extra_perms = []\n for ep in post_data.getall('extra_perms'):\n perm = framework_bizobj.CanonicalizeLabel(ep)\n # Perms with leading underscores are reserved.\n perm = perm.strip('_')\n if perm:\n extra_perms.append(perm)\n\n notes = post_data.get('notes', '').strip()\n ac_exclusion = not post_data.get('ac_include', False)\n no_expand = not post_data.get('ac_expand', False)\n return (mr.specified_user_id, role, extra_perms, notes, ac_exclusion,\n no_expand)\n\n def RemoveRole(self, cnxn, project, member_id):\n \"\"\"Remove the given member from the project.\"\"\"\n (owner_ids, committer_ids,\n contributor_ids) = project_helpers.MembersWithoutGivenIDs(\n project, {member_id})\n self.services.project.UpdateProjectRoles(\n cnxn, project.project_id, owner_ids, committer_ids, contributor_ids)\n\n def UpdateRole(self, cnxn, project, role, member_id):\n \"\"\"If the user's role was changed, update that in the Project.\"\"\"\n if not role:\n return # Role was not in the form data\n\n if role == framework_helpers.GetRoleName({member_id}, project).lower():\n return # No change needed\n\n (owner_ids, committer_ids,\n contributor_ids) = project_helpers.MembersWithGivenIDs(\n project, {member_id}, role)\n\n self.services.project.UpdateProjectRoles(\n cnxn, project.project_id, owner_ids, committer_ids, 
contributor_ids)\n","sub_path":"appengine/monorail/project/peopledetail.py","file_name":"peopledetail.py","file_ext":"py","file_size_in_byte":10446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"9857253","text":"import media\nimport fresh_tomatoes\n\n# class instantiating section\nlife_of_pie = media.Movie(\"Life of pie\",\n \"The story of a boy and his friend tiger\",\n \"Rating:9/10\",\n \"http://natasha-stojanovska.com/wp-content/uploads/2015/04/Life-Of-Pi.jpeg\",\n \"https://www.youtube.com/watch?v=j9Hjrs6WQ8M\")\n\ntoy_story = media.Movie(\"Toy story\",\n \"A story of a boy and his toys that come to life\",\n \"Rating:8/10\",\n \"http://img.lum.dolimg.com/v1/images/open-uri20150422-20810-m8zzyx_5670999f.jpeg?region=0,0,300,450\",\n \"https://www.youtube.com/watch?v=KYz2wyBy3kc\")\n\navatar = media.Movie(\"Avatar\",\n \"A marine on an alien planet\",\n \"Rating:6/10\", \n \"http://resizing.flixster.com/7j4Uky7sPOl5jUPSL2JBIDjeCvk=/800x1200/v1.bTsxMTE3Njc5MjtqOzE3MTkxOzIwNDg7ODAwOzEyMDA\",\n \"http://www.youtube.com/watch?v=-9ceBgWV8io\")\n\nZMND = media.Movie(\"Zindagi na milegi dubbara\",\n \"A story on Life\",\n \"Rating:9/10\",\n \"https://upload.wikimedia.org/wikipedia/en/thumb/3/3d/Zindaginamilegidobara.jpg/220px-Zindaginamilegidobara.jpg\",\n \"https://www.youtube.com/watch?v=ifIBOKCfjVs\")\n\nTwilight = media.Movie(\"The twilight saga\",\n \"love story between vampire and a human\",\n \"Rating:7/10\",\n \"http://static.rogerebert.com/uploads/movie/movie_poster/twilight-2008/large_nlvPMLCdum7bkHKmDSMnNLGztmW.jpg\",\n \"https://www.youtube.com/watch?v=edLB6YWZ-R4\")\n\nthreeidiots = media.Movie(\"3 idiots\",\n \"Life of engineers\",\n \"Rating:9/10\",\n \"https://upload.wikimedia.org/wikipedia/en/d/df/3_idiots_poster.jpg\",\n \"https://www.youtube.com/watch?v=xvszmNXdM4w\")\n\n# appending movies into a list:\nmovies = [life_of_pie, toy_story, avatar, ZMND, Twilight, threeidiots]\n# calling the external rendering 
function:\nfresh_tomatoes.open_movies_page(movies)\n","sub_path":"entertainment_centre.py","file_name":"entertainment_centre.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"151905198","text":"#!/usr/bin/env pybricks-micropython\n\nfrom pybricks.hubs import EV3Brick\nfrom pybricks.ev3devices import Motor\nfrom pybricks.parameters import Port, Stop, Button\nfrom pybricks.tools import wait\nfrom pybricks.messaging import BluetoothMailboxServer, TextMailbox\n\n\nSPEED = 100\n\n\nclass SpikeMonitor:\n\n def __init__(self):\n # Initialize devices.\n self.ev3 = EV3Brick()\n self.usb_motor = Motor(Port.D)\n self.left_motor = Motor(Port.B)\n self.right_motor = Motor(Port.A)\n\n # Relax target tolerances so the motion is considered complete even\n # if off by a few more degrees than usual. This way, it won't block.\n # But set speed tolerance strict, so we move at least until fully\n # stopped, which is when we are pressing the button.\n self.left_motor.control.target_tolerances(speed=0, position=30)\n self.right_motor.control.target_tolerances(speed=0, position=30)\n\n # Run all motors to end points.\n self.targets = {\n 'usb_in': self.usb_motor.run_until_stalled(-SPEED, duty_limit=50) + 10,\n 'usb_out': self.usb_motor.run_until_stalled(SPEED, duty_limit=50) - 10,\n 'center_pressed': self.left_motor.run_until_stalled(-SPEED, duty_limit=50) + 10,\n 'left_pressed': self.left_motor.run_until_stalled(SPEED, duty_limit=50),\n 'right_pressed': self.right_motor.run_until_stalled(SPEED, duty_limit=50) + 10,\n 'bluetooth_pressed': self.right_motor.run_until_stalled(-SPEED, duty_limit=50) - 10,\n }\n\n # Set other targets between end points.\n self.targets['left_released'] = (self.targets['left_pressed'] + self.targets['center_pressed']) / 2\n self.targets['center_released'] = self.targets['left_released']\n\n self.targets['right_released'] = (self.targets['right_pressed'] + self.targets['bluetooth_pressed']) / 2\n self.targets['bluetooth_released'] = self.targets['right_released']\n\n # Get in initial state.\n self.press_center(False)\n self.press_bluetooth(False)\n self.insert_usb(False)\n\n # Turn the hub off.\n self.shutdown()\n 
self.ev3.speaker.beep()\n\n def insert_usb(self, insert):\n key = 'usb_in' if insert else 'usb_out'\n self.usb_motor.run_target(SPEED, self.targets[key])\n\n def press_left(self, press):\n if press:\n self.left_motor.run_target(SPEED, self.targets['left_pressed'])\n self.left_motor.dc(80)\n else:\n while abs(self.left_motor.speed()) > 100:\n wait(10)\n self.left_motor.run_target(SPEED, self.targets['left_released'], Stop.COAST)\n\n def press_center(self, press):\n if press:\n self.left_motor.run_target(SPEED, self.targets['center_pressed'])\n else:\n self.left_motor.run_target(SPEED, self.targets['center_released'], Stop.COAST)\n\n def press_right(self, press):\n if press:\n self.right_motor.run_target(SPEED, self.targets['right_pressed'])\n else:\n self.right_motor.run_target(SPEED, self.targets['right_released'], Stop.COAST)\n \n def press_bluetooth(self, press):\n if press:\n self.right_motor.run_target(SPEED, self.targets['bluetooth_pressed'])\n self.right_motor.dc(-100)\n else:\n while abs(self.right_motor.speed()) > 100:\n wait(10)\n self.right_motor.run_target(SPEED, self.targets['bluetooth_released'], Stop.COAST)\n\n def click_center(self, duration=100):\n self.press_center(False)\n self.press_center(True)\n wait(duration)\n self.press_center(False)\n\n def click_bluetooth(self, duration=200):\n self.press_bluetooth(False)\n self.press_bluetooth(True)\n wait(duration)\n self.press_bluetooth(False)\n\n def click_left(self, duration=100):\n self.press_left(False)\n self.press_left(True)\n wait(duration)\n self.press_left(False)\n\n def click_right(self, duration=100):\n self.press_right(False)\n self.press_right(True)\n wait(duration)\n self.press_right(False)\n\n def activate_dfu(self):\n self.press_bluetooth(True)\n wait(600)\n self.insert_usb(True)\n wait(8000)\n self.press_bluetooth(False)\n\n def shutdown(self):\n self.click_center(duration=4000)\n\n def test_buttons(self):\n while True:\n while True:\n pressed = self.ev3.buttons.pressed()\n if 
any(pressed):\n break\n\n if Button.CENTER in pressed:\n self.click_center()\n elif Button.UP in pressed:\n self.click_bluetooth()\n elif Button.LEFT in pressed:\n self.click_left()\n elif Button.RIGHT in pressed:\n self.click_right()\n elif Button.DOWN in pressed:\n break\n\n while any(self.ev3.buttons.pressed()):\n wait(10)\n\n\nif __name__ == \"__main__\":\n spike = SpikeMonitor()\n spike.test_buttons()\n\n","sub_path":"ev3/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"562514449","text":"#!/usr/bin/python3\n\nimport base64\nimport sys\nfrom Crypto.Cipher import AES\n\n\ndef die(msg, name=sys.argv[0]):\n print('%s:' % name, msg, file=sys.stderr)\n sys.exit(84)\n\n\ndef main(args):\n if len(args) != 2:\n die('invalid number of arguments')\n try:\n with open(args[1], 'r') as f:\n data = [base64.b64decode(line) for line in f]\n if len(data) == 0:\n raise ValueError()\n if any(len(d) % 16 != 0 for d in data):\n raise ValueError()\n except IOError as e:\n die('cannot open or read file: %s' % e.strerror, name=args[1])\n except ValueError:\n die('invalid data in file', name=args[1])\n\n data = [[d[i:i+16] for i in range(0, len(data))] for d in data]\n line = max(enumerate(data), key=lambda x: sum(x[1].count(b) for b in x[1]))[0]\n print(line + 1)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"src/challenge08.py","file_name":"challenge08.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"384299706","text":"from pyecharts import Parallel\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n \ndata = pd.read_csv('bank0.csv')\ndata_0 = np.array(data[['age','day','x']]).tolist()\ndata = pd.read_csv('bank1.csv')\ndata_1= np.array(data[['age','day','x']]).tolist()\nschema = ['age', 'day','y']\n \nparallel = Parallel('bank')\nparallel.config(schema)\nparallel.add('no',data_0,is_random = True,area_color='#b399ff')\nparallel.add('yes',data_1,is_random = True,area_color='#006400')\nparallel.render('Bank.html')","sub_path":"EE6435/DataWithPyechart.py","file_name":"DataWithPyechart.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"536041371","text":"import os;\nimport ctypes;\nclass disable_file_system_redirection:\n _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection\n _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection\n def __enter__(self):\n self.old_value = ctypes.c_long()\n self.success = self._disable(ctypes.byref(self.old_value))\n def __exit__(self, type, value, traceback):\n if self.success:\n self._revert(self.old_value)\nwith disable_file_system_redirection():\n set=os.popen('REG ADD \"HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Policies\\System\" /V \"EnableLUA\" /t REG_DWORD /d 0 /F').read();\n print(set);","sub_path":"User Accounts/Disable UAC (User Access Control)/disable-uac-user-access-control.py","file_name":"disable-uac-user-access-control.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"275136859","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/6/24 13:01\n# @Author : LIUXIN\n# @Site : \n# @File : manage.py\n# @Software: PyCharm\nfrom app import create_app\nfrom flask_script import Manager, Server\n\n\nserver = Server(host='192.168.0.20', port='8080', use_debugger=True)\napp = create_app()\nmanager = Manager(app)\nmanager.add_command(\"runserver\", server)\n\n\nif __name__ == '__main__':\n manager.run(default_command='runserver')\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"608895100","text":"for farm_id in farm_ids:\n #take the df_temp for farm_id \n df_temp=df_temp.dropna() #first drop all Nan\n list_dfs.append(df_temp)\n\n#Now concat\ndf=pd.concat(list_dfs,axis=1)\ndf=df.fillna(0) #After concatenation fill NaN with 0, so it's like the farm was not there\n\ndf['value_10_sum']=df[['value_10']].apply(lambda x: sum(x), axis=1)\ndf['value_25_sum']=df[['value_25']].apply(lambda x: sum(x), axis=1)\ndf['value_50_sum']=df[['value_50']].apply(lambda x: sum(x), axis=1)\ndf['value_75_sum']=df[['value_75']].apply(lambda x: sum(x), axis=1)\ndf['value_90_sum']=df[['value_90']].apply(lambda x: sum(x), axis=1)\n\n\nfrom MeteoVerification import validate\nscores= validate(portfolio_df, ['BDT','NN'],\n 'Act', scores=['mae','mae_pct_err','n_obs'], boostrap=None, dropna='Pair') \n\nprint(scores)\n","sub_path":"add_dataframes_for_score.py","file_name":"add_dataframes_for_score.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"295947415","text":"from typing import Optional, Tuple, Union\nfrom warnings import warn\n\nimport gym\nimport numpy as np\nfrom stable_baselines.common.base_class import BaseRLModel\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nfrom imitation import summaries\nfrom imitation.discrim_net import DiscrimNet\nfrom imitation.util import buffer, reward_wrapper, rollout, util\n\n\nclass Trainer:\n \"\"\"Trainer for GAIL and AIRL.\"\"\"\n\n env: gym.Env\n \"\"\"The original environment.\"\"\"\n\n env_train: gym.Env\n \"\"\"Like `self.env`, but wrapped with train reward except in debug mode.\n\n If `debug_use_ground_truth=True` was passed into the initializer then\n `self.env_train` is the same as `self.env`.\n \"\"\"\n\n env_test: gym.Env\n \"\"\"Like `self.env`, but wrapped with test reward except in debug mode.\n\n If `debug_use_ground_truth=True` was passed into the initializer then\n `self.env_test` is the same as `self.env`.\n \"\"\"\n\n def __init__(self,\n env: Union[gym.Env, str],\n gen_policy: BaseRLModel,\n discrim: DiscrimNet,\n expert_rollouts: Tuple[np.ndarray, np.ndarray, np.ndarray],\n *,\n disc_opt_cls: tf.train.Optimizer = tf.train.AdamOptimizer,\n disc_opt_kwargs: dict = {},\n n_disc_samples_per_buffer: int = 200,\n gen_replay_buffer_capacity: Optional[int] = None,\n init_tensorboard: bool = False,\n debug_use_ground_truth: bool = False):\n \"\"\"Builds Trainer.\n\n Args:\n env: A Gym environment or ID that the policy is trained on.\n gen_policy: The generator policy that is trained to maximize\n discriminator confusion.\n discrim: The discriminator network.\n For GAIL, use a DiscrimNetGAIL. 
For AIRL, use a DiscrimNetAIRL.\n expert_rollouts: A tuple of three arrays from expert rollouts,\n `old_obs`, `act`, and `new_obs`.\n disc_opt_cls: The optimizer for discriminator training.\n disc_opt_kwargs: Parameters for discriminator training.\n n_disc_samples_per_buffer: The number of obs-act-obs triples\n sampled from each replay buffer (expert and generator) during each\n step of discriminator training. This is also the number of triples\n stored in the replay buffer after each epoch of generator training.\n gen_replay_buffer_capacity: The capacity of the\n generator replay buffer (the number of obs-action-obs samples from\n the generator that can be stored).\n\n By default this is equal to `20 * n_disc_samples_per_buffer`.\n init_tensorboard: If True, makes various discriminator\n TensorBoard summaries.\n debug_use_ground_truth: If True, use the ground truth reward for\n `self.train_env`.\n This disables the reward wrapping that would normally replace\n the environment reward with the learned reward. 
This is useful for\n sanity checking that the policy training is functional.\n \"\"\"\n self._sess = tf.get_default_session()\n self._global_step = tf.train.create_global_step()\n\n self._n_disc_samples_per_buffer = n_disc_samples_per_buffer\n self.debug_use_ground_truth = debug_use_ground_truth\n\n self.env = util.maybe_load_env(env, vectorize=True)\n self._gen_policy = gen_policy\n\n # Discriminator and reward output\n self._disc_opt_cls = disc_opt_cls\n self._disc_opt_kwargs = disc_opt_kwargs\n with tf.variable_scope(\"trainer\"):\n with tf.variable_scope(\"discriminator\"):\n self._discrim = discrim\n self._build_disc_train()\n self._build_policy_train_reward()\n self._build_test_reward()\n self._init_tensorboard = init_tensorboard\n if init_tensorboard:\n with tf.name_scope(\"summaries\"):\n self._build_summarize()\n self._sess.run(tf.global_variables_initializer())\n\n # TODO(adam): make this wrapping configurable for debugging purposes\n self.env_train = self.wrap_env_train_reward(self.env)\n self.env_test = self.wrap_env_test_reward(self.env)\n\n if gen_replay_buffer_capacity is None:\n gen_replay_buffer_capacity = 20 * self._n_disc_samples_per_buffer\n self._gen_replay_buffer = buffer.ReplayBuffer(gen_replay_buffer_capacity,\n self.env)\n self._populate_gen_replay_buffer()\n self._exp_replay_buffer = buffer.ReplayBuffer.from_data(*expert_rollouts)\n if n_disc_samples_per_buffer > len(self._exp_replay_buffer):\n warn(\"The discriminator batch size is larger than the number of \"\n \"expert samples.\")\n\n @property\n def discrim(self) -> DiscrimNet:\n \"\"\"Discriminator being trained, used to compute reward for policy.\"\"\"\n return self._discrim\n\n @property\n def gen_policy(self) -> BaseRLModel:\n \"\"\"Policy (i.e. 
the generator) being trained.\"\"\"\n return self._gen_policy\n\n def train_disc(self, n_steps=10, **kwargs):\n \"\"\"Trains the discriminator to minimize classification cross-entropy.\n\n Args:\n n_steps (int): The number of training steps.\n gen_old_obs (np.ndarray): See `_build_disc_feed_dict`.\n gen_act (np.ndarray): See `_build_disc_feed_dict`.\n gen_new_obs (np.ndarray): See `_build_disc_feed_dict`.\n \"\"\"\n for _ in range(n_steps):\n fd = self._build_disc_feed_dict(**kwargs)\n step, _ = self._sess.run([self._global_step, self._disc_train_op],\n feed_dict=fd)\n if self._init_tensorboard and step % 20 == 0:\n self._summarize(fd, step)\n\n def train_gen(self, n_steps=10000):\n self._gen_policy.set_env(self.env_train)\n # TODO(adam): learn was not intended to be called for each training batch\n # It should work, but might incur unnecessary overhead: e.g. in PPO2\n # a new Runner instance is created each time. Also a hotspot for errors:\n # algorithms not tested for this use case, may reset state accidentally.\n self._gen_policy.learn(n_steps, reset_num_timesteps=False)\n self._populate_gen_replay_buffer()\n\n def _populate_gen_replay_buffer(self) -> None:\n \"\"\"Generate and store generator samples in the buffer.\n\n More specifically, rolls out generator-policy trajectories in the\n environment until `self._n_disc_samples_per_buffer` obs-act-obs samples are\n produced, and then stores these samples.\n \"\"\"\n gen_rollouts = rollout.generate_transitions(\n self._gen_policy, self.env_train,\n n_timesteps=self._n_disc_samples_per_buffer)[:3]\n self._gen_replay_buffer.store(*gen_rollouts)\n\n def train(self, n_epochs=100, *, n_gen_steps_per_epoch=None,\n n_disc_steps_per_epoch=None):\n \"\"\"Trains the discriminator and generator against each other.\n\n Args:\n n_epochs (int): The number of epochs to train. 
Every epoch consists\n of training the discriminator and then training the generator.\n n_disc_steps_per_epoch (int): The number of steps to train the\n discriminator every epoch. More precisely, the number of full batch\n Adam optimizer steps to perform.\n n_gen_steps_per_epoch (int): The number of generator training steps\n during each epoch. (ie, the timesteps argument in in\n `policy.learn(timesteps)`).\n \"\"\"\n for i in tqdm(range(n_epochs), desc=\"AIRL train\"):\n self.train_disc(**_n_steps_if_not_none(n_disc_steps_per_epoch))\n self.train_gen(**_n_steps_if_not_none(n_gen_steps_per_epoch))\n\n def eval_disc_loss(self, **kwargs):\n \"\"\"Evaluates the discriminator loss.\n\n The generator rollout parameters of the form \"gen_*\" are optional,\n but if one is given, then all such parameters must be filled (otherwise\n this method will error). If none of the generator rollout parameters are\n given, then a rollout with the same length as the expert rollout\n is generated on the fly.\n\n Args:\n gen_old_obs (np.ndarray): See `_build_disc_feed_dict`.\n gen_act (np.ndarray): See `_build_disc_feed_dict`.\n gen_new_obs (np.ndarray): See `_build_disc_feed_dict`.\n\n Returns:\n discriminator_loss (float): The total cross-entropy error in the\n discriminator's classification.\n \"\"\"\n fd = self._build_disc_feed_dict(**kwargs)\n return np.mean(self._sess.run(self._discrim.disc_loss, feed_dict=fd))\n\n def wrap_env_train_reward(self, env):\n \"\"\"Returns the given Env wrapped with a reward function that returns\n the AIRL training reward (discriminator confusion).\n\n The wrapped `Env`'s reward is directly evaluated from the reward network,\n and therefore changes whenever `self.train()` is called.\n\n Args:\n env (str, Env, or VecEnv): The Env that we want to wrap. 
If a\n string environment name is given or a Env is given, then we first\n convert to a VecEnv before continuing.\n wrapped_env (VecEnv): The wrapped environment with a new reward.\n \"\"\"\n env = util.maybe_load_env(env, vectorize=True)\n if self.debug_use_ground_truth:\n return env\n else:\n return reward_wrapper.RewardVecEnvWrapper(env,\n self._policy_train_reward_fn)\n\n def wrap_env_test_reward(self, env):\n \"\"\"Returns the given Env wrapped with a reward function that returns\n the reward learned by this Trainer.\n\n The wrapped `Env`'s reward is directly evaluated from the reward network,\n and therefore changes whenever `self.train()` is called.\n\n Args:\n env (str, Env, or VecEnv): The Env that should be wrapped. If a\n string environment name is given or a Env is given, then we first\n make a VecEnv before continuing.\n\n Returns:\n wrapped_env (VecEnv): The wrapped environment with a new reward.\n \"\"\"\n env = util.maybe_load_env(env, vectorize=True)\n if self.debug_use_ground_truth:\n return env\n else:\n return reward_wrapper.RewardVecEnvWrapper(env, self._test_reward_fn)\n\n def _build_summarize(self):\n self._summary_writer = summaries.make_summary_writer(\n graph=self._sess.graph)\n self._discrim.build_summaries()\n self._summary_op = tf.summary.merge_all()\n\n def _summarize(self, fd, step):\n events = self._sess.run(self._summary_op, feed_dict=fd)\n self._summary_writer.add_summary(events, step)\n\n def _build_disc_train(self):\n # Construct Train operation.\n disc_opt = self._disc_opt_cls(**self._disc_opt_kwargs)\n self._disc_train_op = disc_opt.minimize(\n tf.reduce_mean(self._discrim.disc_loss),\n global_step=self._global_step)\n\n def _build_disc_feed_dict(self, *,\n gen_old_obs: Optional[np.ndarray] = None,\n gen_act: Optional[np.ndarray] = None,\n gen_new_obs: Optional[np.ndarray] = None,\n ) -> dict:\n \"\"\"Build a feed dict that holds the next training batch of generator\n and expert obs-act-obs triples.\n\n Args:\n gen_old_obs 
(np.ndarray): A numpy array with shape\n `[self.n_disc_samples_per_buffer_per_buffer] + env.observation_space.shape`.\n The ith observation in this array is the observation seen when the\n generator chooses action `gen_act[i]`.\n gen_act (np.ndarray): A numpy array with shape\n `[self.n_disc_samples_per_buffer_per_buffer] + env.action_space.shape`.\n gen_new_obs (np.ndarray): A numpy array with shape\n `[self.n_disc_samples_per_buffer_per_buffer] + env.observation_space.shape`.\n The ith observation in this array is from the transition state after\n the generator chooses action `gen_act[i]`.\n \"\"\" # noqa: E501\n\n # Sample generator training batch from replay buffers, unless provided\n # in argument.\n none_count = sum(int(x is None)\n for x in (gen_old_obs, gen_act, gen_new_obs))\n if none_count == 3:\n tf.logging.debug(\"_build_disc_feed_dict: No generator rollout \"\n \"parameters were \"\n \"provided, so we are generating them now.\")\n gen_old_obs, gen_act, gen_new_obs = self._gen_replay_buffer.sample(\n self._n_disc_samples_per_buffer)\n elif none_count != 0:\n raise ValueError(\"Gave some but not all of the generator params.\")\n\n # Sample expert training batch from replay buffer.\n expert_old_obs, expert_act, expert_new_obs = self._exp_replay_buffer.sample(\n self._n_disc_samples_per_buffer)\n\n # Check dimensions.\n n_expert = len(expert_old_obs)\n n_gen = len(gen_old_obs)\n N = n_expert + n_gen\n assert n_expert == len(expert_act)\n assert n_expert == len(expert_new_obs)\n assert n_gen == len(gen_act)\n assert n_gen == len(gen_new_obs)\n\n # Concatenate rollouts, and label each row as expert or generator.\n old_obs = np.concatenate([expert_old_obs, gen_old_obs])\n act = np.concatenate([expert_act, gen_act])\n new_obs = np.concatenate([expert_new_obs, gen_new_obs])\n labels = np.concatenate([np.zeros(n_expert, dtype=int),\n np.ones(n_gen, dtype=int)])\n\n # Calculate generator-policy log probabilities.\n log_act_prob = 
self._gen_policy.action_probability(old_obs, actions=act,\n logp=True)\n assert len(log_act_prob) == N\n log_act_prob = log_act_prob.reshape((N,))\n\n fd = {\n self._discrim.old_obs_ph: old_obs,\n self._discrim.act_ph: act,\n self._discrim.new_obs_ph: new_obs,\n self._discrim.labels_ph: labels,\n self._discrim.log_policy_act_prob_ph: log_act_prob,\n }\n return fd\n\n def _build_policy_train_reward(self):\n \"\"\"Sets self._policy_train_reward_fn, the reward function to use when\n running a policy optimizer (e.g. PPO).\n \"\"\"\n\n def R(old_obs, act, new_obs):\n \"\"\"Vectorized reward function.\n\n Args:\n old_obs (array): The observation input. Its shape is\n `((None,) + self.env.observation_space.shape)`.\n act (array): The action input. Its shape is\n `((None,) + self.env.action_space.shape)`. The None dimension is\n expected to be the same as None dimension from `obs_input`.\n new_obs (array): The observation input. Its shape is\n `((None,) + self.env.observation_space.shape)`.\n \"\"\"\n old_obs = np.atleast_1d(old_obs)\n act = np.atleast_1d(act)\n new_obs = np.atleast_1d(new_obs)\n\n n_gen = len(old_obs)\n assert len(act) == n_gen\n assert len(new_obs) == n_gen\n\n # Calculate generator-policy log probabilities.\n log_act_prob = self._gen_policy.action_probability(old_obs, actions=act,\n logp=True)\n assert len(log_act_prob) == n_gen\n log_act_prob = log_act_prob.reshape((n_gen,))\n\n fd = {\n self._discrim.old_obs_ph: old_obs,\n self._discrim.act_ph: act,\n self._discrim.new_obs_ph: new_obs,\n self._discrim.labels_ph: np.ones(n_gen),\n self._discrim.log_policy_act_prob_ph: log_act_prob,\n }\n rew = self._sess.run(self._discrim.policy_train_reward, feed_dict=fd)\n return rew.flatten()\n\n self._policy_train_reward_fn = R\n\n def _build_test_reward(self):\n \"\"\"Sets self._test_reward_fn, the transfer reward function\"\"\"\n def R(old_obs, act, new_obs):\n fd = {\n self._discrim.old_obs_ph: old_obs,\n self._discrim.act_ph: act,\n self._discrim.new_obs_ph: 
new_obs,\n }\n rew = self._sess.run(self._discrim._policy_test_reward,\n feed_dict=fd)\n return rew.flatten()\n\n self._test_reward_fn = R\n\n\ndef _n_steps_if_not_none(n_steps):\n if n_steps is None:\n return {}\n else:\n return dict(n_steps=n_steps)\n","sub_path":"src/imitation/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":15799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
class ArticulatedObject(StatefulObject):
    """
    Articulated objects are defined in URDF files.
    They are passive (no motors).
    """

    def __init__(self, filename, scale=1, merge_fixed_links=True):
        """
        :param filename: path to the URDF file describing this object
        :param scale: uniform scaling factor passed to pybullet at load time
        :param merge_fixed_links: if True, pybullet merges fixed links into
            their parents when loading (URDF_MERGE_FIXED_LINKS)
        """
        super(ArticulatedObject, self).__init__()
        self.filename = filename
        self.scale = scale
        self.merge_fixed_links = merge_fixed_links

    def _load(self):
        """
        Load the object into pybullet

        :return: pybullet body id of the loaded object
        """
        flags = p.URDF_USE_MATERIAL_COLORS_FROM_MTL | p.URDF_ENABLE_SLEEPING
        if self.merge_fixed_links:
            flags |= p.URDF_MERGE_FIXED_LINKS

        body_id = p.loadURDF(self.filename, globalScaling=self.scale, flags=flags)

        # Mass of the base link (link index -1) as reported by pybullet.
        self.mass = p.getDynamicsInfo(body_id, -1)[0]
        self.body_id = body_id
        self.create_link_name_to_vm_map(body_id)
        return body_id

    def create_link_name_to_vm_map(self, body_id):
        """Build self.link_name_to_vm: a one-element list holding a dict that
        maps each link name to the visual mesh filenames pybullet reports for
        that link via getVisualShapeData.
        """
        self.link_name_to_vm = []
        link_name_to_vm_urdf = {}
        for visual_shape in p.getVisualShapeData(body_id):
            # NOTE(review): `id`, `type` and `filename` shadow Python builtins.
            id, link_id, type, dimensions, filename, rel_pos, rel_orn, color = visual_shape[:8]
            try:
                if link_id == -1:
                    # -1 is the base link; its name comes from getBodyInfo.
                    link_name = p.getBodyInfo(id)[0].decode("utf-8")
                else:
                    # Field 12 of getJointInfo is the child link name
                    # (per the pybullet API).
                    link_name = p.getJointInfo(id, link_id)[12].decode("utf-8")
                if not link_name in link_name_to_vm_urdf:
                    link_name_to_vm_urdf[link_name] = []
                else:
                    raise ValueError("link name clashing")
                link_name_to_vm_urdf[link_name].append(filename.decode("utf-8"))
            except:
                # NOTE(review): this bare except also swallows the ValueError
                # raised just above, so a link that appears with multiple
                # visual shapes is silently recorded with only its first
                # mesh — confirm intent before narrowing this handler.
                pass
        self.link_name_to_vm = [link_name_to_vm_urdf]

    def force_wakeup(self):
        """
        Force wakeup sleeping objects
        """
        # Wake every joint/link, then the base link (-1).
        for joint_id in range(p.getNumJoints(self.body_id)):
            p.changeDynamics(self.body_id, joint_id, activationState=p.ACTIVATION_STATE_WAKE_UP)
        p.changeDynamics(self.body_id, -1, activationState=p.ACTIVATION_STATE_WAKE_UP)

    def get_body_id(self):
        # pybullet body id assigned in _load(); None semantics before load
        # are not visible here.
        return self.body_id
    def __init__(
        self,
        filename,
        name="object_0",
        category="object",
        abilities=None,
        model_path=None,
        bounding_box=None,
        scale=None,
        fit_avg_dim_volume=False,
        connecting_joint=None,
        initial_pos=None,
        initial_orn=None,
        avg_obj_dims=None,
        joint_friction=None,
        in_rooms=None,
        texture_randomization=False,
        overwrite_inertial=True,
        scene_instance_folder=None,
        bddl_object_scope=None,
        visualize_primitives=False,
        joint_positions=None,
        merge_fixed_links=True,
        ignore_visual_shape=False,
    ):
        """
        :param filename: urdf file path of that object model
        :param name: object name, unique for each object instance, e.g. door_3
        :param category: object category, e.g. door
        :param abilities: dict of object abilities; if None, looked up from the
            BDDL object taxonomy when available, otherwise {}
        :param model_path: folder path of that object model
        :param bounding_box: bounding box of this object
        :param scale: scaling factor of this object
        :param: fit_avg_dim_volume: whether to fit the object to have the same volume as the average dimension while keeping the aspect ratio
        :param connecting_joint: connecting joint to the scene that defines the object's initial pose (optional)
        :param initial_pos: initial position of the object (lower priority than connecting_joint)
        :param initial_orn: initial orientation of the object (lower priority than connecting_joint)
        :param avg_obj_dims: average object dimension of this object
        :param joint_friction: joint friction for joints in this object
        :param in_rooms: which room(s) this object is in. It can be in more than one rooms if it sits at room boundary (e.g. doors)
        :param texture_randomization: whether to enable texture randomization
        :param overwrite_inertial: whether to overwrite the inertial frame of the original URDF using trimesh + density estimate
        :param scene_instance_folder: scene instance folder to split and save sub-URDFs
        :param bddl_object_scope: bddl object scope name, e.g. chip.n.04_2
        :param visualize_primitives: whether to render geometric primitives
        :param joint_positions: Joint positions, keyed by body index and joint name, in the form of
            List[Dict[name, position]]
        :param merge_fixed_links: whether pybullet should merge fixed links when loading
        :param ignore_visual_shape: whether pybullet should skip visual shapes when loading
        """
        super(URDFObject, self).__init__()

        self.name = name
        self.category = category
        self.in_rooms = in_rooms
        self.connecting_joint = connecting_joint
        self.initial_pos = initial_pos
        self.initial_orn = initial_orn
        self.texture_randomization = texture_randomization
        self.overwrite_inertial = overwrite_inertial
        self.scene_instance_folder = scene_instance_folder
        self.bddl_object_scope = bddl_object_scope
        self.joint_positions = joint_positions
        self.merge_fixed_links = merge_fixed_links
        self.room_floor = None
        self.ignore_visual_shape = ignore_visual_shape

        # Load abilities from taxonomy if needed & possible
        if abilities is None:
            if OBJECT_TAXONOMY is not None:
                taxonomy_class = OBJECT_TAXONOMY.get_class_name_from_igibson_category(self.category)
                if taxonomy_class is not None:
                    abilities = OBJECT_TAXONOMY.get_abilities(taxonomy_class)
                else:
                    abilities = {}
            else:
                abilities = {}

        assert isinstance(abilities, dict), "Object abilities must be in dictionary form."
        self.abilities = abilities

        # Friction for all prismatic and revolute joints
        if joint_friction is not None:
            self.joint_friction = joint_friction
        else:
            # Category-specific defaults: heavy appliance doors get more
            # friction, toilets less.
            if self.category in ["oven", "dishwasher"]:
                self.joint_friction = 30
            elif self.category in ["toilet"]:
                self.joint_friction = 3
            else:
                self.joint_friction = 10

        # These following fields have exactly the same length (i.e. the number
        # of sub URDFs in this object)
        # urdf_paths, string
        self.urdf_paths = []
        # object poses, 4 x 4 numpy array
        self.poses = []
        # pybullet body ids, int
        self.body_ids = []
        # whether this object is fixed or not, boolean
        self.is_fixed = []
        self.main_body = -1

        logging.info("Category " + self.category)
        self.filename = filename
        dirname = os.path.dirname(filename)
        urdf = os.path.basename(filename)
        urdf_name, _ = os.path.splitext(urdf)
        # Prefer a pre-simplified URDF sitting next to the original, if any.
        simplified_urdf = os.path.join(dirname, urdf_name + "_simplified.urdf")
        if os.path.exists(simplified_urdf):
            self.filename = simplified_urdf
            filename = simplified_urdf
        logging.info("Loading the following URDF template " + filename)
        self.object_tree = ET.parse(filename)  # Parse the URDF

        # Strip box visuals when primitives should not be rendered.
        if not visualize_primitives:
            for link in self.object_tree.findall("link"):
                for element in link:
                    if element.tag == "visual" and len(element.findall(".//box")) > 0:
                        link.remove(element)

        self.model_path = model_path
        if self.model_path is None:
            self.model_path = os.path.dirname(filename)

        # Change the mesh filenames to include the entire path
        for mesh in self.object_tree.iter("mesh"):
            mesh.attrib["filename"] = os.path.join(self.model_path, mesh.attrib["filename"])

        # Apply the desired bounding box size / scale
        # First obtain the scaling factor
        if bounding_box is not None and scale is not None:
            # NOTE(review): exits the whole process instead of raising — confirm
            # this is the desired failure mode for library code.
            logging.error("You cannot define both scale and bounding box size when creating a URDF Objects")
            exit(-1)

        meta_json = os.path.join(self.model_path, "misc", "metadata.json")
        bbox_json = os.path.join(self.model_path, "misc", "bbox.json")
        # In the format of {link_name: [linkX, linkY, linkZ]}
        self.metadata = {}
        meta_links = dict()
        if os.path.isfile(meta_json):
            with open(meta_json, "r") as f:
                self.metadata = json.load(f)
            bbox_size = np.array(self.metadata["bbox_size"])
            base_link_offset = np.array(self.metadata["base_link_offset"])

            if "orientations" in self.metadata and len(self.metadata["orientations"]) > 0:
                self.orientations = self.metadata["orientations"]
            else:
                self.orientations = None

            if "links" in self.metadata:
                meta_links = self.metadata["links"]

        elif os.path.isfile(bbox_json):
            # Fall back to a plain min/max bounding-box file.
            with open(bbox_json, "r") as bbox_file:
                bbox_data = json.load(bbox_file)
                bbox_max = np.array(bbox_data["max"])
                bbox_min = np.array(bbox_data["min"])
            bbox_size = bbox_max - bbox_min
            base_link_offset = (bbox_min + bbox_max) / 2.0
        else:
            bbox_size = None
            base_link_offset = np.zeros(3)

        if bbox_size is not None:
            if fit_avg_dim_volume:
                # Isotropic scale chosen so the scaled bbox volume matches the
                # category-average volume (cube root of the volume ratio).
                if avg_obj_dims is None:
                    scale = np.ones(3)
                else:
                    spec_vol = avg_obj_dims["size"][0] * avg_obj_dims["size"][1] * avg_obj_dims["size"][2]
                    cur_vol = bbox_size[0] * bbox_size[1] * bbox_size[2]
                    volume_ratio = spec_vol / cur_vol
                    size_ratio = np.cbrt(volume_ratio)
                    scale = np.array([size_ratio] * 3)
                bounding_box = bbox_size * scale
            elif bounding_box is not None:
                # Obtain the scale as the ratio between the desired bounding box size
                # and the original bounding box size of the object at scale (1, 1, 1)
                scale = bounding_box / bbox_size
            else:
                if scale is None:
                    scale = np.ones(3)
                bounding_box = bbox_size * scale

        self.scale = scale
        self.bounding_box = bounding_box

        # If no bounding box, cannot compute dynamic properties from density
        if self.bounding_box is None:
            self.overwrite_inertial = False

        # NOTE(review): if neither metadata nor a scale argument was given,
        # `scale` is still None here and np.array2string will raise — confirm
        # callers always provide one of the two.
        logging.info("Scale: " + np.array2string(scale))

        # We need to know where the base_link origin is wrt. the bounding box
        # center. That allows us to place the model correctly since the joint
        # transformations given in the scene urdf are wrt. the bounding box
        # center. We need to scale this offset as well.
        self.scaled_bbxc_in_blf = -self.scale * base_link_offset

        self.avg_obj_dims = avg_obj_dims

        self.rename_urdf()

        self.meta_links = {}
        self.add_meta_links(meta_links)

        self.scale_object()
        self.compute_object_pose()
        self.remove_floating_joints(self.scene_instance_folder)

        prepare_object_states(self, abilities, online=True)
        self.prepare_visual_mesh_to_material()
# merged into the world. These links will become inaccessible after the merge, e.g.\n # link_from_name will raise an error and we won't have any correspounding link id to\n # invoke get_link_state later.\n if self.merge_fixed_links:\n return\n\n heights_file = os.path.join(self.model_path, \"misc\", \"heights_per_link.json\")\n if not os.path.isfile(heights_file):\n return\n\n with open(heights_file, \"r\") as f:\n heights = json.load(f)\n\n original_object_tree = ET.parse(self.filename)\n sub_urdfs = [ET.parse(urdf_path) for urdf_path in self.urdf_paths]\n for predicate in heights:\n height_maps_dir = os.path.join(self.model_path, \"misc\", \"height_maps_per_link\", \"{}\".format(predicate))\n\n height_maps = {}\n for link_name in heights[predicate]:\n link_dir = os.path.join(height_maps_dir, link_name)\n\n # Get collision mesh of the link in the original urdf\n link = original_object_tree.find(\".//link[@name='{}']\".format(link_name))\n link_col_mesh = link.find(\"collision/geometry/mesh\")\n col_mesh_path = os.path.join(self.model_path, link_col_mesh.attrib[\"filename\"])\n\n # Try to find the body_id (after splitting) and the new link name (after renaming)\n # by matching the collision mesh file path\n new_link = None\n new_body_id = None\n assert len(sub_urdfs) == len(self.body_ids)\n for sub_urdf, body_id in zip(sub_urdfs, self.body_ids):\n for link in sub_urdf.findall(\"link\"):\n link_col_mesh = link.find(\"collision/geometry/mesh\")\n if link_col_mesh is None:\n continue\n if link_col_mesh.attrib[\"filename\"] == col_mesh_path:\n new_link = link.attrib[\"name\"]\n new_body_id = body_id\n break\n if new_link is not None:\n break\n\n assert new_link is not None\n new_link_id = link_from_name(new_body_id, new_link)\n\n height_maps[(new_body_id, new_link_id)] = []\n\n for i, z_value in enumerate(heights[predicate][link_name]):\n img_fname = os.path.join(link_dir, link_dir, \"{}.png\".format(i))\n xy_map = cv2.imread(img_fname, 0)\n height_maps[(new_body_id, 
new_link_id)].append((z_value, xy_map))\n self.supporting_surfaces[predicate] = height_maps\n\n def sample_orientation(self):\n if self.orientations is None:\n raise ValueError(\"No orientation probabilities set\")\n indices = list(range(len(self.orientations)))\n orientations = [np.array(o[\"rotation\"]) for o in self.orientations]\n probabilities = [o[\"prob\"] for o in self.orientations]\n probabilities = np.array(probabilities) / np.sum(probabilities)\n chosen_orientation_idx = np.random.choice(indices, p=probabilities)\n chosen_orientation = orientations[chosen_orientation_idx]\n # Randomize yaw based on the variation annotation\n # variation = [o['variation'] for o in self.orientations]\n # min_rotation = 0.05\n # rotation_variance = max(\n # variation[chosen_orientation_idx], min_rotation)\n # rot_num = np.random.random() * rotation_variance\n\n # Randomize yaw from -pi to pi\n rot_num = np.random.uniform(-1, 1)\n rot_matrix = np.array(\n [\n [math.cos(math.pi * rot_num), -math.sin(math.pi * rot_num), 0.0],\n [math.sin(math.pi * rot_num), math.cos(math.pi * rot_num), 0.0],\n [0.0, 0.0, 1.0],\n ]\n )\n rotated_quat = quat_from_matrix(np.dot(rot_matrix, matrix_from_quat(chosen_orientation)))\n return rotated_quat\n\n def get_prefixed_joint_name(self, name):\n return self.name + \"_\" + name\n\n def rename_urdf(self):\n \"\"\"\n Helper function that renames the file paths in the object urdf\n from relative paths to absolute paths\n \"\"\"\n base_link_name = get_base_link_name(self.object_tree)\n\n # Change the links of the added object to adapt to the given name\n for link_emb in self.object_tree.iter(\"link\"):\n # If the original urdf already contains world link, do not rename\n if link_emb.attrib[\"name\"] == \"world\":\n pass\n elif link_emb.attrib[\"name\"] == base_link_name:\n # The base_link get renamed as the link tag indicates\n # Just change the name of the base link in the embedded urdf\n link_emb.attrib[\"name\"] = self.name\n else:\n # The other 
    def scale_object(self):
        """
        Scale the object according to the given bounding box

        Propagates self.scale through the kinematic tree (rotating it into
        each child link frame), rewrites joint origins/axes/limits and mesh,
        box and origin elements in self.object_tree in place, and (when
        overwrite_inertial is set) recomputes per-link mass and inertia from
        the collision meshes via trimesh.
        """
        # We need to scale 1) the meshes, 2) the position of meshes, 3) the position of joints, 4) the orientation
        # axis of joints. The problem is that those quantities are given wrt. its parent link frame, and this can be
        # rotated wrt. the frame the scale was given in Solution: parse the kin tree joint by joint, extract the
        # rotation, rotate the scale, apply rotated scale to 1, 2, 3, 4 in the child link frame

        # First, define the scale in each link reference frame
        # and apply it to the joint values
        base_link_name = get_base_link_name(self.object_tree)
        scales_in_lf = {base_link_name: self.scale}
        all_processed = False
        # Fixed-point sweep: repeat until every link reachable from the base
        # has a scale assigned (joints may appear in any order in the URDF).
        while not all_processed:
            all_processed = True
            for joint in self.object_tree.iter("joint"):
                parent_link_name = joint.find("parent").attrib["link"]
                child_link_name = joint.find("child").attrib["link"]
                if parent_link_name in scales_in_lf and child_link_name not in scales_in_lf:
                    scale_in_parent_lf = scales_in_lf[parent_link_name]
                    # The location of the joint frame is scaled using the scale in the parent frame
                    for origin in joint.iter("origin"):
                        current_origin_xyz = np.array([float(val) for val in origin.attrib["xyz"].split(" ")])
                        new_origin_xyz = np.multiply(current_origin_xyz, scale_in_parent_lf)
                        new_origin_xyz = np.array([round_up(val, 10) for val in new_origin_xyz])
                        origin.attrib["xyz"] = " ".join(map(str, new_origin_xyz))

                    # scale the prismatic joint
                    if joint.attrib["type"] == "prismatic":
                        limits = joint.findall("limit")
                        assert len(limits) == 1
                        limit = limits[0]
                        axes = joint.findall("axis")
                        assert len(axes) == 1
                        axis = axes[0]
                        axis_np = np.array([float(elem) for elem in axis.attrib["xyz"].split()])
                        major_axis = np.argmax(np.abs(axis_np))
                        # assume the prismatic joint is roughly axis-aligned
                        limit.attrib["upper"] = str(float(limit.attrib["upper"]) * scale_in_parent_lf[major_axis])
                        limit.attrib["lower"] = str(float(limit.attrib["lower"]) * scale_in_parent_lf[major_axis])

                    # Get the rotation of the joint frame and apply it to the scale
                    if "rpy" in joint.keys():
                        joint_frame_rot = np.array([float(val) for val in joint.attrib["rpy"].split(" ")])
                        # Rotate the scale
                        scale_in_child_lf = rotate_vector_3d(scale_in_parent_lf, *joint_frame_rot, cck=True)
                        scale_in_child_lf = np.absolute(scale_in_child_lf)
                    else:
                        scale_in_child_lf = scale_in_parent_lf

                    # print("Adding: ", joint.find("child").attrib["link"])

                    scales_in_lf[joint.find("child").attrib["link"]] = scale_in_child_lf

                    # The axis of the joint is defined in the joint frame, we scale it after applying the rotation
                    for axis in joint.iter("axis"):
                        current_axis_xyz = np.array([float(val) for val in axis.attrib["xyz"].split(" ")])
                        new_axis_xyz = np.multiply(current_axis_xyz, scale_in_child_lf)
                        new_axis_xyz /= np.linalg.norm(new_axis_xyz)
                        new_axis_xyz = np.array([round_up(val, 10) for val in new_axis_xyz])
                        axis.attrib["xyz"] = " ".join(map(str, new_axis_xyz))

                    # Iterate again the for loop since we added new elements to the dictionary
                    all_processed = False

        all_links = self.object_tree.findall("link")
        # compute dynamics properties
        if self.overwrite_inertial and self.category not in ["walls", "floors", "ceilings"]:
            all_links_trimesh = []
            total_volume = 0.0
            for link in all_links:
                meshes = link.findall("collision/geometry/mesh")
                if len(meshes) == 0:
                    # Meshless links keep a None placeholder so indices stay
                    # aligned with all_links.
                    all_links_trimesh.append(None)
                    continue
                # assume one collision mesh per link
                assert len(meshes) == 1, (self.filename, link.attrib["name"])
                # check collision mesh path
                collision_mesh_path = os.path.join(meshes[0].attrib["filename"])
                trimesh_obj = trimesh.load(file_obj=collision_mesh_path, force="mesh")
                all_links_trimesh.append(trimesh_obj)
                volume = trimesh_obj.volume
                # a hack to artificially increase the density of the lamp base
                if link.attrib["name"] == base_link_name:
                    if self.category in ["lamp"]:
                        volume *= 10.0
                total_volume += volume

            # avg L x W x H and Weight is given for this object category
            if self.avg_obj_dims is not None:
                avg_density = self.avg_obj_dims["density"]

            # otherwise, use the median density across all existing object categories
            else:
                avg_density = 67.0

            # Scale the mass based on bounding box size
            # TODO: how to scale moment of inertia?
            total_mass = avg_density * self.bounding_box[0] * self.bounding_box[1] * self.bounding_box[2]
            # print('total_mass', total_mass)

            density = total_mass / total_volume
            # print('avg density', density)
            for trimesh_obj in all_links_trimesh:
                if trimesh_obj is not None:
                    trimesh_obj.density = density

            assert len(all_links_trimesh) == len(all_links)

        # Now iterate over all links and scale the meshes and positions
        for i, link in enumerate(all_links):
            if self.overwrite_inertial and self.category not in ["walls", "floors", "ceilings"]:
                link_trimesh = all_links_trimesh[i]
                # assign dynamics properties
                # Fetch-or-create each inertial sub-element so both fresh and
                # pre-annotated URDFs are handled.
                inertials = link.findall("inertial")
                if len(inertials) == 0:
                    inertial = ET.SubElement(link, "inertial")
                else:
                    assert len(inertials) == 1
                    inertial = inertials[0]

                masses = inertial.findall("mass")
                if len(masses) == 0:
                    mass = ET.SubElement(inertial, "mass")
                else:
                    assert len(masses) == 1
                    mass = masses[0]

                inertias = inertial.findall("inertia")
                if len(inertias) == 0:
                    inertia = ET.SubElement(inertial, "inertia")
                else:
                    assert len(inertias) == 1
                    inertia = inertias[0]

                origins = inertial.findall("origin")
                if len(origins) == 0:
                    origin = ET.SubElement(inertial, "origin")
                else:
                    assert len(origins) == 1
                    origin = origins[0]

                if link_trimesh is not None:
                    # a hack to artificially increase the density of the lamp base
                    if link.attrib["name"] == base_link_name:
                        if self.category in ["lamp"]:
                            link_trimesh.density *= 10.0

                    # Center of mass needs a watertight mesh; fall back to the
                    # centroid otherwise.
                    if link_trimesh.is_watertight:
                        center = np.copy(link_trimesh.center_mass)
                    else:
                        center = np.copy(link_trimesh.centroid)

                    collision_mesh = [col for col in link.findall("collision") if col.find("geometry/mesh") is not None]
                    assert len(collision_mesh) == 1, "more than one collision mesh in one link"
                    collision_mesh = collision_mesh[0]
                    collision_mesh_origin = collision_mesh.find("origin")
                    if collision_mesh_origin is not None:
                        offset = np.array([float(val) for val in collision_mesh_origin.attrib["xyz"].split(" ")])
                        center += offset

                    # The inertial frame origin will be scaled down below.
                    # Here, it has the value BEFORE scaling
                    origin.attrib["xyz"] = " ".join(map(str, center))
                    origin.attrib["rpy"] = " ".join(map(str, [0.0, 0.0, 0.0]))

                    mass.attrib["value"] = str(round_up(link_trimesh.mass, 10))
                    moment_of_inertia = link_trimesh.moment_inertia
                    inertia.attrib["ixx"] = str(moment_of_inertia[0][0])
                    inertia.attrib["ixy"] = str(moment_of_inertia[0][1])
                    inertia.attrib["ixz"] = str(moment_of_inertia[0][2])
                    inertia.attrib["iyy"] = str(moment_of_inertia[1][1])
                    inertia.attrib["iyz"] = str(moment_of_inertia[1][2])
                    inertia.attrib["izz"] = str(moment_of_inertia[2][2])
                else:
                    # empty link that does not have any mesh
                    origin.attrib["xyz"] = " ".join(map(str, [0.0, 0.0, 0.0]))
                    origin.attrib["rpy"] = " ".join(map(str, [0.0, 0.0, 0.0]))
                    mass.attrib["value"] = str(0.0)
                    inertia.attrib["ixx"] = str(0.0)
                    inertia.attrib["ixy"] = str(0.0)
                    inertia.attrib["ixz"] = str(0.0)
                    inertia.attrib["iyy"] = str(0.0)
                    inertia.attrib["iyz"] = str(0.0)
                    inertia.attrib["izz"] = str(0.0)

            scale_in_lf = scales_in_lf[link.attrib["name"]]
            # Apply the scale to all mesh elements within the link (original scale and origin)
            for mesh in link.iter("mesh"):
                if "scale" in mesh.attrib:
                    mesh_scale = np.array([float(val) for val in mesh.attrib["scale"].split(" ")])
                    new_scale = np.multiply(mesh_scale, scale_in_lf)
                    new_scale = np.array([round_up(val, 10) for val in new_scale])
                    mesh.attrib["scale"] = " ".join(map(str, new_scale))
                else:
                    new_scale = np.array([round_up(val, 10) for val in scale_in_lf])
                    mesh.set("scale", " ".join(map(str, new_scale)))

            for box in link.iter("box"):
                if "size" in box.attrib:
                    box_scale = np.array([float(val) for val in box.attrib["size"].split(" ")])
                    new_scale = np.multiply(box_scale, scale_in_lf)
                    new_scale = np.array([round_up(val, 10) for val in new_scale])
                    box.attrib["size"] = " ".join(map(str, new_scale))

            for origin in link.iter("origin"):
                origin_xyz = np.array([float(val) for val in origin.attrib["xyz"].split(" ")])
                new_origin_xyz = np.multiply(origin_xyz, scale_in_lf)
                new_origin_xyz = np.array([round_up(val, 10) for val in new_origin_xyz])
                origin.attrib["xyz"] = " ".join(map(str, new_origin_xyz))
'visual_1.obj': randomized_material_1\n # 'visual_2.obj': randomized_material_1\n # },\n # {}, # 2nd sub URDF\n # { # 3rd sub URDF\n # 'visual_3.obj': randomized_material_2\n # }\n # ]\n\n self.visual_mesh_to_material = [{} for _ in self.urdf_paths]\n\n # a list of all materials used for RandomizedMaterial\n self.randomized_materials = []\n # mapping from material class to friction coefficient\n self.material_to_friction = None\n\n # procedural material that can change based on state changes\n self.procedural_material = None\n\n self.texture_procedural_generation = False\n for state in self.states:\n if issubclass(state, TextureChangeStateMixin):\n self.texture_procedural_generation = True\n break\n\n if self.texture_randomization and self.texture_procedural_generation:\n raise ValueError(\"Cannot support both randomized and procedural texture\")\n\n if self.texture_randomization:\n self.prepare_randomized_texture()\n if self.texture_procedural_generation:\n self.prepare_procedural_texture()\n\n self.create_link_name_vm_mapping()\n\n def create_link_name_vm_mapping(self):\n self.link_name_to_vm = []\n\n for i in range(len(self.urdf_paths)):\n link_name_to_vm_urdf = {}\n sub_urdf_tree = ET.parse(self.urdf_paths[i])\n\n links = sub_urdf_tree.findall(\".//link\")\n for link in links:\n name = link.attrib[\"name\"]\n if name in link_name_to_vm_urdf:\n raise ValueError(\"link name collision\")\n link_name_to_vm_urdf[name] = []\n for visual_mesh in link.findall(\"visual/geometry/mesh\"):\n link_name_to_vm_urdf[name].append(visual_mesh.attrib[\"filename\"])\n self.link_name_to_vm.append(link_name_to_vm_urdf)\n\n def randomize_texture(self):\n \"\"\"\n Randomize texture and material for each link / visual shape\n \"\"\"\n for material in self.randomized_materials:\n material.randomize()\n self.update_friction()\n\n def update_friction(self):\n \"\"\"\n Update the surface lateral friction for each link based on its material\n \"\"\"\n if self.material_to_friction is None:\n 
return\n for i in range(len(self.urdf_paths)):\n # if the sub URDF does not have visual meshes\n if len(self.visual_mesh_to_material[i]) == 0:\n continue\n body_id = self.body_ids[i]\n sub_urdf_tree = ET.parse(self.urdf_paths[i])\n\n for j in np.arange(-1, p.getNumJoints(body_id)):\n # base_link\n if j == -1:\n link_name = p.getBodyInfo(body_id)[0].decode(\"UTF-8\")\n else:\n link_name = p.getJointInfo(body_id, j)[12].decode(\"UTF-8\")\n link = sub_urdf_tree.find(\".//link[@name='{}']\".format(link_name))\n link_materials = []\n for visual_mesh in link.findall(\"visual/geometry/mesh\"):\n link_materials.append(self.visual_mesh_to_material[i][visual_mesh.attrib[\"filename\"]])\n link_frictions = []\n for link_material in link_materials:\n if link_material.random_class is None:\n friction = 0.5\n elif link_material.random_class not in self.material_to_friction:\n friction = 0.5\n else:\n friction = self.material_to_friction.get(link_material.random_class, 0.5)\n link_frictions.append(friction)\n link_friction = np.mean(link_frictions)\n p.changeDynamics(body_id, j, lateralFriction=link_friction)\n\n def prepare_randomized_texture(self):\n \"\"\"\n Set up mapping from visual meshes to randomizable materials\n \"\"\"\n if self.category in [\"walls\", \"floors\", \"ceilings\"]:\n material_groups_file = os.path.join(\n self.model_path, \"misc\", \"{}_material_groups.json\".format(self.category)\n )\n else:\n material_groups_file = os.path.join(self.model_path, \"misc\", \"material_groups.json\")\n\n assert os.path.isfile(material_groups_file), \"cannot find material group: {}\".format(material_groups_file)\n with open(material_groups_file) as f:\n material_groups = json.load(f)\n\n # create randomized material for each material group\n all_material_categories = material_groups[0]\n all_materials = {}\n for key in all_material_categories:\n all_materials[int(key)] = RandomizedMaterial(all_material_categories[key])\n\n # make visual mesh file path absolute\n 
visual_mesh_to_idx = material_groups[1]\n for old_path in list(visual_mesh_to_idx.keys()):\n new_path = os.path.join(self.model_path, \"shape\", \"visual\", old_path)\n visual_mesh_to_idx[new_path] = visual_mesh_to_idx[old_path]\n del visual_mesh_to_idx[old_path]\n\n # check each visual object belongs to which sub URDF in case of splitting\n for i, urdf_path in enumerate(self.urdf_paths):\n sub_urdf_tree = ET.parse(urdf_path)\n for visual_mesh_path in visual_mesh_to_idx:\n # check if this visual object belongs to this URDF\n if sub_urdf_tree.find(\".//mesh[@filename='{}']\".format(visual_mesh_path)) is not None:\n self.visual_mesh_to_material[i][visual_mesh_path] = all_materials[\n visual_mesh_to_idx[visual_mesh_path]\n ]\n\n self.randomized_materials = list(all_materials.values())\n\n friction_json = os.path.join(igibson.ig_dataset_path, \"materials\", \"material_friction.json\")\n if os.path.isfile(friction_json):\n with open(friction_json) as f:\n self.material_to_friction = json.load(f)\n\n def prepare_procedural_texture(self):\n \"\"\"\n Set up mapping from visual meshes to procedural materials\n Assign all visual meshes to the same ProceduralMaterial\n \"\"\"\n procedural_material = ProceduralMaterial(material_folder=os.path.join(self.model_path, \"material\"))\n\n for i, urdf_path in enumerate(self.urdf_paths):\n sub_urdf_tree = ET.parse(urdf_path)\n for visual_mesh in sub_urdf_tree.findall(\"link/visual/geometry/mesh\"):\n filename = visual_mesh.attrib[\"filename\"]\n self.visual_mesh_to_material[i][filename] = procedural_material\n\n for state in self.states:\n if issubclass(state, TextureChangeStateMixin):\n procedural_material.add_state(state)\n self.states[state].material = procedural_material\n\n self.procedural_material = procedural_material\n\n def _load(self):\n \"\"\"\n Load the object into pybullet and set it to the correct pose\n \"\"\"\n flags = p.URDF_ENABLE_SLEEPING\n if self.merge_fixed_links:\n flags |= p.URDF_MERGE_FIXED_LINKS\n\n if 
self.ignore_visual_shape:\n flags |= p.URDF_IGNORE_VISUAL_SHAPES\n\n for idx in range(len(self.urdf_paths)):\n logging.info(\"Loading \" + self.urdf_paths[idx])\n is_fixed = self.is_fixed[idx]\n body_id = p.loadURDF(self.urdf_paths[idx], flags=flags, useFixedBase=is_fixed)\n # flags=p.URDF_USE_MATERIAL_COLORS_FROM_MTL)\n transformation = self.poses[idx]\n pos = transformation[0:3, 3]\n orn = np.array(quatXYZWFromRotMat(transformation[0:3, 0:3]))\n logging.info(\"Moving URDF to (pos,ori): \" + np.array_str(pos) + \", \" + np.array_str(orn))\n dynamics_info = p.getDynamicsInfo(body_id, -1)\n inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]\n pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)\n p.resetBasePositionAndOrientation(body_id, pos, orn)\n p.changeDynamics(body_id, -1, activationState=p.ACTIVATION_STATE_ENABLE_SLEEPING)\n\n for j in get_joints(body_id):\n info = get_joint_info(body_id, j)\n jointType = info.jointType\n if jointType in [p.JOINT_REVOLUTE, p.JOINT_PRISMATIC]:\n p.setJointMotorControl2(\n body_id, j, p.VELOCITY_CONTROL, targetVelocity=0.0, force=self.joint_friction\n )\n\n # Only need to restore revolute and prismatic joints\n if self.joint_positions:\n joint_name = str(info.jointName, encoding=\"utf-8\")\n joint_position = self.joint_positions[idx][joint_name]\n set_joint_position(body_id, j, joint_position)\n\n self.body_ids.append(body_id)\n\n self.load_supporting_surfaces()\n\n return self.body_ids\n\n def force_wakeup(self):\n \"\"\"\n Force wakeup sleeping objects\n \"\"\"\n for body_id in self.body_ids:\n for joint_id in range(p.getNumJoints(body_id)):\n p.changeDynamics(body_id, joint_id, activationState=p.ACTIVATION_STATE_WAKE_UP)\n p.changeDynamics(body_id, -1, activationState=p.ACTIVATION_STATE_WAKE_UP)\n\n def reset(self):\n \"\"\"\n Reset the object to its original pose and joint configuration\n \"\"\"\n for idx in range(len(self.body_ids)):\n body_id = self.body_ids[idx]\n transformation = 
self.poses[idx]\n pos = transformation[0:3, 3]\n orn = np.array(quatXYZWFromRotMat(transformation[0:3, 0:3]))\n logging.info(\"Resetting URDF to (pos,ori): \" + np.array_str(pos) + \", \" + np.array_str(orn))\n dynamics_info = p.getDynamicsInfo(body_id, -1)\n inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]\n pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)\n p.resetBasePositionAndOrientation(body_id, pos, orn)\n\n # reset joint position to 0.0\n for j in range(p.getNumJoints(body_id)):\n info = p.getJointInfo(body_id, j)\n jointType = info[2]\n if jointType in [p.JOINT_REVOLUTE, p.JOINT_PRISMATIC]:\n p.resetJointState(body_id, j, targetValue=0.0, targetVelocity=0.0)\n p.setJointMotorControl2(\n body_id, j, p.VELOCITY_CONTROL, targetVelocity=0.0, force=self.joint_friction\n )\n\n def get_position(self):\n \"\"\"\n Get object position\n\n :return: position in xyz\n \"\"\"\n body_id = self.get_body_id()\n pos, _ = p.getBasePositionAndOrientation(body_id)\n return pos\n\n def get_orientation(self):\n \"\"\"\n Get object orientation\n\n :return: quaternion in xyzw\n \"\"\"\n body_id = self.get_body_id()\n _, orn = p.getBasePositionAndOrientation(body_id)\n return orn\n\n def get_position_orientation(self):\n \"\"\"\n Get object position and orientation\n\n :return: position in xyz\n :return: quaternion in xyzw\n \"\"\"\n body_id = self.get_body_id()\n pos, orn = p.getBasePositionAndOrientation(body_id)\n return pos, orn\n\n def get_base_link_position_orientation(self):\n \"\"\"\n Get object base link position and orientation\n\n :return: position in xyz\n :return: quaternion in xyzw\n \"\"\"\n # TODO: not used anywhere yet, but probably should be put in ObjectBase\n body_id = self.get_body_id()\n pos, orn = p.getBasePositionAndOrientation(body_id)\n dynamics_info = p.getDynamicsInfo(body_id, -1)\n inertial_pos = dynamics_info[3]\n inertial_orn = dynamics_info[4]\n inv_inertial_pos, inv_inertial_orn = p.invertTransform(inertial_pos, 
inertial_orn)\n pos, orn = p.multiplyTransforms(pos, orn, inv_inertial_pos, inv_inertial_orn)\n return pos, orn\n\n def set_position(self, pos):\n \"\"\"\n Set object position\n\n :param pos: position in xyz\n \"\"\"\n body_id = self.get_body_id()\n if self.main_body_is_fixed:\n logging.warning(\"cannot set_position for fixed objects\")\n return\n\n _, old_orn = p.getBasePositionAndOrientation(body_id)\n p.resetBasePositionAndOrientation(body_id, pos, old_orn)\n clear_cached_states(self)\n\n def set_orientation(self, orn):\n \"\"\"\n Set object orientation\n\n :param orn: quaternion in xyzw\n \"\"\"\n body_id = self.get_body_id()\n if self.main_body_is_fixed:\n logging.warning(\"cannot set_orientation for fixed objects\")\n return\n\n old_pos, _ = p.getBasePositionAndOrientation(body_id)\n p.resetBasePositionAndOrientation(body_id, old_pos, orn)\n clear_cached_states(self)\n\n def set_position_orientation(self, pos, orn):\n \"\"\"\n Set object position and orientation\n :param pos: position in xyz\n :param orn: quaternion in xyzw\n \"\"\"\n body_id = self.get_body_id()\n if self.main_body_is_fixed:\n logging.warning(\"cannot set_position_orientation for fixed objects\")\n return\n\n p.resetBasePositionAndOrientation(body_id, pos, orn)\n clear_cached_states(self)\n\n def set_base_link_position_orientation(self, pos, orn):\n body_id = self.get_body_id()\n if self.main_body_is_fixed:\n logging.warning(\"cannot set_base_link_position_orientation for fixed objects\")\n return\n dynamics_info = p.getDynamicsInfo(body_id, -1)\n inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]\n pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)\n self.set_position_orientation(pos, orn)\n clear_cached_states(self)\n\n def get_body_id(self):\n return self.body_ids[self.main_body]\n\n def add_meta_links(self, meta_links):\n \"\"\"\n Adds the meta links (e.g. 
heating element position, water source position) from the metadata file\n into the URDF tree for this object prior to loading.\n\n :param meta_links: Dictionary of meta links in the form of {link_name: [linkX, linkY, linkZ]}\n :return: None.\n \"\"\"\n for meta_link_name, link_info in meta_links.items():\n if link_info[\"geometry\"] is not None:\n # Objects with geometry actually need to be added into the URDF for collision purposes.\n # These objects cannot be imported with fixed links.\n self.merge_fixed_links = False\n add_fixed_link(self.object_tree, meta_link_name, link_info)\n else:\n # Otherwise, the \"link\" is just an offset, so we save its position.\n self.meta_links[meta_link_name] = np.array(link_info[\"xyz\"])\n\n # TODO: remove after split floors\n def set_room_floor(self, room_floor):\n assert self.category == \"floors\"\n self.room_floor = room_floor\n","sub_path":"igibson/objects/articulated_object.py","file_name":"articulated_object.py","file_ext":"py","file_size_in_byte":50093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"532035419","text":"###############################################################################\n# Version: 1.1\n# Last modified on: 3 April, 2016\n# Developers: Michael G. Epitropakis\n# email: m_(DOT)_epitropakis_(AT)_lancaster_(DOT)_ac_(DOT)_uk\n###############################################################################\n\nfrom builtins import object\n\nimport math\n\nimport numpy as np\n\nfrom . import functions as simple_functions\nfrom . import CF1\nfrom . import CF2\nfrom . import CF3\nfrom . import CF4\n\n\nclass CEC2013(object):\n _nfunc = -1\n _functions = {1: simple_functions.five_uneven_peak_trap,\n 2: simple_functions.equal_maxima,\n 3: simple_functions.uneven_decreasing_maxima,\n 4: simple_functions.himmelblau,\n 5: simple_functions.six_hump_camel_back,\n 6: simple_functions.shubert,\n 7: simple_functions.vincent,\n 8: simple_functions.shubert,\n 9: simple_functions.vincent,\n 10: simple_functions.modified_rastrigin_all,\n 11: CF1.CF1,\n 12: CF2.CF2,\n 13: CF3.CF3,\n 14: CF3.CF3,\n 15: CF4.CF4,\n 16: CF3.CF3,\n 17: CF4.CF4,\n 18: CF3.CF3,\n 19: CF4.CF4,\n 20: CF4.CF4}\n _f = None\n _fopt = [200.0, 1.0, 1.0, 200.0, 1.031628453489877, 186.7309088310239, 1.0, 2709.093505572820, 1.0, -2.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n _rho = [0.01, 0.01, 0.01, 0.01, 0.5, 0.5, 0.2, 0.5, 0.2, 0.01,\n 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]\n _nopt = [2, 5, 1, 4, 2, 18, 36, 81, 216, 12, 6, 8, 6, 6, 8, 6, 8, 6, 8, 8]\n _maxfes = [50000, 50000, 50000, 50000, 50000, 200000, 200000, 400000, 400000, 200000,\n 200000, 200000, 200000, 400000, 400000, 400000, 400000, 400000, 400000, 400000]\n _dimensions = [1, 1, 1, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 5, 5, 10, 10, 20]\n\n def __init__(self, nofunc):\n assert 0 < nofunc <= 20\n self._nfunc = nofunc\n if 0 < self._nfunc < 11:\n self._f = self._functions[self._nfunc]\n else:\n self._f = self._functions[self._nfunc](self.get_dimension())\n\n def evaluate(self, function):\n 
function_ = np.asarray(function)\n assert len(function_) == self.get_dimension()\n if 0 < self._nfunc < 11:\n return self._f(function_)\n\n return self._f.evaluate(function_)\n\n def get_lbound(self, n):\n assert 0 <= n < self._dimensions[self._nfunc-1]\n result = 0\n if self._nfunc in (1, 2, 3):\n result = 0\n elif self._nfunc == 4:\n result = -6\n elif self._nfunc == 5:\n tmp = [-1.9, -1.1]\n result = tmp[n]\n elif self._nfunc in (6, 8):\n result = -10\n elif self._nfunc in (7, 9):\n result = 0.25\n elif self._nfunc == 10:\n result = 0\n elif self._nfunc > 10:\n result = self._f.get_lbound(n)\n return result\n\n def get_ubound(self, n):\n assert 0 <= n < self._dimensions[self._nfunc-1]\n result = 0\n if self._nfunc == 1:\n result = 30\n elif self._nfunc in (2, 3):\n result = 1\n elif self._nfunc == 4:\n result = 6\n elif self._nfunc == 5:\n tmp = [1.9, 1.1]\n result = tmp[n]\n elif self._nfunc in (6, 8):\n result = 10\n elif self._nfunc in (7, 9):\n result = 10\n elif self._nfunc == 10:\n result = 1\n elif self._nfunc > 10:\n result = self._f.get_ubound(n)\n return result\n\n def get_fitness_goptima(self):\n return self._fopt[self._nfunc-1]\n\n def get_dimension(self):\n return self._dimensions[self._nfunc-1]\n\n def get_no_goptima(self):\n return self._nopt[self._nfunc-1]\n\n def get_maxfes(self):\n return self._maxfes[self._nfunc - 1]\n\n def get_rho(self):\n return self._rho[self._nfunc-1]\n\n def get_info(self):\n return {'fbest': self.get_fitness_goptima(),\n 'dimension': self.get_dimension(),\n 'nogoptima': self.get_no_goptima(),\n 'maxfes': self.get_maxfes(),\n 'rho': self.get_rho()}\n\n\ndef how_many_goptima(pop, function, accuracy):\n\n npop = pop.shape[0]\n\n # Evaluate population\n fits = np.zeros(npop)\n for i in range(npop):\n fits[i] = function.evaluate(pop[i])\n\n # Descending sorting\n order = np.argsort(fits)[::-1]\n\n # Sort population based on its fitness values\n sorted_pop = pop[order, :]\n spopfits = fits[order]\n\n # find seeds in the temp 
population (indices!)\n seeds_idx = find_seeds_indices(sorted_pop, function.get_rho())\n\n count = 0\n goidx = []\n for idx in seeds_idx:\n # evaluate seed\n seed_fitness = spopfits[idx]\n\n if math.fabs(seed_fitness - function.get_fitness_goptima()) <= accuracy:\n count = count + 1\n goidx.append(idx)\n\n # save time\n if count == function.get_no_goptima():\n break\n\n # gather seeds\n seeds = sorted_pop[goidx]\n\n return count, seeds\n\n\ndef find_seeds_indices(sorted_pop, radius):\n seeds = []\n seeds_idx = []\n # Determine the species seeds: iterate through sorted population\n for i, x in enumerate(sorted_pop):\n found = False\n # Iterate seeds\n for _, sx in enumerate(seeds):\n # Calculate distance from seeds\n dist = math.sqrt(sum((x - sx)**2))\n\n # If the Euclidean distance is less than the radius\n if dist <= radius:\n found = True\n break\n if not found:\n seeds.append(x)\n seeds_idx.append(i)\n\n return seeds_idx\n","sub_path":"cec2013/cec2013.py","file_name":"cec2013.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"181302905","text":"import pandas as pd\nimport numpy as np\n\ntuples = list(zip(*[['bar', 'bar', 'baz', 'baz',\n 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two',\n 'one', 'two', 'one', 'two']]))\nprint(tuples)\n\nindex = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])\ndf = pd.DataFrame(np.random.randn(8, 2), index=index, columns=['A', 'B'])\ndf2 = df[:4]\n\nprint(df2)\n\n# stack\n\nstacked = df2.stack()\nprint(stacked)\n\nunstacked1 = stacked.unstack()\nprint(unstacked1)\n\nunstacked2 = stacked.unstack(1)\n\n# pivot tables\n\ndf = pd.DataFrame({\n 'A' : ['one', 'one', 'two', 'three'] * 3,\n 'B' : ['A', 'B', 'C'] * 4,\n 'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2,\n 'D' : np.random.randn(12),\n 'E' : np.random.randn(12)\n})\nprint(df)\n\npivot_table = pd.pivot_table(df, values='D', index=['A', 'B'], columns=['C'])\nprint(pivot_table)\n","sub_path":"2편/reshaping.py","file_name":"reshaping.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"591394349","text":"from ansimagic import ColorModes, Colors, CSI, SGR, SGR_COLORS\n\n\ndef compose_sequence(introducer, *codes, **options):\n escape = '\\u001b'\n\n if not isinstance(introducer, int):\n introducer = introducer.value\n\n codes = [str(c.value) if not isinstance(c, int) else str(c) for c in codes]\n codes_seq = ';'.join(codes)\n sequence = '{}[{}{}'.format(escape, codes_seq, introducer)\n\n if 'printable' in options and options['printable'] is True:\n print(sequence, end='')\n\n return sequence\n\n\ndef make_brush(color, mode, is_background):\n codes = []\n\n color_type = SGR.BACKGROUND_COLOR if is_background else SGR.FOREGROUND_COLOR\n color_prefix = 'BACKGROUND' if is_background else 'FOREGROUND'\n\n # 3/4 bits\n if mode == ColorModes.COLORS_8:\n bright = False\n if '_' in color.name:\n bright = True\n color = Colors[color.name.split('_')[1]]\n codes.append(SGR['{}_{}'.format(color_prefix, color.name)])\n if bright:\n codes.append(1)\n\n # 8 bit\n if mode == ColorModes.COLORS_256:\n codes.append(color_type)\n codes.append(SGR_COLORS.COLORS_256)\n codes.append(color)\n\n # 24 bit\n if mode == ColorModes.TRUECOLOR:\n codes.append(color_type)\n codes.append(SGR_COLORS.TRUECOLOR)\n if isinstance(color, str):\n color = [int(color[i*2:i*2+2], 16) for i in range(0, 3)]\n codes = codes + list(color)\n\n return compose_sequence(CSI.SELECT_GRAPHIC_RENDITION, *codes)\n","sub_path":"ansimagic/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"141300516","text":"from typing import Any\n\nimport proto\n\nfrom google.ads.googleads.v14.common.types.feed_item_set_filter_type_infos import (\n DynamicAffiliateLocationSetFilter,\n DynamicLocationSetFilter,\n)\nfrom google.ads.googleads.v14.enums.types.feed_item_set_status import (\n FeedItemSetStatusEnum,\n)\n\nclass FeedItemSet(proto.Message):\n resource_name: str\n feed: str\n feed_item_set_id: int\n display_name: str\n status: FeedItemSetStatusEnum.FeedItemSetStatus\n dynamic_location_set_filter: DynamicLocationSetFilter\n dynamic_affiliate_location_set_filter: DynamicAffiliateLocationSetFilter\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n resource_name: str = ...,\n feed: str = ...,\n feed_item_set_id: int = ...,\n display_name: str = ...,\n status: FeedItemSetStatusEnum.FeedItemSetStatus = ...,\n dynamic_location_set_filter: DynamicLocationSetFilter = ...,\n dynamic_affiliate_location_set_filter: DynamicAffiliateLocationSetFilter = ...\n ) -> None: ...\n","sub_path":"google-stubs/ads/googleads/v14/resources/types/feed_item_set.pyi","file_name":"feed_item_set.pyi","file_ext":"pyi","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"103146772","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom pyspark.context import SparkContext\nfrom pyspark.conf import SparkConf\nfrom tensorflowonspark import TFCluster, TFNode\nfrom datetime import datetime\n\nimport os\n\ndef main_fun(argv, ctx):\n from src import facenet_distributed_train\n from src import vipus_distributed_train\n import sys\n\n job_name = ctx.job_name\n assert job_name in ['ps', 'worker'], 'job_name must be ps or worker'\n print(\"argv:\", argv)\n sys.argv = argv\n\n cluster, server = TFNode.start_cluster_server(ctx, num_gpus=1)\n if job_name == 'ps':\n server.join()\n else:\n if argv.model == 'FACENET':\n facenet_distributed_train.train(server, ctx.cluster_spec, argv, ctx)\n elif argv.model == 'VIPUS':\n vipus_distributed_train.train(server, ctx.cluster_spec, argv, ctx)\n\n\nif __name__ == '__main__':\n # parse arguments needed by the Spark driver\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", type=str, choices=['FACENET', 'VIPUS'],\n help='The model to use', default='FACENET')\n parser.add_argument(\"--epochs\", help=\"number of epochs\", type=int, default=200)\n parser.add_argument(\"--transfer_learning\", help=\"Start training from pretrained inception model\", action=\"store_true\")\n parser.add_argument(\"--sync_replicas\", help=\"Use SyncReplicasOptimizer\", action=\"store_true\")\n parser.add_argument(\"--local_data_path\", type=str, help=\"The local fs path of input dataset\")\n parser.add_argument('--num_executor', default=3, type=int, help='The spark executor num')\n parser.add_argument(\"--tensorboard\", help=\"launch tensorboard process\", action=\"store_true\")\n parser.add_argument(\"--pretrained_ckpt\", help=\"The pretrained inception model\", default='hdfs://bipcluster/user/vincent.wang/facenet/inception_resnet_v2_2016_08_30.ckpt')\n parser.add_argument(\"--spark_executor_cores\", default=4, 
type=int, help='The spark executor cores')\n parser.add_argument('--workspace', type=str,\n help='Directory where to write event logs and checkpoints on hdfs.', default='hdfs://bipcluster/user/vincent.wang/facenet')\n parser.add_argument('--checkpoint_dir', type=str, help='Directory where to write checkpoints on hdfs.')\n\n\n parser.add_argument('--weight_decay', type=float,\n help='L2 weight regularization.', default=0.000002)\n parser.add_argument('--batch_size', type=int,\n help='Number of images to process in a batch.', default=90)\n parser.add_argument('--image_size', type=int,\n help='Image size (height, width) in pixels.', default=299)\n parser.add_argument('--classes_per_batch', type=int,\n help='Number of classes per batch.', default=150)\n parser.add_argument('--images_per_class', type=int,\n help='Number of images per class.', default=3)\n parser.add_argument('--epoch_size', type=int,\n help='Number of batches per epoch.', default=100)\n parser.add_argument('--alpha', type=float,\n help='Positive to negative triplet distance margin.', default=0.2)\n parser.add_argument('--embedding_size', type=int,\n help='Dimensionality of the embedding.', default=128)\n parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],\n help='The optimization algorithm to use', default='ADAM')\n parser.add_argument('--learning_rate', type=float,\n help='Initial learning rate. 
If set to a negative value a learning rate ' +\n 'schedule can be specified in the file \"learning_rate_schedule.txt\"', default=0.0002)\n parser.add_argument('--learning_rate_decay_epochs', type=int,\n help='Number of epochs between learning rate decay.', default=100)\n parser.add_argument('--learning_rate_decay_factor', type=float,\n help='Learning rate decay factor.', default=1.0)\n parser.add_argument('--moving_average_decay', type=float,\n help='Exponential decay for tracking of training parameters.', default=0.9999)\n parser.add_argument('--random_flip',\n help='Performs random horizontal flipping of training images.', action='store_true')\n parser.add_argument('--lfw_nrof_folds', type=int,\n help='Number of folds to use for cross validation. Mainly used for testing.', default=10)\n\n classpath=os.popen(os.environ[\"HADOOP_HOME\"] + \"/bin/hadoop classpath --glob\").read()\n args = parser.parse_args()\n\n spark_executor_instances = args.num_executor\n spark_cores_max = spark_executor_instances * args.spark_executor_cores\n \n conf = SparkConf() \\\n .setAppName(\"triplet_distributed_train\") \\\n .set(\"spark.eventLog.enabled\", \"false\") \\\n .set(\"spark.dynamicAllocation.enabled\", \"false\") \\\n .set(\"spark.shuffle.service.enabled\", \"false\") \\\n .set(\"spark.executor.cores\", str(args.spark_executor_cores)) \\\n .set(\"spark.cores.max\", str(spark_cores_max)) \\\n .set(\"spark.task.cpus\", str(args.spark_executor_cores)) \\\n .set(\"spark.executor.instances\", str(args.num_executor)) \\\n .setExecutorEnv(\"JAVA_HOME\", os.environ[\"JAVA_HOME\"]) \\\n .setExecutorEnv(\"HADOOP_HDFS_HOME\", os.environ[\"HADOOP_HOME\"]) \\\n .setExecutorEnv(\"LD_LIBRARY_PATH\", os.environ[\"JAVA_HOME\"] + \"/jre/lib/amd64/server:\" + os.environ[\"HADOOP_HOME\"] + \"/lib/native:\" + \"/usr/local/cuda-8.0/lib64\" ) \\\n .setExecutorEnv(\"CLASSPATH\", classpath) \\\n .set(\"hostbalance_shuffle\",\"true\")\n\n print(\"{0} ===== Start\".format(datetime.now().isoformat()))\n 
sc = SparkContext(conf = conf)\n # sc.setLogLevel(\"DEBUG\")\n num_executors = int(args.num_executor)\n num_ps = 1\n\n cluster = TFCluster.run(sc, main_fun, args, num_executors, num_ps, args.tensorboard, TFCluster.InputMode.TENSORFLOW)\n cluster.shutdown()\n print(\"{0} ===== Stop\".format(datetime.now().isoformat()))\n import traceback\n try:\n sc.stop()\n except BaseException as e:\n traceback.print_exc()\n\n","sub_path":"src/vip_train_tripletloss_spark.py","file_name":"vip_train_tripletloss_spark.py","file_ext":"py","file_size_in_byte":5836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"342841484","text":"#!/usr/bin/env python\n\nfrom State import State\nfrom random import randint\n\nclass Node_Tree:\n \"\"\" Class repressenting the node object we will be inserting in our frontier\n \"\"\"\n def __init__(self, s, c=0, a=None, d=0, p=None, v=0):\n\n if not isinstance(p, Node_Tree) and p is not None: \n raise TypeError\n \n self.state = s\n self.cost = c\n self.action = a\n self.depth = d\n self.parent = p\n self.value = v\n\n def __lt__(self, other):\n if isinstance(other, Node_Tree): \n return self.value < other.value\n else:\n raise TypeError\n\n def __gt__(self, other):\n if isinstance(other, Node_Tree): \n return self.value > other.value\n else:\n raise TypeError\n\n def __repr__(self):\n return \"Node #\" +str(self.state.node_map.key) + \", value: \" + str(self.value) + \", street: \" + str(self.action)\n def toGPX(self):\n return '\\t\\t628\\n'\n\n","sub_path":"milestone4/src/Node_Tree.py","file_name":"Node_Tree.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"131547519","text":"from flask import Flask, render_template, send_from_directory\nimport yaml\nimport os\nimport orcid\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n\n name = cfg['user']['0000-0002-2907-3313']['name']\n affiliation = cfg['user']['0000-0002-2907-3313']['affiliation']\n gravatarhash = cfg['user']['0000-0002-2907-3313']['gravatarhash']\n articles = cfg['articles']\n for article in articles:\n if 'doi' in articles[article].keys():\n articles[article]['doiurl'] = \"http://dx.doi.org/\" + \\\n articles[article]['doi']\n else:\n articles[article]['doiurl'] = None\n return render_template('sample.html',\n profile_data={\n \"user\": cfg['user']['0000-0002-2907-3313'],\n \"name\": name,\n \"affiliation\": affiliation,\n \"gravatarhash\": gravatarhash},\n articles=articles)\n\n@app.route('/')\ndef storify(orcid_id):\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n orcid_json = orcid.get_profile(orcid_id)\n works = orcid.get_works(orcid_id)\n print(works)\n profile_data = update_userinfo(orcid_id, orcid_json, cfg)\n\n return render_template('sample.html',\n profile_data=profile_data,\n articles=works)\n\n\ndef update_userinfo(orcid_id, orcid_json, cfg):\n try:\n localuserinformation = cfg['user'][orcid_id]\n except KeyError:\n localuserinformation = {}\n profile_data = orcid_json\n if localuserinformation:\n profile_data['affiliation'] = localuserinformation['affiliation']\n profile_data['gravatarhash'] = localuserinformation['gravatarhash']\n return profile_data\n\n\n@app.route('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'),\n 'favicon.ico', mimetype='image/vnd.microsoft.icon')\n\nif __name__ == '__main__':\n 
app.run(debug=True)\n","sub_path":"flaskapp/mainapp.py","file_name":"mainapp.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"357439393","text":"from search import *\nfrom random import randrange\nfrom time import time\n\ndef random_cube():\n cc = CubieCube.make_solved()\n\n cc.set_coord(TWIST, randrange(N_COORDS[TWIST]))\n cc.set_coord(FLIP, randrange(N_COORDS[FLIP]))\n run = True\n while run:\n cc.set_coord(URFDRB, randrange(N_COORDS[URFDRB]))\n cc.set_coord(URBR, randrange(N_COORDS[URBR]))\n if cc.check():\n run = False\n\n return cubie_to_face(cc)\n\nN_CUBES = 10\n\ntick = time()\nfor _ in range(N_CUBES):\n f = random_cube()\n sol = solve(f, 21) # we don't really care for other thresholds\n print(f, sol)\nprint((time() - tick) / N_CUBES)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"425544657","text":"cusID=[]\r\ncusAge=[]\r\ncusName=[]\r\ncusMob=[]\r\ncusAddress=[]\r\ncusCity=[]\r\ncusPincode=[]\r\ncusEmail=[]\r\n\r\ndef addCust(id,age,name,mob,address,city,pincode,email): #function for add customer\r\n cusID.append(id)\r\n cusAge.append(age)\r\n cusName.append(name)\r\n cusMob.append(mob)\r\n cusAddress.append(address)\r\n cusCity.append(city)\r\n cusPincode.append(pincode)\r\n cusEmail.append(email)\r\n\r\ndef getId(): #validation for ID\r\n while(1):\r\n id=input(\"Enter customer's ID:\")\r\n if(id in cusID):\r\n print(\"ID already exists, Try again\")\r\n elif(id.isnumeric()):\r\n return id\r\n else:\r\n print(\"Entered ID must be numeric only\")\r\n\r\ndef getAge(): #validation for age\r\n while(1):\r\n age=input(\"Enter customer's Age:\")\r\n if(age.isnumeric()):\r\n if(int(age)>=10 and int(age)<=100):\r\n return age\r\n else:\r\n print(\"Entered Age must bewteen(10-100) only\")\r\n else:\r\n print(\"Entered Age must be numeric only\")\r\n\r\ndef getName(): #validation for name\r\n while(1):\r\n name=input(\"Enter customer's Name:\")\r\n if(name.isalpha()):\r\n if(len(name)>1):\r\n return name\r\n else:\r\n print(\"Entered name length must be 2 alphabet atleast\")\r\n else:\r\n print(\"Entered Age must be alphabet only\")\r\n\r\ndef getMob(): #validation for mobile no.\r\n while(1):\r\n mob=input(\"Enter customer's Mobile no:\")\r\n if(mob in cusMob):\r\n print(\"Mobile no. already exist, Try again\")\r\n elif(mob.isnumeric()):\r\n if(len(mob)==10):\r\n return mob\r\n else:\r\n print(\"Entered Mobile no. must be 10 digits only\")\r\n else:\r\n print(\"Entered Mobile no. 
must be numeric only\")\r\n\r\ndef getAdd(): #validation for address\r\n while(1):\r\n address=input(\"Enter customer's address:\")\r\n if(len(address)>0):\r\n return address\r\n else:\r\n print(\"Entered Address length must be 1 alphabet atleast\")\r\n\r\ndef getCity(): #validation for city\r\n while(1):\r\n city=input(\"Enter customer's City:\")\r\n if(city.isalpha()):\r\n if(len(city)>1):\r\n return city\r\n else:\r\n print(\"Entered City length must be 2 alphabet atleast\")\r\n else:\r\n print(\"Entered City must be alphabet only\")\r\n\r\ndef getPin(): #validation for pincode\r\n while(1):\r\n pincode=input(\"Enter customer's PinCode:\")\r\n if(pincode.isnumeric()):\r\n if(len(pincode)==6):\r\n return pincode\r\n else:\r\n print(\"Entered PinCode length must be 6 digits only\")\r\n else:\r\n print(\"Entered PinCode must be numeric only\")\r\n\r\ndef getEmail(): #validation for email\r\n while(1):\r\n email=input(\"Enter customer's Email ID:\")\r\n if(email in cusEmail):\r\n print(\"Email ID is already exists, Try again\")\r\n elif(len(email) > 10):\r\n if('@gmail.com' in email):\r\n return email\r\n else:\r\n print(\"Please include '@gmail.com' in Email ID\")\r\n else:\r\n print(\"Entered Email ID length must be 11 digits atleast\")\r\n\r\ndef searchCustId(id):\r\n index=cusID.index(id)\r\n return index\r\n\r\ndef searchCustName(name1):\r\n index=cusName.index(name1)\r\n return index\r\n\r\ndef searchCustMob(mob):\r\n index=cusMob.index(mob)\r\n return index\r\n\r\ndef searchCustEmail(email):\r\n index=cusEmail.index(email)\r\n return index\r\n\r\ndef delCustId(id1):\r\n index=cusID.index(id1)\r\n cusID.pop(index)\r\n cusAge.pop(index)\r\n cusName.pop(index)\r\n cusMob.pop(index)\r\n cusAddress.pop(index)\r\n cusCity.pop(index)\r\n cusPincode.pop(index)\r\n cusEmail.pop(index)\r\n\r\ndef delCustName(name1):\r\n index=cusName.index(name1)\r\n cusID.pop(index)\r\n cusAge.pop(index)\r\n cusName.pop(index)\r\n cusMob.pop(index)\r\n cusAddress.pop(index)\r\n 
cusCity.pop(index)\r\n cusPincode.pop(index)\r\n cusEmail.pop(index)\r\n\r\ndef delCustMob(mob1):\r\n index=cusMob.index(mob1)\r\n cusID.pop(index)\r\n cusAge.pop(index)\r\n cusName.pop(index)\r\n cusMob.pop(index)\r\n cusAddress.pop(index)\r\n cusCity.pop(index)\r\n cusPincode.pop(index)\r\n cusEmail.pop(index)\r\n\r\ndef delCustEmail(email1):\r\n index=cusEmail.index(email1)\r\n cusID.pop(index)\r\n cusAge.pop(index)\r\n cusName.pop(index)\r\n cusMob.pop(index)\r\n cusAddress.pop(index)\r\n cusCity.pop(index)\r\n cusPincode.pop(index)\r\n cusEmail.pop(index)\r\n\r\ndef modifyCustID(id2, age2, name2, mob2, address2, city2, pincode2, email2):\r\n index = cusID.index(id2)\r\n cusAge[index]=age2\r\n cusName[index]=name2\r\n cusMob[index]=mob2\r\n cusAddress[index]=address2\r\n cusCity[index]=city2\r\n cusPincode[index]=pincode2\r\n cusEmail[index]=email2\r\n\r\ndef modifyCustName(id2, age2, name2, mob2, address2, city2, pincode2, email2):\r\n index = cusName.index(name2)\r\n cusAge[index]=age2\r\n cusName[index]=name2\r\n cusMob[index]=mob2\r\n cusAddress[index]=address2\r\n cusCity[index]=city2\r\n cusPincode[index]=pincode2\r\n cusEmail[index]=email2\r\n\r\ndef modifyCustMob(id2, age2, name2, mob2, address2, city2, pincode2, email2):\r\n index = cusMob.index(mob2)\r\n cusAge[index]=age2\r\n cusName[index]=name2\r\n cusMob[index]=mob2\r\n cusAddress[index]=address2\r\n cusCity[index]=city2\r\n cusPincode[index]=pincode2\r\n cusEmail[index]=email2\r\n\r\ndef modifyCustEmail(id2, age2, name2, mob2, address2, city2, pincode2, email2):\r\n index = cusEmail.index(email2)\r\n cusAge[index]=age2\r\n cusName[index]=name2\r\n cusMob[index]=mob2\r\n cusAddress[index]=address2\r\n cusCity[index]=city2\r\n cusPincode[index]=pincode2\r\n cusEmail[index]=email2\r\n\r\nwhile(1):\r\n print(\"1: Add Customer\")\r\n print(\"2: Search Customer\")\r\n print(\"3: Delete Customer\")\r\n print(\"4: Modify Customer\")\r\n print(\"5: Display All Customers\")\r\n print(\"6: Exit\")\r\n 
choice=input(\"Enter Choice 1 to 6:\\n\")\r\n\r\n if(choice==\"1\"): #Add Customer\r\n id=getId()\r\n age=getAge()\r\n name=getName()\r\n mob=getMob()\r\n address=getAdd()\r\n city=getCity()\r\n pincode=getPin()\r\n email=getEmail()\r\n addCust(id,age,name,mob,address,city,pincode,email)\r\n print(\"Customer added successfully\")\r\n\r\n elif(choice==\"2\"): #Search Customer\r\n print(\"1. search by ID\")\r\n print(\"2. search by Name\")\r\n print(\"3. search by Ph.no.\")\r\n print(\"4. search by Email ID\\n\")\r\n choice1=input(\"enter your choice\")\r\n\r\n if(choice1==\"1\"):\r\n while(1):\r\n id=input(\"Enter Customer ID: \")\r\n if (id.isnumeric()):\r\n if(id not in cusID):\r\n print(\"ID does not exist, Try again\")\r\n break\r\n else:\r\n index = searchCustId(id)\r\n print(\"Cust ID:\",cusID[index],\" Cust Age:\",cusAge[index],\" Cust Name:\",cusName[index],\r\n \" Cust Mob:\",cusMob[index],\" Cust Address:\",cusAddress[index],\" Cust City:\",cusCity[index],\r\n \" Cust PinCode:\",cusPincode[index],\" Cust Email ID:\",cusEmail[index])\r\n break\r\n else:\r\n print(\"Entered ID must be numeric only\")\r\n\r\n elif(choice1==\"2\"):\r\n while(1):\r\n name=input(\"Enter Customer Name:\")\r\n if(name.isalpha()):\r\n if(name not in cusName):\r\n print(\"Name does not exist, Try again\")\r\n break\r\n else:\r\n index = searchCustName(name)\r\n print(\"Cust ID:\", cusID[index], \" Cust Age:\", cusAge[index], \" Cust Name:\", cusName[index],\r\n \" Cust Mob:\", cusMob[index], \" Cust Address:\", cusAddress[index], \" Cust City:\",\r\n cusCity[index],\" Cust PinCode:\", cusPincode[index], \" Cust Email ID:\", cusEmail[index])\r\n break\r\n else:\r\n print(\"Entered Name must be alphabet only\")\r\n\r\n elif(choice1 == \"3\"):\r\n while(1):\r\n mob = input(\"Enter customer Ph.no.:\")\r\n if(mob.isnumeric()):\r\n if(len(mob)==10):\r\n if (mob not in cusMob):\r\n print(\"Ph.No. 
does not exist, Try again\")\r\n break\r\n else:\r\n index = searchCustMob(mob)\r\n print(\"Cust ID:\", cusID[index], \" Cust Age:\", cusAge[index], \" Cust Name:\", cusName[index],\r\n \" Cust Mob:\", cusMob[index], \" Cust Address:\", cusAddress[index], \" Cust City:\",\r\n cusCity[index], \" Cust PinCode:\", cusPincode[index], \" Cust Email ID:\",cusEmail[index])\r\n break\r\n else:\r\n print(\"Please Enter Ph.no with 10 digits only\")\r\n else:\r\n print(\"Entered Ph.NO. must be numeric only\")\r\n\r\n elif(choice1 == \"4\"):\r\n while (1):\r\n email = input(\"Enter Customer's Email ID:\")\r\n if ('@gmail.com' in email):\r\n if(len(email) > 10):\r\n if (email not in cusEmail):\r\n print(\"Email ID does not exists, Try again\")\r\n break\r\n else:\r\n index = searchCustEmail(email)\r\n print(\"Cust ID:\", cusID[index], \" Cust Age:\", cusAge[index], \" Cust Name:\", cusName[index],\r\n \" Cust Mob:\", cusMob[index], \" Cust Address:\", cusAddress[index], \" Cust City:\",\r\n cusCity[index], \" Cust PinCode:\", cusPincode[index], \" Cust Email ID:\",cusEmail[index])\r\n break\r\n else:\r\n print(\"Entered Email ID length must be 11 atleast\")\r\n\r\n else:\r\n print(\"Please include '@gmail.com' in Email ID\")\r\n else:\r\n print(\"Please Enter correct choice\")\r\n print(\"1. Press For main menu\")\r\n print(\"2. Press For exit the program\")\r\n ip=input(\"Enter your choice:\")\r\n if(ip=='1'):\r\n pass\r\n elif(ip=='2'):\r\n print(\"Thanks for using this\")\r\n exit()\r\n else:\r\n print(\"Please Enter correct choice\")\r\n\r\n\r\n elif(choice==\"3\"): #Delete Customer\r\n\r\n print(\"1. Delete by ID\")\r\n print(\"2. Delete by Name\")\r\n print(\"3. Delete by Ph.No\")\r\n print(\"4. 
Delete by Email ID\")\r\n choice2=input(\"Enter your choice:\")\r\n\r\n if(choice2=='1'):\r\n while(1):\r\n id1=input(\"Enter Customer ID for Deletion\")\r\n if (id1.isnumeric()):\r\n if (id1 not in cusID):\r\n print(\"ID does not exist, Try again\")\r\n break\r\n else:\r\n delCustId(id1)\r\n print(\"Customer Deleted Successfully\")\r\n break\r\n else:\r\n print(\"ID must be numeric only\")\r\n\r\n elif (choice2 == '2'):\r\n while (1):\r\n name1 = input(\"Enter Customer Name for Deletion\")\r\n if (name1.isalpha()):\r\n if (name1 not in cusName):\r\n print(\"Name does not exist, Try again\")\r\n break\r\n else:\r\n delCustName(name1)\r\n print(\"Customer Deleted Successfully\")\r\n break\r\n else:\r\n print(\"Name must be string only\")\r\n\r\n elif (choice2 == '3'):\r\n while (1):\r\n mob1= input(\"Enter Customer PH.NO. for Deletion\")\r\n if (mob1.isnumeric()):\r\n if(len(mob1)==10):\r\n if (mob1 not in cusMob):\r\n print(\"Ph.No. does not exist, Try again\")\r\n break\r\n else:\r\n delCustMob(mob1)\r\n print(\"Customer Deleted Successfully\")\r\n break\r\n else:\r\n print(\"Ph.No. must contains 10 Digits only\")\r\n else:\r\n print(\"Ph.No. must be numeric only\")\r\n\r\n elif (choice2 == '4'):\r\n while (1):\r\n email1= input(\"Enter Customer Email ID for Deletion\")\r\n if ('@gmail.com' in email1):\r\n if (len(email1) > 10):\r\n if (email1 not in cusEmail):\r\n print(\"Email ID does not exist, Try again\")\r\n break\r\n else:\r\n delCustEmail(email1)\r\n print(\"Customer Deleted Successfully\")\r\n break\r\n else:\r\n print(\"Entered Email ID length must be 11 atleast\")\r\n\r\n else:\r\n print(\"Please include '@gmail.com' in Email ID\")\r\n\r\n\r\n else:\r\n print(\"Please Enter correct choice\")\r\n print(\"1. Press For main menu\")\r\n print(\"2. 
Press For exit the program\")\r\n ip1=input(\"Enter your choice:\")\r\n if(ip1=='1'):\r\n pass\r\n elif(ip1=='2'):\r\n print(\"Thanks for using this\")\r\n exit()\r\n else:\r\n print(\"Please Enter correct choice\")\r\n\r\n\r\n elif(choice==\"4\"):#Modify Customer\r\n\r\n print(\"1. Modify by ID\")\r\n print(\"2. Modify by Ph.No\")\r\n print(\"3. Modify by Email ID\")\r\n choice3 = input(\"Enter your choice:\")\r\n\r\n if(choice3==\"1\"):\r\n while (1):\r\n id2 = input(\"Enter Customer ID to Update all details:\")\r\n if (id2.isnumeric()):\r\n if (id2 not in cusID):\r\n print(\"ID does not exist, Try again\")\r\n break\r\n else:\r\n age2 = input(\"Enter Customer Updated Age: \")\r\n name2 = input(\"Enter Customer Updated Name: \")\r\n mob2 = input(\"Enter Customer Ph.No: \")\r\n address2 = input(\"Enter Customer Address: \")\r\n city2 = input(\"Enter Customer City: \")\r\n pincode2 = input(\"Enter Customer PinCode: \")\r\n email2 = input(\"Enter Customer Email ID: \")\r\n modifyCustID(id2, age2, name2, mob2, address2, city2, pincode2, email2)\r\n print(\"Customer modified successfully\")\r\n break\r\n else:\r\n print(\"ID must be numeric only\")\r\n\r\n elif (choice3 == \"2\"):\r\n while (1):\r\n mob2 = input(\"Enter Customer Ph.No. to Update all details:\")\r\n if (mob2.isnumeric()):\r\n if(len(mob2)==10):\r\n if (mob2 not in cusMob):\r\n print(\"Ph.No. does not exist, Try again\")\r\n break\r\n else:\r\n age2 = input(\"Enter Customer Updated Age: \")\r\n name2 = input(\"Enter Customer Updated Name: \")\r\n mob2 = input(\"Enter Customer Ph.No: \")\r\n address2 = input(\"Enter Customer Address: \")\r\n city2 = input(\"Enter Customer City: \")\r\n pincode2 = input(\"Enter Customer PinCode: \")\r\n email2 = input(\"Enter Customer Email ID: \")\r\n modifyCustID(id2, age2, name2, mob2, address2, city2, pincode2, email2)\r\n print(\"Customer modified successfully\")\r\n break\r\n else:\r\n print(\"Ph.No. must include 10 digits only\")\r\n else:\r\n print(\"Ph.No. 
must be numeric only\")\r\n\r\n elif(choice3==\"3\"):\r\n while (1):\r\n email2 = input(\"Enter Customer Email ID to Update all details:\")\r\n if ('@gmail.com' in email2):\r\n if (len(email2) > 10):\r\n if(email2 not in cusEmail):\r\n print(\"ID does not exist, Try again\")\r\n break\r\n else:\r\n age2 = input(\"Enter Customer Updated Age: \")\r\n name2 = input(\"Enter Customer Updated Name: \")\r\n mob2 = input(\"Enter Customer Ph.No: \")\r\n address2 = input(\"Enter Customer Address: \")\r\n city2 = input(\"Enter Customer City: \")\r\n pincode2 = input(\"Enter Customer PinCode: \")\r\n email2 = input(\"Enter Customer Email ID: \")\r\n modifyCustID(id2, age2, name2, mob2, address2, city2, pincode2, email2)\r\n print(\"Customer modified successfully\")\r\n break\r\n else:\r\n print(\"Entered Email ID length must be 11 atleast\")\r\n else:\r\n print(\"Please include '@gmail.com' in Email ID\")\r\n\r\n else:\r\n print(\"Please Enter correct choice\")\r\n print(\"1. Press For main menu\")\r\n print(\"2. Press For exit the program\")\r\n ip2 = input(\"Enter your choice:\")\r\n if (ip2 == '1'):\r\n pass\r\n elif (ip2 == '2'):\r\n print(\"Thanks for using this\")\r\n exit()\r\n else:\r\n print(\"Please Enter correct choice\")\r\n\r\n elif(choice==\"5\"): #Display All Customers\r\n for i in range(len(cusID)):\r\n print(\"Cust ID: \", cusID[i], \"\\t\\tCust Age: \",cusAge[i],\"\\t\\tCust Name: \",cusName[i], \"\\t\\tCust Mob: \",cusMob[i],\r\n \"\\t\\tCust Address: \", cusAddress[i], \"\\t\\tCust City: \",cusCity[i], \"\\t\\tCust PinCode: \",cusPincode[i],\r\n \"\\t\\tCust Email ID: \", cusEmail[i])\r\n\r\n elif(choice==\"6\"):\r\n exit()\r\n\r\n else:\r\n print(\"Please Enter correct choice\")\r\n print(\"1. Press For main menu\")\r\n print(\"2. 
Press For exit the program\")\r\n ip3 = input(\"Enter your choice:\")\r\n if (ip3 == '1'):\r\n pass\r\n elif (ip3 == '2'):\r\n print(\"Thanks for using this\")\r\n exit()\r\n else:\r\n print(\"Please Enter correct choice\")","sub_path":"custms.py","file_name":"custms.py","file_ext":"py","file_size_in_byte":19195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"185211146","text":"from selenium import webdriver\nfp = webdriver.FirefoxProfile()\nfp.set_preference(\"browser.helperApps.neverAsk.SaveToDisk\", \"text/plain,application/pdf\")\nfp.set_preference(\"browser.download.manager.showWhenStarting\", False)\nfp.set_preference(\"browser.download.dir\", \"C:\\DownloadedFiles\")\nfp.set_preference(\"browser.download.folderlist\", 2)\nfp.set_preference(\"pdfjs.disabled\", True)\n\n\ndriver = webdriver.Chrome(executable_path=\"C:/Users/User/PycharmProjects/last_try/pytest_start_2/geckodriver.exe\",\n forefox_profile=fp)\ndriver.get(\"http://demo.automationtesting.in/FileDownload.html\")\ndriver.maximize_window()\n\n# Download text file\ndriver.find_element_by_id(\"textbox\").send_keys(\"testing download text file\")\ndriver.find_element_by_id(\"createTxt\").click() # Generate file button\ndriver.find_element_by_id(\"link-to-download\").click()\n\n# Download pdf file\ndriver.find_element_by_id(\"pdfbox\").send_keys(\"testing pdf file\")\ndriver.find_element_by_id(\"createPdf\").click() # Generate file button\ndriver.find_element_by_id(\"pdf-link-to-download\").click()\n","sub_path":"1_Selenium/test_4_2_download_with_firefox.py","file_name":"test_4_2_download_with_firefox.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"416291728","text":"# coding: utf-8\nfrom devito import TimeFunction, warning\nfrom devito.tools import memoized_meth\nfrom examples.seismic.tti.operators import ForwardOperator, AdjointOperator\nfrom examples.seismic.tti.operators import particle_velocity_fields\nfrom examples.seismic import PointSource, Receiver\nfrom devito import norm, Operator, Function, Dimension, Eq, Inc\nimport pyvista as pv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom devito.types.basic import Scalar\nfrom matplotlib.pyplot import pause # noqa\nimport sys\nnp.set_printoptions(threshold=sys.maxsize) # pdb print full size\n\n\nclass AnisotropicWaveSolver(object):\n \"\"\"\n Solver object that provides operators for seismic inversion problems\n and encapsulates the time and space discretization for a given problem\n setup.\n\n Parameters\n ----------\n model : Model\n Object containing the physical parameters.\n geometry : AcquisitionGeometry\n Geometry object that contains the source (SparseTimeFunction) and\n receivers (SparseTimeFunction) and their position.\n space_order : int, optional\n Order of the spatial stencil discretisation. 
Defaults to 4.\n\n Notes\n -----\n space_order must be even and it is recommended to be a multiple of 4\n \"\"\"\n def __init__(self, model, geometry, space_order=4, **kwargs):\n self.model = model\n self.model._initialize_bcs(bcs=\"damp\")\n self.geometry = geometry\n\n if space_order % 2 != 0:\n raise ValueError(\"space_order must be even but got %s\"\n % space_order)\n\n if space_order % 4 != 0:\n warning(\"It is recommended for space_order to be a multiple of 4\" +\n \"but got %s\" % space_order)\n\n self.space_order = space_order\n\n # Cache compiler options\n self._kwargs = kwargs\n\n @property\n def dt(self):\n return self.model.critical_dt\n\n @memoized_meth\n def op_fwd(self, kernel='centered', save=False, tteqs=(), **kwargs):\n \"\"\"Cached operator for forward runs with buffered wavefield\"\"\"\n return ForwardOperator(self.model, save=save, geometry=self.geometry,\n space_order=self.space_order,\n kernel=kernel, tteqs=tteqs, **self._kwargs)\n\n @memoized_meth\n def op_adj(self):\n \"\"\"Cached operator for adjoint runs\"\"\"\n return AdjointOperator(self.model, save=None, geometry=self.geometry,\n space_order=self.space_order, **self._kwargs)\n\n def forward(self, src=None, rec=None, u=None, v=None, vp=None,\n epsilon=None, delta=None, theta=None, phi=None,\n save=False, kernel='centered', **kwargs):\n \"\"\"\n Forward modelling function that creates the necessary\n data objects for running a forward modelling operator.\n\n Parameters\n ----------\n geometry : AcquisitionGeometry\n Geometry object that contains the source (SparseTimeFunction) and\n receivers (SparseTimeFunction) and their position.\n u : TimeFunction, optional\n The computed wavefield first component.\n v : TimeFunction, optional\n The computed wavefield second component.\n vp : Function or float, optional\n The time-constant velocity.\n epsilon : Function or float, optional\n The time-constant first Thomsen parameter.\n delta : Function or float, optional\n The time-constant second 
Thomsen parameter.\n theta : Function or float, optional\n The time-constant Dip angle (radians).\n phi : Function or float, optional\n The time-constant Azimuth angle (radians).\n save : bool, optional\n Whether or not to save the entire (unrolled) wavefield.\n kernel : str, optional\n Type of discretization, centered or shifted.\n\n Returns\n -------\n Receiver, wavefield and performance summary.\n \"\"\"\n if kernel == 'staggered':\n time_order = 1\n dims = self.model.space_dimensions\n stagg_u = (-dims[-1])\n stagg_v = (-dims[0], -dims[1]) if self.model.grid.dim == 3 else (-dims[0])\n else:\n time_order = 2\n stagg_u = stagg_v = None\n # Source term is read-only, so re-use the default\n src = src or self.geometry.src\n # Create a new receiver object to store the result\n rec = rec or Receiver(name='rec', grid=self.model.grid,\n time_range=self.geometry.time_axis,\n coordinates=self.geometry.rec_positions)\n # Create the forward wavefield if not provided\n\n if u is None:\n u = TimeFunction(name='u', grid=self.model.grid, staggered=stagg_u,\n save=self.geometry.nt if save else None,\n time_order=time_order,\n space_order=self.space_order)\n # Create the forward wavefield if not provided\n if v is None:\n v = TimeFunction(name='v', grid=self.model.grid, staggered=stagg_v,\n save=self.geometry.nt if save else None,\n time_order=time_order,\n space_order=self.space_order)\n\n print(\"Initial Norm u\", norm(u))\n print(\"Initial Norm v\", norm(v))\n\n if kernel == 'staggered':\n vx, vz, vy = particle_velocity_fields(self.model, self.space_order)\n kwargs[\"vx\"] = vx\n kwargs[\"vz\"] = vz\n if vy is not None:\n kwargs[\"vy\"] = vy\n\n # Pick vp and Thomsen parameters from model unless explicitly provided\n kwargs.update(self.model.physical_params(\n vp=vp, epsilon=epsilon, delta=delta, theta=theta, phi=phi)\n )\n if self.model.dim < 3:\n kwargs.pop('phi', None)\n # Execute operator and return wavefield and receiver data\n\n op = self.op_fwd(kernel, save)\n 
print(kwargs)\n summary = op.apply(src=src, u=u, v=v,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n\n regnormu = norm(u)\n regnormv = norm(v)\n print(\"Norm u:\", regnormu)\n print(\"Norm v:\", regnormv)\n\n if 0:\n cmap = plt.cm.get_cmap(\"viridis\")\n values = u.data[0, :, :, :]\n vistagrid = pv.UniformGrid()\n vistagrid.dimensions = np.array(values.shape) + 1\n vistagrid.spacing = (1, 1, 1)\n vistagrid.origin = (0, 0, 0) # The bottom left corner of the data set\n vistagrid.cell_arrays[\"values\"] = values.flatten(order=\"F\")\n vistaslices = vistagrid.slice_orthogonal()\n vistagrid.plot(show_edges=True)\n vistaslices.plot(cmap=cmap)\n\n print(\"=========================================\") \n\n s_u = TimeFunction(name='s_u', grid=self.model.grid, space_order=self.space_order, time_order=1)\n s_v = TimeFunction(name='s_v', grid=self.model.grid, space_order=self.space_order, time_order=1)\n\n src_u = src.inject(field=s_u.forward, expr=src* self.model.grid.time_dim.spacing**2 / self.model.m)\n src_v = src.inject(field=s_v.forward, expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)\n\n op_f = Operator([src_u, src_v])\n op_f.apply(src=src, dt=kwargs.pop('dt', self.dt))\n\n print(\"Norm s_u\", norm(s_u))\n print(\"Norm s_v\", norm(s_v))\n\n\n # Get the nonzero indices\n nzinds = np.nonzero(s_u.data[0]) # nzinds is a tuple\n assert len(nzinds) == len(self.model.grid.shape)\n shape = self.model.grid.shape\n x, y, z = self.model.grid.dimensions\n time = self.model.grid.time_dim\n t = self.model.grid.stepping_dim\n\n source_mask = Function(name='source_mask', shape=self.model.grid.shape, dimensions=(x, y, z), space_order=0, dtype=np.int32)\n source_id = Function(name='source_id', shape=shape, dimensions=(x, y, z), space_order=0, dtype=np.int32)\n print(\"source_id data indexes start from 0 now !!!\")\n\n # source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(1, len(nzinds[0])+1))\n source_id.data[nzinds[0], nzinds[1], nzinds[2]] = 
tuple(np.arange(len(nzinds[0])))\n\n source_mask.data[nzinds[0], nzinds[1], nzinds[2]] = 1\n # plot3d(source_mask.data, model)\n # import pdb; pdb.set_trace()\n\n print(\"Number of unique affected points is: %d\", len(nzinds[0]))\n\n # Assert that first and last index are as expected\n assert(source_id.data[nzinds[0][0], nzinds[1][0], nzinds[2][0]] == 0)\n assert(source_id.data[nzinds[0][-1], nzinds[1][-1], nzinds[2][-1]] == len(nzinds[0])-1)\n assert(source_id.data[nzinds[0][len(nzinds[0])-1], nzinds[1][len(nzinds[0])-1], nzinds[2][len(nzinds[0])-1]] == len(nzinds[0])-1)\n\n assert(np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(source_mask.data)))\n assert(np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(s_u.data[0])))\n\n print(\"-At this point source_mask and source_id have been popoulated correctly-\")\n\n nnz_shape = (self.model.grid.shape[0], self.model.grid.shape[1])\n\n nnz_sp_source_mask = Function(name='nnz_sp_source_mask', shape=(list(nnz_shape)), dimensions=(x,y ), space_order=0, dtype=np.int32)\n\n nnz_sp_source_mask.data[:, :] = source_mask.data[:, :, :].sum(2)\n inds = np.where(source_mask.data == 1.)\n print(\"Grid - source positions:\", inds)\n maxz = len(np.unique(inds[-1]))\n # Change only 3rd dim\n sparse_shape = (self.model.grid.shape[0], self.model.grid.shape[1], maxz)\n\n assert(len(nnz_sp_source_mask.dimensions) == (len(source_mask.dimensions)-1))\n\n # Note : sparse_source_id is not needed as long as sparse info is kept in mask\n # sp_source_id.data[inds[0],inds[1],:] = inds[2][:maxz]\n\n id_dim = Dimension(name='id_dim')\n b_dim = Dimension(name='b_dim')\n\n save_src_u = TimeFunction(name='save_src_u', shape=(src.shape[0],\n nzinds[1].shape[0]), dimensions=(src.dimensions[0],\n id_dim))\n save_src_v = TimeFunction(name='save_src_v', shape=(src.shape[0],\n nzinds[1].shape[0]), dimensions=(src.dimensions[0],\n id_dim))\n\n save_src_u_term = src.inject(field=save_src_u[src.dimensions[0], source_id],\n expr=src * 
self.model.grid.time_dim.spacing**2 / self.model.m)\n save_src_v_term = src.inject(field=save_src_v[src.dimensions[0], source_id],\n expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)\n\n print(\"Injecting to empty grids\")\n op1 = Operator([save_src_u_term, save_src_v_term])\n op1.apply(src=src, dt=kwargs.pop('dt', self.dt))\n print(\"Injecting to empty grids finished\")\n sp_zi = Dimension(name='sp_zi')\n\n sp_source_mask = Function(name='sp_source_mask', shape=(list(sparse_shape)),\n dimensions=(x, y, sp_zi), space_order=0, dtype=np.int32)\n\n # Now holds IDs\n sp_source_mask.data[inds[0], inds[1], :] = tuple(inds[-1][:len(np.unique(inds[-1]))])\n\n assert(np.count_nonzero(sp_source_mask.data) == len(nzinds[0]))\n assert(len(sp_source_mask.dimensions) == 3)\n\n # import pdb; pdb.set_trace() .\n\n zind = Scalar(name='zind', dtype=np.int32)\n xb_size = Scalar(name='xb_size', dtype=np.int32)\n yb_size = Scalar(name='yb_size', dtype=np.int32)\n x0_blk0_size = Scalar(name='x0_blk0_size', dtype=np.int32)\n y0_blk0_size = Scalar(name='y0_blk0_size', dtype=np.int32)\n\n block_sizes = Function(name='block_sizes', shape=(4, ), dimensions=(b_dim,),\n space_order=0, dtype=np.int32)\n\n bsizes = (8, 8, 32, 32)\n block_sizes.data[:] = bsizes\n\n # eqxb = Eq(xb_size, block_sizes[0])\n # eqyb = Eq(yb_size, block_sizes[1])\n # eqxb2 = Eq(x0_blk0_size, block_sizes[2])\n # eqyb2 = Eq(y0_blk0_size, block_sizes[3])\n\n eq0 = Eq(sp_zi.symbolic_max, nnz_sp_source_mask[x, y] - 1,\n implicit_dims=(time, x, y))\n # eq1 = Eq(zind, sp_source_mask[x, sp_zi], implicit_dims=(time, x, sp_zi))\n eq1 = Eq(zind, sp_source_mask[x, y, sp_zi], implicit_dims=(time, x, y, sp_zi))\n\n inj_u = source_mask[x, y, zind] * save_src_u[time, source_id[x, y, zind]]\n inj_v = source_mask[x, y, zind] * save_src_v[time, source_id[x, y, zind]]\n\n eq_u = Inc(u.forward[t+1, x, y, zind], inj_u, implicit_dims=(time, x, y, sp_zi))\n eq_v = Inc(v.forward[t+1, x, y, zind], inj_v, implicit_dims=(time, x, y, 
sp_zi))\n\n # The additional time-tiling equations\n # tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u, eq_v)\n\n performance_map = np.array([[0, 0, 0, 0, 0]])\n\n bxstart = 4\n bxend = 17\n bystart = 4\n byend = 17\n bstep = 16\n\n txstart = 8\n txend = 9\n tystart = 8\n tyend = 9\n\n tstep = 16\n # Temporal autotuning\n for tx in range(txstart, txend, tstep):\n # import pdb; pdb.set_trace()\n for ty in range(tystart, tyend, tstep):\n for bx in range(bxstart, bxend, bstep):\n for by in range(bystart, byend, bstep):\n\n block_sizes.data[:] = [tx, ty, bx, by]\n\n eqxb = Eq(xb_size, block_sizes[0])\n eqyb = Eq(yb_size, block_sizes[1])\n eqxb2 = Eq(x0_blk0_size, block_sizes[2])\n eqyb2 = Eq(y0_blk0_size, block_sizes[3])\n\n u.data[:] = 0\n v.data[:] = 0\n print(\"-----\")\n tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u, eq_v)\n\n op_tt = self.op_fwd(kernel, save, tteqs)\n summary_tt = op_tt.apply(u=u, v=v,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n norm_tt_u = norm(u)\n norm_tt_v = norm(v)\n print(\"Norm u:\", regnormu)\n print(\"Norm v:\", regnormv)\n print(\"Norm(tt_u):\", norm_tt_u)\n print(\"Norm(tt_v):\", norm_tt_v)\n\n print(\"===Temporal blocking======================================\")\n\n performance_map = np.append(performance_map, [[tx, ty, bx, by, summary_tt.globals['fdlike'].gflopss]], 0)\n\n\n print(performance_map)\n # tids = np.unique(performance_map[:, 0])\n\n #for tid in tids:\n bids = np.where((performance_map[:, 0] == tx) & (performance_map[:, 1] == ty))\n bx_data = np.unique(performance_map[bids, 2])\n by_data = np.unique(performance_map[bids, 3])\n gptss_data = performance_map[bids, 4]\n gptss_data = gptss_data.reshape(len(bx_data), len(by_data))\n\n fig, ax = plt.subplots()\n im = ax.imshow(gptss_data); pause(2)\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(len(bx_data)))\n ax.set_yticks(np.arange(len(by_data)))\n # ... 
and label them with the respective list entries\n ax.set_xticklabels(bx_data)\n ax.set_yticklabels(by_data)\n\n ax.set_title(\"Gpts/s for fixed tile size. (Sweeping block sizes)\")\n fig.tight_layout()\n\n fig.colorbar(im, ax=ax)\n # ax = sns.heatmap(gptss_data, linewidth=0.5)\n plt.savefig(str(shape[0]) + str(np.int32(tx)) + str(np.int32(ty)) + \".pdf\")\n\n\n if 0:\n cmap = plt.cm.get_cmap(\"viridis\")\n values = u.data[0, :, :, :]\n vistagrid = pv.UniformGrid()\n vistagrid.dimensions = np.array(values.shape) + 1\n vistagrid.spacing = (1, 1, 1)\n vistagrid.origin = (0, 0, 0) # The bottom left corner of the data set\n vistagrid.cell_arrays[\"values\"] = values.flatten(order=\"F\")\n vistaslices = vistagrid.slice_orthogonal()\n vistagrid.plot(show_edges=True)\n vistaslices.plot(cmap=cmap)\n\n return rec, u, v, summary\n\n def adjoint(self, rec, srca=None, p=None, r=None, vp=None,\n epsilon=None, delta=None, theta=None, phi=None,\n save=None, kernel='centered', **kwargs):\n \"\"\"\n Adjoint modelling function that creates the necessary\n data objects for running an adjoint modelling operator.\n\n Parameters\n ----------\n geometry : AcquisitionGeometry\n Geometry object that contains the source (SparseTimeFunction) and\n receivers (SparseTimeFunction) and their position.\n p : TimeFunction, optional\n The computed wavefield first component.\n r : TimeFunction, optional\n The computed wavefield second component.\n vp : Function or float, optional\n The time-constant velocity.\n epsilon : Function or float, optional\n The time-constant first Thomsen parameter.\n delta : Function or float, optional\n The time-constant second Thomsen parameter.\n theta : Function or float, optional\n The time-constant Dip angle (radians).\n phi : Function or float, optional\n The time-constant Azimuth angle (radians).\n\n Returns\n -------\n Adjoint source, wavefield and performance summary.\n \"\"\"\n if kernel != 'centered':\n raise RuntimeError('Only centered kernel is supported for 
the adjoint')\n\n time_order = 2\n stagg_p = stagg_r = None\n # Source term is read-only, so re-use the default\n srca = srca or self.geometry.new_src(name='srca', src_type=None)\n\n # Create the wavefield if not provided\n if p is None:\n p = TimeFunction(name='p', grid=self.model.grid, staggered=stagg_p,\n time_order=time_order,\n space_order=self.space_order)\n # Create the wavefield if not provided\n if r is None:\n r = TimeFunction(name='r', grid=self.model.grid, staggered=stagg_r,\n time_order=time_order,\n space_order=self.space_order)\n\n # Pick vp and Thomsen parameters from model unless explicitly provided\n kwargs.update(self.model.physical_params(\n vp=vp, epsilon=epsilon, delta=delta, theta=theta, phi=phi)\n )\n if self.model.dim < 3:\n kwargs.pop('phi', None)\n # Execute operator and return wavefield and receiver data\n summary = self.op_adj().apply(srca=srca, rec=rec, p=p, r=r,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n return srca, p, r, summary\n","sub_path":"examples/seismic/tti/wavesolver.py","file_name":"wavesolver.py","file_ext":"py","file_size_in_byte":19232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"135524511","text":"\n# Multiple Linear Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('datasetforknn.csv')\nX_test= [{\"Year\":2009,\"RAM\":2,\"HDD\":180,\"Location\":'USA',\"Warranty\":3,\"Age\":40}]\n\nx_t=[[X_test[0]['Year'], X_test[0]['RAM'],X_test[0]['HDD'],X_test[0]['Location'], X_test[0]['Warranty'], X_test[0]['Age']]]\n\nX = dataset.iloc[:, 3:9].values\nxtemp=np.ndarray.tolist(X)\nxtemp=xtemp+x_t\nxtemp=np.array(xtemp)\nX=xtemp\n\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X = LabelEncoder()\nX[:,0]=labelencoder_X.fit_transform(X[:,0])\nX[:,1]=labelencoder_X.fit_transform(X[:,1])\nX[:,2]=labelencoder_X.fit_transform(X[:,2])\nX[:,3]=labelencoder_X.fit_transform(X[:,3])\n\n\n\n\n\n\nonehotencoder = OneHotEncoder(categorical_features = [0])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n\nonehotencoder = OneHotEncoder(categorical_features=[9])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n\nonehotencoder = OneHotEncoder(categorical_features=[13])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n\nonehotencoder = OneHotEncoder(categorical_features=[18])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n\n\n\ny=dataset.iloc[:,12].values\n\nE=X[:-1,:]\n# Fitting K-NN to the Training set\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)\nclassifier.fit(E, y)\n\n\nX_yo=X[2000]\n# Predicting the Test set results\n\nX_yo=X_yo.reshape(1, -1)\ny_pred = classifier.predict(X_yo)\n\n","sub_path":"knnsinglevalue.py","file_name":"knnsinglevalue.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"86729613","text":"import math\nimport copy\nimport numpy.random as npRand\n\nfrom typing import Callable\n\nfrom microsim.education import Education\nfrom microsim.gender import NHANESGender\nfrom microsim.outcome import Outcome, OutcomeType\nfrom microsim.race_ethnicity import NHANESRaceEthnicity\nfrom microsim.smoking_status import SmokingStatus\nfrom microsim.alcohol_category import AlcoholCategory\n\n# luciana-tag...lne thing that tripped me up was probable non clear communication regarding \"waves\"\n# so, i'm going to spell it out here and try to make the code consistent.\n# a patient starts in teh simulation prior to a wave with their baseline attribute statuses(i.e subscript [0])\n# wave \"1\" refers to the transition from subscript[0] to subscript[1]\n# wave \"2\" the transition from subscript[1] to subscript[2]\n# thus, the pateint's status at the start of wave 1 is represented by subscript[0]\n# and the patient status at the end of wave 1 is represtened by subscript[1]\n# if a patient has an event during a wave, that means they will not have the status at the start of the wave\n# and they will have the status at the end of the wave.\n# so, if a patient has an event during wave 1, their status would be Negatve at subscript[0] and\n# Positive at subscript[1]\n\n\nclass Person:\n \"\"\"Person is using risk factors and demographics based off NHANES\"\"\"\n\n def __init__(\n self,\n age: int,\n gender: NHANESGender,\n raceEthnicity: NHANESRaceEthnicity,\n sbp: int,\n dbp: int,\n a1c: float,\n hdl: int,\n totChol: int,\n bmi: float,\n ldl: int,\n trig: int,\n waist: int, # Waist circumference in cm\n anyPhysicalActivity: int,\n education: Education,\n smokingStatus: SmokingStatus,\n alcohol: AlcoholCategory,\n antiHypertensiveCount: int,\n statin: int,\n otherLipidLoweringMedicationCount: int,\n initializeAfib: Callable,\n selfReportStrokeAge=None,\n selfReportMIAge=None,\n **kwargs,\n ) -> None:\n\n # building in manual bounds on extreme values\n 
self._lowerBounds = {\"sbp\": 60, \"dbp\": 20}\n self._upperBounds = {\"sbp\": 300, \"dbp\": 180}\n\n self._gender = gender\n self._raceEthnicity = raceEthnicity\n\n self._alive = [True]\n\n self._age = [age]\n self._sbp = [self.apply_bounds(\"sbp\", sbp)]\n self._dbp = [self.apply_bounds(\"dbp\", dbp)]\n self._a1c = [a1c]\n self._hdl = [hdl]\n self._ldl = [ldl]\n self._trig = [trig]\n self._totChol = [totChol]\n self._bmi = [bmi]\n self._waist = [waist]\n self._anyPhysicalActivity = [anyPhysicalActivity]\n self._alcoholPerWeek = [alcohol]\n self._education = education\n # TODO : change smoking status into a factor that changes over time\n self._smokingStatus = smokingStatus\n self._antiHypertensiveCount = [antiHypertensiveCount]\n self._statin = [statin]\n self._otherLipidLoweringMedicationCount = [otherLipidLoweringMedicationCount]\n\n # outcomes is a dictionary of arrays. each element in the dictionary represents\n # a differnet outcome type each element in the array is a tuple representting\n # the age of the patient at the time of an event (element zero). 
and the outcome\n # (element one).multiple events can be accounted for by having multiple\n # elements in the array.\n self._outcomes = {OutcomeType.MI: [], OutcomeType.STROKE: []}\n self._selfReportStrokePriorToSim = 0\n self._selfReportMIPriorToSim = 0\n\n # convert events for events prior to simulation\n if selfReportStrokeAge is not None and selfReportStrokeAge > 1:\n self._selfReportStrokePriorToSim = 1\n self._outcomes[OutcomeType.STROKE].append((-1, Outcome(OutcomeType.STROKE, False)))\n if selfReportMIAge is not None and selfReportMIAge > 1:\n self._selfReportMIPriorToSim = 1\n self._outcomes[OutcomeType.MI].append((-1, Outcome(OutcomeType.MI, False)))\n for k, v in kwargs.items():\n setattr(self, k, v)\n if initializeAfib is not None:\n self._afib = [initializeAfib(self)]\n else:\n self._afib = [False]\n\n self._gcp = []\n # for outcome mocels that require random effects, store in this dictionary\n self._randomEffects = dict()\n\n self._bpTreatmentStrategy = None\n\n def reset_to_baseline(self):\n self._alive = [True]\n self._age = [self._age[0]]\n self._sbp = [self._sbp[0]]\n self._dbp = [self._dbp[0]]\n self._a1c = [self._a1c[0]]\n self._hdl = [self._hdl[0]]\n self._ldl = [self._ldl[0]]\n self._trig = [self._trig[0]]\n self._totChol = [self._totChol[0]]\n self._bmi = [self._bmi[0]]\n self._waist = [self._waist[0]]\n self._anyPhysicalActivity = [self._anyPhysicalActivity[0]]\n self._antiHypertensiveCount = [self._antiHypertensiveCount[0]]\n self._alcoholPerWeek = [self._alcoholPerWeek[0]]\n self._statin = [self._statin[0]]\n self._otherLipidLoweringMedicationCount = [self._otherLipidLoweringMedicationCount[0]]\n self._bpTreatmentStrategy = None\n self._gcp = []\n\n # iterate through outcomes and remove those that occured after the simulation started\n for type, outcomes_for_type in self._outcomes.items():\n self._outcomes[type] = list(\n filter(lambda outcome: outcome[0] < self._age[0], outcomes_for_type))\n\n @property\n def _mi(self):\n return 
len(self._outcomes[OutcomeType.MI]) > 0\n\n @property\n def _stroke(self):\n return len(self._outcomes[OutcomeType.STROKE]) > 0\n\n def get_median_age(self):\n medianYear = math.floor(len(self._age) / 2)\n return self._age[medianYear]\n\n def allhat_candidate(self, end_of_wave_num):\n return (self._age[end_of_wave_num] > 55) and \\\n (self._sbp[end_of_wave_num > 140 and self._sbp[end_of_wave_num] < 180]) and \\\n (self._dbp[end_of_wave_num] > 90 and self._dbp[end_of_wave_num] < 110) and \\\n (self._smokingStatus == SmokingStatus.CURRENT or self._a1c[end_of_wave_num] > 6.5 or\n self.has_stroke_prior_to_simulation() or self.has_mi_prior_to_simulation or\n self._hdl[end_of_wave_num] < 35)\n\n def has_diabetes(self):\n return sorted(self._a1c)[-1] >= 6.5\n\n def years_in_simulation(self):\n return len(self._age) - 1\n\n def get_next_risk_factor(self, riskFactor, risk_model_repository):\n model = risk_model_repository.get_model(riskFactor)\n return model.estimate_next_risk(self)\n\n def apply_bounds(self, varName, varValue):\n \"\"\"\n Ensures that risk factor are within static prespecified bounds.\n\n Other algorithms might be needed in the future to avoid pooling in the tails,\n if there are many extreme risk factor results.\n \"\"\"\n if varName in self._upperBounds:\n upperBound = self._upperBounds[varName]\n varValue = varValue if varValue < upperBound else upperBound\n if varName in self._lowerBounds:\n lowerBound = self._lowerBounds[varName]\n varValue = varValue if varValue > lowerBound else lowerBound\n return varValue\n\n def advance_year(self, risk_model_repository, outcome_model_repository):\n # print(f\"advance_year on person, age: {self._age[0]} sbp : {self._sbp[0]}\")\n if self.is_dead():\n raise RuntimeError(\"Person is dead. 
Can not advance year\")\n\n # initialize random effects if they haven't already been initialized and this is our first year\n if self.years_in_simulation() == 0 and len(self._randomEffects) == 0:\n self._randomEffects = outcome_model_repository.get_random_effects()\n\n self.advance_risk_factors(risk_model_repository)\n self.advance_treatment(risk_model_repository)\n self.advance_outcomes(outcome_model_repository)\n if not self.is_dead():\n self._age.append(self._age[-1] + 1)\n self._alive.append(True)\n\n def is_dead(self):\n return not self._alive[-1]\n\n # this method is trying to enable simple logic in the popuation.\n # when the population asks, \"who is alive at a given time point?\" it can't merely check\n # the index on person._alive, because people who died prior to that time will not have an index\n # in alive at that time.\n\n def alive_at_start_of_wave(self, start_wave_num):\n if (self._alive[-1]) and (start_wave_num > (len(self._age))):\n raise Exception(\n f\"Trying to find status for a wave: {start_wave_num} beyond current wave: {len(self._age)}\")\n\n # we always know, regardless of what wave is being inquired about, that a person who was once dead\n # is still dead\n if (self.is_dead()) and (start_wave_num > len(self._alive) - 1):\n return False\n else:\n # this returns whether one was alive at the start of a given wave (i.e. 
the end of theprior wave)\n return self._alive[start_wave_num-1]\n\n def has_outcome_prior_to_simulation(self, outcomeType):\n return any([ageAtEvent < 0 for ageAtEvent, _ in self._outcomes[outcomeType]])\n\n def has_outcome_during_simulation(self, outcomeType):\n return any([ageAtEvent >= 0 for ageAtEvent, _ in self._outcomes[outcomeType]])\n\n def has_outcome_at_any_time(self, outcomeType):\n return len(self._outcomes[outcomeType]) > 0\n\n def has_stroke_prior_to_simulation(self):\n return self.has_outcome_prior_to_simulation(OutcomeType.STROKE)\n\n def has_stroke_during_simulation(self):\n return self.has_outcome_during_simulation(OutcomeType.STROKE)\n\n def has_stroke_during_wave(self, wave):\n return self.has_outcome_during_wave(wave, OutcomeType.STROKE)\n\n def has_mi_during_wave(self, wave):\n return self.has_outcome_during_wave(wave, OutcomeType.MI)\n\n def has_outcome_during_wave(self, wave, outcomeType):\n if (wave <= 0) or (self._alive[-1] and wave > len(self._age)-1):\n raise Exception(\n f\"Can not have an event in a wave ({wave}) before 1 or after last wave ({len(self._age)-1}) for person\")\n elif (not self._alive[-1]) and (wave > len(self._age)):\n return False\n return (len(self._outcomes[outcomeType]) != 0 and\n self.has_outcome_at_age(outcomeType, self._age[wave-1]))\n\n def has_outcome_at_age(self, type, age):\n for outcome_tuple in self._outcomes[type]:\n if outcome_tuple[0] == age:\n return True\n return False\n\n def has_fatal_stroke(self):\n return any([stroke.fatal for _, stroke in self._outcomes[OutcomeType.STROKE]])\n\n def has_fatal_mi(self):\n return any([mi.fatal for _, mi in self._outcomes[OutcomeType.MI]])\n\n def has_mi_prior_to_simulation(self):\n return self.has_outcome_prior_to_simulation(OutcomeType.MI)\n\n def has_mi_during_simulation(self):\n return self.has_outcome_during_simulation(OutcomeType.MI)\n\n # should only occur immediately after an event is created — we can't roll back the subsequent implicaitons of an event.\n def 
rollback_most_recent_event(self, outcomeType):\n # get rid of the outcome event...\n outcomes_for_type = list(self._outcomes[outcomeType])\n outcome_rolled_back = self._outcomes[outcomeType].pop()\n # if the patient died during the wave, then their age didn't advance and their event would be at their\n # age at teh start of the wave.\n rollbackAge = self._age[-1]-1 if self._alive[-1] else self._age[-1]\n if rollbackAge != outcome_rolled_back[0]:\n raise Exception(\n f'# of outcomes: {len(outcomes_for_type)} while trying to rollback event at age {outcome_rolled_back[0]}, but current age is {rollbackAge} - can not roll back if age has changed')\n\n # and, if it was fatal, reset the person to being alive.\n if (outcome_rolled_back)[1].fatal:\n self._alive[-1] = True\n self._age.append(self._age[-1]+1)\n\n def advance_treatment(self, risk_model_repository):\n if (risk_model_repository is not None):\n new_antihypertensive_count = self.get_next_risk_factor(\n \"antiHypertensiveCount\",\n risk_model_repository\n )\n self._antiHypertensiveCount.append(new_antihypertensive_count)\n\n if self._bpTreatmentStrategy is not None:\n treatment_modifications, risk_factor_modifications, recalibration_standards = self._bpTreatmentStrategy(\n self)\n self.apply_linear_modifications(treatment_modifications)\n self.apply_linear_modifications(risk_factor_modifications)\n # simple starting assumption...a treatment is applied once and has a persistent effect\n # so, the treastment strategy is nulled out after being applied\n self._bpTreatmentStrategy = None\n\n def apply_linear_modifications(self, modifications):\n for key, value in modifications.items():\n attribute_value = getattr(self, key)\n attribute_value[-1] = attribute_value[-1] + value\n\n def advance_risk_factors(self, risk_model_repository):\n if self.is_dead():\n raise RuntimeError(\"Person is dead. 
Can not advance risk factors\")\n\n self._sbp.append(self.apply_bounds(\n \"sbp\", self.get_next_risk_factor(\"sbp\", risk_model_repository)))\n\n self._dbp.append(self.apply_bounds(\n \"dbp\", self.get_next_risk_factor(\"dbp\", risk_model_repository)))\n self._a1c.append(self.get_next_risk_factor(\"a1c\", risk_model_repository))\n self._hdl.append(self.get_next_risk_factor(\"hdl\", risk_model_repository))\n self._totChol.append(self.get_next_risk_factor(\"totChol\", risk_model_repository))\n self._bmi.append(self.get_next_risk_factor(\"bmi\", risk_model_repository))\n self._ldl.append(self.get_next_risk_factor(\"ldl\", risk_model_repository))\n self._trig.append(self.get_next_risk_factor(\"trig\", risk_model_repository))\n self._waist.append(self.get_next_risk_factor(\"waist\", risk_model_repository))\n self._anyPhysicalActivity.append(\n self.get_next_risk_factor(\n \"anyPhysicalActivity\",\n risk_model_repository))\n self._afib.append(self.get_next_risk_factor(\"afib\", risk_model_repository))\n self._statin.append(self.get_next_risk_factor(\"statin\", risk_model_repository))\n self._alcoholPerWeek.append(self.get_next_risk_factor(\n \"alcoholPerWeek\", risk_model_repository))\n\n # redraw from models to pick new risk factors for person\n\n def slightly_randomly_modify_baseline_risk_factors(self, risk_model_repository):\n if (len(self._age) > 1):\n raise RuntimeError(\"Can not reset risk factors after advancing person in time\")\n\n return Person(age=self._age[0] + npRand.randint(-2, 2),\n gender=self._gender,\n raceEthnicity=self._raceEthnicity,\n sbp=self.get_next_risk_factor(\"sbp\", risk_model_repository),\n dbp=self.get_next_risk_factor(\"dbp\", risk_model_repository),\n a1c=self.get_next_risk_factor(\"a1c\", risk_model_repository),\n hdl=self.get_next_risk_factor(\"hdl\", risk_model_repository),\n totChol=self.get_next_risk_factor(\"totChol\", risk_model_repository),\n bmi=self.get_next_risk_factor(\"bmi\", risk_model_repository),\n 
ldl=self.get_next_risk_factor(\"ldl\", risk_model_repository),\n trig=self.get_next_risk_factor(\"trig\", risk_model_repository),\n waist=self.get_next_risk_factor(\"waist\", risk_model_repository),\n anyPhysicalActivity=self.get_next_risk_factor(\n \"anyPhysicalActivity\", risk_model_repository),\n education=self._education,\n smokingStatus=self._smokingStatus,\n alcohol=self._alcoholPerWeek[0],\n antiHypertensiveCount=self.get_next_risk_factor(\n \"antiHypertensiveCount\", risk_model_repository),\n statin=self.get_next_risk_factor(\"statin\", risk_model_repository),\n otherLipidLoweringMedicationCount=self._otherLipidLoweringMedicationCount,\n initializeAfib=(lambda _: False),\n selfReportStrokeAge=50 if self._outcomes[OutcomeType.STROKE] is not None else None,\n selfReportMIAge=50 if self._outcomes[OutcomeType.MI] is not None else None)\n\n def advance_outcomes(\n self,\n outcome_model_repository):\n if self.is_dead():\n raise RuntimeError(\"Person is dead. Can not advance outcomes\")\n\n # first determine if there is a cv event\n cv_event = outcome_model_repository.assign_cv_outcome(self)\n if cv_event is not None:\n self.add_outcome_event(cv_event)\n\n # then assign gcp\n self._gcp.append(outcome_model_repository.get_gcp(self))\n\n # if not dead from the CV event...assess non CV mortality\n if (not self.is_dead()):\n non_cv_death = outcome_model_repository.assign_non_cv_mortality(self)\n if (non_cv_death):\n self._alive.append(False)\n\n def add_outcome_event(self, cv_event):\n self._outcomes[cv_event.type].append((self._age[-1], cv_event))\n if cv_event.fatal:\n self._alive.append(False)\n\n # Using this paper...glucose and a1c are highly related\n # Nathan, D. M., Kuenen, J., Borg, R., Zheng, H., Schoenfeld, D., Heine, R. J., for the A1c-Derived Average Glucose (ADAG) Study Group. (2008). Translating the A1C Assay Into Estimated Average Glucose Values. 
Diabetes Care, 31(8), 1473–1478.\n # so, will use their formula + a draw from residual distribution fo same moddel in NHANES (which has very simnilar coefficients)\n\n @staticmethod\n def convert_fasting_glucose_to_a1c(glucose):\n return (glucose + 46.7)/28.7\n\n @staticmethod\n def convert_a1c_to_fasting_glucose(a1c):\n return 28.7 * a1c - 46.7\n\n def get_fasting_glucose(self, use_residual=True):\n glucose = Person.convert_a1c_to_fasting_glucose(self._a1c[-1])\n if use_residual:\n glucose += npRand.normal(0, 21)\n return glucose\n\n def __repr__(self):\n return (f\"Person(age={self._age[-1]}, \"\n f\"gender={self._gender}, \"\n f\"race/eth={self._raceEthnicity}, \"\n f\"sbp={self._sbp[-1]:.1f}, \"\n f\"dbp={self._dbp[-1]:.1f}, \"\n f\"a1c={self._a1c[-1]:.1f}, \"\n f\"hdl={self._hdl[-1]:.1f}, \"\n f\"totChol={self._totChol[-1]:.1f}, \"\n f\"bmi={self._bmi[-1]:.1f}, \"\n f\"ldl={self._ldl[-1]:.1f}, \"\n f\"trig={self._trig[-1]:.1f}, \"\n f\"smoking={SmokingStatus(self._smokingStatus)}, \"\n f\"waist={self._waist[-1]}, \"\n f\"anyPhysicalActivity={self._anyPhysicalActivity[-1]}, \"\n f\"alcohol={AlcoholCategory(self._alcoholPerWeek[-1])}, \"\n f\"education={Education(self._education)}, \"\n f\"antiHypertensiveCount={self._antiHypertensiveCount[-1]}, \"\n f\"otherLipid={self._otherLipidLoweringMedicationCount[-1]}, \"\n f\"statin={self._statin[-1]}\"\n f\")\")\n\n def __ne__(self, obj):\n return not self == obj\n\n # luciana tag...the nice part about this method is that its highly transparent\n # the not so nice part is that if we add an attribute you have to add it here...\n def __eq__(self, other):\n if not isinstance(other, Person):\n return NotImplemented\n if not other._age == self._age:\n return False\n if not other._gender == self._gender:\n return False\n if not other._raceEthnicity == self._raceEthnicity:\n return False\n if not other._sbp == self._sbp:\n return False\n if not other._dbp == self._dbp:\n return False\n if not other._a1c == self._a1c:\n return 
False\n if not other._hdl == self._hdl:\n return False\n if not other._totChol == self._totChol:\n return False\n if not other._bmi == self._bmi:\n return False\n if not other._ldl == self._ldl:\n return False\n if not other._trig == self._trig:\n return False\n if not other._waist == self._waist:\n return False\n if not other._anyPhysicalActivity == self._anyPhysicalActivity:\n return False\n if not other._education == self._education:\n return False\n if not other._smokingStatus == self._smokingStatus:\n return False\n if not other._alcoholPerWeek == self._alcoholPerWeek:\n return False\n if not other._antiHypertensiveCount == self._antiHypertensiveCount:\n return False\n if not other._statin == self._statin:\n return False\n if not other._otherLipidLoweringMedicationCount == self._otherLipidLoweringMedicationCount:\n return False\n if not other._afib == self._afib:\n return False\n if not other._alive == self._alive:\n return False\n if not other._gcp == self._gcp:\n return False\n if not other._randomEffects == self._randomEffects:\n return False\n return other._outcomes == self._outcomes\n\n # luciana tag...there is almost definitely a better way to do this..\n def __deepcopy__(self, memo):\n selfCopy = Person(age=0, gender=None, raceEthnicity=None, sbp=0, dbp=0, a1c=0, hdl=0, totChol=0,\n bmi=0, ldl=0, trig=0, waist=0, anyPhysicalActivity=0, education=None,\n smokingStatus=None, alcohol=None, antiHypertensiveCount=0, statin=0, otherLipidLoweringMedicationCount=0,\n initializeAfib=None)\n selfCopy._lowerBounds = self._lowerBounds\n selfCopy._upperBounds = self._upperBounds\n selfCopy._gender = copy.deepcopy(self._gender)\n selfCopy._raceEthnicity = copy.deepcopy(self._raceEthnicity)\n selfCopy._alive = copy.deepcopy(self._alive)\n selfCopy._age = copy.deepcopy(self._age)\n selfCopy._sbp = copy.deepcopy(self._sbp)\n selfCopy._dbp = copy.deepcopy(self._dbp)\n selfCopy._a1c = copy.deepcopy(self._a1c)\n selfCopy._hdl = copy.deepcopy(self._hdl)\n selfCopy._ldl = 
copy.deepcopy(self._ldl)\n selfCopy._trig = copy.deepcopy(self._trig)\n selfCopy._totChol = copy.deepcopy(self._totChol)\n selfCopy._waist = copy.deepcopy(self._waist)\n selfCopy._bmi = copy.deepcopy(self._bmi)\n selfCopy._anyPhysicalActivity = copy.deepcopy(self._anyPhysicalActivity)\n selfCopy._education = copy.deepcopy(self._education)\n selfCopy._smokingStatus = copy.deepcopy(self._smokingStatus)\n selfCopy._alcoholPerWeek = copy.deepcopy(self._alcoholPerWeek)\n selfCopy._antiHypertensiveCount = copy.deepcopy(self._antiHypertensiveCount)\n selfCopy._statin = copy.deepcopy(self._statin)\n selfCopy._otherLipidLoweringMedicationCount = copy.deepcopy(\n self._otherLipidLoweringMedicationCount)\n selfCopy._outcomes = copy.deepcopy(self._outcomes)\n selfCopy._selfReportStrokePriorToSim = copy.deepcopy(self._selfReportStrokePriorToSim)\n selfCopy._selfReportMIPriorToSim = copy.deepcopy(self._selfReportMIPriorToSim)\n selfCopy._afib = self._afib\n selfCopy._bpTreatmentStrategy = self._bpTreatmentStrategy\n selfCopy._gcp = copy.deepcopy(self._gcp)\n selfCopy._randomEffects = copy.deepcopy(self._randomEffects)\n\n return selfCopy\n","sub_path":"microsim/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":24120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"126324529","text":"from django.shortcuts import render\nfrom django.views import View\nfrom datetime import datetime\n# Create your views here.\n\n\nclass OrdersView(View):\n def get(self, request):\n variable = 'Django'\n today_date = datetime.now()\n data = {\n 'orders': [\n {'title': 'Первый заказ', 'id': 1},\n {'title': 'Второй заказ', 'id': 2},\n {'title': 'Третий заказ', 'id': 3}\n ]\n }\n return render(request, 'orders.html', locals())\n\n\nclass OrderView(View):\n def get(self, request, id):\n variable = 'Django'\n today_date = datetime.now()\n data = {\n 'order': {\n 'id': id\n }\n }\n return render(request, 'order.html', locals())\n\n\ndef main(request):\n return render(request, 'main.html', locals())\n\n\ndef prog_lang(request, id):\n name = ['iPhone 7', 'iPhone 8', 'iPhone X']\n ip7_info = 'В iPhone 7 все важнейшие аспекты iPhone значительно улучшены. Это принципиально новая система камер для фото и видеосъемки. Максимально мощный и экономичный аккумулятор. Стереодинамики с богатым звучанием. Самый яркий и разноцветный из всех дисплеев iPhone. Защита от брызг и воды. И его внешние данные впечатляют не менее, чем внутренние возможности. Все это iPhone 7. '\n ip8_info = 'Для iPhone 8 мы разработали совершенно новый дизайн, в котором передняя и задняя панели выполнены из стекла. Самая популярная камера усовершенствована. Установлен самый умный и мощный процессор, когда-либо созданный для iPhone. Без проводов процесс зарядки становится элементарным. А дополненная реальность открывает невиданные до сих пор возможности. iPhone 8. Новое поколение iPhone.'\n ipX_info = 'Мы всегда мечтали сделать iPhone одним большим дисплеем. Настолько впечатляющим дисплеем, чтобы вы забывали о самом физическом устройстве. И настолько умным устройством, чтобы оно реагировало на прикосновение, слово и даже взгляд. iPhone X воплощает мечту в реальность. 
Это смартфон будущего.'\n info = [ip7_info, ip8_info, ipX_info]\n data1 = {'lang': {'id': id}}\n data2 = {'langs': [{'id': '1', 'lang_name': 'iPhone 7', 'info': ip7_info},\n {'id': '2', 'lang_name': 'iPhone 8', 'info': ip8_info},\n {'id': '3', 'lang_name': 'iPhone X', 'info': ipX_info}]}\n return render(request, 'prog_lang.html', locals())\n","sub_path":"my_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"543847082","text":"import unittest\nfrom unittest.mock import patch\nfrom skygear.transmitter.encoding import deserialize_record\n\nfrom ..pubsub import _publish_event\n\n\nclass TestPublishEvent(unittest.TestCase):\n\n def record(self):\n return deserialize_record({\n '_id': 'message/1',\n '_access': None,\n '_ownerID': 'user1',\n 'conversation_id': 'conversation1',\n 'body': 'hihi'\n })\n\n @patch('chat.pubsub.publish')\n @patch('chat.pubsub._get_channel_by_user_id')\n def test_pubsub_publish_called(\n self, mock_publish, mock_get_channel_by_user_id):\n mock_get_channel_by_user_id.return_value = 'channel1'\n _publish_event('user1', 'message', 'create', self.record())\n self.assertIs(mock_publish.call_count, 1)\n","sub_path":"chat/test/test_publish_event.py","file_name":"test_publish_event.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"407566562","text":"# weather module\nimport pywapi\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4)\n\n\n# command weather action\ndef cmd_weather_action(b, update):\n forecast = get_forecast_for('ISXX0030')\n b.sendMessage(chat_id=update.message.chat_id, text=forecast)\n\n\ndef get_forecast_for(loc_id):\n # type: (str) -> none\n wdic = pywapi.get_weather_from_weather_com(loc_id, 'metric')\n loc = get_current(wdic, 'station')\n temperature = get_current(wdic, 'feels_like')\n temperature_units = get_units(wdic, 'temperature')\n forecast = \"Forecast for {0}\\nFeels like: {1} {2}\".format(loc, temperature, str(temperature_units).lower())\n\n text = wdic['current_conditions']['text']\n if text: # text is not empty\n forecast += \"\\n\" + text + \".\"\n if forecast is None:\n forecast = \"Can't access the weather right now, please try again.\"\n return forecast\n\n\ndef get_current(weather_dic, field):\n # type: (str, str) -> str\n return weather_dic['current_conditions'][field]\n\n\ndef get_units(weather_dic, field):\n # type: (str, str) -> str\n return weather_dic['units'][field]\n\n'''\n'ISXX0030' = be'er sheva\n'''\n","sub_path":"commands/cmd_weather.py","file_name":"cmd_weather.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"612888207","text":"# -*- coding: utf-8 -*-\nimport math\nn1=int(input('Digite um número qualquer:'))\nd=0\nd1=0\nd2=0 \nwhile i<=n1:\n if i%n1==0:\n d1=d1+1\nprint(d1)\n ","sub_path":"moodledata/vpl_data/103/usersdata/245/50127/submittedfiles/av1_3.py","file_name":"av1_3.py","file_ext":"py","file_size_in_byte":155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"163492913","text":"\"\"\"A circular road has N positions labeled 0 through N−1 where adjacent positions are connected to each other and position N−1\r\n is connected to 0. M cars start at position 0 through M−1 (inclusive). A car can make a valid move by moving forward one position \r\n (or goes from N-1 to 0) if the position it is moving into is empty. At each turn, only consider cars that have a valid move available \r\n and make one of the valid moves that you choose randomly with equal probability. After T rounds, we compute the average (A) and \r\n standard deviation (S) of the position of the cars.What is the expected value of A and S when N=10, M=5, and T=20\"\"\"\r\n\r\n\r\nimport numpy as np\r\n\r\nclass car:\r\n def __init__(self,name,pos,N):\r\n self.namecar = name\r\n self.poscar = pos\r\n self.N=N\r\n \r\n def move(self,n):\r\n if(n>self.N-1): print(\"error\")\r\n self.poscar = n\r\n \r\n \r\nclass street:\r\n def __init__(self,N,M):\r\n self.posfree=[]\r\n self.posocc=[]\r\n for i in range (0,N):\r\n if(i>=M):\r\n self.posfree.append(i)\r\n else:\r\n self.posocc.append(i)\r\n self.N=N \r\n \r\n def randomcar(self):\r\n p=0\r\n self.random=self.posfree.copy()\r\n while (p==0):\r\n n= np.random.choice(self.random)-1#position of the car that has to move\r\n if (n<0): n=self.N-1\r\n if (n) in self.posocc:\r\n p=1 \r\n\r\n self.posfree.append(n)##position of the car that has to move\r\n if(n==self.N-1):\r\n self.posfree.remove(0)#new position of the car\r\n else:\r\n self.posfree.remove(n+1)#new position of the car\r\n if(n==self.N-1):\r\n self.posocc.append(0)#new position of the car\r\n else:\r\n self.posocc.append(n+1)#new position of the car \r\n self.posocc.remove(n)#position of the car that has to move\r\n if(n==self.N-1): return 0, n\r\n else: return n+1,n\r\n# return the new position of the car that has to move, and its current position\r\n \r\n \r\nclass A_or_S:\r\n def __init__(self,N,M,T):\r\n self.N=N\r\n self.M=M\r\n self.T=T\r\n 
self.cars=[]\r\n self.posizioni=[]\r\n self.street=street(self.N,self.M)\r\n for m in range (0,self.M):\r\n self.cars.append(car(m,m,N))\r\n for i in range (0,self.T):\r\n self.n,self.n_now=self.street.randomcar()\r\n self.posizioni.append(self.n)\r\n for i in range(0,len(self.cars)):\r\n if(self.cars[i].poscar==self.n_now):\r\n self.cars[i].move(self.n) \r\n\r\n def Average(self): \r\n average = sum(self.street.posocc)/len(self.street.posocc)\r\n return average \r\n\r\n def standard_deviation(self):\r\n return np.std(self.street.posocc)\r\n\r\n\r\np=A_or_S(10,5,20)#(,N,M,T)\r\n\r\nprint(p.Average())\r\nprint( p.standard_deviation())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"challenge 2.py","file_name":"challenge 2.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"508397741","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy_basic.items import PositionItem\n\nclass TencentScrapySpider(scrapy.Spider):\n name = 'tencent_scrapy'\n allowed_domains = ['hr.tencent.com']\n page = 0\n # 爬虫起始URL\n base_urls = 'http://hr.tencent.com/position.php?start=%d'\n start_urls = [ base_urls % page]\n\n\n\n def parse(self, response):\n #匹配所有的职位tr\n position_list = response.xpath('//tr[@class=\"even\"] | //tr[@class=\"odd\"]')\n for position in position_list:\n item = PositionItem()\n item['position_name'] = position.xpath('.//td[1]//a/text()').extract()[0]\n fa = lambda x:x[0] if x else ''\n item['position_link'] = fa(position.xpath('.//td[1]//a/@href').extract())\n item['position_type'] = fa(position.xpath('.//td[2]/text()').extract())\n item['position_num'] = fa(position.xpath('.//td[3]/text()').extract())\n item['location'] = fa(position.xpath('.//td[4]/text()').extract())\n item['date_pub'] = fa(position.xpath('.//td[5]/text()').extract())\n\n yield item\n\n if self.page < 500:\n self.page += 10\n # 构造请求,生成请求,加入队列\n yield scrapy.Request(self.base_urls % self.page,callback=self.parse)\n\n\n\n\n","sub_path":"scrapy/hr.tencent.com-position/1/scrapy_basic/scrapy_basic/spiders/tencent_scrapy.py","file_name":"tencent_scrapy.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"434698586","text":"# -*- coding: utf-8 -*-\n\nfrom WindPy import w\nimport pandas as pd\nfrom IPython.display import display\nfrom datetime import datetime\n\n#w.start()\n\nfactors=['pct_chg','close']#\nstore=pd.HDFStore('winddate.h5')\n\n#wset\ndays=w.wsd('000016.SH','close','20170101','20170110').Times\n#print(days)\ndfs=pd.DataFrame()\nfor da in days:\n print(da.strftime('%Y%m%d')) \n dd=w.wset('sectorconstituent','date='+da.strftime('%Y%m%d')+';sectorid=1000000087000000')\n #dd=w.wset('sectorconstituent','date='+da+';sectorid=a001010100000000')\n #dd=w.wset('sectorconstituent','date='+da+';sectorid=1000000087000000')\n df=pd.DataFrame(dd.Data).T\n df.columns=['time','code','name']\n df=df.loc[:,['code','name']]\n codes=df.loc[:,'code'].tolist()\n #wss\n dd=w.wss(codes,factors,'tradedate='+da.strftime('%Y%m%d'))\n df=pd.DataFrame(dd.Data).T\n df.columns=factors\n df.index=codes\n #print(df)\n \n dfs=pd.concat([dfs,df])\ndisplay(dfs)\n'''\n for f in factors:\n dd=df.T.loc[f]#.T\n #dd.index=da\n print(type(dd))\n #store.append(f,dd,format='table')\n \n #display(store.root)\n #display(store.select(f))\n \n#store.close()\n'''\n","sub_path":"windapi.py","file_name":"windapi.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"198765485","text":"import pandas as pd\nfrom pytrends.request import TrendReq\nfrom datetime import datetime, date\nimport matplotlib.pyplot as plt\n\n\ndef get_trends_colombia(pytrend, kw_list, current_date, dest_file):\n\n pytrend.build_payload(\n kw_list=kw_list,\n cat=0,\n timeframe=f\"2020-02-01 {current_date}\",\n geo=\"CO\",\n gprop=\"\",\n )\n\n # Interest over time in Colombia\n df_col = pytrend.interest_over_time()\n df_col = df_col.reset_index()\n df_col = df_col.drop(labels=[\"isPartial\"], axis=\"columns\")\n df_col.to_csv(dest_file, index=False)\n\n df_col.plot(x='date', y=kw_list, figsize=(120, 10), kind ='line')\n plt.show()\n\n\ndef get_trends_departments(pytrend, kw_list, current_date, dest_file):\n\n # Interest over time by department\n departments = {\n \"amazonas\": \"AMA\",\n \"antioquia\": \"ANT\",\n \"arauca\": \"ARA\",\n \"atlantico\": \"ATL\",\n \"bogota\": \"CUN\",\n \"bolivar\": \"BOL\",\n \"boyaca\": \"BOY\",\n \"caldas\": \"CAL\",\n \"caqueta\": \"CAQ\",\n \"casanare\": \"CAS\",\n \"cauca\": \"CAU\",\n \"cesar\": \"CES\",\n \"choco\": \"CHO\",\n \"cordoba\": \"COR\",\n \"cundinamarca\": \"CUN\",\n \"huila\": \"HUI\",\n \"la guajira\": \"LAG\",\n \"magdalena\": \"MAG\",\n \"meta\": \"MET\",\n \"narino\": \"NAR\",\n \"norte de santander\": \"\",\n \"putumayo\": \"PUT\",\n \"quindio\": \"\",\n \"risaralda\": \"\",\n \"san andres y providencia\": \"SAP\",\n \"santander\": \"SAN\",\n \"sucre\": \"SUC\",\n \"tolima\": \"TOL\",\n \"valle del cauca\": \"VAC\",\n \"vaupes\": \"VAU\"\n }\n \n dfs = []\n for dep, code in departments.items():\n if code != \"\":\n try:\n geo = f\"CO-{code}\"\n pytrend.build_payload(\n kw_list=kw_list,\n cat=0,\n timeframe=f\"2020-02-01 {current_date}\",\n geo=geo,\n gprop=\"\",\n )\n except:\n raise Exception(f\"fail {geo}\")\n else:\n df_dep = pytrend.interest_over_time()\n df_dep = df_dep.reset_index()\n df_dep = df_dep.drop(labels=[\"isPartial\"], axis=\"columns\")\n df_dep[\"department\"] = dep\n 
dfs.append(df_dep)\n\n df_departments = pd.concat(dfs, axis=0)\n df_departments = df_departments.sort_values(by=[\"date\"]).reset_index(drop=True)\n df_departments.to_csv(dest_file, index=False)\n\nif __name__ == \"__main__\":\n pytrend = TrendReq()\n kw_list = [\"coronavirus\", \"covid\", \"cuarentena\"]\n current_date = datetime.now().date().strftime(\"%Y-%m-%d\")\n get_trends_colombia(pytrend, kw_list, current_date, \"google_trends_colombia.csv\")\n get_trends_departments(pytrend, kw_list, current_date, \"google_trends_departamentos.csv\")\n\n","sub_path":"Google_trends/update_google_trends.py","file_name":"update_google_trends.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"244082995","text":"\"\"\"\r\nProblem: #2\r\n\r\nThis problem was asked by Uber.\r\n\r\nGiven an array of integers, return a new array such that each element at index i of the new array is the product of all\r\nthe numbers in the original array except the one at i.\r\n\r\nFor example, if our input was [1, 2, 3, 4, 5], the expected output would be [120, 60, 40, 30, 24]. If our input was [3, 2, 1],\r\nthe expected output would be [2, 3, 6].\r\n\r\nFollow-up: what if you can't use division?\r\n\"\"\"\r\nfrom functools import reduce\r\n\r\ndef multiplesExceptI(array):\r\n product = reduce(lambda x, y: x * y, array)\r\n result = []\r\n for number in array:\r\n result.append(product // number)\r\n return result\r\n\r\ndef multiplesExceptIWithoutDivision(array):\r\n leftMultiples = [1] * len(array)\r\n rightMultiples = [1] * len(array)\r\n result = []\r\n temp = 1\r\n for i in range(1, len(array)):\r\n temp *= array[i-1]\r\n leftMultiples[i] *= temp\r\n temp = 1\r\n for i in range(len(array) - 2, -1, -1):\r\n temp *= array[i+1]\r\n rightMultiples[i] *= temp\r\n for i in range(len(array)):\r\n result.append(leftMultiples[i] * rightMultiples[i])\r\n return result\r\n\r\nprint(multiplesExceptI([1, 2, 3, 4, 5]))\r\nprint(multiplesExceptIWithoutDivision([1, 2, 3, 4, 5]))\r\nprint(multiplesExceptI([3, 2, 1]))\r\nprint(multiplesExceptIWithoutDivision([3, 2, 1]))\r\n","sub_path":"solutions/product-of-array-except-i.py","file_name":"product-of-array-except-i.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"83957700","text":"import sys \n\nimport numpy as np\n\n\ndef read(data_file):\n with open(data_file,'r') as data:\n data.seek(0)\n states=int(data.readline())\n actions=int(data.readline())\n gamma=float(data.readline())\n episode=[]\n for i,line in enumerate(data):\n temp=line.split('\\n')\n temp=temp[0].split('\\t')\n if len(temp)==1:\n temp=[temp[0],np.nan,np.nan]\n t=np.array(temp)\n #print(t)\n episode=np.append(episode,t,axis=0)\n #print(episode)\n a=int(len(episode)/3)\n episode=episode.reshape(a,3)\n return states,gamma,episode\n\n\ndef esimate_v(states,gamma,episode):\n v=np.zeros(states)\n for k in range(6):\n g=0.0\n returns=np.zeros(states)\n count=np.zeros(states)\n for i in range(len(episode)-2, -1, -1):\n s=int(episode[i][0])\n g=(gamma*g)+float(episode[i][2])\n returns[s]+=g\n count[s]+=1\n if k==0:\n v=returns/count\n else:\n v-=0.00001*returns/count\n return v\n\n\n\nif __name__ == \"__main__\":\n data_file=sys.argv[1]\n print(sys.argv[1])\n states,gamma,episode=read(data_file)\n v=esimate_v(states,gamma,episode)\n for i in range(states):\n print(v[i])\n \n\n ","sub_path":"cs747-pa3/submission/estimate.py","file_name":"estimate.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"370280123","text":"import os\nimport sys\nimport numpy as np\nimport pickle\n#all pkl file format is label + content\n#load data_vec data_lable\n\ndef load_data_xy(file_names):\n datas = []\n lables = []\n for file_name in file_names:\n f = open(file_name, 'rb')\n x, y = pickle.load(f)\n datas.append(x)\n lables.append(y)\n\n combine_label = np.hstack(lables)\n combine_data = np.vstack(datas)\n return combine_data, combine_label\n\ndef cPickle_output(var, file_name):\n import cPickle\n f = open(file_name, 'wb')\n cPickle.dump(var, f, protocol=cPickle.HIGHEST_PROTOCOL)\n f.close()\n\ndef out_put_data_xy(vector_vars, vector_folder, batch_size = 1000):\n if not vector_folder.endswith('/'):\n vector_folder += '/'\n if not os.path.exists(vector_folder):\n os.mkdir(vector_folder)\n x, y = vector_vars\n n_batch = len(x) / batch_size\n for i in range(n_batch):\n file_name = vector_folder + str(i) + '.pkl'\n batch_x = x[i*batch_size: (i+1)*batch_size]\n batch_y = y[i*batch_size: (i+1)*batch_size]\n cPickle_output((batch_x, batch_y), file_name)\n if n_batch * batch_size < len(x):\n batch_x = x[n_batch*batch_size: ]\n batch_y = y[n_batch*batch_size: ]\n file_name = vector_folder + str(n_batch) + '.pkl'\n cPickle_output((batch_x, batch_y), file_name)\n\n\ndef out_put_data_xy_small(vector_vars, vector_folder):\n if not vector_folder.endswith('/'):\n vector_folder += '/'\n if not os.path.exists(vector_folder):\n os.mkdir(vector_folder)\n x, y = vector_vars\n n_batch = len(x) / batch_size\n for i in range(n_batch):\n file_name = vector_folder + str(i) + '.pkl'\n batch_x = x[i*batch_size: (i+1)*batch_size]\n batch_y = y[i*batch_size: (i+1)*batch_size]\n cPickle_output((batch_x, batch_y), file_name)\n if n_batch * batch_size < len(x):\n batch_x = x[n_batch*batch_size: ]\n batch_y = y[n_batch*batch_size: ]\n file_name = vector_folder + str(n_batch) + '.pkl'\n cPickle_output((batch_x, batch_y), file_name)\n\n\ndef scandir(startdir, file, last_dir):\n os.chdir(startdir)\n 
childlist = os.listdir(os.curdir)\n for obj in childlist:\n if os.path.isdir(obj):\n scandir(os.getcwd() + os.sep + obj, file, last_dir)\n else:\n file.append(os.getcwd() + os.sep + obj)\n last_dir.append(os.getcwd())\n\n os.chdir(os.pardir)\n return file, last_dir\n\n\ndef get_files(vec_folder):\n file_names = os.listdir(vec_folder)\n file_names.sort()\n if not vec_folder.endswith('/'):\n vec_folder += '/'\n for i in range(len(file_names)):\n file_names[i] = vec_folder + file_names[i]\n return file_names\n","sub_path":"ml_file_pkg/pickle_file.py","file_name":"pickle_file.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"475659571","text":"\"\"\"\nvizualize pcd file\n\"\"\"\nimport numpy as np\nimport open3d as o3d\n\ndef main(filepath):\n\n print(\"Reading PCD file: {0}\".format(filepath))\n print(\"Load a pcd point cloud, print it, and render it\")\n pcd = o3d.io.read_point_cloud(filepath)\n o3d.visualization.draw_geometries([pcd])\n\nif __name__ == \"__main__\":\n import sys\n if len(sys.argv) != 2:\n print(\"Please specify path to .pcd file.\")\n exit()\n main(sys.argv[1])","sub_path":"svo_utils/python_pcl_viewer.py","file_name":"python_pcl_viewer.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"405174052","text":"from django.shortcuts import render, redirect\nfrom django.views.decorators.http import require_GET\n\nfrom recipes.forms import RecipeForm\nfrom recipes.models import Recipe\n\n\ndef index(request):\n recipes = Recipe.objects.all().order_by('time')\n context = {\n 'recipes':recipes,\n }\n return render(request,'index.html',context)\n\n\ndef create(request):\n if request.method == \"GET\":\n form = RecipeForm()\n context = {\n 'form':form\n }\n return render(request, 'create.html',context)\n else:\n recipes = Recipe.objects.all()\n form = RecipeForm(request.POST)\n context = {\n 'recipes': recipes,\n 'form': form,\n }\n if form.is_valid():\n form.save()\n return render(request, 'index.html',context)\n else:\n return render(request, 'create.html',context)\n\n\ndef edit(request,pk):\n recipe = Recipe.objects.get(pk=pk)\n if request.method == \"GET\":\n form = RecipeForm(instance=recipe)\n context = {\n \"form\": form,\n 'pk': pk\n }\n return render(request, 'edit.html',context)\n else:\n form = RecipeForm(request.POST, instance=recipe)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n context = {\n 'form': form,\n 'pk': pk\n }\n return render(request,'edit.html',context)\n\n\ndef delete(request, pk):\n recipe = Recipe.objects.get(pk=pk)\n recipe.delete()\n return redirect('index')\n\n@require_GET\ndef details(request,pk):\n recipe = Recipe.objects.get(pk=pk)\n context = {\n 'recipe': recipe,\n 'pk': pk\n }\n return render(request, 'details.html', context)\n","sub_path":"recipes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"456734780","text":"\nfrom model import Generator, Discriminator\nimport torch\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nfrom utils import save_image, save_one_image, illuminationNormalization\nimport torchvision\nimport os\n\n\"\"\"\nusing trained model, test whole image\nshifting window\n\"\"\"\n\n# load model & load parameters\ngenerator = Generator()\ndiscriminator = Discriminator()\n\ng_path = \"weights_AAE_illu_2/generator_200.pth\"\nd_path = \"weights_AAE_illu_2/discriminator_200.pth\"\ngenerator.load_state_dict(torch.load(g_path))\ndiscriminator.load_state_dict(torch.load(d_path))\n\n# load one image\n# PIL\ntransform = transforms.Compose([\n # transforms.Resize(128),\n transforms.Lambda(illuminationNormalization),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])\n\n\ntest_all = True\nif test_all:\n # test all pics\n test_dir = \"../data/fabric/test/abnormal/\"\n #test_dir = \"../data/fabric/train/normal/\"\n for idx, file in enumerate(os.listdir(test_dir)):\n im = Image.open(test_dir + file)\n im = im.convert('RGB')\n x = transform(im)\n fake_x = torch.zeros_like(x)\n\n # perform windows 8*8\n size = 32\n start_i, start_j = 0, 0\n end_i, end_j = 32, 32\n while end_i <= 256:\n start_j = 0\n end_j = 32\n while end_j <= 256:\n # crop a rect\n rect = [start_i, start_j, end_i, end_j]\n\n # perform generator\n patch = x[:, start_i:end_i, start_j:end_j].unsqueeze(0)\n fake_patch, _ = generator(patch)\n # print(\"%f \" % discriminator(fake_patch))\n #\n fake_x[:, start_i:end_i, start_j:end_j] = fake_patch.squeeze(0)\n\n start_j += 32\n end_j += 32\n\n #print(\"=====\")\n start_i += 32\n end_i += 32\n print(\"===%d==\" % idx)\n save_one_image(idx, x, \"real\", \"test5\")\n save_one_image(idx, fake_x, \"fake\", \"test5\")\n save_one_image(idx, (fake_x - x).abs(), \"diff\", \"test5\")\n\nelse:\n # test one pic\n im = 
Image.open(\"../data/fabric/test/abnormal/06061012.png\")\n im = im.convert('RGB')\n x = transform(im)\n fake_x = torch.zeros_like(x)\n\n # perform windows 8*8\n size = 32\n start_i, start_j = 0, 0\n end_i, end_j = 32, 32\n while end_i <= 256 / 2:\n start_j = 0\n end_j = 32\n while end_j <= 256 / 2:\n # crop a rect\n rect = [start_i, start_j, end_i, end_j]\n\n # perform generator\n patch = x[:, start_i:end_i, start_j:end_j].unsqueeze(0)\n fake_patch, _ = generator(patch)\n #print(\"%f \" % discriminator(fake_patch))\n #\n fake_x[:, start_i:end_i, start_j:end_j] = fake_patch.squeeze(0)\n\n start_j += 32\n end_j += 32\n\n print(\"=====\")\n start_i += 32\n end_i += 32\n\n #save_image(0, x, fake_x, \"test2\")\n #torchvision.utils.save_image((fake_x - x).abs(), \"test2/fakeMinX.png\")\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"238014534","text":"# Time: O(n)\n# Space: O(1)\n\n'''\nGiven a sorted linked list, delete all duplicates such that each element appear only once.\n\nFor example,\nGiven 1->1->2, return 1->2.\nGiven 1->1->2->3->3, return 1->2->3.\n'''\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def deleteDuplicates(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n \n dummy = ListNode(float(\"Inf\")) # not that good \n dummy.next = head\n pre, cur = dummy, dummy.next\n \n while cur != None:\n if cur.val == pre.val:\n pre.next = cur.next\n cur = cur.next\n else:\n cur = cur.next\n pre = pre.next\n \n return dummy.next\n\n def deleteDuplicates_2(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n cur = head\n while cur:\n runner = cur.next\n while runner and runner.val == cur.val:\n runner = runner.next\n cur.next = runner\n cur = runner\n return head","sub_path":"remove-duplicates-from-sorted-list.py","file_name":"remove-duplicates-from-sorted-list.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"136317458","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nfrom collections import Counter\nfrom six.moves import urllib\n\nimport torch\n\nfrom ps_code.tracking.logging import info\n\n\nclass Dictionary(object):\n def __init__(self):\n self.word2idx = {}\n self.idx2word = []\n self.counter = Counter()\n self.total = 0\n\n def add_word(self, word):\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n token_id = self.word2idx[word]\n self.counter[token_id] += 1\n self.total += 1\n return self.word2idx[word]\n\n def __len__(self):\n return len(self.idx2word)\n\n\nclass Corpus(object):\n def __init__(self, path):\n self.dictionary = Dictionary()\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r', encoding='utf8') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r', encoding='utf8') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids\n\n\ndef download_or_retrieve_file(path, filename=None, url=None):\n filepath = os.path.join(path, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n saved_file, _ = urllib.request.urlretrieve(\n url, filepath, _progress)\n print()\n info('download file to the path:' + filepath)\n else:\n info('retrieve file to the path:' + 
filepath)\n return filepath\n\n\ndef batchify(data, bsz):\n \"\"\"\n Starting from sequential data, batchify arranges the dataset into columns.\n For instance, with the alphabet as the sequence and batch size 4, we'd get\n ┌ a g m s ┐\n │ b h n t │\n │ c i o u │\n │ d j p v │\n │ e k q w │\n └ f l r x ┘.\n These columns are treated as independent by the model,\n which means that the dependence of e. g. 'g' on 'f' can not be learned,\n but allows more efficient batch processing.\n \"\"\"\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = data.size(0) // bsz\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data.narrow(0, 0, nbatch * bsz)\n # Evenly divide the data across the bsz batches.\n data = data.view(bsz, -1).t().contiguous()\n return data\n\n\ndef get_batch(args, source, batch_iter, seq_len=None):\n \"\"\"get_batch subdivides the source data into chunks of length args.bptt.\n\n If source is equal to the example output of the batchify function, with\n a bptt-limit of 2, we'd get the following two Variables for i = 0:\n ┌ a g m s ┐ ┌ b h n t ┐\n └ b h n t ┘ └ c i o u ┘\n\n Note that despite the name of the function, the subdivison of data is not\n done along the batch dimension (i.e. dimension 1), since that was handled\n by the batchify function. The chunks are along dimension 0, corresponding\n to the seq_len dimension in the LSTM.\n \"\"\"\n seq_len = min(\n seq_len if seq_len else args.rnnlm_bptt, len(source) - 1 - batch_iter\n )\n input = source[batch_iter: batch_iter + seq_len]\n target = source[batch_iter + 1: batch_iter + 1 + seq_len].view(-1)\n return input, target\n","sub_path":"frameworks/sync-pytorch-ps/ps_code/ml_components/dataset/nlp_datasets/loader/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"453056956","text":"from module_imports import *\n\ndef modify_columns(ticker, normalize):\n df = pd.read_csv(\"/Users/excalibur/Dropbox/datasets/quandl_data/{}.csv\".format(ticker))\n df = df.drop('Adjusted Close', axis=1)\n \n df['50dravg'] = pd.rolling_mean(df['Close'], window=50)\n df['200dravg'] = pd.rolling_mean(df['Close'], window=200)\n\n df['50dravg'] = pd.rolling_mean(df['Close'], window=50)\n df['200dravg'] = pd.rolling_mean(df['Close'], window=200)\n\n if normalize == True:\n temp_df = df['Volume']\n df = df.drop('Volume', axis=1)\n std_df = df.std(axis=1, ddof=0)\n \n df['mean'] = df.mean(axis=1)\n df['std'] = std_df\n\n df['Open'] = (df['Open'] - df['mean']) / df['std']\n df['High'] = (df['High'] - df['mean']) / df['std']\n df['Low'] = (df['Low'] - df['mean']) / df['std']\n df['Close'] = (df['Close'] - df['mean']) / df['std']\n \n df['50dravg'] = (df['50dravg'] - df['mean']) / df['std']\n df['200dravg'] = (df['200dravg'] - df['mean']) / df['std']\n\n df = df.drop(['mean', 'std'], axis=1)\n\n df['Volume'] = temp_df\n\n df['OC%'] = (df['Close'] / df['Open']) - 1\n df['HL%'] = (df['High'] / df['Low']) - 1\n \n df['ticker'] = ticker\n\n df['label'] = df['OC%'].shift(-1)\n #df['label'] = df['HL%'].shift(-1)\n \n return df #df.loc[500:] # remove first 500 days\n\n\ndef get_quandl_data():\n \n tickers = [filename[:-4] for filename in os.listdir('/Users/excalibur/Dropbox/datasets/quandl_data/') if filename != '.DS_Store']\n\n normalize = False\n\n scale_volume = False\n\n # gather data\n stock_df = pd.DataFrame()\n for ticker in tickers:\n if stock_df.empty:\n stock_df = modify_columns(ticker, normalize)\n else:\n stock_df = stock_df.append(modify_columns(ticker, normalize))\n #stock_df = pd.concat([stock_df, modify_columns(ticker, normalize)])\n #stock_df = pd.concat([stock_df, modify_columns(ticker, normalize)], verify_integrity=True)\n \n # scale volume\n if scale_volume == True: \n stock_df['Volume'] = (stock_df['Volume'] - 
stock_df['Volume'].min()) / (stock_df['Volume'].max() - stock_df['Volume'].min())\n \n # log volume\n #stock_df['Volume'] = stock_df['Volume'].map(lambda x: np.log(x))\n\n #stock_df = stock_df.drop(['Open', 'High', 'Low', 'Close'], axis=1)\n\n # add bias\n #stock_df.insert(0, 'bias', 1.0)\n\n # keep tickers for predictions\n pred_tickers = stock_df['ticker'].unique()\n\n # categoricalize tickers\n #stock_df['ticker'] = stock_df['ticker'].astype('category').cat.codes\n\n # replace Infs with NaNs\n stock_df = stock_df.replace([np.inf, -np.inf], np.nan)\n\n prediction_df = stock_df.copy()\n\n #stock_df = stock_df.drop('ticker', axis=1)\n\n stock_df = stock_df.dropna()\n\n return stock_df, prediction_df, pred_tickers\n\n#########################################\n\ndef flatten_goog_data(ticker):\n \n stock_df = pd.read_csv(\"/Users/excalibur/Dropbox/datasets/goog_data/{}.csv\".format(ticker))\n\n # intraday drop last rows\n stock_df = stock_df.drop(stock_df.index[[-1,-2]])\n \n #print stock_df['time'].value_counts()\n stock_df = stock_df[(stock_df['time'] != 9.3) & (stock_df['time'] != 16.0)]\n #print stock_df['time'].value_counts()\n times = stock_df['time'].unique()\n \n columns = list(stock_df.columns)\n \n new_columns = []\n \n for time in times:\n for column in columns:\n new_columns.append(str(time) + \"-\" + column)\n \n #print \"number of flattened columns\", len(new_columns)\n #print new_columns\n \n flat_values = stock_df.values.ravel()\n stock_df = pd.DataFrame(columns=new_columns)\n \n errors = 0\n for day in xrange(len(flat_values)/len(new_columns)):\n \n day_values = flat_values[:len(new_columns)]\n \n #if day == 0:\n # print list(day_values)\n\n if day_values[0] != 10.0 or day_values[-10] != 15.3:\n errors += 1\n continue\n else:\n df = pd.DataFrame([list(day_values)], columns=new_columns)\n stock_df = stock_df.append(df)\n \n flat_values = flat_values[len(new_columns):]\n\n #print \"number of errors:\", errors, \"for\", ticker\n\n stock_df['label'] = 
((stock_df['15.3-CLOSE'] / stock_df['10.0-OPEN']) - 1).shift(-1)\n\n stock_df['ticker'] = ticker\n\n return stock_df\n\ndef get_goog_data():\n\n tickers = [filename[:-4] for filename in os.listdir('/Users/excalibur/Dropbox/datasets/goog_data/') if filename != '.DS_Store']\n\n # gather data\n stock_df = pd.DataFrame()\n for ticker in tickers:\n if stock_df.empty:\n stock_df = flatten_goog_data(ticker)\n else:\n df = flatten_goog_data(ticker)\n if not df.empty and len(df.columns) == len(stock_df.columns):\n stock_df = stock_df.append(df)\n\n week_days = stock_df['10.0-week_day'].values\n dates = stock_df['10.0-date'].values\n stock_df = stock_df.drop([col for col in stock_df.columns if (\"time\" in col) or (\"week_day\" in col) or (\"date\" in col)], axis=1)\n stock_df['week_day'] = week_days\n stock_df['date'] = dates\n\n # keep tickers for predictions\n pred_tickers = stock_df['ticker'].unique()\n\n # categoricalize tickers\n #stock_df['ticker'] = stock_df['ticker'].astype('category').cat.codes\n\n # replace Infs with NaNs\n stock_df = stock_df.replace([np.inf, -np.inf], np.nan)\n\n prediction_df = stock_df.copy()\n\n #stock_df = stock_df.drop('ticker', axis=1)\n\n stock_df = stock_df.dropna()\n\n return stock_df, prediction_df, pred_tickers\n\n\n","sub_path":"import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"345344515","text":"# -*- coding:utf-8 -*-\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen, HTTPError\nimport pycrfsuite\n\n\nif __name__ == \"__main__\":\n base_url = \"http://www.thelatinlibrary.com/\"\n home_content = urlopen(base_url)\n soup = BeautifulSoup(home_content, \"lxml\")\n author_page_links = soup.find_all(\"a\")\n author_pages = [ap[\"href\"] for i, ap in enumerate(author_page_links) if i < 49]\n ap_content = list()\n texts = list()\n for ap in author_pages:\n ap_content.append(urlopen(base_url + ap))\n\n #\n book_links = list()\n for path, content in zip(author_pages, ap_content):\n author_name = path.split(\".\")[0]\n ap_soup = BeautifulSoup(content, \"lxml\")\n book_links += ([link for link in ap_soup.find_all(\"a\", {\"href\": True}) if author_name in link[\"href\"]])\n # print(book_links[0])\n\n #\n texts = list()\n count = 0\n num_pages = 200\n with open(\"H:\\\\TanBoOwn\\\\Github\\\\CRF\\\\crf_train_corpus.txt\", \"w\", encoding=\"utf-8\") as f:\n for i, bl in enumerate(book_links[:num_pages]):\n # print(\"Getting content \" + str(i + 1) + \" of \" + str(num_pages), end=\"\\r\", flush=True)\n try:\n content = urlopen(base_url + bl[\"href\"]).read()\n # texts.append(content)\n # print(\"Document \" + str(count + 1) + \" of \" + str(len(texts)), end=\"\\r\", flush=True)\n textSoup = BeautifulSoup(content, \"lxml\")\n paragraphs = textSoup.find_all(\"p\", attrs={\"class\": None})\n prepared = (\"\".join([p.text.strip().lower() for p in paragraphs[1:-1]]))\n for t in prepared.split(\".\"):\n part = \"\".join([c for c in t if c.isalpha() or c.isspace()])\n if len(part) > 5:\n f.write(part)\n f.write(\"\\r\\n\")\n except HTTPError as err:\n print(\"Unable to retrieve \" + bl[\"href\"] + \".\")\n continue\n count += 1\n\n\n\n\n\n\n\n","sub_path":"My_CRF.py","file_name":"My_CRF.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"31686374","text":"class Job:\n def __init__(self, activity, timeAbs, name, state, site, toRetire, toDie, jobId, snapshot):\n self.activity = activity\n self.timeAbs = timeAbs\n self.name = name\n self.state = state\n self.site = site\n self.toRetire = toRetire\n self.toDie = toDie\n self.jobId = jobId\n self.snapshot = snapshot\n self.lifeCycle = 1\n\n","sub_path":"JobClass.py","file_name":"JobClass.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"370705132","text":"import socket\nimport threading\nimport time\nfrom sys import argv\nfrom math import floor\n\n\ndef listen(sock_client, ACK_tab):\n while(1):\n #we listen the socket to receive ACKs\n data_ack, address = sock_client.recvfrom(1024)\n print(\"received :\", data_ack.decode('utf-8'))\n message = data_ack.decode('utf-8').rstrip('\\0')\n index = int(message[3::])\n for i in range(240):\n if((index-240)+i >=0):\n ACK_tab[(index-240)+i][0] += 1\n\n\ndef send_file(socket_port, packet_length):\n # socket initialization\n sock_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock_client.bind((UDP_IP, socket_port))\n\n RTT = 0.05\n cwnd = 120 #window size\n Tab_ACK = []\n\n # file initialization\n filename, address = sock_client.recvfrom(1024)\n print(\"received :\", filename.decode('utf-8'))\n file = open('files/' + filename.decode('utf-8').rstrip('\\0'), 'rb')\n content = file.read()\n step = 0\n NUM_SEG = 1 # the number of the segment\n\n # loop where we send the file in packets of length\n while step * packet_length <= len(content):\n # we adapt the end of the sequence to the length of the content in order not to send random\n # data from the memory\n end = (step + 1) * packet_length if (step + 1 * packet_length) < len(content) else len(content)\n\n # we create the segment number on 6 bytes then we add the content of the file\n SEG = bytes(str(NUM_SEG).zfill(6), 'utf-8')\n SEG += content[step * packet_length:end]\n Tab_ACK.append([0, 0, SEG, 0]) #[nbr of received ACK for this segment, time when we send the segment, segment number, did we already send the segment once]\n NUM_SEG +=1\n step += 1\n\n #we start a thread to listen the socket\n thread = threading.Thread(target=listen, args=(sock_client, Tab_ACK))\n thread.start()\n Last_Segment = False\n NUM_SEG = 1\n\n\n while(NUM_SEG < len(Tab_ACK) and Last_Segment == 0):\n #if we received the ACK of the first window's packet we slice the window\n while(Tab_ACK[NUM_SEG - 1][0] > 0 and 
NUM_SEG < len(Tab_ACK)):\n NUM_SEG += 1\n\n #we run through the window to launch the segments\n for i in range(int(cwnd)):\n if((i+NUM_SEG) > len(Tab_ACK)):\n break\n\n #If one segment has never been sent we send it\n if(Tab_ACK[i+NUM_SEG - 1][3] == 0):\n print(\"send : \", i+NUM_SEG)\n sock_client.sendto(Tab_ACK[i+NUM_SEG-1][2], address)\n Tab_ACK[i+NUM_SEG-1][3] += 1\n Tab_ACK[i+NUM_SEG-1][1] = time.time()\n\n #if we detect a loss we resend the lost segment\n elif((Tab_ACK[i+NUM_SEG - 1][0] == 0) and ((time.time() - Tab_ACK[i+NUM_SEG-1][1] > RTT) or (Tab_ACK[i+NUM_SEG-2][0] > 3 and Tab_ACK[i+NUM_SEG-1][3] < 2))):\n print(\"loss detected\")\n print(\"send : \", i+NUM_SEG)\n sock_client.sendto(Tab_ACK[i+NUM_SEG-1][2], address)\n Tab_ACK[i+NUM_SEG-1][1] = time.time()\n Tab_ACK[i+NUM_SEG-1][3] += 1\n\n #if we received an ACK for the last segment, that means we reached the end\n Last_Segment = Tab_ACK[len(Tab_ACK) - 1][0]\n\n # when we reach the end of the content we send a message \"FIN\" to the client to end the communication\n MESSAGE_FIN = bytes(\"FIN\", 'utf-8')\n for i in range(10):\n sock_client.sendto(MESSAGE_FIN, address)\n thread.join()\n\n\nif __name__ == \"__main__\":\n # initialization of the arguments from the command line\n if len(argv) < 2:\n print(\"Use of the program : 'python3 \" + argv[0] + \" '\")\n exit(1)\n\n if int(argv[1]) <= 1000:\n print(\"The port number must be above 1000\")\n exit(1)\n\n port = int(argv[1])\n\n # we listen to every interfaces on port given by args on the console\n UDP_IP = ''\n UDP_PORT = port\n\n # socket initialization\n sock = socket.socket(socket.AF_INET,\n socket.SOCK_DGRAM)\n sock.bind((UDP_IP, UDP_PORT))\n\n # we give a new port to send the file that's above the original port\n port_new = port + 1 if port + 1 < 65535 else 1001\n\n #\n while 1:\n # receiving the SYN from the client\n data, addr = sock.recvfrom(1024)\n print(\"received :\", data.decode('utf-8'))\n port_new = port_new + 1 if port_new + 1 < 65535 else 
1001\n\n # we start the thread before sending the SYN-ACK because the thread can take too much time to start\n # and the client try to connect to a socket that does not exist yet, causing an error\n thread = threading.Thread(target=send_file, args=(int(port_new), 1494))\n thread.start()\n\n # sending the SYN-ACK to the client with the new port number\n MESSAGE = bytes(\"SYN-ACK\" + str(port_new), 'utf-8')\n sock.sendto(MESSAGE, addr)\n\n # receiving ACK from the client\n data, addr = sock.recvfrom(1024)\n print(\"received :\", data.decode('utf-8'))\n","sub_path":"server/server_cst.py","file_name":"server_cst.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"536672299","text":"\"\"\"\nProblem B\n\n@author: Krisztian Balog\n\"\"\"\n\nimport sys\n\n\nclass Solver(object):\n def __init__(self):\n self.flips = {}\n\n @staticmethod\n def flip(s):\n s2 = \"\"\n for c in s:\n s2 += \"-\" if c == \"+\" else \"+\"\n return s2\n\n def solve(self, s):\n \"\"\"Classic dynamic programming.\"\"\"\n if s not in self.flips: # if it has not already been computed\n n = len(s)\n allp = n * \"+\"\n if s == allp: # all +: no flips needed\n self.flips[s] = 0\n elif s == n * \"-\": # all -: 1 flip needed\n self.flips[s] = 1\n else: # contains both + and -\n for i in range(n):\n top = self.flip(s[0:i + 1]) # top of the stack (flipped)\n bottom = s[i + 1:n + 1] # bottom of the stack\n f2 = 1 + self.solve(top) + self.solve(bottom) # #flips needed\n if s not in self.flips or f2 < self.flips[s]:\n self.flips[s] = f2\n # print(str(i) + \": \" + s + \" => \" + top + \" | \" + bottom + \" (\" + str(f2) + \") \")\n return self.flips[s]\n\n\ndef run(infile, outfile):\n with open(infile, \"r\") as f:\n t = int(f.readline().strip())\n cases = [f.readline().strip() for i in range(t)]\n solver = Solver()\n with open(outfile, \"w\") as f:\n for i, s in enumerate(cases):\n f.write(\"Case #\" + str(i + 1) + \": \" + str(solver.solve(s)) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n run(\"B-large.in\", \"B-large.out\")\n","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_mcmaster_b.py","file_name":"16_0_2_mcmaster_b.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"316356205","text":"from dagster_graphql.test.utils import execute_dagster_graphql\n\nfrom dagster.core.instance import DagsterInstance\n\nfrom .utils import define_context, sync_execute_get_run_log_data\n\nCOMPUTE_LOGS_QUERY = '''\n query ComputeLogsQuery($runId: ID!, $stepKey: String!) {\n pipelineRunOrError(runId: $runId) {\n ... on PipelineRun {\n runId\n computeLogs(stepKey: $stepKey) {\n stdout {\n data\n }\n }\n }\n }\n }\n'''\n\n\ndef test_get_compute_logs_over_graphql(snapshot):\n payload = sync_execute_get_run_log_data(\n {'executionParams': {'selector': {'name': 'spew_pipeline'}, 'mode': 'default'}}\n )\n run_id = payload['runId']\n\n result = execute_dagster_graphql(\n define_context(instance=DagsterInstance.local_temp()),\n COMPUTE_LOGS_QUERY,\n variables={'runId': run_id, 'stepKey': 'spew.compute'},\n )\n compute_logs = result.data['pipelineRunOrError']['computeLogs']\n snapshot.assert_match(compute_logs)\n","sub_path":"python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_compute_logs.py","file_name":"test_compute_logs.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"496208515","text":"# Snake Game!\n\nimport pygame # all the necessary for 2D game, sound and etc\nimport sys # has exist\nimport random # random positioned food for snake\nimport time # for sleeping in the end or on error\n\ncheckErrors = pygame.init() # initializing pygame\nif checkErrors[1] > 0: # (6,0) tuple, we check if there is any errors\n print(\"(!) Had {0} initializing errors, \"\n \"exiting...\".format(checkErrors[1]))\n sys.exit(-1) # exiting system\nelse:\n print(\"(+) PyGame successfully initialized!\")\n\n# Play surface\nplaySurface = pygame.display.set_mode((720, 460)) # Creating player surface\npygame.display.set_caption(\"Snake Game!\") # Creating title for player surface\n\n# Colors\nred = pygame.Color(255, 0, 0) # game over # R-red,G-green,B-blue\ngreen = pygame.Color(0, 255, 0) # snake\nblack = pygame.Color(0, 0, 0) # score\nwhite = pygame.Color(255, 255, 255) # background\nbrown = pygame.Color(165, 42, 42) # food\n\n# FPS = Frames Per Second\nfpsController = pygame.time.Clock()\n\n# Important variables\nsnakePos = [100, 50] # [x,y] coordinates\nsnakeBody = [[100, 50], [90, 50], [80, 50]] # snake body with list of lists\n\n# Food position randomly\nfoodPos = [random.randrange(1, 72) * 10, random.randrange(1, 46) * 10] # [x,y] coordinates randomly\nfoodSpawn = True\n\ndirection = \"RIGHT\"\nchangeTo = direction\n\nscore = 0\nticksPerSecond = 20\n\n# Game over function\ndef gameOver():\n myFont = pygame.font.SysFont(\"monaco\", 72)\n goSurf = myFont.render(\"Game Over!\", True, red)\n goRect = goSurf.get_rect()\n goRect.midtop = (360, 15)\n playSurface.blit(goSurf, goRect)\n showScore(0)\n pygame.display.flip() # to show smth on screan\n time.sleep(4)\n pygame.quit() # for pygame to exit\n sys.exit() # for console / game to exit\n\ndef showScore(choice=1):\n sFont = pygame.font.SysFont(\"monaco\", 24)\n sSurf = sFont.render(\"Score : {0}\".format(score), True, black)\n sRect = sSurf.get_rect()\n if choice == 1:\n sRect.midtop = 
(80,10)\n else:\n sRect.midtop = (360, 120)\n playSurface.blit(sSurf, sRect)\n\n# Main logic of the game\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT or event.key == ord('d'):\n changeTo = \"RIGHT\"\n if event.key == pygame.K_LEFT or event.key == ord('a'):\n changeTo = \"LEFT\"\n if event.key == pygame.K_UP or event.key == ord('w'):\n changeTo = \"UP\"\n if event.key == pygame.K_DOWN or event.key == ord('s'):\n changeTo = \"DOWN\"\n if event.key == pygame.K_ESCAPE:\n pygame.event.post(pygame.event.Event(quit()))\n\n # Validation of direction\n if changeTo == \"RIGHT\" and not direction == \"LEFT\":\n direction = \"RIGHT\"\n if changeTo == \"LEFT\" and not direction == \"RIGHT\":\n direction = \"LEFT\"\n if changeTo == \"UP\" and not direction == \"DOWN\":\n direction = \"UP\"\n if changeTo == \"DOWN\" and not direction == \"UP\":\n direction = \"DOWN\"\n\n # increasing x,y values of snake movement\n if direction == \"RIGHT\":\n snakePos[0] += 10\n if direction == \"LEFT\":\n snakePos[0] -= 10\n if direction == \"UP\":\n snakePos[1] -= 10\n if direction == \"DOWN\":\n snakePos[1] += 10\n\n # Snake body mechanism and food eating growing\n snakeBody.insert(0, list(snakePos))\n if snakePos[0] == foodPos[0] and snakePos[1] == foodPos[1]: # true then grow\n score += 1\n ticksPerSecond +=0.4\n foodSpawn = False # ate a food and not spawn now\n else:\n snakeBody.pop()\n\n if foodSpawn == False:\n foodPos = [random.randrange(1, 72) * 10, random.randrange(1, 46) * 10] # [x,y] coordinates randomly\n foodSpawn = True\n\n playSurface.fill(white) # fill() means to fill entire screen with given color & without .flip() wont work\n for pos in snakeBody:\n pygame.draw.rect(playSurface, green, pygame.Rect(pos[0], pos[1], 10, 10))\n\n pygame.draw.rect(playSurface, brown, pygame.Rect(foodPos[0], foodPos[1], 10, 10))\n\n if snakePos[0] > 700 or 
snakePos[0] < 0:\n gameOver()\n if snakePos[1] > 440 or snakePos[1] < 0:\n gameOver()\n\n for block in snakeBody[1:]:\n if snakePos[0]== block[0] and snakePos[1]== block[1]:\n gameOver()\n\n showScore()\n pygame.display.flip()\n fpsController.tick(ticksPerSecond) #tick() means how many ticks per second\n\n\n#pyinstaller - galima executable padaryti","sub_path":"games/snakeGame.py","file_name":"snakeGame.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"565099503","text":"# Model diagnostics that were output fom the training\n\nfrom evaluate import *\nparent_dir = \"/data/aims/sychoi/\"\nresults_dir = parent_dir + \"plots_Final/\"\n# Currently two subdirectories under results_MoS2, 1vac & 2vac\n\n# Load diagnostic files\n# List of directories with each trained model as an element in the list\nlabel_list = [\"1vacancy\", \"2vacancy\"] \nresults_dir_list = [\"{}{}/\".format(results_dir, label) for label in label_list] \n\ndiagnostics_data = get_diagnostic_data(results_dir_list)\n\n\n# Plot diagnostic data using matplotlib\n# Plot function = log(1-TP/(TP+FN))\nplot_diagnostics(diagnostics_data, label_list, diag=\"F1\", log=True, invert=True, N=2)\n\n\n\n\n","sub_path":"code/3_evaluate/plot_diagnostics.py","file_name":"plot_diagnostics.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"256229183","text":"if \"lvgl\" not in dir():\n import lvgl\n\nif \"disp\" not in dir():\n import ujson\n import os\n\n #LCD ili9341初始化\n from ili9341 import ili9341\n from xpt2046 import xpt2046\n\n TFT_IS_PORTRAIT =1 #竖屏:1 ,横屏:0 ;\n TOUCH_READY = 0 #用于检测触摸屏是否已经校准过;\n\n disp = ili9341(\n miso=12,\n mosi=13,\n clk=14,\n cs=15,\n dc=21,\n rst=33,\n power=50, #硬件不支持,随便配一个参数\n backlight=51, #硬件不支持,随便配一个参数\n backlight_on= 1,\n power_on= 1,\n width=240 if TFT_IS_PORTRAIT else 320,\n height=320 if TFT_IS_PORTRAIT else 240,\n rot=ili9341.PORTRAIT if TFT_IS_PORTRAIT else ili9341.LANDSCAPE #垂直方向PORTRAIT ;水平方向:LANDSCAPE\n )\n\n #触摸屏设置校准\n TOUCH_CS = 2 #触摸屏CS片选引脚\n TOUCH_INTERRUPT=0 #横屏\n\n if TFT_IS_PORTRAIT:\n TOUCH_CALI_FILE = \"touch_cali_PORTRAIT.json\" #保存为竖屏触摸参数\n else:\n TOUCH_CALI_FILE = \"touch_cali_LANDSCAPE.json\" #保存为横屏触摸参数\n\n #从没做过触摸校准\n if TOUCH_CALI_FILE not in os.listdir():\n touch = xpt2046(\n cs=TOUCH_CS,\n transpose=TFT_IS_PORTRAIT,\n )\n\n from touch_cali import TouchCali\n\n touch_cali = TouchCali(touch, TOUCH_CALI_FILE)\n touch_cali.start()\n\n #已经做过触摸校准,直接调用触摸参数文件\n else:\n with open(TOUCH_CALI_FILE, 'r') as f:\n param = ujson.load(f)\n touch_x0 = param['cal_x0']\n touch_x1 = param['cal_x1']\n touch_y0 = param['cal_y0']\n touch_y1 = param['cal_y1']\n\n touch = xpt2046(\n cs=TOUCH_CS,\n transpose=TFT_IS_PORTRAIT,\n cal_x0=touch_x0,\n cal_x1=touch_x1,\n cal_y0=touch_y0,\n cal_y1=touch_y1,\n )\n\n TOUCH_READY = 1 #表示已经配置好触摸参数","sub_path":"esp32/ui_start.py","file_name":"ui_start.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"446099137","text":"\"\"\"\nSecret_Messages.02.py\n\nCode to return N bit values given a text string.\n\nUsage: Secret_Messages.02.py \n\"\"\"\n\ndef string_to_nbits(data, num_bits):\n \"\"\"Convert a sequence of 8 bit characters into a list of N bit values.\n\n data a sequence of 8 bit characters\n num_bits the number of bits in the N bit values\n\n Returns a list of the N bit values.\n \"\"\"\n\n result = []\n\n nbit_mask = 2**num_bits - 1 # get a \"bit mask\" with N 1s at the right\n for ch in data:\n ch_value = ord(ch) # convert character to a decimal value\n for _ in range(8 // num_bits): # do 8 times for 1 bit, etc\n result.append(ch_value & nbit_mask) # get right N bits from character value\n ch_value >>= num_bits # shift to remove right N bits\n\n return result\n\nif __name__ == '__main__':\n import sys\n\n # get test text and number of bits to return\n if len(sys.argv) != 3:\n print('Usage: Secret_Messages.02.py ')\n sys.exit(1)\n number_of_bits = int(sys.argv[1])\n data = sys.argv[2]\n \n print(f'number_of_bits={number_of_bits}')\n print(f\"data='{data}'\")\n for ch in data:\n print(f\" {ord(ch):08b} = '{ch}'\")\n nbits_list = string_to_nbits(data, number_of_bits)\n for nbit_value in nbits_list:\n print(f'nbit_value={nbit_value:0{number_of_bits}b}')\n \n","sub_path":"Secret_Messages/Secret_Messages.02.py","file_name":"Secret_Messages.02.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"581454203","text":"import os\nfrom pymediainfo import MediaInfo\nfrom flask_pymongo import PyMongo\n\ndef FileMediaInfo(db, MEDIA_DIR):\n db.delete_many({})\n fileList = []\n fileNameList = os.listdir(MEDIA_DIR)\n for item in fileNameList:\n fileInfo = {}\n info = MediaInfo.parse(os.path.join(MEDIA_DIR,item)).tracks[0]\n fileInfo['name'] = info.file_name\n fileInfo['complete_name'] = info.complete_name\n fileInfo['type'] = info.file_extension\n fileInfo['size'] = info.file_size\n fileInfo['duration'] = info.duration\n fileList.append(fileInfo)\n db.insert_many(fileList)\n return list(db.find({}))","sub_path":"server/mediainfo.py","file_name":"mediainfo.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"138454056","text":"import socket\nimport struct\nimport time\n\n# Create a TCP/IP socket\nTCP_IP = '192.168.1.198'\nTCP_PORT = 8899\nBUFFER_SIZE = 77\nsock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)\nsock.connect ((TCP_IP, TCP_PORT))\n\ntry:\n req = struct.pack ('8B', 0x01, 0x03, 0x00, 0x00, 0x00, 0x24, 0x45, 0xD1)\n sock.send (req)\n print (\"TX: (%s)\" % req)\n rec = sock.recv (BUFFER_SIZE)\n print (\"RX: (%s)\" % rec)\n for i in rec:\n print(hex (ord (i)))\n x = int (hex (ord (i)), 16)\n print(x)\n time.sleep (10)\n\n\nfinally:\n print ('\\nCLOSING SOCKET')\n sock.close ()\n","sub_path":"test_connection.py","file_name":"test_connection.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"154699265","text":"import pandas as pd\n\n# analyse the tabs and list the unique tabs with their occurences\ndf = pd.read_csv(\"tabs.csv\")\ndf.columns = ['Company', 'Tab']\npivoted = pd.pivot_table(df, index=['Company','Tab'], aggfunc='size')\ndf_aggregation = pivoted.to_frame().reset_index()\ndf_aggregation.rename(columns={0: 'Occurrences'}, inplace=True)\n\ntab_analyse = df_aggregation['Tab'].value_counts()\ndf_tab_analyse = tab_analyse.to_frame().reset_index()\ndf_tab_analyse.to_csv(\"tab_analyse.csv\", sep='\\t', encoding='utf-8')","sub_path":"AnalyseSubTabs.py","file_name":"AnalyseSubTabs.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"13833561","text":"#encoding:UTF-8\nimport httplib2\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.file import Storage\nfrom clouds.GoogleApiPython3x.apiclient import errors\nfrom clouds.GoogleApiPython3x.apiclient.discovery import build\nfrom clouds.GoogleApiPython3x.apiclient.http import MediaFileUpload\n\nimport os.path\n\nfrom cloud import Cloud\nfrom utils import File, split_filepath, error_codes\n\n# There is no official Google Api for Python 3\n# and httplib2 has a bug with byte strings processing\n# so I keep them locally for now\nPORTABLE = True\n\n\n\n\nREDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'\nGOOGLE_DIR_MTYPE = 'application/vnd.google-apps.folder'\nSCOPES = ['https://www.googleapis.com/auth/drive']\n\n\"\"\"\n'https://www.googleapis.com/auth/drive.file'\n'https://www.googleapis.com/auth/drive.metadata.readonly',\n'https://www.googleapis.com/auth/drive.readonly',\n'https://www.googleapis.com/auth/drive.appdata',\n'https://www.googleapis.com/auth/drive'\n\"\"\"\n\nclass GoogleDrive(Cloud):\n #url = \"www.googleapis.com/drive/v2\"\n\n\n def __init__(self, secret_json_file, home_folder=''):\n super(GoogleDrive, self).__init__(home_folder)\n\n credential_file = secret_json_file + '.cred'\n if not os.path.isfile(credential_file):\n self._initial_auth_(secret_json_file, credential_file)\n\n self.drive_service = self._get_build_service_(secret_json_file, credential_file)\n\n self.dirs_cache = self._get_new_cache_level_('root')\n\n\n\n\n # list directory on server\n def _ls_(self, folder):\n folderId = self._get_folder_id_(folder)\n files = []\n\n def callback(children):\n for child in children.get('items', []):\n files.append(self._elem2file_(child))\n return\n\n if folderId is not None:\n param = {}\n param['q'] = \"'%s' in parents and trashed = false\" % folderId\n param['fields'] = 'items(createdDate,fileSize,id,mimeType,modifiedDate,originalFilename,title)'\n self._get_children_files_(callback, param)\n\n return 
files\n\n\n # download file from server\n def _download_(self, remote_file, local_file):\n file_id = self._get_file_id_(remote_file)\n\n if file_id is not None:\n try:\n file = self.drive_service.files().get(fileId=file_id).execute()\n content = self._download_file_(file)\n if content is not None:\n with open(local_file, 'wb') as f:\n f.write(content)\n return local_file\n\n except errors.HttpError as error:\n print( 'An error occurred: %s' % error)\n return None\n\n\n\n # upload file to server\n def _upload_(self, local_file, remote_file):\n folder, filename = split_filepath(remote_file)\n self._mkdir_(folder)\n\n folder_id = self._get_folder_id_(folder)\n if folder_id is not None:\n mtype = '*/*'\n media_body = MediaFileUpload(local_file, resumable=True, mimetype=mtype)\n\n body = {\n 'title': filename,\n 'mimeType': mtype,\n 'parents': [{'id':folder_id}]\n }\n\n try:\n file = self.drive_service.files().insert(body=body, media_body=media_body).execute()\n\n # Uncomment the following line to print the File ID\n # print 'File ID: %s' % file['id']\n\n return error_codes.OK\n except errors.HttpError as error:\n print('An error occured: %s' % error)\n #return None\n\n return error_codes.ERROR\n\n # delete file or directory on server\n def _delete_(self, filepath):\n file_id = self._get_file_id_(filepath)\n if file_id is not None:\n try:\n self.drive_service.files()._delete_(fileId=file_id).execute()\n return error_codes.OK\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n return error_codes.ERROR\n\n\n\n\n def _mkdir_(self, path):\n dirs = path.strip('/').split('/')\n\n # Check if path exists and update cache for max deepness\n if self._get_folder_id_(path) is None:\n cache_dir = self.dirs_cache\n for directory in dirs:\n if cache_dir['children'] is None or directory not in cache_dir['children']:\n response = self._create_dir_(cache_dir['id'], directory)\n if response is not None:\n id = response.get('id')\n new_cache_dict = 
self._get_new_cache_level_(id)\n\n if cache_dir['children'] is None: cache_dir['children'] = {}\n cache_dir['children'][directory] = new_cache_dict\n else:\n return error_codes.ERROR\n cache_dir=cache_dir['children'][directory]\n return error_codes.OK\n\n # Code from Google example\n def _download_file_(self, drive_file):\n \"\"\"Download a file's content.\n\n Args:\n service: Drive API service instance.\n drive_file: Drive File instance.\n\n Returns:\n File's content if successful, None otherwise.\n \"\"\"\n download_url = drive_file.get('downloadUrl')\n if download_url:\n resp, content = self.drive_service._http.request(download_url)\n if resp.status == 200:\n print('Status: %s' % resp)\n return content\n else:\n print('An error occurred: %s' % resp)\n return None\n else:\n # The file doesn't have any content stored on Drive.\n return None\n\n\n def _create_dir_(self, parentId, folder_name):\n body = {\n 'title': folder_name,\n 'mimeType': GOOGLE_DIR_MTYPE,\n 'parents' : [{'id':parentId}]\n }\n try:\n result = self.drive_service.files().insert(body=body).execute()\n return result\n except errors.HttpError as error:\n print('An error occured: %s' % error)\n return None\n\n # return id for last folder in path or None if\n # path does not exist\n def _get_folder_id_(self, path):\n if path == '' or path == '/':\n return 'root'\n #remove first and last slashes and split\n path_list = path.strip('/').split('/')\n\n curr_dir_id = 'root'\n curr_cache_level = self.dirs_cache\n for p in path_list:\n if curr_cache_level['children'] is None:\n curr_cache_level['children'] = self._get_children_dirs_(curr_dir_id)\n\n if p in curr_cache_level['children']:\n curr_cache_level = curr_cache_level['children'][p]\n curr_dir_id = curr_cache_level['id']\n else:\n return None\n return curr_dir_id\n\n # return dict of folders that are inside given one\n def _get_children_dirs_(self, folderId):\n dirs = {}\n\n param = {}\n param['q'] = \"mimeType = '%s' and '%s' in parents\" % 
(GOOGLE_DIR_MTYPE, folderId)\n param['fields'] = 'items(id,title)'\n\n\n def callback(children):\n for child in children.get('items', []):\n _name = child['title']\n _id = child['id']\n dirs[_name] = self._get_new_cache_level_(_id)\n return\n\n self._get_children_files_(callback, param)\n\n return dirs\n\n # calls callback function for each chunk of files obj (include folders)\n def _get_children_files_(self, callback, params={}):\n page_token = None\n while True:\n try:\n if page_token:\n params['pageToken'] = page_token\n children = self.drive_service.files().list(**params).execute()\n\n callback(children)\n\n page_token = children.get('nextPageToken')\n if not page_token:\n break\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n break\n\n def _get_file_id_(self, filepath):\n folder, filename = split_filepath(filepath)\n\n # Trying to get fileid\n file_id = None\n files = self._ls_(folder)\n for file in files:\n if file.name == filename:\n file_id = file.id\n break\n return file_id\n\n def _elem2file_(self, elem):\n id = elem.get('id', None)\n name = elem.get('title', '')\n #orig_name = elem.get('originalFilename', '')\n mtype = elem.get('mimeType', '')\n size = elem.get('fileSize', 0)\n mtime = elem.get('modifiedDate', '')\n ctime = elem.get('createdDate', '')\n\n if mtype == GOOGLE_DIR_MTYPE:\n mtype = File.DIRECTORY_MTYPE\n return File(id, name, mtype, size, mtime, ctime)\n\n # User needs to open link in web browser to get secret code\n def _initial_auth_(self, secret_json, credfile):\n flow = flow_from_clientsecrets(secret_json, SCOPES, REDIRECT_URI)\n authorize_url = flow.step1_get_authorize_url()\n print('Go to the following link in your browser: \\n' + authorize_url)\n code = input('Enter verification code: ').strip()\n credentials = flow.step2_exchange(code)\n storage = Storage(credfile)\n storage.put(credentials)\n #print(\"All finished initializing, run again\")\n\n\n def _get_build_service_(self, secret_json_file, 
credential_file):\n storage = Storage(credential_file)\n creds = storage.get()\n flow = flow_from_clientsecrets(secret_json_file,SCOPES)\n\n http = httplib2.Http()\n http = creds.authorize(http)\n return build('drive', 'v2', http=http)\n\n # creates a new dict that needed to store caches of brunches of directory tree\n def _get_new_cache_level_(self, id, children=None):\n return {'id':id, 'children':children}\n","sub_path":"clouds/google_drive.py","file_name":"google_drive.py","file_ext":"py","file_size_in_byte":10036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"463701074","text":"#Create spectral - spatial fusion model\nfrom .layers import *\n\ndef create_model(height=11, width=11, channels=48, classes=2, weighted_sum=True):\n \"\"\"\n \"\"\"\n input_shape = (height, width, channels)\n inputs = layers.Input(shape=input_shape)\n\n #spatial subnetwork\n spatial_layers = spatial_network(inputs, classes=classes)\n\n #spectral network\n spectral_layers = spectral_network(inputs, classes=classes)\n\n #Learn weighted average\n outputs = submodule_consensus(spatial_layers,\n spectral_layers,\n weighted_sum=weighted_sum)\n\n #outputs = layers.Average()([spatial_layers, spectral_layers])\n\n model = Model(inputs=inputs, outputs=outputs, name=\"DeepTreeAttention\")\n\n return model","sub_path":"DeepTreeAttention/models/Hang2020.py","file_name":"Hang2020.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"388874943","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: zhang\n@time: 2017/3/26\n\"\"\"\n# import os,sys,getopt\n#\n# def main(argv):\n# opts, args = getopt.getopt(argv[1:], 's:p:')\n# print('argv', argv)\n# print('argv[1:]', argv[1:])\n# print('opts', opts)\n# print('args', args)\n#\n#\n# if __name__ == '__main__':\n# main(sys.argv)\n\n# #!/usr/bin/python\n# # -*- coding: UTF-8 -*-\n#\n# import sys, getopt\n#\n# def main(argv):\n# inputfile = ''\n# outputfile = ''\n#\n# opts, args = getopt.getopt(argv[1:],\"hi:o:\",[\"ifile=\",\"ofile=\"])\n# print('opts:', opts)\n# print('args:', args)\n#\n#\n# for opt, arg in opts:\n# if opt == '-h':\n# print('test.py -i -o ')\n# sys.exit()\n# elif opt in (\"-i\", \"--ifile\"):\n# inputfile = arg\n# elif opt in (\"-o\", \"--ofile\"):\n# outputfile = arg\n# print('输出文件为:', inputfile)\n# print('输出的文件为:', outputfile)\n#\n# if __name__ == \"__main__\":\n# main(sys.argv)\nimport json\n\ndef test(self):\n file_name = '1.png'\n file_size = 4563\n file_info = file_name, file_size\n str = json.dumps(file_info)\n print(\"str:\",str)\n\na = test\na(123)","sub_path":"test/test1/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"634414443","text":"from bisect import bisect_left, bisect_right\nfrom enum import Enum\n\nfrom core.constants import data_path, upstream_length\n\npath_to_annotations = data_path + 'AL123456_rev.gff'\n\n\nclass CDSType(Enum):\n Gene = 0\n ncRNA = 1\n rRNA = 2\n tRNA = 3\n miscRNA = 4\n upstream = 5\n\n\nclass CDS:\n # type: CDSType\n # start: int # 1-based genome coordinate\n # end: int\n # strand: int # 1 or -1\n # name: str\n # synonym: list\n # is_pseudogene: bool\n # product: str\n # exists_in_proteom: bool\n # is_hypothetical: bool\n\n def __init__(self, type: CDSType, start: int, end: int, strand: int, name: str, synonym: list=None, desc: str=None,\n locus: str=None):\n self.name: str = name\n self.start: int = start\n self.end: int = end\n self.strand: int = strand\n self.type: CDSType = type\n self.synonym: list = synonym\n self.locus: str = locus\n if desc is not None:\n self.is_pseudogene: bool = 'pseudogene' in desc\n i = desc.find('product')\n if i != -1:\n j = desc.find('\\\"', i + 7)\n k = desc.find('\\\"', j + 1)\n self.product: str = desc[j + 1: k]\n else:\n self.product: str = None\n self.exists_in_proteom: bool = 'identified in proteomics study' in desc\n self.is_hypothetical: bool = 'Hypothetical' in desc or 'hypothetical' in desc\n else:\n # if self.type != CDSType.upstream:\n # print('no desc for ' + name)\n self.is_pseudogene: bool = False\n self.product: str = None\n self.exists_in_proteom: bool = False\n self.is_hypothetical: bool = False\n\n def __lt__(self, other):\n return self.start < other.start\n\n\ndef read_annotations(upstream_length, filter_by_gene_len=True):\n genes = {}\n nc_rna = {}\n r_rna = {}\n t_rna = {}\n misc_rna = {}\n upstream = {}\n cds_desc = {}\n with open(path_to_annotations, 'r') as f:\n for line in f.readlines()[1:]:\n s = line.strip().split('\\t')\n type = s[2]\n start = int(s[3])\n end = int(s[4])\n if type in ('Repeat_region', 'mobile_element'):\n continue\n strand = 1 if s[6] == '+' else -1\n ns = 
s[8].split(';')\n name = ns[0].split(' ')[1]\n syn = None\n if 'locus_tag' in ns[-1]:\n locus = ns[-1].split(' ')[-1]\n else:\n locus = None\n if type == 'Gene':\n if len(ns) >= 1:\n for name_tag in ns[1:]:\n if 'gene_synonym' in name_tag:\n if syn is None:\n syn = []\n syn.append(name_tag.split(' ')[2])\n genes[name] = (start, end, strand, syn, locus)\n if strand == 1:\n upstream[name] = (start - upstream_length, start - 1, strand, syn, locus)\n else:\n upstream[name] = (end + 1, end + upstream_length, strand, syn, locus)\n elif type == 'ncRNA':\n if len(ns) >= 1:\n for name_tag in ns[1:]:\n if 'gene_synonym' in name_tag:\n if syn is None:\n syn = []\n syn.append(name_tag.split(' ')[2])\n nc_rna[name] = (start, end, strand, syn, locus)\n elif type == 'rRNA':\n r_rna[name] = (start, end, strand)\n elif type == 'tRNA':\n t_rna[name] = (start, end, strand)\n elif type == 'Misc._RNA':\n misc_rna[name] = (start, end, strand)\n elif type == 'CDS':\n desc = s[8].split(';')\n ns = desc[2].split()\n name = ns[1]\n # print('found desc for ' + name)\n cds_desc[name] = line\n\n cds_list = []\n for name, value in r_rna.items():\n start, end, strand, syn, locus = genes.pop(name, None)\n cds_list.append(CDS(CDSType.rRNA, start, end, strand, name, syn, cds_desc.get(name), locus))\n for name, value in nc_rna.items():\n if name in genes:\n start, end, strand, syn, locus = genes.pop(name, None)\n cds_list.append(CDS(CDSType.ncRNA, start, end, strand, name, syn, cds_desc.get(name), locus))\n else:\n cds_list.append(CDS(CDSType.ncRNA, value[0], value[1], value[2], name, desc=cds_desc.get(name)))\n for name, value in t_rna.items():\n if name in genes:\n start, end, strand, syn, locus = genes.pop(name, None)\n cds_list.append(CDS(CDSType.tRNA, start, end, strand, name, syn, cds_desc.get(name), locus))\n else:\n cds_list.append(CDS(CDSType.tRNA, value[0], value[1], value[2], name, desc=cds_desc.get(name)))\n for name, value in misc_rna.items():\n if name in genes:\n start, end, strand, 
syn, locus = genes.pop(name, None)\n cds_list.append(CDS(CDSType.miscRNA, start, end, strand, name, syn, cds_desc.get(name), locus))\n else:\n cds_list.append(CDS(CDSType.miscRNA, value[0], value[1], value[2], name, desc=cds_desc.get(name)))\n wrongly_annotated = set()\n for name, value in genes.items():\n start = value[0]\n end = value[1]\n if not filter_by_gene_len or (end - start + 1) % 3 == 0:\n cds_list.append(CDS(CDSType.Gene, start, end, value[2], name, value[3], cds_desc.get(name), value[4]))\n else:\n wrongly_annotated.add(name)\n print(name + ' len % 3 != 0')\n for name, value in upstream.items():\n if name not in wrongly_annotated:\n cds_list.append(CDS(CDSType.upstream, value[0], value[1], value[2], name, value[3]))\n cds_list.sort()\n return cds_list\n\n\ndef localize_all_variants(pos_list: 'list[int]', cds_list: 'list[CDS]', keep_genes_set: 'set[str]'=None) -> 'dict[int, CDS]':\n \"\"\"\n Finds CDS for each gene coord from all_snps list.\n\n :param pos_list: list of genome coords\n :param cds_list: list of CDS\n :param keep_genes_set: a set of CDS names, if non None - the others will be filtered out\n :return: coord to CDS dictionary\n \"\"\"\n pos_list.sort()\n res = {}\n for cds in cds_list:\n if keep_genes_set is not None and cds.name not in keep_genes_set:\n continue\n i = bisect_left(pos_list, cds.start)\n j = bisect_right(pos_list, cds.end, lo=i)\n for k in range(i, j):\n res[pos_list[k]] = cds\n return res\n\n\nif __name__ == '__main__':\n cds_list = read_annotations(upstream_length, False)\n for cds in cds_list:\n if cds.is_hypothetical and cds.exists_in_proteom:\n print(cds.name + ' is hypothetical but exists in proteom')\n if not cds.is_hypothetical and not cds.exists_in_proteom and cds.type == CDSType.Gene:\n print(cds.name + ' is not hypothetical but does not exist in 
proteom')\n","sub_path":"core/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":7097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"326230949","text":"\"\"\"\nClone of 2048 game.\n\"\"\"\n\nimport poc_2048_gui \nimport random\n\n# Directions, DO NOT MODIFY\nUP = 1\nDOWN = 2\nLEFT = 3\nRIGHT = 4\n\n# Offsets for computing tile indices in each direction.\n# DO NOT MODIFY this dictionary. \nOFFSETS = {UP: (1, 0), \n DOWN: (-1, 0), \n LEFT: (0, 1), \n RIGHT: (0, -1)} \n \ndef merge(line):\n \"\"\"\n Helper function that merges a single row or column in 2048\n \"\"\"\n result = [0]*len(line)\n counter = 0\n merged = False\n for element in line:\n if element != 0:\n if element == result[counter - 1] and merged == False:\n result[counter - 1] = element * 2\n merged = True\n else: \n result[counter] = element\n counter = counter + 1\n merged = False\n return result\n\nclass TwentyFortyEight:\n \"\"\"\n Class to run the game logic.\n \"\"\"\n\n def __init__(self, grid_height, grid_width):\n self.grid_height = grid_height\n self.grid_width = grid_width\n TwentyFortyEight.reset(self)\n self.up_list = [(0, col) for col in range(grid_width)]\n self.down_list = [(grid_height - 1, col) for col in range(grid_width)]\n self.right_list = [(row, grid_width - 1) for row in range(grid_height)]\n self.left_list = [(row, 0) for row in range(grid_height)]\n self.initial_tiles = {UP: self.up_list, DOWN: self.down_list, LEFT: self.left_list, RIGHT: self.right_list} \n \n def reset(self):\n \"\"\"\n Reset the game so the grid is empty.\n \"\"\"\n self.board = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\n \n def __str__(self):\n \"\"\"\n Return a string representation of the grid for debugging.\n \"\"\"\n return str(self.board)\n \n def get_grid_height(self):\n \"\"\"\n Get the height of the board.\n \"\"\"\n return self.grid_height\n \n def get_grid_width(self):\n \"\"\"\n Get the width of the board.\n \"\"\"\n return self.grid_width\n \n def move(self, direction):\n \"\"\"\n Move all tiles in the given direction and add\n a new tile if any tiles moved.\n \"\"\"\n 
merge_toward = self.initial_tiles[direction] \n temp_board = self.board\n new_number_flag = False\n if direction == LEFT or direction == RIGHT:\n range_variable = self.grid_width\n elif direction == UP or direction == DOWN:\n range_variable = self.grid_height\n for item in merge_toward:\n temp_list = []\n for counter in range(range_variable):\n temp_list.append([item[0] + counter*OFFSETS[direction][0], item[1] + counter*OFFSETS[direction][1]])\n numbers_list = []\n for position in range(len(temp_list)):\n numbers_list.append(self.board[temp_list[position][0]][temp_list[position][1]])\n merged_numbers_list = merge(numbers_list)\n if merged_numbers_list != numbers_list:\n new_number_flag = True \n for position in range(len(temp_list)):\n temp_board[temp_list[position][0]][temp_list[position][1]] = merged_numbers_list[position]\n self.board = temp_board\n if new_number_flag == True:\n self.new_tile() \n new_number_flag = False\n\n def new_tile(self):\n \"\"\"\n Create a new tile in a randomly selected empty \n square. The tile should be 2 90% of the time and\n 4 10% of the time.\n \"\"\"\n new_row = random.randrange(0, self.grid_height) \n new_col = random.randrange(0, self.grid_width) \n random_number = random.randrange(0, 10)\n if random_number == 0:\n new_number = 4\n else: \n new_number = 2\n if self.board[new_row][new_col] == 0:\n self.board[new_row][new_col] = new_number\n else:\n self.new_tile()\n \n def set_tile(self, row, col, value):\n \"\"\"\n Set the tile at position row, col to have the given value.\n \"\"\" \n self.board[row][col] = value\n\n def get_tile(self, row, col):\n \"\"\"\n Return the value of the tile at position row, col.\n \"\"\" \n return self.board[row][col]\n\npoc_2048_gui.run_gui(TwentyFortyEight(4, 5))","sub_path":"twentyfortyeight.py","file_name":"twentyfortyeight.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"155778082","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nfrom datetime import datetime\nimport logging\nimport hashlib\nimport uuid\nfrom scoring import get_interests, get_score\nfrom optparse import OptionParser\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom store import Store\nimport re\nfrom settings.api_config import *\n\n\nclass AbstractField(object):\n def __init__(self, required=False, nullable=True):\n self.required = required\n self.nullable = nullable\n self.value = None\n\n def __get__(self, instance, owner):\n return self.value\n\n def __set__(self, instance, value):\n if (not self.nullable or self.required) and value is None:\n raise ValueError(\"Must be not Null: %s\" % value)\n self.validate(value)\n self.value = value\n\n def validate(self, value):\n return\n\n\nclass CharField(AbstractField):\n def validate(self, value):\n if value and not isinstance(value, str):\n raise ValueError(\"Must be string: %s\" % value)\n\n\nclass ArgumentsField(AbstractField):\n def validate(self, value):\n if not isinstance(value, dict):\n raise ValueError(\"Must be dict: %s\" % value)\n\n\nclass EmailField(CharField):\n def validate(self, value):\n super().validate(value)\n if value and not ('@' in value):\n raise ValueError(\"Must contain @: %s\" % value)\n\n\nclass PhoneField(AbstractField):\n def validate(self, value):\n phone_pattern = re.compile('^7.{10}$')\n if not value:\n return\n elif not isinstance(value, (str, int)):\n raise ValueError(\"Must be str or int: %s\" % value)\n elif not phone_pattern.match(str(value)):\n raise ValueError(\"Not a phone number: %s\" % value)\n\n\nclass DateField(AbstractField):\n def validate(self, value):\n if value:\n datetime.strptime(str(value), '%d.%m.%Y')\n\n\nclass BirthDayField(AbstractField):\n def validate(self, value):\n if value and not (datetime.now() - datetime.strptime(str(value), '%d.%m.%Y')).days <= 365 * 70:\n raise ValueError(\"Must be <=70 years old: %s\" % 
value)\n\n\nclass GenderField(AbstractField):\n def validate(self, value):\n if value and value not in (UNKNOWN, MALE, FEMALE):\n raise ValueError(\"Must be int 0, 1 or 2: %s\" % value)\n\n\nclass ClientIDsField(AbstractField):\n def validate(self, value):\n if not isinstance(value, list) or any(not isinstance(x, int) for x in value):\n raise ValueError(\"Must be list of ints: %s\" % value)\n elif len(value) == 0:\n raise ValueError(\"Clients list is empty: %s\" % value)\n\n\nclass ClientsInterestsRequest(object):\n client_ids = ClientIDsField(required=True)\n date = DateField(required=False, nullable=True)\n\n def __init__(self, client_ids, date=None):\n self.client_ids = client_ids\n self.date = date\n\n\nclass OnlineScoreRequest(object):\n first_name = CharField(required=False, nullable=True)\n last_name = CharField(required=False, nullable=True)\n email = EmailField(required=False, nullable=True)\n phone = PhoneField(required=False, nullable=True)\n birthday = BirthDayField(required=False, nullable=True)\n gender = GenderField(required=False, nullable=True)\n\n def __init__(self, first_name=None, last_name=None, email=None, phone=None, birthday=None, gender=None):\n self.first_name = first_name\n self.last_name = last_name\n self.email = email\n self.phone = phone\n self.birthday = birthday\n self.gender = gender\n\n if not ((phone and email) or (first_name and last_name) or (gender is not None and birthday)):\n raise AttributeError('Must be at least one pair: phone+email or first+last_name or gender+birthday')\n\n\nclass MethodRequest(object):\n account = CharField(required=False, nullable=True)\n login = CharField(required=True, nullable=True)\n token = CharField(required=True, nullable=True)\n arguments = ArgumentsField(required=True, nullable=True)\n method = CharField(required=True, nullable=False)\n\n @property\n def is_admin(self):\n return self.login == ADMIN_LOGIN\n\n def __init__(self, login, token, arguments, method, account=None):\n self.account = 
account\n self.login = login\n self.token = token\n self.arguments = arguments\n self.method = method\n\n\ndef check_auth(request):\n if request.is_admin:\n digest = hashlib.sha512((datetime.now().strftime(\"%Y%m%d%H\") + ADMIN_SALT).encode('utf8')).hexdigest()\n else:\n digest = hashlib.sha512((request.account + request.login + SALT).encode('utf8')).hexdigest()\n if digest == request.token:\n return True\n return False\n\n\ndef method_handler(request, ctx, store):\n methods = {\n 'online_score' : get_score_handler,\n 'clients_interests': get_interests_handler\n }\n try:\n method_request = MethodRequest(**request['body'])\n if check_auth(method_request):\n response, code = methods[method_request.method](method_request, ctx, store)\n else:\n response, code = ERRORS[FORBIDDEN], FORBIDDEN\n except (TypeError, ValueError, AttributeError):\n response, code = ERRORS[INVALID_REQUEST], INVALID_REQUEST\n return response, code\n\n\ndef get_score_handler(method_request, ctx, store):\n score_request = OnlineScoreRequest(**method_request.arguments)\n score = get_score(store,\n phone=score_request.phone,\n email=score_request.email,\n birthday=score_request.birthday,\n gender=score_request.gender,\n first_name=score_request.first_name,\n last_name=score_request.last_name)\n ctx['has'] = method_request.arguments\n return {'score': score}, OK\n\n\ndef get_interests_handler(method_request, ctx, store):\n interests_request = ClientsInterestsRequest(**method_request.arguments)\n ctx['nclients'] = len(interests_request.client_ids)\n return {client_id: get_interests(store, client_id) for client_id in interests_request.client_ids}, OK\n\n\nclass MainHTTPHandler(BaseHTTPRequestHandler):\n router = {\n \"method\": method_handler\n }\n store = Store()\n\n def get_request_id(self, headers):\n return headers.get('HTTP_X_REQUEST_ID', uuid.uuid4().hex)\n\n def do_POST(self):\n response, code = {}, OK\n context = {\"request_id\": self.get_request_id(self.headers)}\n request = None\n try:\n 
data_string = self.rfile.read(int(self.headers['Content-Length']))\n request = json.loads(data_string)\n except Exception:\n code = BAD_REQUEST\n if request:\n path = self.path.strip(\"/\")\n logging.info(\"%s: %s %s\" % (self.path, data_string, context[\"request_id\"]))\n if path in self.router:\n try:\n response, code = self.router[path]({\"body\": request, \"headers\": self.headers}, context, self.store)\n except Exception as e:\n logging.exception(\"Unexpected error: %s\" % e)\n code = INTERNAL_ERROR\n else:\n code = NOT_FOUND\n self.send_response(code)\n self.send_header(\"Content-Type\", \"application/json\")\n self.end_headers()\n if code not in ERRORS:\n r = {\"response\": response, \"code\": code}\n else:\n r = {\"error\": response or ERRORS.get(code, \"Unknown Error\"), \"code\": code}\n context.update(r)\n logging.info(context)\n self.wfile.write(json.dumps(r).encode('utf8'))\n return\n\n\nif __name__ == \"__main__\":\n op = OptionParser()\n op.add_option(\"-p\", \"--port\", action=\"store\", type=int, default=8080)\n op.add_option(\"-l\", \"--log\", action=\"store\", default=None)\n (opts, args) = op.parse_args()\n logging.basicConfig(filename=opts.log, level=logging.INFO,\n format='[%(asctime)s] %(levelname).1s %(message)s', datefmt='%Y.%m.%d %H:%M:%S')\n server = HTTPServer((\"localhost\", opts.port), MainHTTPHandler)\n logging.info(\"Starting server at %s\" % opts.port)\n try:\n server.serve_forever()\n except KeyboardInterrupt:\n pass\n server.server_close()\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"146388015","text":"import torch\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nimport random\nfrom torch.utils.data import Dataset, DataLoader\n\n# device and seed configure\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# for reproducibility\nrandom.seed(777)\ntorch.manual_seed(777)\nif device == 'cuda':\n torch.cuda.manual_seed_all(777)\n\n# parameters\nlearning_rate = 0.001\nepochs = 15\nbatch_size = 100\n\n# dataset\nmnist_train = dsets.MNIST(root='MNIST_data/', train=True,\n transform=transforms.ToTensor(), download=True)\nmnist_test = dsets.MNIST(root='MNIST_data/', train=False,\n transform=transforms.ToTensor(), download=True)\n\n\n# dataloader\ndata_loader = DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, drop_last=True)\n# drop_last 는 batch_size에 맞춰서 이미지를 나눠서 가지고 오고 마지막에 batch_size에 맞지 않아 남은 이지미들을 어떻게\n# 처리할지를 다루는 인자인데, True를 사용하면 나머지 이미지를 사용하지 않겠다는 뜻이다.\n\n\n# nn Linear layer 만들기\nlinear1 = torch.nn.Linear(784, 256, bias=True)\nlinear2 = torch.nn.Linear(256, 256, bias=True)\nlinear3 = torch.nn.Linear(256, 10, bias=True)\nrelu = torch.nn.ReLU()\n\n# Linear layer의 weight를 normalization시켜주기\ntorch.nn.init.xavier_uniform_(linear1.weight)\ntorch.nn.init.xavier_uniform_(linear2.weight)\ntorch.nn.init.xavier_uniform_(linear3.weight)\n\n# 모델 정의\nmodel = torch.nn.Sequential(linear1, relu, linear2, relu, linear3).to(device)\n###>>> 왜 linear3다음에는 relu를 하지않는가\n###>>> 우리가 사용할 criterion은 CrossEntropyLoss인데 여기에는 마지막에 softmax activation이 포함되어 있기 때문이다.\n\n# loss / optimizer 정의\ncriterion = torch.nn.CrossEntropyLoss().to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\ntotal_batch = len(data_loader)\nfor epoch in range(epochs):\n avg_loss = 0\n for X, Y in data_loader:\n X = X.view(-1, 28*28).to(device)\n Y = Y.to(device)\n\n hypothesis = model(X)\n loss = criterion(hypothesis, Y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n avg_loss += 
loss / total_batch\n\n print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_loss))\n\nprint('Learning finished')\n\n\nwith torch.no_grad(): # --> 여기에서는(test에서는) gradient를 계산하지 않고 진행한다는 뜻이다.\n X_test = mnist_test.test_data.view(-1, 28*28).float().to(device)\n Y_test = mnist_test.test_labels.to(device)\n\n prediction = model(X_test)\n correct_prediction = torch.argmax(prediction, dim=1) == Y_test\n accuracy = correct_prediction.float().mean()\n print('Accuracy:', accuracy.item())\n\n r = random.randint(0, len(mnist_test) - 1)\n X_single_data = mnist_test.test_data[r:r + 1].view(-1, 28 * 28).float().to(device)\n Y_single_data = mnist_test.test_labels[r:r + 1].to(device)\n\n print('Label: ', Y_single_data.item())\n single_prediction = model(X_single_data)\n print('Prediction: ', torch.argmax(single_prediction, 1).item())\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"xavier_initialization_MNIST.py","file_name":"xavier_initialization_MNIST.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"184552393","text":"#/usr/bin/env python3\n\nfrom pirc522 import RFID\n\nBLKSIZE = 16\n\nSECT_ST = 4\nAID_BLOCK = 4\nNAME_BLOCK = 5\nSURNAME_BLOCK = 6\n\ndef strtoblk(string):\n blk = list(bytes(string.encode('ascii')))\n while len(blk) < BLKSIZE:\n blk += [0]\n return blk\n\n\ndef inttoblk(num):\n return (num).to_bytes(BLKSIZE, 'little') \n\n\nclass CardHandler(RFID):\n\n def __del__(self):\n self.cleanup()\n\n def _interact_card(self):\n self.wait_for_tag()\n error, tag_type = self.request()\n if error:\n return False\n error, uid = self.anticoll()\n if error:\n return False\n if self.select_tag(uid):\n return False\n if not self.card_auth(self.auth_a, SECT_ST, [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF], uid):\n return True\n\n def read_card(self):\n if not self._interact_card():\n return None\n error, aid_list = self.read(AID_BLOCK)\n if error:\n return None\n error, sname_list = self.read(NAME_BLOCK)\n if error:\n return None\n error, fname_list = self.read(SURNAME_BLOCK)\n if error:\n return None\n aid = int.from_bytes(bytes(aid_list), 'little')\n sname = bytes(sname_list).decode('ascii').rstrip('\\0')\n fname = bytes(fname_list).decode('ascii').rstrip('\\0')\n self.stop_crypto()\n return aid, sname, fname\n\n def write_card(self, aid, sname, fname):\n if not self._interact_card():\n return False\n if self.write(AID_BLOCK, inttoblk(aid)):\n return False\n if self.write(NAME_BLOCK, strtoblk(sname)):\n return False\n if self.write(SURNAME_BLOCK, strtoblk(fname)):\n return False\n return True\n","sub_path":"server/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"567071336","text":"\n# coding: utf-8\n\n# In[1]:\n\n### This file contains the Naive Bayes Implementation on the Amazon Review Dataset.\n### The below contains the libraries used.\n### @Author: Chaitanya Sri Krishna Lolla.\nimport pandas as pd\nimport numpy as np\nimport nltk\nimport string\n#get_ipython().magic('matplotlib inline')\n#import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn import metrics\nfrom nltk.corpus import stopwords\nfrom sklearn.naive_bayes import GaussianNB\n\n\n# In[2]:\n\n## Loading of the Training dataset and splitting the classes into two classes positive and negative.\nreviews = pd.read_csv('amazon_baby_train.csv')\nreviews.shape\nreviews = reviews.dropna()\nreviews.shape\n\nscores = reviews['rating']\nreviews['rating'] = reviews['rating'].apply(lambda x: 1 if x >= 3 else 0)\nprint(\"Done formation of the Training dataset.\")\n## The calculation of the Mean and standard deviation for the Training Classes.\nprint(\"The mean of output classes in the Training dataset is:\")\nprint(scores.mean())\nprint(\"The standard deviation of the output classes in the Training dataset is:\")\nprint(scores.std())\n\n\n# In[3]:\n\n### Distribution of the Training Output classes.\n#reviews.groupby('rating')['review'].count()\n\n\n# In[4]:\n\n#reviews.groupby('rating')['review'].count().plot(kind='bar', color= ['r','g'],title='Label Distribution', figsize = (10,6))\n\n\n# In[5]:\n\n## This method is responsible for splitting the data into positive and negative reviews.\ndef splitPosNeg(Summaries):\n neg = reviews.loc[Summaries['rating']== 0]\n pos = reviews.loc[Summaries['rating']== 1]\n return [pos,neg]\n\n\n# In[6]:\n\n[pos,neg] = splitPosNeg(reviews)\n\n\n# In[7]:\n\n## Pre Processing Steps which uses lemmitizer and stopwords to clean the reviews.\nlemmatizer = nltk.WordNetLemmatizer()\nstop = 
stopwords.words('english')\ntranslation = str.maketrans(string.punctuation,' '*len(string.punctuation))\n\ndef preprocessing(line):\n tokens=[]\n line = line.translate(translation)\n line = nltk.word_tokenize(line.lower())\n stops = stopwords.words('english')\n stops.remove('not')\n stops.remove('no')\n line = [word for word in line if word not in stops]\n for t in line:\n stemmed = lemmatizer.lemmatize(t)\n tokens.append(stemmed)\n return ' '.join(tokens)\n\n\n# In[8]:\n\n### This method actually preprocesses the data.\npos_data = []\nneg_data = []\nfor p in pos['review']:\n pos_data.append(preprocessing(p))\n\nfor n in neg['review']:\n neg_data.append(preprocessing(n))\nprint(\"Done forming the positive and negative reveiws.\")\n\n\n# In[9]:\n\n### The formation of the Training Data.\ntraining_data = pos_data + neg_data\ntraining_labels = np.concatenate((pos['rating'].values,neg['rating'].values))\nprint(\"Done formation of the training features.\")\n\n\n# In[10]:\n\n### This tokenizes the training data using word_tokenize.\ntokens = []\nfor line in training_data:\n l = nltk.word_tokenize(line)\n for w in l:\n tokens.append(w)\n\n\n# In[11]:\n\n### This tries to find the word features from the training dataset using the frequency distribution.\nword_features = nltk.FreqDist(tokens)\nprint(len(word_features))\n\n\n# In[12]:\n\n### Identifying the training top words for formation of the sparse matrix.\ntraining_topwords = [fpair[0] for fpair in list(word_features.most_common(5000))]\nprint(word_features.most_common(25))\n\n\n# In[13]:\n\n## Printing the top 20 words and its count.\nword_his = pd.DataFrame(word_features.most_common(20), columns = ['words','count'])\nprint(word_his)\n\n\n# In[14]:\n\n### This method is repsonsible for forming the sparse matrix using the training top words.\nvec = CountVectorizer()\nc_fit = vec.fit_transform([' '.join(training_topwords)])\n\n\n# In[15]:\n\ntf_vec = TfidfTransformer()\ntf_fit = tf_vec.fit_transform(c_fit)\n\n\n# 
In[16]:\n\n## This is responsible for forming the training features using the training data and top words. \nctr_features = vec.transform(training_data)\ntraining_features = tf_vec.transform(ctr_features)\n\n\n# In[17]:\n\nprint(training_features.shape)\n\n\n# In[20]:\n\n### Naive Bayes Classification model using Gaussian Naive Bayes Implementation without any priors.\nclf = GaussianNB()\ntraining_features = training_features.toarray()\nclf = clf.fit(training_features,training_labels)\nprint(\"Classification is Done using Gaussian NB.\")\n\n\n# In[ ]:\n\n## Formation of the Errors and Accuracies of Training dataset.\noutput_Predicted = clf.predict(training_features);\naccuracy_training = metrics.accuracy_score(output_Predicted,training_labels)\nprint(accuracy_training* 100)\n\n\n# In[ ]:\n\n### This is responsible for loading the Testing dataset.\nreviews = pd.read_csv('amazon_baby_test.csv')\nreviews.shape\nreviews = reviews.dropna()\nreviews.shape\n\n\nscores = reviews['rating']\nreviews['rating'] = reviews['rating'].apply(lambda x: 1 if x >= 3 else 0)\nprint(\"Done loading the testing dataset.\")\n\nprint(\"Mean of the output classes in Testing dataset:\")\nprint(scores.mean())\nprint(\"Standard Deviation of the output classes in the Testing dataset is:\")\nprint(scores.std())\n\n\n# In[ ]:\n\n### Splitting the testing data into postive and negative reviews.\n[pos,neg] = splitPosNeg(reviews)\n\n\n# In[ ]:\n\npos_data = []\nneg_data = []\nfor p in pos['review']:\n pos_data.append(preprocessing(p))\n\nfor n in neg['review']:\n neg_data.append(preprocessing(n))\nprint(\"Done forming the positive and negative reviews in testing dataset.\")\n\n\n# In[ ]:\n\n## Formation of the Testing data.\ntesting_data = pos_data + neg_data\ntesting_labels = np.concatenate((pos['rating'].values,neg['rating'].values))\n\n\n# In[ ]:\n\n## Formation of tokesn in testing dataset.\nt = []\nfor line in testing_data:\n l = nltk.word_tokenize(line)\n for w in l:\n t.append(w)\n\n\n# In[ 
]:\n\n## Formation of the word features for the testing dataset.\nword_features = nltk.FreqDist(t)\nprint(len(word_features))\n\n\n# In[ ]:\n\ntopwords = [fpair[0] for fpair in list(word_features.most_common(5002))]\nprint(word_features.most_common(25))\n\n\n# In[ ]:\n\n## Printing the count and top words formed.\nword_his = pd.DataFrame(word_features.most_common(20), columns = ['words','count'])\nprint(word_his)\n\n\n# In[ ]:\n\n### Formation of the sparse matrix\nvec = CountVectorizer()\nc_fit = vec.fit_transform([' '.join(topwords)])\n\n\n# In[ ]:\n\ntf_vec = TfidfTransformer()\ntf_fit = tf_vec.fit_transform(c_fit)\n\n\n# In[ ]:\n\ncte_features = vec.transform(testing_data)\nte_features = tf_vec.transform(cte_features)\n\n\n# In[ ]:\n\nte_features.shape\n\n\n# In[ ]:\n\nte_features = te_features.toarray()\ntePredication = clf.predict(te_features)\nteAccuracy = metrics.accuracy_score(tePredication,testing_labels)\nprint(\"Accuracy of the Testing dataset is:\")\nprint(teAccuracy * 100)\n\n\n# In[ ]:\n\n# printing the metrics\nprint(metrics.classification_report(labels, tePredication))\n\n","sub_path":"NaiveBayes/SL_NaiveBayes_AmazonReview.py","file_name":"SL_NaiveBayes_AmazonReview.py","file_ext":"py","file_size_in_byte":6823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"494633166","text":"import json\r\n\r\nimport numpy as np\r\n\r\nimport analysis\r\nimport database\r\nimport util\r\n\r\nlogger = util.Logger(__name__)\r\n\r\n\r\nclass AverageAggression:\r\n def on_get(self, req, resp):\r\n logger.info(\"GET /average/aggression\")\r\n conn = database.get_connection()\r\n games = database.select_all_games(conn=conn)\r\n\r\n stats = {\r\n \"kp\": [], \"fw_kills\": [], \"positioning\": [], \"ganking\": [], \"aggression\": 0\r\n }\r\n\r\n for game in games:\r\n team_kills = database.select_kill_timeline(conn=conn, game_id=game[\"gameid\"], team_id=game[\"s1_teamid\"])\r\n p1_frames = database.select_participant_frames(conn=conn, participant_id=game[\"s1_participantid\"])\r\n frames = database.select_game_frames(conn=conn, game_id=game[\"gameid\"])\r\n kills = database.select_all_kill_timeline(conn=conn, game_id=game[\"gameid\"])\r\n\r\n # Kill Participation\r\n stats[\"kp\"].append(analysis.base_analysis.kill_participation(\r\n participant=game[\"s1_participantid\"],\r\n kills=team_kills,\r\n ))\r\n stats[\"fw_kills\"].append(analysis.aggression.forward_kills(\r\n participant=game[\"s1_participantid\"],\r\n kills=team_kills\r\n ))\r\n stats[\"positioning\"].append(analysis.aggression.positioning(\r\n team_id=game[\"s1_teamid\"],\r\n frames=p1_frames\r\n ))\r\n stats[\"ganking\"].append(analysis.aggression.ganking(\r\n participant=game[\"s1_participantid\"],\r\n role=util.get_canonic_lane(lane=game[\"s1_lane\"], role=game[\"s1_role\"]),\r\n frames=frames,\r\n kills=kills\r\n ))\r\n\r\n stats[\"kp\"] = np.nanmean(np.array(stats[\"kp\"]))\r\n stats[\"fw_kills\"] = np.nanmean(np.array(stats[\"fw_kills\"]))\r\n stats[\"positioning\"] = np.nanmean(np.array(stats[\"positioning\"]))\r\n stats[\"ganking\"] = np.nanmean(np.array(stats[\"ganking\"]))\r\n\r\n stats[\"aggression\"] = analysis.aggression.aggression(stats[\"kp\"], stats[\"fw_kills\"], stats[\"positioning\"],\r\n stats[\"ganking\"])\r\n\r\n resp.body = 
json.dumps(stats)\r\n\r\n\r\nclass AverageBasics:\r\n def on_get(self, req, resp):\r\n logger.info(\"GET /average/aggression\")\r\n conn = database.get_connection()\r\n games = database.select_all_games(conn=conn)\r\n\r\n wr = analysis.base_analysis.win_rate(games)\r\n\r\n kda = {\"kills\": 0, \"deaths\": 0, \"assists\": 0}\r\n cs = 0\r\n gold_diff = {\"overall\": [], \"early\": [], \"mid\": [], \"late\": []}\r\n\r\n for game in games:\r\n stats = database.select_stats(conn=conn, statid=game[\"s1_statid\"])\r\n kda[\"kills\"] += stats.kills\r\n kda[\"deaths\"] += stats.deaths\r\n kda[\"assists\"] += stats.assists\r\n\r\n cs += stats.total_minions_killed\r\n\r\n p1_frames = database.select_participant_frames(conn=conn, participant_id=game[\"s1_participantid\"])\r\n p1_opponent = database.select_opponent(\r\n conn=conn,\r\n participant_id=game[\"s1_participantid\"],\r\n game_id=game[\"gameid\"],\r\n position=(game[\"s1_lane\"], game[\"s1_role\"])\r\n )\r\n if p1_opponent is not None:\r\n p1_opponent_frames = database.select_participant_frames(conn=conn,\r\n participant_id=p1_opponent.participant_id)\r\n if len(p1_opponent_frames) > 0:\r\n p1_gold_diff = analysis.base_analysis.gold_diff(frames=p1_frames,\r\n opponent_frames=p1_opponent_frames)\r\n gold_diff[\"overall\"].append(p1_gold_diff[\"overall\"])\r\n gold_diff[\"early\"].append(p1_gold_diff[\"early\"])\r\n gold_diff[\"mid\"].append(p1_gold_diff[\"mid\"])\r\n gold_diff[\"late\"].append(p1_gold_diff[\"late\"])\r\n\r\n gold_diff[\"overall\"] = np.nanmean(np.array(gold_diff[\"overall\"]))\r\n gold_diff[\"early\"] = np.nanmean(np.array(gold_diff[\"early\"]))\r\n gold_diff[\"mid\"] = np.nanmean(np.array(gold_diff[\"mid\"]))\r\n gold_diff[\"late\"] = np.nanmean(np.array(gold_diff[\"late\"]))\r\n\r\n resp.body = json.dumps({\r\n \"win_rate\": wr,\r\n \"kda\": analysis.base_analysis.avg_kda(kda[\"kills\"], kda[\"deaths\"], kda[\"assists\"]),\r\n \"cs\": cs / len(games),\r\n \"gold_diff\": gold_diff,\r\n 
})\r\n\r\n\r\nclass AverageWinRate:\r\n def on_get(self, req, resp):\r\n \"\"\"Calculates classification model for Millionaire class.\"\"\"\r\n logger.info(\"GET /average/win-rate\")\r\n conn = database.get_connection()\r\n\r\n summoners = database.select_all_summoners(conn=conn)\r\n rates = []\r\n\r\n for summoner in summoners:\r\n games = database.select_summoner_games(conn=conn, account_id=summoner[\"accountid\"])\r\n if len(games) > 2:\r\n wr = analysis.base_analysis.win_rate(games)\r\n rates.append(wr)\r\n\r\n resp.body = json.dumps({\r\n \"wr\": np.nanmean(rates)\r\n })\r\n\r\n\r\nclass AverageCs:\r\n def on_get(self, req, resp):\r\n logger.info(\"GET /average/aggression\")\r\n conn = database.get_connection()\r\n games = database.select_all_games(conn=conn)\r\n\r\n shares = []\r\n cs_diff = {\"overall\": [], \"early\": [], \"mid\": [], \"late\": []}\r\n\r\n for game in games:\r\n stats = database.select_stats(conn=conn, statid=game[\"s1_statid\"])\r\n team_cs = database.select_team_cs(conn=conn, team_id=game[\"s1_teamid\"], game_id=game[\"gameid\"])\r\n\r\n e_jgl = stats.neutral_minions_killed_enemy_jungle if stats.neutral_minions_killed_enemy_jungle is not None else 0\r\n t_jgl = stats.neutral_minions_killed_team_jungle if stats.neutral_minions_killed_team_jungle is not None else 0\r\n\r\n cs = stats.total_minions_killed + e_jgl + t_jgl\r\n if team_cs[0][\"cs\"] is None:\r\n continue\r\n cs_share = analysis.base_analysis.cs_share(cs, team_cs[0][\"cs\"])\r\n shares.append(cs_share)\r\n\r\n p1_frames = database.select_participant_frames(conn=conn, participant_id=game[\"s1_participantid\"])\r\n p1_opponent = database.select_opponent(\r\n conn=conn,\r\n participant_id=game[\"s1_participantid\"],\r\n game_id=game[\"gameid\"],\r\n position=(game[\"s1_lane\"], game[\"s1_role\"])\r\n )\r\n if p1_opponent is not None:\r\n p1_opponent_frames = database.select_participant_frames(conn=conn,\r\n participant_id=p1_opponent.participant_id)\r\n if len(p1_opponent_frames) > 
0:\r\n p1_cs_diff = analysis.base_analysis.cs_diff(frames=p1_frames,\r\n opponent_frames=p1_opponent_frames)\r\n cs_diff[\"overall\"].append(p1_cs_diff[\"overall\"])\r\n cs_diff[\"early\"].append(p1_cs_diff[\"early\"])\r\n cs_diff[\"mid\"].append(p1_cs_diff[\"mid\"])\r\n cs_diff[\"late\"].append(p1_cs_diff[\"late\"])\r\n\r\n resp.body = json.dumps({\r\n \"cs_share\": np.nanmean(shares),\r\n \"cs_diff\": {\r\n \"overall\": np.nanmean(cs_diff[\"overall\"]),\r\n \"early\": np.nanmean(cs_diff[\"early\"]),\r\n \"mid\": np.nanmean(cs_diff[\"mid\"]),\r\n \"late\": np.nanmean(cs_diff[\"late\"]),\r\n }\r\n })\r\n","sub_path":"analyzer/views/Averages.py","file_name":"Averages.py","file_ext":"py","file_size_in_byte":7719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"498168384","text":"#! /usr/bin/env python2\n# -*- coding: utf-8 -*-\nfrom twisted.internet import reactor\nfrom twisted.internet.protocol import ServerFactory, Protocol\nfrom twisted.internet.protocol import Protocol, Factory\nfrom twisted.protocols import basic\nimport msg\n\nPROTOCOL = 3\n\nclass ScrabbleProtocol(basic.NetstringReceiver):\n def connectionMade(self):\n print(\"Connect %s\" % self.transport.getPeer())\n m = msg.msg(\"serverok\", (PROTOCOL,))\n self.envoi(m)\n app = self.factory.parent\n if not app.partie_on :\n reactor.callLater(0, app.debut_game, app.options.inter)\n\n def connectionLost(self, reason):\n self.factory.parent.deconnect(self)\n print(\"Deconnect %s\" % self.transport.getPeer())\n\n def stringReceived(self, dump):\n mm = msg.msg.load(dump)\n if self.factory.parent.options.verbose :\n print(\"<- %s\" % mm)\n self.factory.parent.traite(self, mm)\n\n def envoi(self, mm):\n if self.factory.parent.options.verbose :\n print(\"-> %s\" % mm)\n self.sendString(mm.dump())\n return mm\n\nclass ScrabbleFactory(Factory):\n protocol = ScrabbleProtocol\n def __init__(self, parent):\n self.parent = parent\n","sub_path":"server/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"480332681","text":"def lesser_of_two_evens(a,b):\n\t# check the both number are evens\n\targ1 = a\n\targ2 = b\n\tif arg1 % 2 == 0 and arg2 % 2 == 0:\n\t\tif arg1 > arg2:\n\t\t\treturn arg2\n\t\telif arg1 < arg2:\n\t\t\treturn arg1\n\t\telif arg1 == arg2:\n\t\t\treturn 'both even numbers are equal'\n\telif arg1 % 2 == 0 and arg2 % 2 != 0:\n\t\tif arg1 > arg2:\n\t\t\treturn arg1\n\t\telse:\n\t\t\treturn arg2\n\telif arg1 % 2 != 0 and arg2 % 2 == 0:\n\t\tif arg1 > arg2:\n\t\t\treturn arg1\n\t\telse:\n\t\t\treturn arg2\n\n\nresult = lesser_of_two_evens(2,4)\nprint(result)\nresult = lesser_of_two_evens(2,5)\nprint(result)\n\ndef animal_cracker(text):\n\tstring = text\n\tstring_list = string.split()\n\tprint(string_list[0], string_list[1])\n\tif string_list[0][0] == string_list[1][0]:\n\t\treturn True\n\n\telse:\n\t\treturn False\n\n\nresult = animal_cracker('Levelheaded Llama')\nprint(result)\nresult = animal_cracker('Crazy Kangaroo')\nprint(result)\n\ndef makes_twenty(n1,n2):\n\tif n1 == 20 or n2 == 20:\n\t\treturn True\n\telse:\n\t\tsum_of_two_numbers = n1 + n2\n\t\tif sum_of_two_numbers == 20:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\nresult = makes_twenty(20,10)\nprint(result)\nresult = makes_twenty(12,8)\nprint(result)\nresult = makes_twenty(2,3)\nprint(result)\n\nprint('\\n')\n\ndef old_macdonald(name):\n\tfirst_half = name[:3]\n\tsecond_half = name[3:]\n\treturn first_half.capitalize()+second_half.capitalize()\n\n\nresult = old_macdonald('macdonald')\nprint(result)\n\ndef master_yoda(text):\n\tmy_word_list = text.split()\n\treversed_list = my_word_list[::-1]\n\treturn ' '.join(reversed_list)\n\t\t\n\t\n\nresult = master_yoda('i am home')\nprint(result) \n\n\ndef has_33(number_list):\n\tfor i in range(0, len(number_list)-1):\n\t\tif number_list[i] == 3 and number_list[i+1] == 3:\n\t\t\treturn True\n\n\treturn False\n\n\nresult = has_33([1,3,3])\nprint(result)\nresult = has_33([1,3,1,3])\nprint(result)\nresult = 
has_33([3,1,3])\nprint(result)\n\ndef paper_doll(text):\n\tstring = ''\n\tfor i in text:\n\n\t\tstring = string + i*3\n\treturn string\n\n\nresult = paper_doll('hello')\nprint(result)\nresult = paper_doll('Mississippi')\nprint(result)\n\ndef blackjack(a,b,c):\n\tsum = a + b + c\n\tif sum <= 21:\n\t\treturn sum\n\telse:\n\t\ttotal = sum - 11\n\t\ttotal = total + 10\n\t\tif total <= 21:\n\t\t\treturn total\n\t\telse:\n\t\t\treturn 'Bust'\n\nresult = blackjack(5,6,7)\nprint(result)\nresult = blackjack(9,9,9)\nprint(result)\nresult = blackjack(9,9,11)\nprint(result)\n\ndef spy_game(arr):\n\tnew_array = [0,0,7,'x']\n\tfor num in arr:\n\t\tif num == new_array[0]:\n\t\t\tnew_array.pop(0)\n\treturn len(new_array) == 1\n\nresult = spy_game([1,2,4,0,0,7,5])\nprint(result)\nresult = spy_game([1,0,2,4,0,5,7])\nprint(result)\n\nresult = spy_game([1,7,2,0,4,5,0])\nprint(result)\n\n\n\n\n\n\n","sub_path":"functionPractice.py","file_name":"functionPractice.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"583015443","text":"import pandas as pd\nfrom pprint import pprint\nimport time as time\nimport signal\nimport re\n\nfrom tqdm import tqdm\n\nimport scipy\nfrom scipy import sparse\nfrom scipy import stats\n\nimport pubchempy as pcp\nfrom chembl_webresource_client.new_client import new_client \nimport json\nimport requests\n\nimport copy\n\nimport sys\nsys.path.append(\"..\")\nimport utils\nimport numpy as np\n\n\n\n#all labels:\ninteraction_matrix = sparse.load_npz('../data/interaction_matrix_pchembl.npz')\nsmiles = pd.read_csv('../data/pchembl_chemicals.csv')\ntargets = pd.read_csv('../data/subset_targets.csv')\nprobability_matrix = utils.train_label_correlation(interaction_matrix)\nprobability_arr = probability_matrix.toarray()\n\narr = probability_matrix.toarray()\narr = arr - interaction_matrix\narr_sorted = np.dstack(np.unravel_index(np.argsort(-arr.ravel()), (arr.shape[0], arr.shape[1])))[0]\nprint('Should be a high number < 1:')\nprint(probability_arr[arr_sorted[0][0]][arr_sorted[0][1]])\nprint('Should be a low number >= 0:')\nprint(probability_arr[arr_sorted[-1][0]][arr_sorted[-1][1]])\nprint('Sorted array indices:')\n\n\n\ndef clean_text(input_string):\n #source: https://stackoverflow.com/questions/34860982/replace-the-punctuation-with-whitespace\n #replace these with whitespace:\n clean_string = re.sub(r\"\"\"\n [(),.;@#?!&$]+ # Accept one or more copies of punctuation\n \\ * # plus zero or more copies of a space,\n \"\"\",\n \" \", # and replace it with a single space\n input_string.lower(), flags=re.VERBOSE)\n \n #replace these with nothing:\n clean_string = clean_string.replace('-', '')\n clean_string = clean_string.replace('=', '')\n return clean_string\n\n\ndef get_synonyms(tid):\n target = new_client.target\n res = target.filter(target_chembl_id=tid)\n target_synonyms = [i['component_synonym'] for i in res[0]['target_components'][0]['target_component_synonyms']]\n #clean:\n target_synonyms = [clean_text(i) for i in target_synonyms]\n 
#make all lowercase to improve correct matchings:\n #target_synonyms = [i.lower() for i in target_synonyms]\n #remove all punctuations to improve correct matchings:\n #target_synonyms = [i.translate(str.maketrans('', '', string.punctuation)) for i in target_synonyms]\n \n return target_synonyms\n\ndef get_cid(smi):\n try:\n c = pcp.get_compounds(smi, 'smiles')[0]\n return c.cid\n except Exception as e:\n print(e)\n return None\n\ndef get_assay_summary(cid):\n b = json.loads(requests.get('https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/'+str(cid)+'/assaysummary/JSON').content)\n return b\n\ndef get_relevant_aids(assay_summary, synonyms):\n #iterates through all assays and checks for overlap in Assay Name with any of the synonyms. \n #if there is a shared word, returns the pubchem assay ID. \n #relevant_aids = list()\n bioactivity_outcomes = list()\n assay_names = list()\n \n for i in assay_summary['Table']['Row']:\n assay_name = i['Cell'][11]\n #trick from word embedding - remove all punctuations to improve word matching\n #assay_name = assay_name.translate(str.maketrans('', '', string.punctuation))\n clean_assay_name = clean_text(assay_name)\n #now match words:\n if len(set(synonyms).intersection(clean_assay_name.split()))>0:\n \n ###This is the variable that stores the 'active' or 'unspecified' or 'inactive' string:\n bioactivity_outcome = i['Cell'][6]\n ###\n \n bioactivity_outcomes.append(bioactivity_outcome)\n assay_names.append(assay_name)\n \n #this stores the AID number\n #relevant_aids.append(i['Cell'][0])\n\n return bioactivity_outcomes, assay_names#relevant_aids\n\ndef get_assay_details(aid, cid):\n b = json.loads(requests.get('https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid/'+str(aid)+'/JSON?cid='+str(cid)).content)\n return b\n\ndef get_pair_details(pair):\n smi = smiles['canonical_smiles'].iloc[pair[0]]\n instance_id = smiles['instance_id'].iloc[pair[0]]\n predicted_target = targets['pref_name'].iloc[pair[1]]\n tid = 
targets[targets['pref_name']==predicted_target]['chembl_id'].iloc[0]\n return smi, instance_id, tid, predicted_target\n\n\ndef fetch_assay_details(tid, smi):\n if tid in synonym_dict:\n synonyms = synonym_dict[tid]\n else:\n synonyms = get_synonyms(tid)\n synonym_dict[tid] = synonyms\n\n \n if smi in cid_dict:\n compound_id = cid_dict[smi]\n else:\n compound_id = get_cid(smi)\n cid_dict[smi] = compound_id\n \n if compound_id in assay_dict:\n assay_summary = assay_dict[compound_id]\n else: \n assay_summary = get_assay_summary(compound_id)\n assay_dict[compound_id]=assay_summary\n \n return synonyms, compound_id, assay_summary\n\ncount = 0 \nsynonym_dict = dict()\ncid_dict = dict()\nassay_dict = dict()\nassays_long = pd.DataFrame(columns=['ligandIdx', 'targetIdx', 'instance_id', 'pref_name', 'outcome', 'assayname'])\nrownum=0\n\n###This handles annoying cases that take forever (i.e. hung process)\n#Close session after 15 seconds:\ndef handler(signum, frame):\n print('Time alarm')\n raise Exception('Action took too much time')\ndef signal_handler(signal, frame):\n print(\"\\nprogram exiting gracefully\")\n sys.exit(0)\nsignal.signal(signal.SIGINT, signal_handler) #\n\n\nfor count, pair in tqdm(enumerate(arr_sorted[2595:10000]), smoothing=0, total=10000):\n print(f'testing {count}th pair: {pair} ... 
', end=' ')\n #if the try block takes more than 15 seconds, kill it.\n signal.signal(signal.SIGALRM, handler)\n signal.alarm(15) #Set the parameter to the amount of seconds you want to wait\n \n try:\n smi, instance_id, tid, pref_name = get_pair_details(pair)\n \n synonyms, compound_id, assay_summary = fetch_assay_details(tid, smi )\n \n \n if 'Fault' in assay_summary.keys():\n None\n #print('No assays present.')\n else:\n bioactivity_outcomes, assay_names = get_relevant_aids(assay_summary, synonyms)\n for outcome, aname in zip(bioactivity_outcomes, assay_names):\n assays_long.loc[rownum]=[pair[0], pair[1], instance_id, pref_name, outcome, aname]\n rownum += 1\n \n if count%100==0:\n assays_long.to_csv('assays_long.csv')\n with open('synonym_dict.json', 'w') as fp:\n json.dump(synonym_dict, fp)\n with open('assay_dict.json', 'w') as fp:\n json.dump(assay_dict, fp)\n with open('cid_dict.json', 'w') as fp:\n json.dump(cid_dict, fp)\n print(' - finished.')\n except (KeyboardInterrupt, Exception):\n print('took too long. moving on.')\n\nassays_long.to_csv('assays_long.csv')\nwith open('synonym_dict.json', 'w') as fp:\n json.dump(synonym_dict, fp)\nwith open('assay_dict.json', 'w') as fp:\n json.dump(assay_dict, fp)\nwith open('cid_dict.json', 'w') as fp:\n json.dump(cid_dict, fp)\n","sub_path":"4_pubchem_validation/pubchem_validation.py","file_name":"pubchem_validation.py","file_ext":"py","file_size_in_byte":7096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"50500316","text":"import utm\nimport numpy as np\nfrom netCDF4 import Dataset\n\n\ndef convert_to_utm(coords):\n utm_coords = []\n for point in coords:\n for lat, lon in point:\n utm_result = utm.from_latlon(lat, lon)\n utm_point = utm_result[0], utm_result[1]\n utm_coords.append(utm_point)\n return utm_coords\n\nn = Dataset('2016102515.nc', 'r')\nlons = n.variables['x'][:]\nlats = n.variables['y'][:]\nlat_0 = lats.mean()\nlon_0 = lons.mean()\nprcips = n.variables['rainfall_depth'][:]\nn.close()\n\n# get just the bottom row and the right column coordinates\nright_col_coords = np.dstack((lats, np.repeat(lons.min(), len(lats))))\nbottom_row_coords = np.dstack((np.repeat(lats.min(), len(lons)), lons))\n\n# convert to utm\nright_col_utms = convert_to_utm(right_col_coords)\nbottom_row_utms = convert_to_utm(bottom_row_coords)\n\nprojected_ys = np.array([a[1] for a in right_col_utms])\nprojected_xs = np.array([a[0] for a in bottom_row_utms])\n\n","sub_path":"PhaseI/DataPreparation/wgs_to_utm.py","file_name":"wgs_to_utm.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"263037683","text":"import tensorflow as tf\nfrom tensorflow.keras.layers import*\nfrom tensorflow.keras.models import*\nfrom tensorflow.keras.backend import *\nimport keras\n\n_BATCH_NORM_EPSILON = 1e-5\n_BATCH_NORM_DECAY = 0.997\n\ndef shape_list(x):\n \"\"\"Deal with dynamic shape in tensorflow cleanly.\"\"\"\n static = x.shape.as_list()\n dynamic = tf.shape(x)\n return [dynamic[i] if s is None else s for i, s in enumerate(static)]\n\ndef merge_two_last_dims(x):\n b, d, f, c = shape_list(x)\n return tf.reshape(x, shape=[b, -1, f * c])\ndef ctc_lambda_func(args):\n\n y_pred, labels, input_length, label_length = args\n input_length = input_length //2\n return tf.reduce_mean(tf.keras.backend.ctc_batch_cost(labels, y_pred, input_length, label_length))\n\ndef SpeechModel (model,\n name: str = \"deepspeech2\"):\n #super(ConvModule, self).__init__(**kwargs)\n\n vocabulary_size = 95\n conv_type= \"conv2d\"\n conv_kernels = [32 , 32 ,96 ]\n conv_strides=[[2,2],[1,2],[1,2] ]\n conv_filters=[[41,11] , [21,11] , [21,11]]\n conv_dropout=0.1\n rnn_nlayers= 5\n nsubblocks = 2\n padding = [[5,20] , [5,10] ,[5,10]]\n block_channels = [256, 384, 512, 640, 768]\n block_kernels= [11, 13, 17, 21, 25]\n block_dropout = 0.2\n rnn_type= \"lstm\"\n rnn_units= 800\n rnn_bidirectional=True\n rnn_rowconv= 0\n rnn_dropout= 0.1\n fc_nlayers= 0\n fc_units= 1600\n fc_dropout= 0.1\n assert len(conv_kernels) == len(conv_strides) == len(conv_filters)\n x = []\n #assert dropout >= 0.0 \n input_ = tf.keras.Input(name = 'inputs' , shape = (model['max_input_length'] , 80 , 1 ))\n output = input_\n \n for i in range(len(conv_kernels)): \n output =tf.pad(\n output,\n [[0, 0], [padding[i][0], padding[i][0]], [padding[i][1], padding[i][1]], [0, 0]]) \n output = Conv2D(conv_kernels[i] , kernel_size= conv_filters[i] , strides =conv_strides[i] , padding='same' , dilation_rate=1, dtype = tf.float32 )(output)\n output = tf.keras.layers.BatchNormalization(\n momentum=_BATCH_NORM_DECAY, 
epsilon=_BATCH_NORM_EPSILON)(output)\n output = tf.keras.layers.LeakyReLU()(output)\n output = tf.keras.layers.Dropout(conv_dropout)(output)\n \n batch_size = tf.shape(output)[0]\n feat_size = output.get_shape().as_list()[2]\n tail = output.get_shape().as_list()[3]\n output = tf.reshape(\n output,\n [batch_size , -1 , feat_size * tail]\n )\n \n output = keras.layers.Masking()(output)\n \n x = output\n output = tf.keras.layers.Dense(fc_units)(output)\n output = tf.keras.layers.LeakyReLU()(output)\n output = tf.keras.layers.Dense(fc_units)(output)\n x = tf.keras.layers.Dense(fc_units)(x)\n output = keras.layers.add([output ,x])\n output = tf.keras.layers.Dense(fc_units)(output)\n output = tf.keras.layers.Dense(fc_units)(output)\n output = tf.keras.layers.LeakyReLU()(output)\n #output = tf.keras.layers.ZeroPadding1D(padding=(0, 1711))(output)\n \n for i in range(5):\n if ( i != 0 ):\n output = tf.keras.layers.BatchNormalization(\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON)(output)\n lstm = tf.keras.layers.LSTM(rnn_units , dropout = rnn_dropout , return_sequences=True , use_bias=True)\n output = tf.keras.layers.Bidirectional(lstm )(output)\n\n \n \n output = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(fc_units))(output)\n output = tf.keras.layers.BatchNormalization(\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON)(output)\n output = tf.keras.layers.LeakyReLU()(output)\n output = tf.keras.layers.Dropout(fc_dropout)(output)\n output = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(units=vocabulary_size, activation=\"softmax\",\n use_bias=True ))(output)\n labels = Input(name='labels', shape=model['max_label_length'], dtype='int64')\n input_length = Input(name='input_lengths', shape=[1], dtype='int64')\n label_length = Input(name='label_lengths', shape=[1], dtype='int64')\n\n loss_out = Lambda(ctc_lambda_func, output_shape=(1,),\n name='ctc')([output, labels, input_length, label_length])\n return tf.keras.Model(inputs=[input_, 
labels, input_length, label_length], outputs=[loss_out]) , tf.keras.Model(inputs=input_ , outputs=output)\n \n \n \n \n","sub_path":"deepspeech.py","file_name":"deepspeech.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"299917728","text":"import cv2\nimport PIL \nfrom PIL import Image\nfrom PIL import ImageTk\nfrom tkinter import *\nimport numpy as np\n\n\n\n\n\n\nclass videoStream:\n panel = None\n ventana = None\n camera = None\n\n def __init__(self):\n self.ventana = Tk()\n self.ventana.title('Stream')\n self.ventana.geometry(\"400x400\")\n\n self.panel = Label(self.ventana)\n self.panel.pack()\n\n self.camera = cv2.VideoCapture(0)\n self.camera1()\n self.ventana.mainloop()\n\n def camera1(self):\n _,frame = self.camera.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = np.asarray(frame)\n frame = PIL.Image.fromarray(frame)\n frame = ImageTk.PhotoImage(frame)\n self.panel.configure(image=frame)\n self.panel.image = frame\n self.panel.after(1,self.camera)\n\n\n\n","sub_path":"GUIH/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"404938499","text":"import httplib2\nimport uritemplate\n\nfrom auditsapp import constants\n\nfrom oauth2client.anyjson import simplejson\n\ndef get(account_id, webproperty_id, profile_id, tkn):\n\t# create http object for sending request through GA REST API\n\thttp = httplib2.Http()\n\toptions = {\n\t\t'accountId': account_id,\n\t\t'webPropertyId': webproperty_id,\n\t\t'profileId': profile_id\n\t}\n\trequest_url = uritemplate.expand(constants.GA_MANAGEMENT_PROFILES_API_URI, options)\n\t\n\t# GET goals\n\tresp, content = http.request(request_url, \"GET\", headers={\"Authorization\": \"Bearer \"+tkn})\n\t\n\t# format to json\n\tprofile = simplejson.loads(content) \n\t\n\t# initialize result object to be returned later\n\tresult = {\n\t\t'description': constants.ECOMMERCE_TRACKING_DESCRIPTION,\n\t\t'headers': ['E-Commerce Tracking'],\n\t\t'data': [],\n\t}\n\t\n\t# process response\n\tif profile.get('eCommerceTracking') == True:\n\t\trow = {\n\t\t\t'tracking': 'YES'\n\t\t}\n\t\tresult.get('data').append(row)\n\telse: \n\t\trow = {\n\t\t\t'tracking': 'NO'\n\t\t}\n\t\tresult.get('data').append(row)\n\treturn result","sub_path":"auditsapp/audit/ecommercetracking.py","file_name":"ecommercetracking.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"380954722","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 15 15:51:19 2014\n\n@author: aaditya prakash\n\"\"\"\n\nimport time\n\n\ndef fun__(strIn):\n strOutput = []\n strList = strIn.split(\"\\n\")\n numT = int(strList[0])\n \n for i in range(numT):\n llIn = strList[i+1]\n res = Strategy(llIn)\n strOutput.append(\"Case #\" + str(i+1) + \": \" + str(res))\n \n fileOut = file(\"C-large-OUT3.txt\", \"w\")\n for item in strOutput:\n print>>fileOut, item\n fileOut.close()\n \ndef Strategy(llIn):\n A, B = map(int, llIn.split(' '))\n count =0\n for n in xrange(A, B):\n s = str(n)\n #print 'n ', s \n nLl = map(int, [s[i:]+s[:i] for i in xrange(len(s))])\n #print nLl\n #if(count > 10): break\n setL = set()\n for i in range(1,len(nLl)):\n if nLl[i]<= B and nLl[i] > n: \n setL.update([nLl[i]])\n count += len(setL)\n #print '*******', n, nLl[i], B\n# for m in xrange(n+1, B+1):\n# if m in nLl:\n# print \"*******\", n,m, B\n# count += 1\n \n return count\n \n\nfInput = file(\"C-large-practice.in\", \"r\")\nstrIn = fInput.read()\ntimeStart = time.clock()\n\nfun__(strIn)\n\n#print Strategy('1 9')\n#print Strategy('10 40')\n#print Strategy('100 500')\n#print Strategy('1111 2222')\n\nprint('Time (sec):' + str(time.clock() - timeStart))\n\n\n\n\n","sub_path":"2012/Recycled_Numbers.py","file_name":"Recycled_Numbers.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"593114434","text":"import tensorflow as tf\nfrom test0409Readdata import *\nimport numpy as np\nimport random\n# from ../helper import *\nfrom tensorflow.contrib import seq2seq\n\n\ndef random_distribution():\n \"\"\"生成一个随机的概率列\"\"\"\n b = np.random.uniform(0.0, 1.0, size=[1, vocab_size])\n return b / np.sum(b, 1)[:, None]\n\n\ndef sample_distribution(distribution): # 在概率下选择\n \"\"\"从假定为标准化数组的分布中抽取一个元素作为样本概率。\n \"\"\"\n r = random.uniform(0, 1)\n s = 0\n for i in range(len(distribution[0])):\n s += distribution[0][i]\n if s >= r:\n return i\n return len(distribution) - 1\n\n\ndef sample(prediction):\n d = sample_distribution(prediction)\n re = []\n re.append(d)\n return re\n\n\n# 学习率\nlearning_rate =1 # 1.0\n# 训练步长\nnum_steps =60 # 35\n# lstm层中包含的unit个数\nhidden_size = 300\n# dropout时的保留概率\nkeep_prob =0.8 # 1.0\nlr_decay = 0.5\n# batch大小\nbatch_size =20 # 20\n# lstm层数\nnum_layers = 3\n# 训练循环次数\nmax_epoch =1000# 14\n\n\n# 序列化 处理训练数据\nx, y, id_to_word = dataproducer(batch_size, num_steps)\nvocab_size = len(id_to_word)\n\nsize = hidden_size\n\n# 基础的LSTM循环网络单元\nlstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.5)\n# 网络中每个单元在每次有数据流入时以一定的概率(keep prob)正常工作\nlstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=keep_prob)\n# 构建多个独立的串联循环网络结构,产生num_layers个独立的LSTM网络结构操作列表\ncell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell], num_layers)\n\n# 构造多层LSTM前,使用zero_state函数构建全零初始输出特征和状态特征\ninitial_state = cell.zero_state(batch_size, tf.float32)\nstate = initial_state\n# 获取已存在的变量(要求不仅名字,而且初始化方法等各个参数都一样),如果不存在,就新建一个。\nembedding = tf.get_variable('embedding', [vocab_size, size])\ninput_data = x\ntargets = y\n\n# 形参,用于定义过程,在执行的时候再赋具体的值\ntest_input = tf.placeholder(tf.int32, shape=[1])\ntest_initial_state = cell.zero_state(1, tf.float32)\n\n# 从随机产生的初始词向量集合embedding中获取对应的样本特征\ninputs = tf.nn.embedding_lookup(embedding, input_data)\ntest_inputs = tf.nn.embedding_lookup(embedding, test_input)\n\noutputs = []\ninitializer = tf.random_uniform_initializer(-0.1, 
0.1)\n\n# 通过变量名 Model 获取变量\n# 当reuse为False或者None时(这也是默认值),同一个tf.variable_scope下面的变量名不能相同\n\nwith tf.variable_scope(\"Model\", reuse=None, initializer=initializer):\n with tf.variable_scope(\"r\", reuse=None, initializer=initializer):\n # softmax_w、softmax_b是尺寸 [size, vocab_size]、 [vocab_size]的随机张量,表示线性分类模型的参数\n softmax_w = tf.get_variable('softmax_w', [size, vocab_size])\n softmax_b = tf.get_variable('softmax_b', [vocab_size])\n with tf.variable_scope(\"RNN\", reuse=None, initializer=initializer):\n for time_step in range(num_steps):\n\n # 利用scope.reuse_variables()告诉TF想重复利用RNN的参数\n if time_step > 0: tf.get_variable_scope().reuse_variables()\n # 实现对LSTM网络的1次循环调用\n (cell_output, state) = cell(inputs[:, time_step, :], state, )\n outputs.append(cell_output)\n # 调整矩阵维度 outputs为被调整维度的张量 [-1, size]为要调整为的形状\n output = tf.reshape(outputs, [-1, size])\n\n # 两个矩阵中对应元素各自相乘,表示预测的分类置信度结果\n logits = tf.matmul(output, softmax_w) + softmax_b\n # 实现损失函数的计算\n loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example([logits], [tf.reshape(targets, [-1])],\n [tf.ones([batch_size * num_steps])])\n\n global_step = tf.Variable(0)\n #exponential_decay() 实现指数衰减学习率\n learning_rate = tf.train.exponential_decay( 10.0, global_step, 5000, 0.1, staircase=True)\n # 对所有步骤中的所有变量使用恒定的学习率\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n # 计算梯度\n gradients, v = zip(*optimizer.compute_gradients(loss))\n # clip_by_global_norm是梯度缩放输入是所有trainable向量的梯度,和所有trainable向量,返回第一个clip好的梯度,第二个globalnorm\n # 实现梯度剪裁防止梯度爆炸\n gradients, _ = tf.clip_by_global_norm(gradients, 1.25)\n # 使用计算得到的梯度来更新对应的variable\n optimizer = optimizer.apply_gradients(zip(gradients, v), global_step=global_step)\n\n cost = tf.reduce_sum(loss) / batch_size\n # predict:\n teststate = test_initial_state\n (celloutput, teststate) = cell(test_inputs, teststate)\n partial_logits = tf.matmul(celloutput, softmax_w) + softmax_b\n partial_logits = tf.nn.softmax(partial_logits)\n\n# 保存模型参数和Summary\nsv = 
tf.train.Supervisor(logdir=None)\n\nwith sv.managed_session() as session:\n\n costs = 0\n iters = 0\n for i in range(max_epoch):\n _, l = session.run([optimizer, cost])\n costs += l\n iters += num_steps\n perplextity = np.exp(costs / iters)\n if i % 20 == 0:\n print(perplextity)\n\n if i % 100 == 0:\n p = random_distribution()\n b = sample(p)\n sentence = id_to_word[b[0]]\n for j in range(200):\n test_output = session.run(partial_logits, feed_dict={test_input: b})\n b = sample(test_output)\n sentence += id_to_word[b[0]]\n print(sentence)\n\n writer = tf.summary.FileWriter(\"logs\", tf.get_default_graph())\n writer.close()","sub_path":"Chinese-novel-generation-master/test0409/test0409.py","file_name":"test0409.py","file_ext":"py","file_size_in_byte":5936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"613040249","text":"#fizzbuzzpluzz.py\nimport sys\nimport json\nimport pytest\n\n#setting MasterList equal to an empty list to use later for testing\nMasterList =[]\n\n# storing positional args\nFrange = sys.argv\nArgRange = len(Frange)\n\n# This ensures that the positional args are type 'int',\n# as we only expect integers to be input for the positional args\n\narg1 = int(Frange[2])\narg2 = int(Frange[3])\n\n#In order to make fizzbuzzpuzz \"inclusive\" with respect to the range given,\n# we must create inclusiveArg2\n\ninclusiveArg2 = (arg2 + 1)\n\n\n# Used to get JSON file:\nwith open('./fbp-config.json') as G:\n\tdata = json.load(G)\n\nMatchList1 = data[\"MatchOne\"]\nMatchList2 = data[\"MatchTwo\"]\n\n\ndef Convert(lst): \n\t\"\"\"\n\tThis function converts the list we get from the JSON file,\n\tand converts it into a dictionary to be used later in the script.\n\t\"\"\"\n\n\tres_dct = {lst[x]: lst[x + 1] for x in range(0, len(lst), 2)} \n\treturn res_dct \n\n\n# this is where match1 and match2 get converted\nMatch1 = (Convert(MatchList1))\nMatch2 = (Convert(MatchList2))\n\n# boil down match1 and match2 to be able to use their the values for their respective key pairs\n\ndef Match1KV():\n\t\"\"\"\n\tThis function takes the dictionary information from MatchOne and\n\tallows us to use the information in the key or the value without\n\thaving call a reference to the dictionary.\n\t\n\t\"\"\"\n\n\tfor key, value in Match1.items():\n\t\tMatch1KV.M1key = key\n\t\tMatch1KV.M1Val = value\n\n\ndef Match2KV():\n\n\t\"\"\"\n\tThis function takes the dictionary information from MatchTwo and\n\tallows us to use the information in the key or the value without\n\thaving call a reference to the dictionary.\n\t\n\t\"\"\"\n\n\tfor key, value in Match2.items():\n\t\tMatch2KV.M2key = key\n\t\tMatch2KV.M2Val = value\n\t\t\n# this run the above functions to be able to use the boiled down values from the config JSON files\nMatch1KV()\nMatch2KV()\n\n#assigning variables to 
be used in fizzbuzzpluzz logic\nM1_Key = Match1KV.M1key\nM1_Val = Match1KV.M1Val\nM2_Key = Match2KV.M2key\nM2_Val = Match2KV.M2Val\n\n# logic for printing out fizz, buzz, or any other name listed in the config file,\n# along with its associated value\n\ndef fizzbuzzpluzz(k):\n\n\t\"\"\"\n\tThis function uses the information accessed in the JSON config to determine\n\twhen one of the numbers selected in the JSON congif is called as well as what word to\n\toutput as specified by the JSON config\n\n\tThis function also handles the condition when a number in the range meets both conditions.\n\n\t\"\"\"\n\n\tif k % M1_Val == 0 and k % M2_Val == 0 :\n\t\treturn M1_Key + M2_Key\n\telif k % M1_Val == 0:\n\t\treturn M1_Key\n\telif k % M2_Val == 0:\n\t\treturn M2_Key\n\telse:\n\t\treturn str(k)\n\n\n#prints the range of values, inputting fizz, buzz, or any other name listed in the config file\nprint(\"\\n\".join(fizzbuzzpluzz(k) for k in range(arg1, inclusiveArg2) ))\n","sub_path":"fizzbuzzpluzz.py","file_name":"fizzbuzzpluzz.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"13732132","text":"import unittest\nfrom song import Song\nfrom playlist import Playlist\n\n\nclass TestPlaylist(unittest.TestCase):\n def setUp(self):\n self.plist = Playlist(\"plist out\")\n self.test_song = Song(\n \"Anaconda\",\n \"Nicky Minaj\",\n \"Unknown Shit\",\n 1,\n 300,\n 320\n )\n self.test_song2 = Song(\n \"Anaconda2\",\n \"Nicky Minaj\",\n \"Unknown Shit\",\n 4,\n 300,\n 120\n )\n self.plist.add_song(self.test_song)\n self.plist.add_song(self.test_song)\n self.plist.add_song(self.test_song2)\n\n def test_init(self):\n self.assertEqual(self.plist.name, \"plist out\")\n\n def test_add_song(self):\n self.assertIn(self.test_song, self.plist.songs)\n\n def test_remove_song(self):\n self.plist.remove_song(\"test_song\")\n self.assertNotIn(self.test_song, self.plist.songs)\n\n def test_total_length(self):\n self.assertEqual(self.plist.total_length(), 900)\n\n def test_rating_out_of_range(self):\n for item in self.plist.songs:\n self.assertIn(item.rating, [1, 2, 3, 4, 5])\n\n def test_remove_disrated(self):\n self.plist.remove_disrated(3)\n self.assertIn(self.test_song2, self.plist.songs)\n self.assertNotIn(self.test_song, self.plist.songs)\n\n def test_remove_bad_quality(self):\n self.plist.remove_bad_quality()\n self.assertNotIn(self.test_song2, self.plist.songs)\n\n def test_show_artists(self):\n self.assertEqual(self.plist.show_artists(), [\"Nicky Minaj\"])\n if __name__ == '__main__':\n unittest.main()\n","sub_path":"week2/musicLibrary/testPlayList.py","file_name":"testPlayList.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"115327403","text":"import pygame\n\nclass Runner(pygame.sprite.Sprite):\n\tdef __init__(self):\n\t\tpygame.sprite.Sprite.__init__(self)\n\n\t\tself.spriteImage= '17-2runnersprite.png'\n\t\tself.spriteWidth= 70\n\t\tself.spriteHeight= 100\n\t\tself.spriteSheet= pygame.image.load(self.spriteImage).convert()\n\t\tself.spriteColumns= 14\n\t\tself.currentFrame= 0\n\t\tself.image= pygame.surface.Surface((self.spriteWidth, self.spriteHeight))\n\n\t\trect= (self.spriteWidth*self.currentFrame, 0, self.spriteWidth, self.spriteHeight)\n\t\tself.image.blit( self.spriteSheet, (0, 0), rect)\n\t\tself.image.set_colorkey(pygame.color.Color(255, 0, 255))\n\t\tself.rect= self.image.get_rect()\n\n\tdef Update(self):\n\t\tif self.currentFrame==self.spriteColumns-1:\n\t\t\tself.currentFrame= 0\n\t\telse:\n\t\t\tself.currentFrame+= 1\n\n\t\trect= (self.spriteWidth*self.currentFrame, 0, self.spriteWidth, self.spriteHeight)\n\t\tself.image.blit( self.spriteSheet, (0, 0), rect)","sub_path":"뇌를 자극하는 파이썬 3/17-2runner.py","file_name":"17-2runner.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"274536108","text":"from pyspark import SparkConf, SparkContext\n\nconf = SparkConf().setMaster(\"local\").setAppName(\"Customer Total Value\")\nsc = SparkContext(conf=conf)\n\ndef parse_line(line):\n l = line.split(',')\n customer_id = int(l[0])\n value = float(l[2])\n return (customer_id, value)\n\nt = sc.textFile(\"./customer-orders.csv\")\ntuples = t.map(parse_line)\nvalues = tuples.reduceByKey(lambda x, y: x + y)\nvalue_id = values.map(lambda x: (x[1], x[0])).sortByKey()\nresults = value_id.collect()\n\nfor result in results:\n print(result)","sub_path":"customers_sort.py","file_name":"customers_sort.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"56124506","text":"from django.urls import path\nfrom .views import Index, NewEvent, CreatAgape, CreatCulte, CreatCulteSpecial, \\\n CreatEnseignement, CreatPriere, CreatPartage, ListEvents, DetailEvents\n\n\nurlpatterns = [\n # page d'accueil\n path('', Index.as_view(), name='acceuil'),\n # vue de creation\n path('evenement/nouveau', NewEvent.as_view(), name='ajout__event'),\n # formulaire de creations\n path('evenement/nouveau/agape', CreatAgape.as_view(), name='ajout__agape'),\n path('evenement/nouveau/culte', CreatCulte.as_view(), name='ajout__culte'),\n path('evenement/nouveau/cultespecial', CreatCulteSpecial.as_view(), name='ajout__cultespecial'),\n path('evenement/nouveau/enseignement', CreatEnseignement.as_view(), name='ajout__enseignement'),\n path('evenement/nouveau/priere', CreatPriere.as_view(), name='ajout__priere'),\n path('evenement/nouveau/partage', CreatPartage.as_view(), name='ajout__partage'),\n # listing\n path('evenement/', ListEvents.as_view(), name='liste_activites'),\n # details\n path('evenement/detail/', DetailEvents.as_view(), name=\"evenement-detail\"),\n]\n","sub_path":"evenements/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"596428894","text":"from pygame import *\nfrom tools.setMap import *\nfrom tools.makeMap import *\nimport sys\ndef choiceMode(window,DisplayList):\n\n # global DisplayList\n\n status = True\n while status:\n eventList = event.get()\n for eventEle in eventList:\n if eventEle.type ==QUIT:\n sys.exit()\n elif eventEle.type == KEYDOWN:\n if eventEle.key == K_1:\n setMap('C:\\\\Users\\liaoz\\Desktop\\机器人开发_基础课程练习\\Day15\\map\\\\1.map', DisplayList, window)\n status = False\n elif eventEle.key == K_2:\n DisplayList=setMap('C:\\\\Users\\liaoz\\Desktop\\机器人开发_基础课程练习\\Day15\\map\\\\2.map', DisplayList, window)\n status = False\n elif eventEle.key == K_3:\n makemap = MakeMap()\n DisplayList=setMap('C:\\\\Users\\liaoz\\Desktop\\机器人开发_基础课程练习\\Day15\\map\\\\2.map', DisplayList, window)\n status=False\n # else:print(DisplayList)\n # return DisplayList\n # input()","sub_path":"display/choiceWindow.py","file_name":"choiceWindow.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"628164814","text":"import numpy as np\nfrom nearest_neighbors import *\n\n\ndef kfold(n, n_folds):\n k_list = []\n for i in range(0, n, round(n / n_folds + 0.5)):\n k_list.append((np.hstack((np.arange(0, i), np.arange(i + round(n / n_folds + 0.5), n))), np.arange(i, min(i + round(n / n_folds + 0.5), n))))\n if len(k_list) != n_folds:\n k_list.append((np.arange(0, n - 1), np.arange(n - 1,n)))\n return k_list\n\ndef knn_cross_val_score(X, y, k_list, score, cv, **kwargs):\n acc = {}\n for key in k_list:\n acc[key] = np.array([])\n a = KNNClassifier(k=max(k_list), **kwargs)\n if cv == None:\n cv = kfold(X.shape[0], 3)\n for m,i in enumerate(cv):\n a.fit(X[i[0]], y[i[0]])\n dist, kneighbors = a.find_kneighbors(X[i[1]], True)\n for j,k in enumerate(k_list):\n ans = []\n dist_k = dist[:,:k]\n kneighbors_k = kneighbors[:,:k]\n for ind,l in enumerate(kneighbors_k):\n if a.weights:\n count_el = np.bincount(y[i[0]][l].astype('int64'), weights = 1 / (dist_k[ind] + 0.00001))\n else:\n count_el = np.bincount(y[i[0]][l].astype('int64'))\n ans.append(str(count_el.argmax()))\n acc[k] = np.append(acc[k], (np.array(ans).astype('int64') == np.array(y[i[1]]).astype('int64')).sum() / len(i[1]))\n\n return acc","sub_path":"cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"15518941","text":"import unicodedata\nimport re\nimport string\n\n\n_V = {'a', 'e', 'ẹ', 'i', 'o', 'ọ', 'u', 'à', 'è', '̀ẹ', 'ì', 'ò', '̀ọ', 'ù', 'á', 'é', '́ẹ', 'í', 'ó', '́ọ', 'ú'}\nV = {'a', 'e', 'ẹ', 'i', 'o', 'ọ', 'u', 'à', 'è', 'ḕ', 'ì', 'ò', 'ṑ', 'ù', 'á', 'é', 'ḗ', 'í', 'ó', 'ṓ', 'ú',\n 'A', 'Ó', 'À', 'Ṓ', 'Ḕ', 'È', 'O', 'I', 'Ḗ', 'Á', 'Ṑ', 'Í', 'Ì', 'Ẹ', 'E', 'U', 'É', 'Ú', 'Ò', 'Ọ', 'Ù'}\n\n_Vn = {'an', 'ẹn', 'in', 'ọn', 'un', 'àn', '̀ẹn', 'ìn', '̀ọn', 'ùn', 'án', '́ẹn', 'ín', '́ọn', 'ún'}\nVn = {'an', 'ẹn', 'in', 'ọn', 'un', 'àn', 'ḕn', 'ìn', 'ṑn', 'ùn', 'án', 'ḗn', 'ín', 'ṓn', 'ún',\n 'ÚN', 'UN', 'ÍN', 'ÁN', 'ÀN', 'ÌN', 'ẸN', 'ÙN', 'ḖN', 'ṒN', 'ỌN', 'AN', 'ṐN', 'IN', 'ḔN'}\n\nN = {'m', 'n', 'ṁ', 'ǹ', 'ḿ', 'ń',\n 'Ǹ', 'M', 'N', 'Ḿ', 'Ṁ', 'Ń'}\n\nC = {'b', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'p', 'r', 's', 'ṣ', 't', 'w', 'y',\n 'G', 'Y', 'D', 'L', 'M', 'W', 'F', 'N', 'S', 'R', 'Ṣ', 'J', 'P', 'K', 'H', 'B', 'T'}\nD = {'gb', 'GB'}\n\nyo_to_la = {'̀ẹ': 'ḕ', '̀ọ': 'ṑ', '́ẹ': 'ḗ', '́ọ': 'ṓ', '̀m': 'ṁ', '-': '',\n 'ẹ̀': 'ḕ', 'ọ̀': 'ṑ', 'ẹ́': 'ḗ', 'ọ́': 'ṓ', 'm̀': 'ṁ'}\n# la_to_ya = {'ḕ': '̀ẹ', 'ṑ': '̀ọ', 'ḗ': '́ẹ', 'ṓ': '́ọ'}\nla_to_ya = {'ḕ': 'ẹ̀', 'ṑ': 'ọ̀', 'ḗ': 'ẹ́', 'ṓ': 'ọ́'}\n\n\ndef syllabicate(word):\n word = word.lower()\n word = multi_replace(word, yo_to_la)\n\n syllable = ''\n while len(word) >= 1:\n if len(word) > 3:\n if word[-4:-2] in D and word[-2:] in Vn: # DVn\n syllable = word[-4:] + ' ' + syllable\n word = word[0:-4]\n else:\n if word[-3:-1] in D and word[-1] in V: # DV Structure\n syllable = word[-3:] + ' ' + syllable\n word = word[0:-3]\n\n elif word[-3] in C and word[-2:] in Vn: # CVn Structure\n syllable = word[-3:] + ' ' + syllable\n word = word[0:-3]\n else:\n if word[-2] in C and word[-1] in V: # CV Structure\n syllable = word[-2:] + ' ' + syllable\n word = word[0:-2]\n elif word in Vn: # Vn Structure\n syllable = word[-2:] + ' ' + syllable\n word = word[0:-2]\n else:\n if word[-1] in V: # V Structure\n syllable = 
word[-1:] + ' ' + syllable\n word = word[0:-1]\n elif word[-1] in N:\n syllable = word[-1:] + ' ' + syllable\n word = word[0:-1]\n else:\n raise ValueError('An error occurred with last char')\n\n elif len(word) == 3:\n if word[-3:-1] in D and word[-1] in V: # DV Structure\n syllable = word[-3:] + ' ' + syllable\n word = word[0:-3]\n\n elif word[-3] in C and word[-2:] in Vn: # CVn Structure\n syllable = word[-3:] + ' ' + syllable\n word = word[0:-3]\n else:\n if word[-2] in C and word[-1] in V: # CV Structure\n syllable = word[-2:] + ' ' + syllable\n word = word[0:-2]\n elif word in Vn: # Vn Structure\n syllable = word[-2:] + ' ' + syllable\n word = word[0:-2]\n else:\n if word[-1] in V: # V Structure\n syllable = word[-1:] + ' ' + syllable\n word = word[0:-1]\n elif word[-1] in N:\n syllable = word + ' ' + syllable\n word = word[0:-1]\n else:\n raise ValueError('An error occurred with last char')\n\n elif len(word) == 2:\n if word[-2] in C and word[-1] in V: # CV Structure\n syllable = word[-2:] + ' ' + syllable\n word = word[0:-2]\n elif word in Vn: # Vn Structure\n syllable = word[-2:] + ' ' + syllable\n word = word[0:-2]\n else:\n if word[-1] in V: # V Structure\n syllable = word[-1:] + ' ' + syllable\n word = word[0:-1]\n elif word[-1] in N:\n syllable = word[-1:] + ' ' + syllable\n word = word[0:-1]\n else:\n raise ValueError('An error occurred with last char')\n\n else:\n if word[-1] in V: # V Structure\n syllable = word[-1:] + ' ' + syllable\n word = word[0:-1]\n elif word[-1] in N:\n syllable = word[-1:] + ' ' + syllable\n word = word[0:-1]\n else:\n raise ValueError('An error occurred with last char')\n\n return multi_replace(syllable, la_to_ya).strip().split(' ')\n\n\ndef multi_replace(string, replacements):\n \"\"\"\n Given a string and a replacement map, it returns the replaced string.\n :param str string: string to execute replacements on\n :param dict replacements: replacement dictionary {value to find: value to replace}\n :rtype: str\n \"\"\"\n # 
Place longer ones first to keep shorter substrings from matching where the longer ones should take place\n # For instance given the replacements {'ab': 'AB', 'abc': 'ABC'} against the string 'hey abc', it should produce\n # 'hey ABC' and not 'hey ABc'\n substrs = sorted(replacements, key=len, reverse=True)\n\n # Create a big OR regex that matches any of the substrings to replace\n regexp = re.compile('|'.join(map(re.escape, substrs)))\n\n # For each match, look up the new string in the replacements\n return regexp.sub(lambda match: replacements[match.group(0)], string)\n\n\n\ndef pre_process_word(sentence):\n sentence = re.sub(r'\\d+', '', sentence) # remove digits\n sentence = sentence.translate(sentence.maketrans('', '', string.punctuation)) # remove punctuation\n return sentence.strip()\n\n\n\n\n\n","sub_path":"syllabicator/syllabicator_i.py","file_name":"syllabicator_i.py","file_ext":"py","file_size_in_byte":6248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"412572326","text":"import json\n\nfrom connection import app_search, engine_name\n\ndocuments = []\npages = app_search.list_documents(engine_name=engine_name,)[\"meta\"][\n \"page\"\n][\"total_pages\"]\n\nfor page in range(1, pages + 1):\n documents.extend(\n app_search.list_documents(\n engine_name=engine_name,\n current_page=page,\n )[\"results\"]\n )\n\nwith open(\"diversityorgs.tech.json\", \"w\") as json_file:\n json.dump(documents, json_file)\n","sub_path":"upload/download_documents.py","file_name":"download_documents.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"326813441","text":"# coding=utf-8\n\"\"\"Unit tests for activitypub.py.\n\nTODO: test error handling\n\"\"\"\nfrom __future__ import unicode_literals\nimport copy\nimport json\nimport urllib\n\nfrom mock import call, patch\nfrom oauth_dropins.webutil import util\nfrom oauth_dropins.webutil.testutil import requests_response\nimport requests\n\nimport activitypub\nfrom activitypub import app\nimport common\nfrom models import Follower, MagicKey, Response\nimport testutil\n\n\nREPLY_OBJECT = {\n '@context': 'https://www.w3.org/ns/activitystreams',\n 'type': 'Note',\n 'content': 'A ☕ reply',\n 'id': 'http://this/reply/id',\n 'url': 'http://this/reply',\n 'inReplyTo': 'http://orig/post',\n 'cc': ['https://www.w3.org/ns/activitystreams#Public'],\n}\nREPLY_OBJECT_WRAPPED = copy.deepcopy(REPLY_OBJECT)\nREPLY_OBJECT_WRAPPED['inReplyTo'] = common.redirect_wrap(\n REPLY_OBJECT_WRAPPED['inReplyTo'])\nREPLY = {\n '@context': 'https://www.w3.org/ns/activitystreams',\n 'type': 'Create',\n 'id': 'http://this/reply/as2',\n 'object': REPLY_OBJECT,\n}\n# based on example Mastodon like:\n# https://github.com/snarfed/bridgy-fed/issues/4#issuecomment-334212362\n# (reposts are very similar)\nLIKE = {\n '@context': 'https://www.w3.org/ns/activitystreams',\n 'id': 'http://this/like#ok',\n 'type': 'Like',\n 'object': 'http://orig/post',\n 'actor': 'http://orig/actor',\n}\nLIKE_WRAPPED = copy.deepcopy(LIKE)\nLIKE_WRAPPED['object'] = common.redirect_wrap(LIKE_WRAPPED['object'])\nLIKE_WITH_ACTOR = copy.deepcopy(LIKE)\nLIKE_WITH_ACTOR['actor'] = {\n '@context': 'https://www.w3.org/ns/activitystreams',\n 'id': 'http://orig/actor',\n 'type': 'Person',\n 'name': 'Ms. 
Actor',\n 'preferredUsername': 'msactor',\n 'image': {'type': 'Image', 'url': 'http://orig/pic.jpg'},\n}\n\nFOLLOW = {\n '@context': 'https://www.w3.org/ns/activitystreams',\n 'id': 'https://mastodon.social/6d1a',\n 'type': 'Follow',\n 'actor': 'https://mastodon.social/users/swentel',\n 'object': 'https://realize.be/',\n}\nFOLLOW_WRAPPED = copy.deepcopy(FOLLOW)\nFOLLOW_WRAPPED['object'] = 'http://localhost/realize.be'\nACTOR = {\n '@context': 'https://www.w3.org/ns/activitystreams',\n 'id': FOLLOW['actor'],\n 'type': 'Person',\n 'inbox': 'http://follower/inbox',\n}\nFOLLOW_WITH_ACTOR = copy.deepcopy(FOLLOW)\nFOLLOW_WITH_ACTOR['actor'] = {\n '@context': 'https://www.w3.org/ns/activitystreams',\n 'id': FOLLOW['actor'],\n 'type': 'Person',\n 'inbox': 'http://follower/inbox',\n}\nFOLLOW_WRAPPED_WITH_ACTOR = copy.deepcopy(FOLLOW_WRAPPED)\nFOLLOW_WRAPPED_WITH_ACTOR['actor'] = FOLLOW_WITH_ACTOR['actor']\n\nACCEPT = {\n '@context': 'https://www.w3.org/ns/activitystreams',\n 'type': 'Accept',\n 'id': 'tag:localhost:accept/realize.be/https://mastodon.social/6d1a',\n 'actor': 'http://localhost/realize.be',\n 'object': {\n 'type': 'Follow',\n 'actor': 'https://mastodon.social/users/swentel',\n 'object': 'http://localhost/realize.be',\n }\n}\n\n@patch('requests.post')\n@patch('requests.get')\n@patch('requests.head')\nclass ActivityPubTest(testutil.TestCase):\n\n def test_actor_handler(self, _, mock_get, __):\n mock_get.return_value = requests_response(\"\"\"\n\nMrs. ☕ Foo\n\n\"\"\", url='https://foo.com/')\n\n got = app.get_response('/foo.com')\n mock_get.assert_called_once_with('http://foo.com/', headers=common.HEADERS,\n timeout=util.HTTP_TIMEOUT)\n self.assertEquals(200, got.status_int)\n self.assertEquals(common.CONTENT_TYPE_AS2, got.headers['Content-Type'])\n self.assertEquals({\n '@context': 'https://www.w3.org/ns/activitystreams',\n 'type' : 'Person',\n 'name': 'Mrs. 
☕ Foo',\n 'summary': '',\n 'preferredUsername': 'foo.com',\n 'id': 'http://localhost/foo.com',\n 'url': 'http://localhost/r/https://foo.com/about-me',\n 'inbox': 'http://localhost/foo.com/inbox',\n 'outbox': 'http://localhost/foo.com/outbox',\n 'publicKey': {\n 'id': 'foo.com',\n 'publicKeyPem': MagicKey.get_by_id('foo.com').public_pem(),\n },\n }, json.loads(got.body))\n\n def test_actor_handler_no_hcard(self, _, mock_get, __):\n mock_get.return_value = requests_response(\"\"\"\n\n\n\n\"\"\")\n\n got = app.get_response('/foo.com')\n mock_get.assert_called_once_with('http://foo.com/', headers=common.HEADERS,\n timeout=util.HTTP_TIMEOUT)\n self.assertEquals(400, got.status_int)\n self.assertIn('representative h-card', got.body)\n\n def test_inbox_reply_object(self, *mocks):\n self._test_inbox_reply(REPLY_OBJECT, REPLY_OBJECT, *mocks)\n\n def test_inbox_reply_object_wrapped(self, *mocks):\n self._test_inbox_reply(REPLY_OBJECT_WRAPPED, REPLY_OBJECT, *mocks)\n\n def test_inbox_reply_create_activity(self, *mocks):\n self._test_inbox_reply(REPLY, REPLY, *mocks)\n\n def _test_inbox_reply(self, as2, expected_as2, mock_head, mock_get, mock_post):\n mock_head.return_value = requests_response(url='http://orig/post')\n mock_get.return_value = requests_response(\n '')\n mock_post.return_value = requests_response()\n\n got = app.get_response('/foo.com/inbox', method='POST',\n body=json.dumps(as2))\n self.assertEquals(200, got.status_int, got.body)\n mock_get.assert_called_once_with(\n 'http://orig/post', headers=common.HEADERS, verify=False)\n\n expected_headers = copy.deepcopy(common.HEADERS)\n expected_headers['Accept'] = '*/*'\n mock_post.assert_called_once_with(\n 'http://orig/webmention',\n data={\n 'source': 'http://localhost/render?source=http%3A%2F%2Fthis%2Freply&target=http%3A%2F%2Forig%2Fpost',\n 'target': 'http://orig/post',\n },\n allow_redirects=False,\n headers=expected_headers,\n verify=False)\n\n resp = Response.get_by_id('http://this/reply http://orig/post')\n 
self.assertEqual('in', resp.direction)\n self.assertEqual('activitypub', resp.protocol)\n self.assertEqual('complete', resp.status)\n self.assertEqual(expected_as2, json.loads(resp.source_as2))\n\n def test_inbox_like(self, mock_head, mock_get, mock_post):\n mock_head.return_value = requests_response(url='http://orig/post')\n mock_get.side_effect = [\n # source actor\n requests_response(LIKE_WITH_ACTOR['actor'], headers={'Content-Type': common.CONTENT_TYPE_AS2}),\n # target post webmention discovery\n requests_response(\n ''),\n ]\n mock_post.return_value = requests_response()\n\n got = app.get_response('/foo.com/inbox', method='POST',\n body=json.dumps(LIKE_WRAPPED))\n self.assertEquals(200, got.status_int)\n\n as2_headers = copy.deepcopy(common.HEADERS)\n as2_headers.update(common.CONNEG_HEADERS_AS2_HTML)\n mock_get.assert_has_calls((\n call('http://orig/actor', headers=as2_headers, timeout=15),\n call('http://orig/post', headers=common.HEADERS, verify=False),\n ))\n\n args, kwargs = mock_post.call_args\n self.assertEquals(('http://orig/webmention',), args)\n self.assertEquals({\n # TODO\n 'source': 'http://localhost/render?source=http%3A%2F%2Fthis%2Flike__ok&target=http%3A%2F%2Forig%2Fpost',\n 'target': 'http://orig/post',\n }, kwargs['data'])\n\n resp = Response.get_by_id('http://this/like__ok http://orig/post')\n self.assertEqual('in', resp.direction)\n self.assertEqual('activitypub', resp.protocol)\n self.assertEqual('complete', resp.status)\n self.assertEqual(LIKE_WITH_ACTOR, json.loads(resp.source_as2))\n\n def test_inbox_follow_accept(self, mock_head, mock_get, mock_post):\n mock_head.return_value = requests_response(url='https://realize.be/')\n mock_get.side_effect = [\n # source actor\n requests_response(FOLLOW_WITH_ACTOR['actor'],\n content_type=common.CONTENT_TYPE_AS2),\n # target post webmention discovery\n requests_response(\n ''),\n ]\n mock_post.return_value = requests_response()\n\n got = app.get_response('/foo.com/inbox', method='POST',\n 
body=json.dumps(FOLLOW_WRAPPED))\n self.assertEquals(200, got.status_int)\n\n as2_headers = copy.deepcopy(common.HEADERS)\n as2_headers.update(common.CONNEG_HEADERS_AS2_HTML)\n mock_get.assert_has_calls((\n call(FOLLOW['actor'], headers=as2_headers, timeout=15),\n ))\n\n # check AP Accept\n self.assertEqual(2, len(mock_post.call_args_list))\n args, kwargs = mock_post.call_args_list[0]\n self.assertEquals(('http://follower/inbox',), args)\n self.assertEquals(ACCEPT, kwargs['json'])\n\n # check webmention\n args, kwargs = mock_post.call_args_list[1]\n self.assertEquals(('https://realize.be/webmention',), args)\n self.assertEquals({\n 'source': 'http://localhost/render?source=https%3A%2F%2Fmastodon.social%2F6d1a&target=https%3A%2F%2Frealize.be%2F',\n 'target': 'https://realize.be/',\n }, kwargs['data'])\n\n resp = Response.get_by_id('https://mastodon.social/6d1a https://realize.be/')\n self.assertEqual('in', resp.direction)\n self.assertEqual('activitypub', resp.protocol)\n self.assertEqual('complete', resp.status)\n self.assertEqual(FOLLOW_WITH_ACTOR, json.loads(resp.source_as2))\n\n # check that we stored a Follower object\n follower = Follower.get_by_id('realize.be %s' % (FOLLOW['actor']))\n self.assertEqual(FOLLOW_WRAPPED_WITH_ACTOR, json.loads(follower.last_follow))\n\n def test_inbox_unsupported_type(self, *_):\n got = app.get_response('/foo.com/inbox', method='POST', body=json.dumps({\n '@context': ['https://www.w3.org/ns/activitystreams'],\n 'id': 'https://xoxo.zone/users/aaronpk#follows/40',\n 'type': 'Block',\n 'actor': 'https://xoxo.zone/users/aaronpk',\n 'object': 'http://snarfed.org/',\n }))\n self.assertEquals(501, got.status_int)\n","sub_path":"tests/test_activitypub.py","file_name":"test_activitypub.py","file_ext":"py","file_size_in_byte":10694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"11636902","text":"#1.读取文档内容\nwith open('DATA.log',encoding='UTF-8') as file_object:\n contents = file_object.read()\n\n# print(type(contents))\n# print(contents)\n#2.分割出各个手机号,看手机号间有什么规律,比如以逗号隔开或有空格\ncontent_list = contents.split(\"\\n\\n\") #把获取的内容分割成列表\n# print(content_list)\n\ncontent_list_effective_list = []\n\ncontent_list_len = len(content_list) #获取列表长度\nfor i in range(0,content_list_len): #遍历列表,统计手机号\n if \"CP=&&DataTime=\" in content_list[i]:\n content_list_effective_list.append(content_list[i])\n\nprint(content_list_effective_list)\n#3.统计手机号\nphone_list = []\n\n\n\ncontent_list_effective_list_len = len(content_list_effective_list) #获取列表长度\nfor i in range(0,content_list_effective_list_len): #遍历列表,统计手机号\n\n fengelong = content_list_effective_list[i].split(\"MN=\")\n # print(fengelong)\n if fengelong != ['']:\n fengelong_one_len = len(fengelong[0])\n # print(fengelong_one_len)\n qiepian_qian_num = fengelong_one_len+3\n fengelong_two_split_list = fengelong[1].split(\";CP=&&DataTime=\")\n shebei_MN = fengelong_two_split_list[0]\n # print(shebei_MN)\n if shebei_MN not in phone_list:\n phone_list.append(shebei_MN)\nprint(phone_list)\n\n\n#4.计算重复次数\nphone_list_len = len(phone_list)\nprint(phone_list_len)\nphone_list_sort = sorted(phone_list)\nprint(phone_list_sort)\n\nprint(\"设备MN号维度统计\")\nfor i in range(0,phone_list_len):\n count_num = 0\n for j in range(0, content_list_len): # 遍历列表,统计手机号\n if phone_list_sort[i] in content_list[j]:\n count_num = count_num +1\n print(\"%s出现的次数:%s\" % (phone_list_sort[i], count_num))\n\n\n\n # if phone_list[i] in content_list\n # count_num = content_list.count(phone_list[i]) #计算一个手机号在列表中出现的次数\n # print(\"%s出现的次数:%s\" % (phone_list[i],count_num))","sub_path":"WWSBGJTest/util/fuwu/jiexiwenjian_shebei.py","file_name":"jiexiwenjian_shebei.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"61293535","text":"import re\nimport xlrd\nfrom datetime import datetime, date\nfrom datatypes import data_store_type\n\n# special place to look for the organization name in each XLS file\nORG_NAME_CELL = (0, 2)\n\n\ndef read_xls(f, file_contents=None):\n \"\"\"\n Return a generator that opens the xls file f\n and then produces ((sheet-name, org-name, date-mode), row1, row2, ...)\n :param: f: file name\n :type f: string\n :param: file_contents: content of workbook\n :type file_contents: string\n\n :return: Generator that opens the xls file f\n and then produces ((sheet-name, org-name, date-mode), row1, row2, ...)\n :rtype: generator\n \"\"\"\n if file_contents is not None:\n wb = xlrd.open_workbook(file_contents= file_contents)\n else:\n wb = xlrd.open_workbook(f)\n assert wb.nsheets == 1\n\n sheet = wb.sheet_by_index(0)\n org_name_cell = sheet.cell(*ORG_NAME_CELL)\n yield (sheet.name, org_name_cell.value, wb.datemode)\n\n row = sheet.horz_split_pos\n while row < sheet.nrows:\n # return next non-empty row\n if not all(_is_bumf(c.value) for c in sheet.row(row)):\n yield [c.value for c in sheet.row(row)]\n row += 1\n\n\ndef _is_bumf(value):\n \"\"\"\n Return true if this value is filler, en route to skipping over empty lines\n\n :param value: value to check\n :type value: object\n\n :return: whether the value is filler\n :rtype: bool\n \"\"\"\n if type(value) in (unicode, str):\n return value.strip() == ''\n return value is None\n\n\ndef _canonicalize(dirty, dstore_tag, date_mode):\n \"\"\"\n Canonicalize dirty input from xlrd to align with\n recombinant.json datastore type specified in dstore_tag.\n\n The date_mode adheres to the xlrd Excel workbook; it has value 1\n if the workbook originates on Excel for Macinstosh and 0 otherwise.\n\n :param dirty: dirty cell content as read through xlrd\n :type dirty: object\n :param dstore_tag: datastore_type specifier in (JSON) schema for cell\n :type dstore_tag: str\n :param date_mode: Excel workbook date mode 
attribute\n :type date_mode: integer\n\n :return: Canonicalized cell input\n :rtype: float or unicode\n \"\"\"\n dtype = data_store_type[dstore_tag]\n if dirty is None:\n return dtype.default\n elif isinstance(dirty, float) or isinstance(dirty, int):\n if dtype.numeric:\n return float(dirty)\n elif dtype.tag == 'date':\n # excel returns dates as floats - convert back\n dt = datetime(*xlrd.xldate_as_tuple(dirty, date_mode))\n return unicode(dt.date())\n else:\n # JSON specifies text or money: content of origin is numeric string.\n # If xlrd has added .0 to present content as a float,\n # trim it before returning as numeric string\n if int(dirty) == dirty:\n return unicode(int(dirty))\n else:\n return unicode(dirty)\n elif (isinstance(dirty, basestring)) and (dirty.strip() == ''):\n # Content trims to empty: default\n return dtype.default\n elif not dtype.numeric:\n if dtype.tag == 'money':\n # User has overridden Excel format string, probably adding currency\n # markers or digit group separators (e.g.,fr-CA uses 1$ (not $1)).\n # Truncate any trailing decimal digits, retain int\n # part, and cast as numeric string.\n canon = re.sub(r'[^0-9]', '', re.sub(r'\\.[0-9 ]+$', '', str(dirty)))\n return unicode(canon)\n return unicode(dirty)\n\n # dirty is numeric: truncate trailing decimal digits, retain int part\n canon = re.sub(r'[^0-9]', '', re.sub(r'\\.[0-9 ]+$', '', str(dirty)))\n return float(canon)\n\n\ndef get_records(upload_data, fields, date_mode):\n \"\"\"\n Truncate/pad empty/missing records to expected row length, canonicalize\n cell content, and return resulting record list.\n\n :param upload_data: generator producing rows of content\n :type upload_data: generator\n :param fields: collection of fields specified in JSON schema\n :type fields: list or tuple\n :param date_mode: Excel workbook date mode attribute\n :type date_mode: int\n\n :return: canonicalized records of specified upload data\n :rtype: tuple of dicts\n \"\"\"\n records = []\n for n, row in 
enumerate(upload_data):\n # trailing cells might be empty: trim row to fit\n while (row and\n (len(row) > len(fields)) and\n (row[-1] is None or row[-1] == '')):\n row.pop()\n while row and (len(row) < len(fields)):\n row.append(None) # placeholder: canonicalize once only, below\n\n records.append(\n dict((\n f['datastore_id'],\n _canonicalize(v, f['datastore_type'], date_mode))\n for f, v in zip(fields, row)))\n\n return records\n","sub_path":"ckanext/recombinant/read_xls.py","file_name":"read_xls.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"359154512","text":"BLANK=\"_\"\r\nBLACK=\"B\"\r\nWHITE=\"W\"\r\nSIZE=9\r\nclass Location:\r\n def __init__(self, xcoord, ycoord):\r\n self.x=xcoord\r\n self.y=ycoord\r\n def __eq__(self, piece):\r\n return self.x==piece.x and self.y==piece.y\r\n def adj(self):\r\n adjs=[]\r\n adjs.append(Location(self.x+1, self.y))\r\n adjs.append(Location(self.x-1, self.y))\r\n adjs.append(Location(self.x, self.y+1))\r\n adjs.append(Location(self.x, self.y-1))\r\n return adjs\r\n def __str__(self):\r\n return \"[\"+str(self.x)+\", \"+str(self.y)+\"]\"\r\nclass Piece(Location):\r\n def __init__(self, xcoord, ycoord, color=BLANK):\r\n super().__init__(xcoord, ycoord)\r\n self.col=color\r\n def location(self):\r\n return Location(self.x, self.y)\r\n def isEmpty(self):\r\n return self.col==BLANK\r\n def __str__(self):\r\n return self.col\r\nclass Grid:\r\n def __init__(self):\r\n self.grid=[]\r\n for i in range(SIZE):\r\n self.grid.append([])\r\n for j in range(SIZE):\r\n self.grid[i].append(Piece(i, j))\r\n def inGrid(self, loc):\r\n return loc.x>=0 and loc.x=0 and loc.y 1:\n if \"Decomp\" in model:\n keys[i] = [k+\"%d\"%n for n in range(0,vect)]\n else:\n keys[i] = [k+\"%d\"%n for n in range(1,vect+1)]\n keys = list(flatten(keys))\n if no_subplots:\n figs = {}\n for c, k in enumerate(keys):\n fig, ax = plt.subplots(figsize=(6,4))\n if k == \"R0\":\n stoc = \"R0\"\n else:\n stoc = ''.join([i for i in k if not i.isdigit()])\n stoc_num = [int(i) for i in k if i.isdigit()]\n try:\n data = sorted(MDL.trace(stoc)[:][:,stoc_num[0]-1])\n except:\n data = sorted(MDL.trace(stoc)[:])\n plt.xlabel(\"%s value\"%k)\n plt.ylabel(\"Probability density\")\n hist = plt.hist(data, bins=20, normed=True, linewidth=1.0, color=\"white\")\n fit = norm.pdf(data, np.mean(data), np.std(data))\n plt.plot(data, fit, \"-\", label=\"Fitted PDF\", linewidth=1.5)\n plt.legend(loc='best')\n plt.grid('off')\n if save:\n save_where = '/Figures/Histograms/%s/' %filename\n working_path = 
getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n if c == 0:\n print(\"\\nSaving histogram figures in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'Histo-%s-%s-%s.%s'%(model,filename,k,save_as), bbox_inches='tight')\n figs[k] = fig\n plt.close(fig)\n return figs\n\n else:\n ncols = 2\n nrows = int(ceil(len(keys)*1.0 / ncols))\n fig, ax = plt.subplots(nrows, ncols, figsize=(7,nrows*1.8))\n for c, (a, k) in enumerate(zip(ax.flat, keys)):\n if k == \"R0\":\n stoc = \"R0\"\n else:\n stoc = ''.join([i for i in k if not i.isdigit()])\n stoc_num = [int(i) for i in k if i.isdigit()]\n try:\n data = sorted(MDL.trace(stoc)[:][:,stoc_num[0]-1])\n except:\n data = sorted(MDL.trace(stoc)[:])\n plt.axes(a)\n plt.locator_params(axis = 'y', nbins = 7)\n plt.locator_params(axis = 'x', nbins = 6)\n plt.xlabel(k)\n try:\n hist = plt.hist(data, bins=20, normed=False, label=filename, edgecolor='#1f77b4', linewidth=1.0, color='#1f77b4', alpha=0.3)\n fit = norm.pdf(data, np.mean(data), np.std(data)) \n xh = [0.5 * (hist[1][r] + hist[1][r+1]) for r in range(len(hist[1])-1)]\n binwidth = old_div((max(xh) - min(xh)), len(hist[1]))\n fit *= len(data) * binwidth\n plt.plot(data, fit, \"-\", color='#ff7f0e', linewidth=1.5)\n except:\n print(\"File %s: could not plot %s histogram. 
Parameter is unstable (see trace).\" %(filename,k))\n plt.grid('off')\n plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))\n \n for c in range(nrows):\n ax[c][0].set_ylabel(\"Frequency\")\n\n plt.tight_layout(pad=1, w_pad=1, h_pad=0)\n for a in ax.flat[ax.size - 1:len(keys) - 1:-1]:\n a.set_visible(False)\n if save:\n save_where = '/Figures/Histograms/'\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n print(\"\\nSaving parameter histograms in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'Histo-%s-%s.%s'%(model,filename,save_as), dpi=fig_dpi, bbox_inches='tight')\n try: plt.close(fig)\n except: pass\n return fig\n\ndef plot_KDE(sol, var1, var2, fig=None, ax=None, save=False, save_as_png=True, fig_dpi=144):\n if True:\n save_as = 'png'\n else:\n save_as = 'pdf'\n if var1 == var2:\n fig, ax = plt.subplots(figsize=(3,3))\n plt.close(fig)\n return fig\n else:\n if fig == None or ax == None:\n fig, ax = plt.subplots(figsize=(3,3))\n MDL = sol.MDL\n filename = sol.filename.replace(\"\\\\\", \"/\").split(\"/\")[-1].split(\".\")[0]\n model = sol.get_model_type()\n if var1 == \"R0\":\n stoc1 = \"R0\"\n else:\n stoc1 = ''.join([i for i in var1 if not i.isdigit()])\n stoc_num1 = [int(i) for i in var1 if i.isdigit()]\n try:\n x = MDL.trace(stoc1)[:,stoc_num1[0]-1]\n except:\n x = MDL.trace(stoc1)[:]\n if var2 == \"R0\":\n stoc2 = \"R0\"\n else:\n stoc2 = ''.join([i for i in var2 if not i.isdigit()])\n stoc_num2 = [int(i) for i in var2 if i.isdigit()]\n try:\n y = MDL.trace(stoc2)[:,stoc_num2[0]-1]\n except:\n y = MDL.trace(stoc2)[:]\n xmin, xmax = min(x), max(x)\n ymin, ymax = min(y), max(y) \n # Peform the kernel density estimate\n xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]\n positions = np.vstack([xx.ravel(), yy.ravel()])\n values = np.vstack([x, y])\n kernel = gaussian_kde(values)\n kernel.set_bandwidth(bw_method='silverman')\n# 
kernel.set_bandwidth(bw_method=kernel.factor * 2.)\n f = np.reshape(kernel(positions).T, xx.shape)\n \n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymin, ymax)\n plt.axes(ax)\n # Contourf plot\n plt.grid(None)\n plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))\n ax.scatter(x, y, color='k', s=1)\n plt.xticks(rotation=90)\n plt.locator_params(axis = 'y', nbins = 7)\n plt.locator_params(axis = 'x', nbins = 7)\n cfset = ax.contourf(xx, yy, f, cmap=plt.cm.Blues, alpha=0.7)\n ## Or kernel density estimate plot instead of the contourf plot\n# ax.imshow(np.rot90(f), cmap='Blues', extent=[xmin, xmax, ymin, ymax])\n # Contour plot\n# cset = ax.contour(xx, yy, f, levels=cfset.levels[2::2], colors='k', alpha=0.8)\n # Label plot\n # ax.clabel(cset, cset.levels[::1], inline=1, fmt='%.1E', fontsize=10)\n plt.yticks(fontsize=14)\n plt.xticks(fontsize=14)\n plt.ylabel(\"%s\" %var2, fontsize=14)\n plt.xlabel(\"%s\" %var1, fontsize=14)\n # plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)\n # plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)\n if save:\n save_where = '/Figures/Bivariate KDE/%s/' %filename\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n print(\"\\nSaving KDE figure in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'KDE-%s-%s_%s_%s.%s'%(model,filename,var1,var2,save_as), dpi=fig_dpi, bbox_inches='tight')\n plt.close(fig)\n return fig\n\ndef plot_hexbin(sol, var1, var2, save=False, save_as_png=True, fig_dpi=144):\n if save_as_png:\n save_as = 'png'\n else:\n save_as = 'pdf'\n MDL = sol.MDL\n filename = sol.filename.replace(\"\\\\\", \"/\").split(\"/\")[-1].split(\".\")[0]\n model = sol.get_model_type()\n if var1 == \"R0\":\n stoc1 = \"R0\"\n else:\n stoc1 = ''.join([i for i in var1 if not i.isdigit()])\n stoc_num1 = [int(i) for i in var1 if i.isdigit()]\n try:\n x = MDL.trace(stoc1)[:,stoc_num1[0]-1]\n except:\n x = 
MDL.trace(stoc1)[:]\n if var2 == \"R0\":\n stoc2 = \"R0\"\n else:\n stoc2 = ''.join([i for i in var2 if not i.isdigit()])\n stoc_num2 = [int(i) for i in var2 if i.isdigit()]\n try:\n y = MDL.trace(stoc2)[:,stoc_num2[0]-1]\n except:\n y = MDL.trace(stoc2)[:]\n xmin, xmax = min(x), max(x)\n ymin, ymax = min(y), max(y)\n fig, ax = plt.subplots(figsize=(4,4))\n plt.grid(None)\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymin, ymax)\n# plt.scatter(x, y)\n plt.hexbin(x, y, gridsize=20, cmap=plt.cm.Blues)\n plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))\n plt.xticks(rotation=90)\n plt.locator_params(axis = 'y', nbins = 5)\n plt.locator_params(axis = 'x', nbins = 5) \n cb = plt.colorbar()\n cb.set_label('Number of observations')\n plt.yticks(fontsize=14)\n plt.xticks(fontsize=14)\n plt.ylabel(\"%s\" %var2, fontsize=14)\n plt.xlabel(\"%s\" %var1, fontsize=14)\n if save:\n save_where = '/Figures/Hexbins/%s/' %filename\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n print(\"\\nSaving hexbin figure in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'Bivar-%s-%s_%s_%s.%s'%(model,filename,var1,var2,save_as), dpi=fig_dpi, bbox_inches='tight')\n plt.close(fig)\n return fig\n\ndef plot_traces(sol, no_subplots=False, save=False, save_as_png=True, fig_dpi=144):\n if save_as_png:\n save_as = 'png'\n else:\n save_as = 'pdf'\n MDL = sol.MDL\n model = get_model_type(sol)\n filename = sol.filename.replace(\"\\\\\", \"/\").split(\"/\")[-1].split(\".\")[0]\n keys = sorted([x.__name__ for x in MDL.deterministics]) + sorted([x.__name__ for x in MDL.stochastics])\n# keys = sorted([x.__name__ for x in MDL.stochastics])\n sampler = MDL.get_state()[\"sampler\"]\n try:\n keys.remove(\"zmod\")\n keys.remove(\"log_m_i\")\n keys.remove(\"log_tau_i\")\n keys.remove(\"cond\")\n except:\n pass\n for (i, k) in enumerate(keys):\n vect = old_div((MDL.trace(k)[:].size),(len(MDL.trace(k)[:])))\n if 
vect > 1:\n if \"Decomp\" in model:\n keys[i] = [k+\"%d\"%n for n in range(0,vect)]\n else:\n keys[i] = [k+\"%d\"%n for n in range(1,vect+1)]\n keys = list(flatten(keys))\n ncols = 2\n nrows = int(ceil(len(keys)*1.0 / ncols))\n if no_subplots:\n figs = {}\n for c, k in enumerate(keys):\n fig, ax = plt.subplots(figsize=(8,4))\n plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))\n if k == \"R0\":\n stoc = \"R0\"\n else:\n stoc = ''.join([i for i in k if not i.isdigit()])\n stoc_num = [int(i) for i in k if i.isdigit()]\n try:\n data = MDL.trace(stoc)[:][:,stoc_num[0]-1]\n except:\n data = MDL.trace(stoc)[:]\n x = np.arange(sampler[\"_burn\"]+1, sampler[\"_iter\"]+1, sampler[\"_thin\"])\n plt.ylabel(\"%s value\" %k)\n plt.xlabel(\"Iteration number\")\n \n if sampler[\"_burn\"] == 0:\n plt.xscale('log')\n else:\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n \n plt.plot(x, data, '-', label=k)\n plt.grid('on')\n if save:\n save_where = '/Figures/Traces/%s/' %filename\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n if c == 0:\n print(\"\\nSaving traces figure in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'Trace-%s-%s-%s.%s'%(model,filename,k,save_as))\n figs[k] = fig\n plt.close(fig)\n return figs\n\n else:\n fig, ax = plt.subplots(nrows, ncols, figsize=(8,nrows*1.4))\n for c, (a, k) in enumerate(zip(ax.flat, keys)):\n if k == \"R0\":\n stoc = \"R0\"\n else:\n stoc = ''.join([i for i in k if not i.isdigit()])\n stoc_num = [int(i) for i in k if i.isdigit()]\n try:\n data = MDL.trace(stoc)[:][:,stoc_num[0]-1]\n except:\n data = MDL.trace(stoc)[:]\n x = np.arange(sampler[\"_burn\"]+1, sampler[\"_iter\"]+1, sampler[\"_thin\"])\n plt.axes(a)\n plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))\n plt.locator_params(axis = 'y', nbins = 6)\n plt.ylabel(k)\n plt.plot(x, data, '-')\n \n if sampler[\"_burn\"] == 0:\n plt.xscale('log')\n 
else:\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n \n \n# plt.xscale('log')\n plt.grid('on')\n \n plt.tight_layout(pad=0.1, w_pad=0., h_pad=-1)\n for a in ax.flat[ax.size - 1:len(keys) - 1:-1]:\n a.set_visible(False)\n \n ax[:,0][-1].set_xlabel(\"Iteration number\")\n for a in ax[:-1]:\n a[0].axes.get_xaxis().set_ticklabels([])\n \n if len(keys) % 2 == 0:\n ax[:,1][-1].set_xlabel(\"Iteration number\")\n for a in ax[:-1]:\n a[1].axes.get_xaxis().set_ticklabels([])\n else:\n ax[:,1][-2].set_xlabel(\"Iteration number\")\n for a in ax[:-2]: \n a[1].axes.get_xaxis().set_ticklabels([])\n \n if save:\n save_where = '/Figures/Traces/'\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n print(\"\\nSaving trace figures in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'Trace-%s-%s.%s'%(model,filename,save_as), dpi=fig_dpi, bbox_inches='tight')\n plt.close(fig)\n return fig\n\ndef plot_summary(sol, save=False, save_as_png=True, fig_dpi=144):\n if save_as_png:\n save_as = 'png'\n else:\n save_as = 'pdf'\n MDL, ch_n = sol.MDL, sol.mcmc[\"nb_chain\"]\n model = get_model_type(sol)\n filename = sol.filename.replace(\"\\\\\", \"/\").split(\"/\")[-1].split(\".\")[0]\n keys = sorted([x.__name__ for x in MDL.deterministics]) + sorted([x.__name__ for x in MDL.stochastics])\n try:\n keys.remove(\"zmod\")\n keys.remove(\"log_m_i\")\n keys.remove(\"log_tau_i\")\n keys.remove(\"cond\")\n except:\n pass\n for (i, k) in enumerate(keys):\n vect = old_div((MDL.trace(k)[:].size),(len(MDL.trace(k)[:])))\n if vect > 1:\n keys[i] = [k+\"%d\"%n for n in range(1,vect+1)]\n keys = list(reversed(sorted(flatten(keys))))\n try: r_hat = gelman_rubin(MDL)\n except:\n print(\"\\nTwo or more chains of equal length required for Gelman-Rubin convergence\")\n fig, axes = plt.subplots(figsize=(6,4))\n gs2 = gridspec.GridSpec(3, 3)\n ax1 = plt.subplot(gs2[:, :-1])\n ax2 = plt.subplot(gs2[:, -1], 
sharey = ax1)\n ax2.set_xlabel(\"R-hat\")\n ax2.plot([1,1], [-1,len(keys)], \"--\", color=\"C7\", zorder=0)\n for (i, k) in enumerate(keys):\n test = k[-1] not in [\"%d\"%d for d in range(1,8)] or k == \"R0\"\n for c in range(ch_n):\n if test:\n imp = None\n val_m = MDL.stats(k[:imp], chain=c)[k[:imp]]['mean']\n hpd_h = MDL.stats(k[:imp], chain=c)[k[:imp]]['95% HPD interval'][0]\n hpd_l = MDL.stats(k[:imp], chain=c)[k[:imp]]['95% HPD interval'][1]\n else:\n imp = -1\n val_m = MDL.stats(k[:imp], chain=c)[k[:imp]]['mean'][int(k[-1])-1]\n hpd_h = MDL.stats(k[:imp], chain=c)[k[:imp]]['95% HPD interval'][0][int(k[-1])-1]\n hpd_l = MDL.stats(k[:imp], chain=c)[k[:imp]]['95% HPD interval'][1][int(k[-1])-1]\n val = val_m\n err = [[abs(hpd_h-val_m)],\n [abs(hpd_l-val_m)]]\n if ch_n % 2 != 0: o_s = 0\n else: o_s = 0.5\n ax1.scatter(val, i - (old_div(ch_n,2))*(1./ch_n/1.4) + (1./ch_n/1.4)*(c+o_s), color=\"C0\", marker=\"o\", s=50, edgecolors='C7',alpha=0.7)\n ax1.errorbar(val, i - (old_div(ch_n,2))*(1./ch_n/1.4) + (1./ch_n/1.4)*(c+o_s), xerr=err, color=\"C7\", fmt=\" \", zorder=0)\n if ch_n >= 2:\n R = np.array(r_hat[k[:imp]])\n R[R > 3] = 3\n if test:\n ax2.scatter(R, i, color=\"C1\", marker=\"<\", s=50, alpha=0.7)\n else:\n ax2.scatter(R[int(k[-1])-1], i, color=\"C1\", marker=\"<\", s=50, alpha=0.7)\n \n ax1.set_ylim([-1, len(keys)])\n ax1.set_yticks(list(range(0,len(keys))))\n ax1.set_yticklabels(keys)\n plt.setp(ax2.get_yticklabels(), visible=False)\n ax2.set_xlim([0.5, 3.5])\n ax2.set_xticklabels([\"\",\"1\",\"2\",\"3+\"])\n ax2.set_xticks([0.5, 1, 2, 3])\n ax1.set_xlabel(\"Parameter values\")\n plt.tight_layout()\n \n if save:\n save_where = '/Figures/Summaries/'\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n print(\"\\nSaving summary figure in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'Summary-%s-%s.%s'%(model,filename,save_as), dpi=fig_dpi, bbox_inches='tight')\n 
try: plt.close(fig)\n except: pass\n\n return fig\n\ndef plot_autocorr(sol, save=False, save_as_png=True, fig_dpi=144):\n if save_as_png:\n save_as = 'png'\n else:\n save_as = 'pdf'\n MDL = sol.MDL\n model = get_model_type(sol)\n filename = sol.filename.replace(\"\\\\\", \"/\").split(\"/\")[-1].split(\".\")[0]\n keys = sorted([x.__name__ for x in MDL.deterministics]) + sorted([x.__name__ for x in MDL.stochastics])\n try:\n keys.remove(\"zmod\")\n keys.remove(\"log_m_i\")\n keys.remove(\"log_tau_i\")\n keys.remove(\"cond\") \n except:\n pass\n for (i, k) in enumerate(keys):\n vect = old_div((MDL.trace(k)[:].size),(len(MDL.trace(k)[:])))\n if vect > 1:\n keys[i] = [k+\"%d\"%n for n in range(1,vect+1)]\n keys = list(flatten(keys))\n ncols = 2\n nrows = int(ceil(len(keys)*1.0 / ncols))\n fig, ax = plt.subplots(nrows, ncols, figsize=(10,nrows*2))\n plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))\n for (a, k) in zip(ax.flat, keys):\n if k[-1] not in [\"%d\"%d for d in range(1,8)] or k ==\"R0\":\n data = sorted(MDL.trace(k)[:].ravel())\n else:\n data = sorted(MDL.trace(k[:-1])[:][:,int(k[-1])-1].ravel())\n plt.axes(a)\n plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)\n plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)\n plt.yticks(fontsize=12)\n plt.xticks(fontsize=12)\n plt.ylabel(k, fontsize=12)\n to_thin = old_div(len(data),50)\n if to_thin != 0: plt.xlabel(\"Lags / %d\"%to_thin, fontsize=12)\n else: plt.xlabel(\"Lags\", fontsize=12)\n max_lags = None\n if len(data) > 50: data= data[::to_thin]\n plt.acorr(data, usevlines=True, maxlags=max_lags, detrend=plt.mlab.detrend_mean)\n plt.grid(None)\n fig.tight_layout()\n for a in ax.flat[ax.size - 1:len(keys) - 1:-1]:\n a.set_visible(False)\n if save:\n save_where = '/Figures/Autocorrelations/'\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n print(\"\\nSaving autocorrelation figure in:\\n\", save_path)\n if not 
path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'Autocorr-%s-%s.%s'%(model,filename,save_as), dpi=fig_dpi, bbox_inches='tight')\n try: plt.close(fig)\n except: pass\n return fig\n\ndef plot_rtd(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):\n if save_as_png:\n save_as = 'png'\n else:\n save_as = 'pdf'\n filename = sol.filename.replace(\"\\\\\", \"/\").split(\"/\")[-1].split(\".\")[0]\n model = get_model_type(sol)\n if draw or save:\n fig, ax = plt.subplots(figsize=(4,3))\n uncer_m = sol.MDL.stats()[\"log_m_i\"][\"standard deviation\"]\n uncer_tau = sol.MDL.stats()[\"log_tau_i\"][\"standard deviation\"]\n \n peaks = np.atleast_1d(sol.MDL.stats()[\"log_peak_tau\"][\"mean\"])\n uncer_peaks = sol.MDL.stats()[\"log_peak_tau\"]['95% HPD interval']\n uncer_peaks = uncer_peaks[1]-uncer_peaks[0]\n \n bot95 = sol.MDL.stats()[\"log_m_i\"]['95% HPD interval'][0]\n top95 = sol.MDL.stats()[\"log_m_i\"]['95% HPD interval'][1]\n \n log_tau = sol.MDL.stats()[\"log_tau_i\"]['mean']\n log_m = sol.MDL.stats()[\"log_m_i\"]['mean']\n \n \n m_peaks = log_m[[list(log_tau).index(find_nearest(log_tau, peaks[x])) for x in range(len(peaks))]]\n plt.errorbar(log_tau, log_m, None, None, color=\"C7\", linestyle='-', label=\"RTD (95% HPD)\")\n plt.errorbar(peaks, m_peaks+0.1, None, uncer_peaks, color=\"C3\", marker=\"v\", linestyle=\"\", label=r\"$\\tau_{peak}$ (95% HPD)\")\n plt.fill_between(np.log10(sol.ccd_priors['tau']), bot95, top95, color=\"C7\", alpha=0.3)\n plt.axvline(sol.MDL.stats()[\"log_mean_tau\"]['mean'],color=\"#2ca02c\",linestyle='--', label=r\"$\\bar{\\tau}$ (95% HPD)\")\n plt.axvline(sol.MDL.stats()[\"log_half_tau\"]['mean'],color='#1f77b4',linestyle=':', label=r\"$\\tau_{50}$ (95% HPD)\")\n inter = sol.MDL.stats()[\"log_mean_tau\"]['95% HPD interval']\n plt.axvspan(inter[0], inter[1], alpha=0.3, color=\"#2ca02c\")\n inter = sol.MDL.stats()[\"log_half_tau\"]['95% HPD interval']\n plt.axvspan(inter[0], inter[1], alpha=0.3, color='#1f77b4')\n 
plt.axvspan(min(log_tau), min(log_tau)+1, alpha=0.1, color='C7', hatch='xx')\n plt.axvspan(max(log_tau)-1, max(log_tau), alpha=0.1, color='C7', hatch='xx')\n plt.xlim([min(log_tau), max(log_tau)])\n plt.xlabel(r\"log$_{10}\\tau$ ($\\tau$ in s)\")\n plt.ylabel(\"log$_{10}$m\")\n plt.xlabel(r\"$log_{10}\\tau$ ($\\tau$ in s)\")\n plt.ylabel(r\"$log_{10}$m\")\n plt.grid('off')\n plt.legend(numpoints=1, fontsize=9, loc=1,labelspacing=0.1)\n \n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n\n fig.tight_layout()\n if save:\n save_where = '/Figures/Debye distributions/'\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n\n save_path = working_path+save_where\n print(\"\\nSaving relaxation time distribution figure in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'RTD-%s-%s.%s'%(model, filename,save_as), dpi=fig_dpi, bbox_inches='tight')\n# try: plt.close(fig)\n# except: pass\n if draw:\n return fig\n else: \n plt.close(fig)\n return None\n\ndef save_resul(sol):\n # Fonction pour enregistrer les résultats\n MDL, pm, filepath = sol.MDL, sol.pm, sol.filename\n model = get_model_type(sol)\n sample_name = filepath.replace(\"\\\\\", \"/\").split(\"/\")[-1].split(\".\")[0]\n save_where = '/Results/'\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where+\"%s/\"%sample_name\n print(\"\\nSaving csv file in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n if sol.model == 'PDecomp': \n tag = 0\n else: \n tag = 1\n A = []\n B = []\n headers = []\n keys = sorted(pm.keys())\n if sol.model == \"CCD\":\n print(\"moving\")\n keys += [keys.pop(keys.index(\"peak_tau\"))]\n \n keys = [k for k in keys if \"_std\" not in k]\n \n for c, key in enumerate(keys):\n \n \n A.append(list(np.array(pm[key]).ravel()))\n B.append(list(np.array(pm[key+\"_std\"]).ravel())) \n\n length = len(np.atleast_1d(pm[key]))\n \n if 
length > 1:\n for i in range(len(A[c])):\n headers.append(model+\"_\"+key+\"_%d\" %(i+tag))\n headers.append(model+\"_\"+key+(\"_%d\"%(i+tag))+\"_std\")\n else: \n headers.append(model+\"_\"+key)\n headers.append(model+\"_\"+key+\"_std\")\n\n A=flatten(A)\n B=flatten(B)\n\n results = [None]*(len(A)+len(B))\n results[::2] = A\n results[1::2] = B\n\n\n headers = ','.join(headers)\n results = np.array(results)\n\n if sol.model == 'PDecomp': \n tau_ = sol.data[\"tau\"]\n add = [\"tau\"+\"%d\"%(i) for i in range(len(tau_))]\n add = ',' + ','.join(add)\n headers += add\n results = np.concatenate((results,tau_))\n headers = \"Z_max,c_exponent,\" + headers\n results = np.concatenate((np.array([sol.data[\"Z_max\"]]),np.array([sol.c_exp]),results))\n np.savetxt(save_path+'INV_%s-%s_%s.csv' %(sol.model,model,sample_name), results[None],\n header=headers, comments='', delimiter=',')\n vars_ = [\"%s\"%x for x in MDL.stochastics]+[\"%s\"%x for x in MDL.deterministics]\n if \"zmod\" in vars_: vars_.remove(\"zmod\")\n MDL.write_csv(save_path+'STATS_%s-%s_%s.csv' %(sol.model,model,sample_name), variables=(vars_))\n\ndef merge_results(sol,files):\n model = get_model_type(sol)\n save_where = '/Batch results/'\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n \n print(\"\\nChecking for longest csv file\")\n lengths = []\n for f in files:\n to_merge_temp = working_path+\"/Results/%s/INV_%s-%s_%s.csv\" %(f,sol.model,model,f)\n headers_temp = np.genfromtxt(to_merge_temp, delimiter=\",\", dtype=str, skip_footer=1)\n lengths.append(len(headers_temp))\n \n to_merge_max = working_path+\"/Results/%s/INV_%s-%s_%s.csv\" %(files[lengths.index(max(lengths))],sol.model,model,files[lengths.index(max(lengths))])\n headers = np.genfromtxt(to_merge_max, delimiter=\",\", dtype=str, skip_footer=1)\n\n print(\"\\nMerging csv files\")\n if not path.exists(save_path):\n makedirs(save_path)\n# to_merge = working_path+\"/Results/%s/INV_%s_%s.csv\" 
%(files[0],model,files[0])\n# headers = np.genfromtxt(to_merge, delimiter=\",\", dtype=str, skip_footer=1)\n merged_inv_results = np.zeros((len(files), len(headers)))\n merged_inv_results.fill(np.nan)\n for i, f in enumerate(files):\n to_add = np.loadtxt(working_path+\"/Results/%s/INV_%s-%s_%s.csv\" %(f,sol.model,model,f), delimiter=\",\", skiprows=1)\n merged_inv_results[i][:to_add.shape[0]] = to_add\n rows = np.array(files, dtype=str)[:, np.newaxis]\n hd = \",\".join([\"ID\"] + list(headers))\n np.savetxt(save_path+\"Merged_%s-%s_%s_TO_%s.csv\" %(sol.model,model,files[0],files[-1]), np.hstack((rows, merged_inv_results)), delimiter=\",\", header=hd, fmt=\"%s\")\n print(\"Batch file successfully saved in:\\n\", save_path)\n\ndef plot_data(filename, headers, ph_units):\n data = get_data(filename,headers,ph_units)\n # Graphiques du data\n Z = data[\"Z\"]\n dZ = data[\"Z_err\"]\n f = data[\"freq\"]\n Zr0 = max(abs(Z))\n zn_dat = old_div(Z,Zr0)\n zn_err = old_div(dZ,Zr0)\n Pha_dat = 1000*data[\"pha\"]\n Pha_err = 1000*data[\"pha_err\"]\n Amp_dat = old_div(data[\"amp\"],Zr0)\n Amp_err = old_div(data[\"amp_err\"],Zr0)\n\n fig, ax = plt.subplots(3, 1, figsize=(6,8))\n for t in ax:\n t.tick_params(labelsize=12)\n # Real-Imag\n plt.axes(ax[0])\n plt.errorbar(zn_dat.real, -zn_dat.imag, zn_err.imag, zn_err.real, '.b', label=filename)\n plt.xlabel(sym_labels['real'], fontsize=12)\n plt.ylabel(sym_labels['imag'], fontsize=12)\n\n plt.xlim([None, 1])\n plt.ylim([0, None])\n# plt.legend(numpoints=1, fontsize=9)\n# plt.title(filename, fontsize=10)\n # Freq-Phas\n plt.axes(ax[1])\n plt.errorbar(f, -Pha_dat, Pha_err, None, '.b', label=filename)\n ax[1].set_yscale(\"log\", nonposy='clip')\n ax[1].set_xscale(\"log\")\n plt.xlabel(sym_labels['freq'], fontsize=12)\n plt.ylabel(sym_labels['phas'], fontsize=12)\n# plt.legend(loc=2, numpoints=1, fontsize=9)\n plt.ylim([1,1000])\n # Freq-Ampl\n plt.axes(ax[2])\n plt.errorbar(f, Amp_dat, Amp_err, None, '.b', label=filename)\n 
ax[2].set_xscale(\"log\")\n plt.xlabel(sym_labels['freq'], fontsize=12)\n plt.ylabel(sym_labels['ampl'], fontsize=12)\n plt.ylim([None,1.0])\n# plt.legend(numpoints=1, fontsize=9)\n fig.tight_layout()\n\n plt.close(fig)\n return fig\n\ndef plot_deviance(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):\n if save_as_png:\n save_as = 'png'\n else:\n save_as = 'pdf'\n filename = sol.filename.replace(\"\\\\\", \"/\").split(\"/\")[-1].split(\".\")[0]\n model = get_model_type(sol)\n if draw or save:\n fig, ax = plt.subplots(figsize=(4,3))\n deviance = sol.MDL.trace('deviance')[:]\n sampler_state = sol.MDL.get_state()[\"sampler\"]\n x = np.arange(sampler_state[\"_burn\"]+1, sampler_state[\"_iter\"]+1, sampler_state[\"_thin\"])\n plt.plot(x, deviance, \"-\", color=\"C3\", label=\"Model deviance\\nDIC = %.2f\\nBPIC = %.2f\" %(sol.MDL.DIC,sol.MDL.BPIC))\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Model deviance\")\n plt.legend(numpoints=1, loc=\"best\", fontsize=9)\n plt.grid('on')\n if sampler_state[\"_burn\"] == 0:\n plt.xscale('log')\n else:\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n fig.tight_layout()\n if save:\n save_where = '/Figures/ModelDeviance/'\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n print(\"\\nSaving model deviance figure in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'ModelDeviance-%s-%s.%s'%(model,filename,save_as), dpi=fig_dpi, bbox_inches='tight')\n try: plt.close(fig)\n except: pass\n if draw: return fig\n else: return None\n\ndef logp_trace(model):\n \"\"\"\n return a trace of logp for model\n \"\"\"\n #init\n db = model.db\n n_samples = db.trace('deviance').length()\n logp = np.empty(n_samples, np.double)\n #loop over all samples\n for i_sample in range(n_samples):\n #set the value of all stochastic to their 'i_sample' value\n for stochastic in 
model.stochastics:\n try:\n value = db.trace(stochastic.__name__)[i_sample]\n stochastic.value = value\n\n except KeyError:\n print(\"No trace available for %s. \" % stochastic.__name__)\n\n #get logp\n logp[i_sample] = model.logp\n return logp\n\ndef plot_logp(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):\n if save_as_png:\n save_as = 'png'\n else:\n save_as = 'pdf'\n filename = sol.filename.replace(\"\\\\\", \"/\").split(\"/\")[-1].split(\".\")[0]\n model = get_model_type(sol)\n if draw or save:\n fig, ax = plt.subplots(figsize=(4,3))\n logp = logp_trace(sol.MDL)\n sampler_state = sol.MDL.get_state()[\"sampler\"]\n x = np.arange(sampler_state[\"_burn\"]+1, sampler_state[\"_iter\"]+1, sampler_state[\"_thin\"])\n plt.plot(x, logp, \"-\", color=\"C3\")\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Log-likelihood\")\n plt.legend(numpoints=1, loc=\"best\", fontsize=9)\n plt.grid('on')\n if sampler_state[\"_burn\"] == 0:\n plt.xscale('log')\n else:\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n fig.tight_layout()\n if save:\n save_where = '/Figures/LogLikelihood/'\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n print(\"\\nSaving logp trace figure in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'LogLikelihood-%s-%s.%s'%(model,filename,save_as), dpi=fig_dpi, bbox_inches='tight')\n try: plt.close(fig)\n except: pass\n if draw: return fig\n else: return None\n\ndef plot_fit(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):\n if save_as_png:\n save_as = 'png'\n else:\n save_as = 'pdf'\n filepath = sol.filename\n sample_name = filepath.replace(\"\\\\\", \"/\").split(\"/\")[-1].split(\".\")[0]\n model = get_model_type(sol)\n data = sol.data\n fit = sol.fit\n # Graphiques du fit\n f = data[\"freq\"]\n Zr0 = max(abs(data[\"Z\"]))\n zn_dat = old_div(data[\"Z\"],Zr0)\n zn_err = 
old_div(data[\"Z_err\"],Zr0)\n zn_fit = old_div(fit[\"best\"],Zr0)\n zn_min = old_div(fit[\"lo95\"],Zr0)\n zn_max = old_div(fit[\"up95\"],Zr0)\n Pha_dat = 1000*data[\"pha\"]\n Pha_err = 1000*data[\"pha_err\"]\n Pha_fit = 1000*np.angle(fit[\"best\"])\n Pha_min = 1000*np.angle(fit[\"lo95\"])\n Pha_max = 1000*np.angle(fit[\"up95\"])\n Amp_dat = old_div(data[\"amp\"],Zr0)\n Amp_err = old_div(data[\"amp_err\"],Zr0)\n Amp_fit = old_div(abs(fit[\"best\"]),Zr0)\n Amp_min = old_div(abs(fit[\"lo95\"]),Zr0)\n Amp_max = old_div(abs(fit[\"up95\"]),Zr0)\n if draw or save:\n fig, ax = plt.subplots(1, 3, figsize=(11,3))\n# for t in ax:\n# t.tick_params(labelsize=14)\n # Real-Imag\n plt.axes(ax[2])\n plt.errorbar(zn_fit.real, -zn_dat.imag, zn_err.imag, zn_err.real, '.', color=\"#1f77b4\", label='Data')\n plt.plot(zn_fit.real, -zn_fit.imag, '-', color=\"#ff7f0e\", label='Model')\n plt.fill_between(zn_fit.real, -zn_max.imag, -zn_min.imag, color=\"#ff7f0e\", alpha=0.2, label=\"95% HPD\")\n plt.xlabel(sym_labels['real'])\n plt.ylabel(sym_labels['imag'])\n plt.legend(loc='best', fontsize=9, numpoints=1)\n plt.xlim([None, 1])\n plt.ylim([0, max(-zn_dat.imag)])\n \n # Freq-Ampl\n plt.axes(ax[1])\n plt.errorbar(f, Amp_dat, Amp_err, None, '.', color=\"#1f77b4\", label='Data')\n plt.semilogx(f, Amp_fit, '-', color=\"#ff7f0e\", label='Model')\n plt.fill_between(f, Amp_max, Amp_min, color=\"#ff7f0e\", alpha=0.2, label=\"95% HPD\")\n plt.xlabel(sym_labels['freq'])\n plt.ylabel(sym_labels['ampl'])\n plt.legend(loc='best', fontsize=9, numpoints=1)\n plt.xlim([10**np.floor(min(np.log10(f))), 10**np.ceil(max(np.log10(f)))])\n plt.ylim([None,1.0])\n\n # Freq-Phas\n plt.axes(ax[0])\n plt.errorbar(f, -Pha_dat, Pha_err, None, '.', color=\"#1f77b4\", label='Data')\n plt.loglog(f, -Pha_fit, '-', color=\"#ff7f0e\", label='Model')\n ax[0].set_yscale(\"log\", nonposy='clip')\n plt.fill_between(f, -Pha_max, -Pha_min, color=\"#ff7f0e\", alpha=0.2, label=\"95% HPD\")\n plt.xlabel(sym_labels['freq'])\n 
plt.ylabel(sym_labels['phas'])\n plt.legend(loc='best', fontsize=9, numpoints=1)\n plt.xlim([10**np.floor(min(np.log10(f))), 10**np.ceil(max(np.log10(f)))])\n plt.ylim([1,10**np.ceil(max(np.log10(-Pha_dat)))])\n\n for a in ax:\n a.grid('on')\n\n if (-Pha_dat < 1).any() and (-Pha_dat >= 0.1).any():\n plt.ylim([0.1,10**np.ceil(max(np.log10(-Pha_dat)))]) \n if (-Pha_dat < 0.1).any() and (-Pha_dat >= 0.01).any():\n plt.ylim([0.01,10**np.ceil(max(np.log10(-Pha_dat)))]) \n \n plt.tight_layout(pad=1, h_pad=0, w_pad=0.5)\n \n if save:\n save_where = '/Figures/Fit figures/'\n working_path = getcwd().replace(\"\\\\\", \"/\")+\"/\"\n save_path = working_path+save_where\n print(\"\\nSaving fit figure in:\\n\", save_path)\n if not path.exists(save_path):\n makedirs(save_path)\n fig.savefig(save_path+'FIT-%s-%s.%s'%(model,sample_name,save_as), dpi=fig_dpi, bbox_inches='tight')\n# try: plt.close(fig)\n# except: pass\n if draw: \n return fig\n else: \n plt.close(fig)\n return None\n\ndef print_diagn(M, q, r, s):\n return raftery_lewis(M, q, r, s, verbose=0)\n\ndef plot_par():\n# rc = rcParams.items() \n rc = {u'figure.dpi': 72.0,\n u'figure.edgecolor': 'white',\n u'figure.facecolor': 'white',\n u'savefig.bbox': u'tight',\n u'savefig.directory': u'~',\n u'savefig.dpi': 200.0,\n u'savefig.edgecolor': u'white',\n u'savefig.facecolor': u'white',\n u'axes.formatter.use_mathtext': True,\n u'mathtext.default' : 'regular',\n }\n return rc\nrcParams.update(plot_par())\n","sub_path":"bisip/invResults.py","file_name":"invResults.py","file_ext":"py","file_size_in_byte":40985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"282701763","text":"\"\"\"\nScript to train a model and save the weights in a specific location\n\"\"\"\nimport argparse\nimport datetime\nimport importlib\nimport json\nimport os\nfrom pathlib import Path\nfrom typing import Dict, List\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport tensorflow as tf\nfrom tensorboard.plugins.hparams import api as hp\nfrom tensorflow.compat.v1 import ConfigProto, InteractiveSession\n\nfrom src.extract_tf_record import tfrecord_dataloader\nfrom src.schema import Catalog\n\n# Assure reproducible experiments\ntf.random.set_seed(12)\n\n# Directory to save logs for Tensorboard\nLOG_DIR = os.path.join(\"logs\", \"fit\")\n\n# The following config setting is necessary to work on my local RTX2070 GPU\n# Comment if you suspect it's causing trouble\ntf_config = ConfigProto()\ntf_config.gpu_options.allow_growth = True\nsession = InteractiveSession(config=tf_config)\n\nprint(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n\n\ndef get_callbacks_tensorboard(compile_params: Dict, save_dir: str, **kwargs) -> List:\n log_file = os.path.join(save_dir, LOG_DIR, datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n print(f\"Saving logs to path {log_file}\")\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=log_file,\n histogram_freq=1\n )\n\n hparams = kwargs\n hparams.update(compile_params)\n\n return [\n tensorboard_callback,\n hp.KerasCallback(log_file, hparams), # log hparams\n ]\n\n\ndef main(save_dir: str, config_path: str, data_path: str, plot_loss: bool) -> None:\n \"\"\"\n Train a model and save the weights\n :param data_path: directory of tfrecords which includes train and validation folders\n :param save_dir: path to directory to save weights and logs\n :param config_path: path to json config file\n :param plot_loss: plot losses at end of training if True\n \"\"\"\n assert os.path.isfile(config_path), f\"invalid config file: {config_path}\"\n with 
open(config_path, \"r\") as config_file:\n config = json.load(config_file)\n\n print(f\"Data path is at {data_path}\")\n assert os.path.isdir(data_path), f\"invalid data_path directory: {data_path}\"\n\n os.makedirs(save_dir, exist_ok=True)\n print(f\"Saving model and logfiles at {save_dir}\")\n\n epochs = config[\"epochs\"]\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n patch_size = (config[\"patch_size\"], config[\"patch_size\"])\n\n dataframe_path = config[\"dataframe_path\"]\n assert os.path.isfile(dataframe_path), f\"invalid dataframe path: {dataframe_path}\"\n\n train_stations = config[\"train_stations\"]\n\n dataframe = pd.read_pickle(dataframe_path)\n # add invalid attribute to datetime if t0 is invalid\n dataframe = Catalog.add_invalid_t0_column(dataframe)\n # Put all GHI values during nightime to 0.0\n for station in train_stations.keys():\n dataframe.loc[dataframe[f\"{station}_DAYTIME\"] == 0, [f\"{station}_GHI\"]] = 0\n\n target_time_offsets = [pd.Timedelta(d).to_pytimedelta() for d in config[\"target_time_offsets\"]]\n\n model_name = config[\"model_name\"]\n is_cnn_2d = model_name == \"CNN2D\" or model_name == \"VGG2D\"\n if \"seq_len\" in config.keys():\n seq_len = config[\"seq_len\"]\n print(f\"Using sequence legth of {seq_len}\")\n elif is_cnn_2d:\n print(\"2D Architecture: Using t0 picture only\")\n seq_len = 1\n else:\n print(f\"Sequence length defaulting to 5\")\n seq_len = 5\n\n rotate_imgs = bool(config[\"rotate_imgs\"]) if \"rotate_imgs\" in config else False\n prob_drop_imgs = config[\"prob_drop_imgs\"] if \"prob_drop_imgs\" in config else 0.0\n\n train_data = tfrecord_dataloader(Path(data_path, \"train\"), patch_size[0], seq_len, rotate_imgs, prob_drop_imgs)\n val_data = tfrecord_dataloader(Path(data_path, \"validation\"), patch_size[0], seq_len, False, 0)\n\n # Here, we assume that the model Class is in a module with the same name and under models\n model_module = 
importlib.import_module(f\".{model_name}\", package=\"models\")\n target_len = len(target_time_offsets)\n\n inp_img_seq = tf.keras.layers.Input((seq_len, patch_size[0], patch_size[1], 5))\n inp_metadata_seq = tf.keras.layers.Input((seq_len, 5))\n inp_future_metadata = tf.keras.layers.Input(target_len)\n inp_shapes = [inp_img_seq, inp_metadata_seq, inp_future_metadata]\n\n model = getattr(model_module, model_name)()\n model(inp_shapes)\n print(model.summary())\n\n compile_params = config[\"compile_params\"]\n model.compile(**compile_params)\n\n # Saves only best model for now, could be used to saved every n epochs\n model_dir = os.path.join(save_dir, config[\"saved_weights_path\"])\n os.makedirs(model_dir, exist_ok=True)\n str_time = datetime.datetime.now().strftime(\"%m%d_%Hh%M\")\n model_path = os.path.join(model_dir, f\"{model_name}_{str_time}\")\n print(f\"Saving model {model_name} to path = {model_path}\")\n model_checkpoint = tf.keras.callbacks.ModelCheckpoint(model_path, monitor='val_loss', mode='min',\n verbose=1, save_best_only=True, save_weights_only=True)\n\n # Stops training when validation accuracy does not go down for \"patience\" epochs.\n early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min',\n patience=10, verbose=1)\n tb_callbacks = get_callbacks_tensorboard(\n compile_params, save_dir, model_name=model_name, train_batch_size=train_batch_size,\n val_batch_size=val_batch_size, patch_size=patch_size[0], seq_len=seq_len, prob_drop_imgs=prob_drop_imgs,\n rotate_imgs=rotate_imgs\n )\n\n history = model.fit(\n train_data.batch(batch_size=train_batch_size),\n epochs=epochs,\n verbose=1,\n validation_data=val_data.batch(batch_size=val_batch_size),\n callbacks=[*tb_callbacks, model_checkpoint, early_stopping]\n )\n\n if plot_loss:\n completed_epochs = len(history.history['val_loss'])\n plt.plot(range(1, completed_epochs + 1), history.history['loss'][:completed_epochs], label='train_loss')\n plt.plot(range(1, completed_epochs + 1), 
history.history['val_loss'], label='val_loss')\n plt.xlabel('Epoch')\n plt.ylabel('MSE')\n plt.legend()\n plt.show()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--save_dir\", type=str,\n help=\"path of directory where model and logfiles should be saved\")\n parser.add_argument(\"--cfg_path\", type=str,\n help=\"path to the JSON config file used to define train parameters\")\n parser.add_argument(\"--data_path\", type=str, help=\"directory of the data\")\n parser.add_argument(\"-p\", \"--plot\", help=\"plot the training and validation loss\",\n action=\"store_true\")\n args = parser.parse_args()\n main(\n save_dir=args.save_dir,\n config_path=args.cfg_path,\n data_path=args.data_path,\n plot_loss=args.plot\n )\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"39904856","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport pybullet as p\nimport pybullet_data\nimport time\n\n# p.connect(p.GUI)\n# pid = p.isConnected()\n# print(pid)\n# p.setAdditionalSearchPath(pybullet_data.getDataPath())\n# robot=p.loadURDF(\"/home/amrut/Documents/python files/ur_description/urdf/ur5_robot.urdf\",basePosition=[0.,0.,0.],baseOrientation=[0.,0.0,0.,1],useFixedBase=1)\n# plane = p.loadURDF(\"plane.urdf\",basePosition=[0,0,0],useFixedBase=1) \n# robot = p.loadURDF(\"/home/amrut/Documents/sublime/src/ur5pybullet/urdf/real_arm.urdf\",basePosition=[0.,0.,1.0],baseOrientation=[0.,0.0,0.,1],useFixedBase=1)\n#path = \"/home/amrut/Documents/sublime/src/ur5pybullet/urdf/real_arm.urdf\"\n'''\n ur_robot is the main pybullet object on which the entire package has been built.\n This object loads the robot, extract its joint and linkstates. It can apply force\n or torque on joints. Presently it supports position and velocity controllers.\n'''\nclass ur_robot:\n\tdef __init__(self,path,gui=True,basePosition=[0,0,0],baseOrientation=[0,0,0,1],useFixedBase=1):\n\t\tself.start_bullet(gui=gui) # If true enables Graphical interface.\n\t\t# Loads the robot and a plane at desired position and orientation\n\t\tself.robot = p.loadURDF(path,basePosition=basePosition,baseOrientation=baseOrientation,useFixedBase=1)\n\t\tself._plane = p.loadURDF(\"plane.urdf\",basePosition=[0,0,-2],baseOrientation=[0,0,0,1],useFixedBase=1)\n\t\tself._numjoints = p.getNumJoints(self.robot)\n\t\tself._linkNames = []\n\t\tself._jointNames = []\n\t\tself._jointTypes = []\n\t\t# p.resetJointState(self.robot,jointIndex=1,targetValue=1)\n\t\tself.getJointNames()\n\t\tself._controllers = []\n\t\tself._simRate = 100\n\t\tself._joints_pos = []\n\t\tself._joints_vel = []\n\t\tself._joints_eff = []\n\t\tself.setGravity()\n\n\tdef getJointNames(self):\n\t\t''' The JointVal must be an integer'''\n\t\tjointTypes = [\"JOINT_REVOLUTE\", \"JOINT_PRISMATIC\", \"JOINT_SPHERICAL\", 
\"JOINT_PLANAR\", \"JOINT_FIXED\"]\n\t\tself._revotuteJoints = []\n\t\tself._revotuteJointNames = []\n\t\tfor i in range(self._numjoints):\n\t\t\tdata = p.getJointInfo(self.robot,jointIndex=i)\n\t\t\t# print(data)\n\t\t\tself._linkNames.append(data[-5].decode(\"utf-8\"))\n\t\t\tself._jointNames.append(data[1].decode(\"utf-8\"))\n\t\t\tself._jointTypes.append(jointTypes[int(data[2])])\n\t\t\tif (int(data[2]) != 4):\n\t\t\t\tself._revotuteJoints.append(i)\n\t\t\t\tself._revotuteJointNames.append(self._jointNames[i]) \n\t\t# print(self._jointNames,self._revotuteJoints)\n\t\n\tdef resetJointState(self,jointName,jointVal):\n\t\tJointindex = self._jointNames.index(jointName)\n\t\tp.resetJointState(self.robot,jointIndex=Jointindex,targetValue=1)\n\t\tprint(\"Succesfully Reset joint {} to Joint value {}\".format(jointName, jointVal))\n\t\n\tdef resetJointStates(self,jointValues):\n\t\tjointNames = self._revotuteJoints\n\t\tprint(jointNames)\n\t\tfor i in range(len(jointNames)):\n\t\t\tp.resetJointState(self.robot, jointIndex=jointNames[i], targetValue=jointValues[i])\n\t\t\n\t\tprint(\"success\\n\")\n\n\tdef resetBasePosOrn(self,basePos,baseOrn):\n\t\t''' Reset Robot Base postion and Orientation'''\n\t\tp.resetBasePositionAndOrientation(self.robot,basePosition=basePos,baseOrientation=baseOrn)\n\n\tdef getBasePosOrn(self):\n\t\tpos,orn = p.getBasePositionAndOrientation(self.robot)\n\t\treturn (pos,orn)\n\n\tdef singleJointMotorControl(self,jointName,jointVal,controllerType):\n\t\t''' Controller Types are position or velocity'''\n\t\tJointindex = self._jointNames.index(jointName)\n\n\t\tif (controllerType == 'position'):\n\t\t\tp.setJointMotorControl2(self.robot,jointIndex=Jointindex,targetPosition=jointVal,controlMode=p.POSITION_CONTROL)\n\t\telse:\n\t\t\tp.setJointMotorControl2(self.robot,jointIndex=Jointindex,targetPosition=jointVal,controlMode=p.VELOCITY_CONTROL)\n\t\tfor _ in range(self._simRate):\n\t\t\tp.stepSimulation()\n\t\t\t# time.sleep(0.0001)\n\n\tdef 
allJointMotorControl(self,JointPoses=None,JointVels=None,controllerType='position'):\n\n\t\tif (controllerType == 'position'):\n\n\t\t\tif JointVels == None:\n\t\t\t\tp.setJointMotorControlArray(self.robot,\n\t\t\t\t jointIndices=self._revotuteJoints[:6],\n\t\t\t\t targetPositions=JointPoses,\n\t\t\t\t\t\t\t\t\t\tcontrolMode=p.POSITION_CONTROL)\n\t\t\telse:\n\t\t\t\tp.setJointMotorControlArray(self.robot,\n\t\t\t\t jointIndices=self._revotuteJoints[:6],\n\t\t\t\t targetPositions=JointPoses,\n\t\t\t\t\t\t\t\t\t\ttargetVelocities=JointVels,\n\t\t\t\t\t\t\t\t\t\tcontrolMode=p.POSITION_CONTROL)\n\t\t\t\t\t\t\t\t\t\t# forces=6*[10],\n\t\t\t\t\t\t\t\t\t\t# positionGains=6*[1],\n\t\t\t\t\t\t\t\t\t\t# velocityGains=6*[0.5])\n\t\telse:\n\t\t\tp.setJointMotorControlArray(self.robot,\n\t\t\t\t jointIndices=self._revotuteJoints,\n\t\t\t\t targetVelocities=JointVels,\n\t\t\t\t\t\t\t\t\t\tcontrolMode=p.VELOCITY_CONTROL)\n\t\t\n\t\tfor _ in range(self._simRate):\n\t\t\tp.stepSimulation()\n\t\t\ttime.sleep(0.0001)\n\t\t# print(self.getLinkStates())\n\n\tdef setGravity(self,val=[0,0,-9.81]):\n\t\t''' Gravity value in a list'''\n\t\tp.setGravity(*val)\n\t\tprint(\"Set Gravity value as {}\".format(val))\n\n\tdef getJointStates(self):\n\t\t\"\"\" Get Joints position and velocity \"\"\"\n\n\t\tfor i in self._revotuteJoints:\n\t\t\tdata = p.getJointState(self.robot,jointIndex=i)\n\t\t\tself._joints_pos.append(data[0])\n\t\t\tself._joints_vel.append(data[1])\n\t\t\tself._joints_eff.append(data[3])\n\t\tval = (self._revotuteJointNames,self._joints_pos,self._joints_vel)\n\t\tself._joints_pos,self._joints_vel,self._joints_eff = [],[],[]\n\t\t# print(val)\n\t\treturn val\n\n\tdef start_bullet(self,gui=True):\n\t\tif (gui == True):\n\t\t\tp.connect(p.GUI)\n\t\telse:\n\t\t\tp.connect(p.DIRECT)\n\t\t\tpass\n\t\tp.setAdditionalSearchPath(pybullet_data.getDataPath())\n\n\tdef step(self):\n\t\tp.stepSimulation()\n\n\tdef getLinkStates(self):\n\t\tpos ,orn = [],[]\n\t\tfor i in 
range(len(self._linkNames)):\n\t\t\tdata = p.getLinkState(self.robot,linkIndex=i,computeForwardKinematics=1)\n\t\t\tpos.append(data[-2])\n\t\t\torn.append(data[-1])\n\t\t\n\tdef getInitialLinkStates(self):\n\t\tself.initial_linkstates = self.getLinkStates()\n\n# path = \"/home/amrut/Documents/sublime/src/ur5pybullet/urdf/real_arm.urdf\"\t\n# ur5 = ur_robot(path)\n# # ur5.allJointMotorControl([0.1,0,0,0,0,0,0,0])\n# # time.sleep(2)\n# # ur5.resetJointStates([2,0,0,0,0,0,0,0])\n# # ur5.resetJointState(jointName='shoulder_lift_joint',jointVal=-1)\n# ur5.getJointStates()\n# # print(p.getLinkStates(ur5.robot,linkIndices=list(range(ur5._numjoints))))\n\n# while (1):\n# \tur5.getLinkStates()\n# \tpass\n\n","sub_path":"pb_ros/src/pb_ros/scripts/pybullet_class.py","file_name":"pybullet_class.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"68405326","text":"from PIconnect._operators import OPERATORS, add_operators\nfrom PIconnect.PIData import PISeriesContainer\nfrom PIconnect.time import timestamp_to_index\n\n\n@add_operators(\n operators=OPERATORS,\n members=[\"_current_value\", \"interpolated_values\"],\n newclassname=\"VirtualPIAFAttribute\",\n attributes=[\"element\", \"attribute\"],\n)\nclass PIAFAttribute(PISeriesContainer):\n \"\"\"Container for attributes of PI AF elements in the database.\"\"\"\n\n version = \"0.1.0\"\n\n def __init__(self, element, attribute):\n super().__init__()\n self.element = element\n self.attribute = attribute\n\n def __repr__(self):\n return \"%s(%s, %s; Current Value: %s %s)\" % (\n self.__class__.__name__,\n self.name,\n self.description,\n self.current_value,\n self.units_of_measurement,\n )\n\n @property\n def name(self):\n \"\"\"Return the name of the current attribute.\"\"\"\n return self.attribute.Name\n\n @property\n def parent(self):\n \"\"\"Return the parent attribute of the current attribute, or None if it has none.\"\"\"\n if not self.attribute.Parent:\n return None\n return self.__class__(self.element, self.attribute.Parent)\n\n @property\n def children(self):\n \"\"\"Return a dictionary of the direct child attributes of the current attribute.\"\"\"\n return {\n a.Name: self.__class__(self.element, a) for a in self.attribute.Attributes\n }\n\n @property\n def description(self):\n \"\"\"Return the description of the PI Point.\"\"\"\n return self.attribute.Description\n\n @property\n def last_update(self):\n \"\"\"Return the time at which the current_value was last updated.\"\"\"\n return timestamp_to_index(self.attribute.GetValue().Timestamp.UtcTime)\n\n @property\n def units_of_measurement(self):\n \"\"\"Return the units of measurement in which values for this element are reported.\"\"\"\n return self.attribute.DefaultUOM\n\n def _current_value(self):\n return self.attribute.GetValue().Value\n\n def _interpolated_value(self, time):\n 
\"\"\"Return a single value for this PI Point\"\"\"\n return self.attribute.Data.InterpolatedValue(time, self.attribute.DefaultUOM)\n\n def _recorded_value(self, time, retrieval_mode):\n \"\"\"Return a single value for this PI Point\"\"\"\n return self.attribute.Data.RecordedValue(\n time, int(retrieval_mode), self.attribute.DefaultUOM\n )\n\n def _recorded_values(self, time_range, boundary_type, filter_expression):\n include_filtered_values = False\n return self.attribute.Data.RecordedValues(\n time_range,\n boundary_type,\n self.attribute.DefaultUOM,\n filter_expression,\n include_filtered_values,\n )\n\n def _interpolated_values(self, time_range, interval, filter_expression):\n \"\"\"Internal function to actually query the pi point\"\"\"\n include_filtered_values = False\n return self.attribute.Data.InterpolatedValues(\n time_range,\n interval,\n self.attribute.DefaultUOM,\n filter_expression,\n include_filtered_values,\n )\n\n def _summary(self, time_range, summary_types, calculation_basis, time_type):\n return self.attribute.Data.Summary(\n time_range, summary_types, calculation_basis, time_type\n )\n\n def _summaries(\n self, time_range, interval, summary_types, calculation_basis, time_type\n ):\n return self.attribute.Data.Summaries(\n time_range, interval, summary_types, calculation_basis, time_type\n )\n\n def _filtered_summaries(\n self,\n time_range,\n interval,\n filter_expression,\n summary_types,\n calculation_basis,\n filter_evaluation,\n filter_interval,\n time_type,\n ):\n return self.attribute.Data.FilteredSummaries(\n time_range,\n interval,\n filter_expression,\n summary_types,\n calculation_basis,\n filter_evaluation,\n filter_interval,\n time_type,\n )\n\n def _update_value(self, value, update_mode, buffer_mode):\n return self.attribute.Data.UpdateValue(\n value,\n update_mode,\n buffer_mode,\n 
)\n","sub_path":"PIconnect/PIAFAttribute.py","file_name":"PIAFAttribute.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"455350221","text":"import datetime\n\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nimport os\nfrom pathlib import Path\n\n\nBASE_DIR = Path(__file__).resolve().parent.parent\n\nSECRET_KEY = '*k5s)mhyfq@^mh3#!nops^3o^ib4+hf%q7c4=)#83y(bvvn^b^'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nSITE_ID = 1\nBASE_URL = os.getenv('BASE_URL', 'http://127.0.0.1:8000')\n\nALLOWED_HOSTS = ['*']\n\nCORS_ORIGIN_ALLOW_ALL = True\n\n\n# Application definition\nSYSTEM_APPS = [\n 'base.apps.BaseConfig',\n 'activity.apps.ActivityConfig',\n 'analytics.apps.AnalyticsConfig',\n 'ads.apps.AdsConfig',\n 'case.apps.CaseConfig',\n 'chat.apps.ChatConfig',\n 'debate.apps.DebateConfig',\n 'notification.apps.NotificationConfig',\n 'profiles.apps.ProfilesConfig',\n 'payments.apps.PaymentsConfig',\n 'proof.apps.ProofConfig',\n 'tracker.apps.TrackerConfig',\n 'verification.apps.VerificationConfig',\n]\n\nSOCIAL_AUTHENTICATION_PROVIDERS = [\n 'allauth',\n 'allauth.account',\n\n 'rest_auth.registration',\n\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.google',\n 'allauth.socialaccount.providers.facebook',\n 'allauth.socialaccount.providers.twitter',\n]\n\nTHIRD_PARTY_APPS = [\n 'rest_framework',\n 'rest_framework.authtoken',\n 'rest_auth',\n 'django.contrib.sites',\n 'django_extensions',\n 'drf_yasg',\n 'storages',\n 'corsheaders',\n 'channels'\n] + SOCIAL_AUTHENTICATION_PROVIDERS\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n] + THIRD_PARTY_APPS + SYSTEM_APPS\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 
'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'rozprava.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [BASE_DIR / 'templates']\n ,\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nAUTHENTICATION_BACKENDS = [\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n]\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',\n )\n}\n\nREST_AUTH_SERIALIZERS = {\n 'JWT_SERIALIZER': 'profiles.serializers.JWTSerializer'\n}\nREST_AUTH_REGISTER_SERIALIZERS = {\n 'REGISTER_SERIALIZER': 'profiles.serializers.RegisterSerializer'\n}\n\nREST_USE_JWT = True\nJWT_AUTH = {\n 'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=7 * 24 * 60 * 60)\n}\n\nWSGI_APPLICATION = 'rozprava.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': os.getenv('POSTGRES_ENGINE', 'django.db.backends.sqlite3'),\n 'NAME': os.getenv('POSTGRES_DB', BASE_DIR / 'db.sqlite3'),\n 'USER': os.getenv('POSTGRES_USER', 'rozprava'),\n 'PASSWORD': os.getenv('POSTGRES_PASSWORD'),\n 'HOST': os.getenv('POSTGRES_HOST', 'localhost'),\n 'PORT': 5432\n }\n}\n\nASGI_APPLICATION = \"chat.websocket.routing.application\"\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels_redis.core.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": ['redis://redis:6379/4']\n }\n },\n}\n\n\n\n# Password validation\n# 
https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Celery Configuration Options\nCELERY_BROKER_URL = 'redis://redis:6379'\nCELERY_RESULT_BACKEND = 'redis://redis:6379'\nCELERY_ACCEPT_CONTENT = ['application/json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TIMEZONE = \"India/Kolkata\"\nCELERY_TASK_TRACK_STARTED = True\nCELERY_TASK_TIME_LIMIT = 30 * 60\n\n# Email Configuration Options\nEMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'\n# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_PORT = 587\nEMAIL_HOST_USER = os.getenv('EMAIL_USER')\nEMAIL_HOST_PASSWORD = os.getenv('EMAIL_PASSWORD')\n\n# Sentry Configurations\nsentry_sdk.init(\n dsn=\"https://a0fa2c4d506345b09205d75dc0e220e0@o647115.ingest.sentry.io/5759559\",\n integrations=[DjangoIntegration()],\n\n # Set traces_sample_rate to 1.0 to capture 100%\n # of transactions for performance monitoring.\n # We recommend adjusting this value in production.\n traces_sample_rate=1.0,\n\n # If you wish to associate users to errors (assuming you are using\n # django.contrib.auth) you may enable sending PII data.\n send_default_pii=True\n)\n\n# Storage Configurations\nAWS_STORAGE_PERMITTED = os.getenv('AWS_STORAGE_PERMITTED', False)\n\nif AWS_STORAGE_PERMITTED:\n AWS_ACCESS_KEY_ID = 
os.getenv('AWS_ACCESS_KEY_ID')\n AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')\n AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')\n\n AWS_DEFAULT_ACL = 'public-read'\n AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'\n AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400'}\n\n STATIC_LOCATION = 'static'\n STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/'\n STATICFILES_STORAGE = 'rozprava.storage_backends.StaticStorage'\n\n PUBLIC_MEDIA_LOCATION = 'media'\n MEDIA_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{PUBLIC_MEDIA_LOCATION}/'\n DEFAULT_FILE_STORAGE = 'rozprava.storage_backends.PublicMediaStorage'\n\n PRIVATE_MEDIA_LOCATION = 'private'\n PRIVATE_FILE_STORAGE = 'rozprava.storage_backends.PrivateMediaStorage'\n\nelse:\n STATIC_URL = '/static/'\n STATIC_ROOT = os.path.join(BASE_DIR, 'static/')\n\n MEDIA_URL = '/media/'\n MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')\n\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'templates/raw/assets/')\n]\n","sub_path":"settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"498375459","text":"# -*- coding: utf-8 -*-\nfrom extdirect.django.serializer import Serializer as ExtSerializer\nfrom StringIO import StringIO\nfrom django.utils.encoding import smart_str, smart_unicode\nfrom django.utils import datetime_safe\nfrom catalog.direct import ColumnModel\nfrom django.contrib import admin\nfrom django.core import urlresolvers\n\n\nclass Serializer(ExtSerializer):\n \"\"\"Overrides functions defined in extdirect.django.\n Field lookups narrowed to ColumnModel query\n \"\"\"\n def start_object(self, obj):\n self._current = {}\n self._content_object = obj.content_object\n self._admin_cls = admin.site._registry[type(self._content_object)]\n\n def handle_field(self, obj, field):\n try:\n value = admin.util.lookup_field(field.name, self._content_object, self._admin_cls)[2]\n except AttributeError:\n value = ''\n self._current[field.name] = smart_unicode(value, strings_only=True)\n\n def handle_model(self, obj):\n url = urlresolvers.reverse('admin:%s_%s_change' %\n (obj.content_object._meta.app_label, obj.content_object._meta.module_name),\n args=[obj.object_id],\n )\n self._current['url'] = url\n\n def serialize(self, queryset, **options):\n \"\"\"\n Serialize a queryset.\n \"\"\"\n self.options = options\n self.stream = options.get(\"stream\", StringIO())\n self.meta = options.get('meta', dict(root='records', total='total'))\n self.extras = options.get('extras', [])\n total = options.get(\"total\", queryset.count())\n self.start_serialization(total)\n\n colmodel = ColumnModel(admin.site)\n fields = colmodel.fields.values()\n for obj in queryset:\n self.start_object(obj)\n for field in fields:\n self.handle_field(obj, field)\n self.handle_model(obj)\n self.end_object(obj)\n self.end_serialization()\n return 
self.getvalue()\n","sub_path":"build/django-catalog/catalog/grid_to_json.py","file_name":"grid_to_json.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"144958385","text":"#!/usr/bin/env python3\n\nimport os\nimport secrets\nimport sys\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, BASE_DIR)\n\nimport storage\nimport config\n\nfrom helplib import models\n\nSCRIPTS_DIR = os.path.join(BASE_DIR, 'scripts')\n\n_CONFIG_INITIALIZATION_QUERY = 'INSERT INTO globalconfig (real_round, game_running) VALUES (%s, %s)'\n\n_TEAM_INSERT_QUERY = 'INSERT INTO Teams (name, ip, token) VALUES (%s, %s, %s) RETURNING id'\n\n_TASK_INSERT_QUERY = \"\"\"\nINSERT INTO Tasks (name, checker, gets, puts, places, checker_timeout, env_path, checker_returns_flag_id) \nVALUES (%s, %s, %s, %s, %s, %s, %s, %s) RETURNING id\n\"\"\"\n\n_TEAMTASK_INSERT_QUERY = \"INSERT INTO TeamTasks (task_id, team_id, round, score, status) VALUES (%s, %s, %s, %s, %s)\"\n\n\ndef run():\n conn = storage.get_db_pool().getconn()\n curs = conn.cursor()\n\n create_query_path = os.path.join(SCRIPTS_DIR, 'create_query.sql')\n create_query = open(create_query_path).read()\n curs.execute(create_query)\n\n curs.execute(_CONFIG_INITIALIZATION_QUERY, (0, 0))\n\n teams_config = config.get_teams_config()\n teams = []\n\n for team_conf in teams_config:\n team_token = secrets.token_hex(8)\n team = models.Team(id=None, **team_conf, token=team_token)\n curs.execute(_TEAM_INSERT_QUERY, (team.name, team.ip, team_token))\n team.id, = curs.fetchone()\n teams.append(team)\n\n tasks_config = config.get_tasks_config()\n tasks = []\n\n game_config = config.get_game_config()\n global_env_path = game_config['env_path']\n checkers_path = game_config['checkers_path']\n global_default_score = game_config['default_score']\n\n for task_conf in tasks_config:\n if 'env_path' not in task_conf:\n task_conf['env_path'] = global_env_path\n\n if 'default_score' not in task_conf:\n task_conf['default_score'] = global_default_score\n\n task_conf['checker'] = os.path.join(checkers_path, task_conf['checker'])\n\n task = models.Task(id=None, 
**task_conf)\n curs.execute(\n _TASK_INSERT_QUERY,\n (\n task.name,\n task.checker,\n task.gets,\n task.puts,\n task.places,\n task.checker_timeout,\n task.env_path,\n int(task.checker_returns_flag_id),\n )\n )\n task.id, = curs.fetchone()\n tasks.append(task)\n\n for team in teams:\n for task in tasks:\n curs.execute(_TEAMTASK_INSERT_QUERY, (task.id, team.id, 0, task.default_score, -1))\n\n conn.commit()\n curs.close()\n storage.get_db_pool().putconn(conn)\n\n storage.caching.cache_teamtasks(round=0)\n game_state = storage.game.get_game_state(round=0)\n with storage.get_redis_storage().pipeline(transaction=True) as pipeline:\n pipeline.set('game_state', game_state.to_json())\n pipeline.publish('scoreboard', game_state.to_json())\n pipeline.execute()\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"backend/scripts/init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"37860606","text":"import unittest\nfrom app.PyAES_Helpers import *\n\n\nclass PyAES_Helpers_tests(unittest.TestCase):\n def test_hex_to_matrix_result(self):\n string = \"72CF72D0FB5DC3813B5E47F85F952EDF\"\n matrix = [[0x72, 0xCF, 0x72, 0xD0], [0xFB, 0x5D, 0xC3, 0x81], [0x3B, 0x5E, 0x47, 0xF8],\n [0x5F, 0x95, 0x2E, 0xDF]]\n result = PyAES_Helpers.hex_to_matrix(string)\n self.assertEqual(result, matrix)\n\n def test_matrix_to_hex_result(self):\n string = \"72CF72D0FB5DC3813B5E47F85F952EDF\"\n matrix = [[0x72, 0xCF, 0x72, 0xD0], [0xFB, 0x5D, 0xC3, 0x81], [0x3B, 0x5E, 0x47, 0xF8],\n [0x5F, 0x95, 0x2E, 0xDF]]\n result = PyAES_Helpers.matrix_to_hex(matrix)\n self.assertEqual(result, string)\n\n def test_get_matrix_column_result(self):\n matrix = [[0x72, 0xCF, 0x72, 0xD0], [0xFB, 0x5D, 0xC3, 0x81], [0x3B, 0x5E, 0x47, 0xF8],\n [0x5F, 0x95, 0x2E, 0xDF]]\n columns = [[0x72, 0xFB, 0x3B, 0x5F], [0xCF, 0x5D, 0x5E, 0x95], [0x72, 0xC3, 0x47, 0x2E],\n [0xD0, 0x81, 0xF8, 0xDF]]\n for i in range(4):\n result = PyAES_Helpers.get_matrix_column(matrix, i)\n self.assertEqual(result, columns[i])\n\n def test_set_matrix_column_result(self):\n matrices = [[], [], [], []]\n matrices[0] = [[0x34, 0xCF, 0x72, 0xD0], [0xFD, 0x5D, 0xC3, 0x81], [0x82, 0x5E, 0x47, 0xF8],\n [0x4E, 0x95, 0x2E, 0xDF]]\n matrices[1] = [[0x72, 0x34, 0x72, 0xD0], [0xFB, 0xFD, 0xC3, 0x81], [0x3B, 0x82, 0x47, 0xF8],\n [0x5F, 0x4E, 0x2E, 0xDF]]\n matrices[2] = [[0x72, 0xCF, 0x34, 0xD0], [0xFB, 0x5D, 0xFD, 0x81], [0x3B, 0x5E, 0x82, 0xF8],\n [0x5F, 0x95, 0x4E, 0xDF]]\n matrices[3] = [[0x72, 0xCF, 0x72, 0x34], [0xFB, 0x5D, 0xC3, 0xFD], [0x3B, 0x5E, 0x47, 0x82],\n [0x5F, 0x95, 0x2E, 0x4E]]\n new_column = [0x34, 0xFD, 0x82, 0x4E]\n\n for i in range(4):\n original_matrix = [[0x72, 0xCF, 0x72, 0xD0], [0xFB, 0x5D, 0xC3, 0x81], [0x3B, 0x5E, 0x47, 0xF8],\n [0x5F, 0x95, 0x2E, 0xDF]]\n PyAES_Helpers.set_matrix_column(original_matrix, i, new_column)\n self.assertEqual(original_matrix, matrices[i])\n\n def 
test_galois_multiply_result(self):\n bytes_a = [0xCB, 0x3E, 0x30, 0xAB, 0x6D]\n bytes_b = [0x4E, 0xA4, 0x9D, 0x65, 0x8D]\n result_bytes = [0x9E, 0x4B, 0x35, 0xEE, 0xBB]\n for i in range(5):\n result = PyAES_Helpers.galois_multiply(bytes_a[i], bytes_b[i])\n self.assertEqual(result, result_bytes[i])\n\n def test_rot_word_result(self):\n word = 0xF4BC32E6\n rotated_word = 0xBC32E6F4\n result = PyAES_Helpers.rot_word(word)\n self.assertEqual(result, rotated_word)\n\n def test_bytes_to_word_result(self):\n byte_list = [0xF4, 0xBC, 0x32, 0xE6]\n word = 0xF4BC32E6\n result = PyAES_Helpers.bytes_to_word(byte_list)\n self.assertEqual(result, word)\n\n def test_word_to_bytes_result(self):\n byte_list = [0xF4, 0xBC, 0x32, 0xE6]\n word = 0xF4BC32E6\n result = PyAES_Helpers.word_to_bytes(word)\n self.assertEqual(result, byte_list)\n\n def test_mix_column_result(self):\n columns = [[0X09, 0X28, 0X7F, 0X47], [0X08, 0XE3, 0XEE, 0XDA], [0X62, 0X04, 0X2C, 0X4A],\n [0XBF, 0X6F, 0X74, 0X6A], [0XCA, 0X55, 0X44, 0X33]]\n result_columns = [[0x52, 0x9F, 0x16, 0xC2], [0x1A, 0x26, 0x59, 0xBA], [0xAE, 0x54, 0xE0, 0x1A],\n [0xCA, 0x97, 0x86, 0x15], [0x07, 0x9F, 0x42, 0x32]]\n for i in range(5):\n result = PyAES_Helpers.mix_column(columns[i])\n self.assertEqual(result, result_columns[i])\n\n def test_inv_mix_column_result(self):\n columns = [[0X09, 0X28, 0X7F, 0X47], [0X08, 0XE3, 0XEE, 0XDA], [0X62, 0X04, 0X2C, 0X4A],\n [0XBF, 0X6F, 0X74, 0X6A], [0XCA, 0X55, 0X44, 0X33]]\n result_columns = [[0x59, 0xF0, 0x1D, 0xAD], [0x0D, 0x60, 0x4E, 0xFC], [0x8D, 0x77, 0xC3, 0x39],\n [0xE1, 0xA9, 0xAD, 0x2B], [0x08, 0x1D, 0x4D, 0xB0]]\n\n for i in range(5):\n result = PyAES_Helpers.inv_mix_column(columns[i])\n self.assertEqual(result, result_columns[i])\n\n def test_string_to_hex_result(self):\n string = \"PythonRocks!\"\n hex_string = \"507974686F6E526F636B7321\"\n result = PyAES_Helpers.string_to_hex(string)\n self.assertEqual(result, hex_string)\n\n def test_hex_to_string_result(self):\n string = 
\"PythonRocks!\"\n hex_string = \"507974686F6E526F636B7321\"\n result = PyAES_Helpers.hex_to_string(hex_string)\n self.assertEqual(result, string)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_PyAES_Helpers.py","file_name":"test_PyAES_Helpers.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"454416267","text":"from PIL import Image\nimport sqlite3 as sql\nimport sys\nimport hashlib\nimport json\n\ndb = sql.connect(':memory:')\n\nres = db.execute(\"create table Imagem(id integer primary key autoincrement, img text)\")\nres = db.execute(\"create table Objeto(id integer primary key autoincrement, img text, type text, color text, id_orig integer)\");\n\n\ndef main(argv):\n\tid_img=1\n\thash_img = codif_img(argv[1])\n\timage_entry(hash_img)\n\tobject_entry(imagem, tipo, cor, id_original)\n\tread_image(id_img)\n\tread_object(tipo, cor)\n\tdel_image(id_img)\n\tread_image(id_img)\n\t\n\tdb.close()\n\n#codifica as imagens\ndef codif_img(img_name):\n\th = hashlib.md5()\n\th.update(img_name.encode('utf-8'))\n\treturn h.hexdigest()\n\n#entrada na database das imagens originais\t\ndef image_entry(hash_img):\t\t\n\tres = db.execute(\"insert into Imagem (img) values (?) \", (hash_img,))\n\n#entrada na database dos objetos\ndef object_entry(imagem, tipo, cor, id_og):\n\tres = db.execute(\"insert into Objeto (img, type, color, id_orig) values (?, ?, ?, ?) \", (imagem, tipo, cor, id_og))\n\t\n#apaga da database as imagens\ndef del_image(id_og):\n\tres = db.execute(\"delete from Imagem where id like ?\", (id_og,))\n\t\n#devolve o nome da imagem pelo id\t\ndef read_image(id_og):\n\tres = db.execute(\"select * from Imagem where id like ?\", (id_og,))\n\tprint(res.fetchone())\n\treturn res.fetchone()\n\t\n#devolve o nome da imagem/objeto pelas carateristicas tipo ou cor\t\ndef read_object(search):\n\tres = db.execute(\"select img from Object where type like or color like\", (search,)) \t\n\treturn res.fetchall()\n\t\t\nmain(sys.argv)\n\n\n","sub_path":"1ºAno/LABI/proj2/cherrypyWithStaticContent/hello/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"221809698","text":"\"\"\" Answer a specific exercise of a programming contest.\n\nThis module contains all the code needed to answer a given exercise during a\nprogramming contest. Due to its quick creation and the fact that it's probably\nnot following proper Python guidelines (as described in multiples PEP\ndocuments), it shouldn't be used in production.\n\nThis program is free software: you can redistribute it and/or modify it under\nthe terms of the GNU General Public License as published by the Free Software\nFoundation, either version 3 of the License, or (at your option) any later\nversion.\n\nThis program is distributed in the hope that it will be useful, but WITHOUT\nANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\nFOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License along with\nthis program. If not, see .\n\"\"\"\n\nimport statistics\nimport math\nimport datetime\nimport collections\n\n\n__author__ = \"Bizzozzéro Nicolas\"\n__contact__ = \"nicolasbizzozzero[at]gmail.com\"\n__copyright__ = \"Copyright 2019, Bizzozzéro Nicolas\"\n__date__ = \"2019/03/26\"\n__license__ = \"GPLv3\"\n\n\ndef identity(*args):\n \"\"\" Always returns the same value that was used as its argument.\n\n Example:\n >>> identity(1)\n 1\n >>> identity(1, 2)\n (1, 2)\n \"\"\"\n if len(args) == 1:\n return args[0]\n return args\n\n\ndef parsin(*, l=1, vpl=1, cf=identity, s=\" \"):\n \"\"\" Can parse inputs usually used in competitive programming problems.\n Arguments:\n - l, as in \"Lines\", the number of lines to parse at once.\n - vpl, as in \"Values Per Line\", the number of values to parse per line.\n - cf, as in \"Cast Function\", the function to apply to each parsed element.\n - s, as in \"Separator\", the string separating multiple values in the same\n line.\n \"\"\"\n if l == 1:\n if vpl == 1:\n return cf(input())\n else:\n return list(map(cf, 
input().split(s)))\n else:\n if vpl == 1:\n return [cf(input()) for _ in range(l)]\n else:\n return [list(map(cf, input().split(s)))\n for _ in range(l)]\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Contests/2019-03-26 - Battle Dev 13/exo1/exo1.py","file_name":"exo1.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"369306540","text":"\nimport textract\nimport re\n\noutput_file = \"/Users/alexjoseph/Learning/Data/credit_card_statements/decrypted_yes.pdf\"\n\nfrom pdf2image import convert_from_path\npages = convert_from_path(output_file, 500)\n\nfor i in range(len(pages)):\n pages[i].save(output_file_image + '_' + str(i) + '.jpg', 'JPEG')\n\ntext = textract.process(output_file_image + '_' + str(1) + '.jpg', encoding='ascii',\n method='tesseract')\n\nsplit_text = text.decode('utf-8').split('\\n')\n\nsplit_text_with_amount = list(filter(lambda k: re.findall(r\"^[0-9].*(Dr|Cr)$\", k), split_text))\n\ntotal_credit = list(filter(lambda k: re.findall(r\"^[0-9].*(Cr)$\", k), split_text))\ntotal_debit = list(filter(lambda k: re.findall(r\"^[0-9].*(Dr)$\", k), split_text))\ntotal_debit_numeric = [float(x.replace(\" Dr\", \"\").replace(',', '')) for x in total_debit]\ntotal_credit_numeric = [float(x.replace(\" Cr\", \"\").replace(',', '')) for x in total_credit]\ntotal_credit_numeric\n\nsum(total_debit_numeric)\n\n\npages = convert_from_path(input_file_hdfc, 500)\nfor i in range(len(pages)):\n pages[i].save(output_file_image_hdfc + '_' + str(i) + '.jpg', 'JPEG')\n\ntext = textract.process(output_file_image_hdfc + '_' + str(1) + '.jpg', encoding='ascii',\n method='tesseract')\n\nsplit_text\n","sub_path":"credit_card_parsing/yes_bank_ocr.py","file_name":"yes_bank_ocr.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"205296313","text":"#-*- coding:utf-8 -*-\r\n#Author:Hong Liu\r\n\r\nimport socket\r\nserver=socket.socket()\r\nserver.bind(('localhost',6969)) #绑定要监听的端口\r\nserver.listen(5) # 监听,最多允许挂起5个连接\r\nwhile True:\r\n conn, addr = server.accept() #等待 返回对方的连接实例和地址 阻塞\r\n while True:\r\n data = conn.recv(1024) #一次接收不能太大 建议最大8192 (8K) 默认阻塞\r\n print('recv:',data)\r\n if not data:\r\n break\r\n conn.send(data.upper()) #sendall =循环send 避免客户端文件发不完\r\n\r\n\r\nserver.close()","sub_path":"day6/socket/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"222642080","text":"from torchtext.utils import unicode_csv_reader\nfrom torchtext.data.datasets_utils import _RawTextIterableDataset\nfrom torchtext.data.datasets_utils import _wrap_split_argument\nfrom torchtext.data.datasets_utils import _add_docstring_header\nfrom torchtext.data.datasets_utils import _download_extract_validate\nimport io\nimport os\nimport logging\n\nURL = 'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbZVhsUnRWRDhETzA'\n\nMD5 = '57d28bd5d930e772930baddf36641c7c'\n\nNUM_LINES = {\n 'train': 3000000,\n 'test': 650000,\n}\n\n_PATH = 'amazon_review_full_csv.tar.gz'\n\n_EXTRACTED_FILES = {\n 'train': f'{os.sep}'.join(['amazon_review_full_csv', 'train.csv']),\n 'test': f'{os.sep}'.join(['amazon_review_full_csv', 'test.csv']),\n}\n\n_EXTRACTED_FILES_MD5 = {\n 'train': \"31b268b09fd794e0ca5a1f59a0358677\",\n 'test': \"0f1e78ab60f625f2a30eab6810ef987c\"\n}\n\n\n@_add_docstring_header(num_lines=NUM_LINES, num_classes=5)\n@_wrap_split_argument(('train', 'test'))\ndef AmazonReviewFull(root, split):\n def _create_data_from_csv(data_path):\n with io.open(data_path, encoding=\"utf8\") as f:\n reader = unicode_csv_reader(f)\n for row in reader:\n yield int(row[0]), ' '.join(row[1:])\n\n path = _download_extract_validate(root, URL, MD5, os.path.join(root, _PATH), os.path.join(root, _EXTRACTED_FILES[split]),\n _EXTRACTED_FILES_MD5[split], hash_type=\"md5\")\n logging.info('Creating {} data'.format(split))\n return _RawTextIterableDataset(\"AmazonReviewFull\", NUM_LINES[split],\n _create_data_from_csv(path))\n","sub_path":"torchtext/datasets/amazonreviewfull.py","file_name":"amazonreviewfull.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"360750662","text":"import bot\nprint(\"Hi! I'm AppoBot! How can I help you? If you are not sure, you can type help :)\")\n\nwhile 1:\n message = input()\n\n if message == \"exit\" or message == \"quit\":\n break\n\n print(bot.answer(message))\n\n if(bot.state in bot.states[\"appointment\"]):\n bot.appointment()\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"521959763","text":"from binaryninja import BinaryReader, BinaryView\nfrom typing import Callable, List, Union\nimport copy\n\nfrom .vmt import DelphiVMT\nfrom .constants import VMTOffsets\n\n\nclass DelphiAnalyzer(object):\n '''\n TODO: Doc\n '''\n\n def __init__(self, bv: BinaryView, delphi_version: int, offset_ptr_size = -1, start = -1, end = -1):\n self._offset_ptr_size = offset_ptr_size if offset_ptr_size > 0 else bv.address_size\n self._start = start if start >= 0 else bv.start\n self._end = end if end > 0 else bv.end\n self._vmt_list: List[DelphiVMT] = []\n self._bv = bv\n self._br = BinaryReader(bv)\n self._delphi_version = delphi_version\n self._vmt_offsets = VMTOffsets(delphi_version, self._offset_ptr_size)\n\n # Not really sure about this but it's working on my test binary (Win64 Delphi v10.3)\n # Need to check offsets for macOS\n # I'll clean that later\n if self._bv.view_type == 'PE' and self._offset_ptr_size == 8:\n for x, y in self._vmt_offsets.__dict__.items():\n setattr(self._vmt_offsets, x, y + -24)\n\n\n ## Properties\n\n @property\n def start(self) -> int:\n return self._start\n\n @property\n def end(self) -> int:\n return self._end\n\n @property\n def delphi_version(self) -> int:\n return self._delphi_version\n\n @property\n def vmt_list(self) -> List[DelphiVMT]:\n return self._vmt_list\n\n @property\n def vmt_offsets(self) -> VMTOffsets:\n return copy.copy(self._vmt_offsets)\n\n\n ## Public API\n\n def update_analysis_and_wait(self, callback: Callable[[DelphiVMT], None] = None):\n self._vmt_list = []\n self._seek_to_offset(0)\n\n while True:\n addy = self._get_possible_vmt()\n\n if addy is None:\n break\n\n delphi_vmt = DelphiVMT(self._bv, self._delphi_version, addy, self, self._offset_ptr_size)\n\n if not delphi_vmt.is_valid:\n continue\n\n self._vmt_list.append(delphi_vmt)\n\n if callback is not None:\n callback(delphi_vmt)\n\n\n ## Protected methods\n\n def _seek_to_offset(self, offset: int):\n self._br.seek(self._start + 
offset)\n\n\n def _read_ptr(self) -> Union[None, int]:\n if self._offset_ptr_size == 4:\n return self._br.read32()\n elif self._offset_ptr_size == 8:\n return self._br.read64()\n\n\n def _get_possible_vmt(self) -> int:\n while self._br.offset <= self._end - self._offset_ptr_size - 1:\n begin = self._br.offset\n\n if not self._bv.is_valid_offset(begin):\n self._br.seek_relative(self._offset_ptr_size)\n continue\n\n class_vmt = self._read_ptr()\n\n if class_vmt is None:\n # If BinaryReader can't read, it will not update the offset\n self._br.seek_relative(self._offset_ptr_size)\n continue\n\n if begin == class_vmt + self._vmt_offsets.cVmtSelfPtr:\n return class_vmt\n","sub_path":"delphi/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"415934548","text":"from typing import Any, Tuple\nimport logging\nfrom enum import Enum\nimport numpy as np\nimport torch\n\n# AUTHORSHIP\n__version__ = \"0.0.0dev\"\n__author__ = \"Mirko Polato\"\n__copyright__ = \"Copyright 2021, gossipy\"\n__license__ = \"MIT\"\n__maintainer__ = \"Mirko Polato, PhD\"\n__email__ = \"mak1788@gmail.com\"\n__status__ = \"Development\"\n#\n\n__all__ = [\"node\",\n \"simul\",\n \"utils\",\n \"data\",\n \"model\",\n \"set_seed\",\n \"DuplicateFilter\",\n \"CreateModelMode\",\n \"AntiEntropyProtocol\",\n \"MessageType\",\n \"CacheKey\",\n \"CacheItem\"]\n\n\nclass DuplicateFilter(object):\n def __init__(self):\n self.msgs = set()\n\n def filter(self, record):\n rv = record.msg not in self.msgs\n self.msgs.add(record.msg)\n return rv\n\nlogging.basicConfig(level=logging.INFO,\n format=\"[%(asctime)s] %(message)s\",\n datefmt='%d%m%y-%H:%M:%S')\n\nLOG = logging.getLogger(\"gossipy\")\nLOG.addFilter(DuplicateFilter())\n\n\ndef set_seed(seed=0) -> None:\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n\nclass CreateModelMode(Enum):\n UPDATE = 1\n MERGE_UPDATE = 2\n UPDATE_MERGE = 3\n PASS = 4\n\n\nclass AntiEntropyProtocol(Enum):\n PUSH = 1,\n PULL = 2,\n PUSH_PULL = 3\n\n\nclass MessageType(Enum):\n PUSH = 1,\n PULL = 2,\n REPLY = 3,\n PUSH_PULL = 4\n\n\nclass EqualityMixin(object):\n def __eq__(self, other: Any) -> bool:\n return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__)\n\n def __ne__(self, other: Any) -> bool:\n return not self.__eq__(other)\n\n\nclass Sizeable():\n def get_size(self) -> int:\n raise NotImplementedError()\n\n\nclass CacheKey(Sizeable):\n def __init__(self, *args):\n self.key = tuple(args)\n \n def get(self):\n return self.key\n \n def get_size(self) -> int:\n from gossipy.model.handler import ModelHandler\n val = ModelHandler._CACHE[self].value\n if isinstance(val, (float, int, bool)): return 1\n elif isinstance(val, Sizeable): return val.get_size()\n else: \n 
LOG.warning(\"Impossible to compute the size of %s. Set to 0.\" %val)\n return 0\n \n def __repr__(self):\n return str(self.key)\n \n def __hash__(self):\n return hash(self.key)\n \n def __eq__(self, other: Any) -> bool:\n if isinstance(other, CacheKey):\n return self.key == other.key\n return False\n\n def __ne__(self, other: Any):\n return not (self == other)\n\n\nclass CacheItem(Sizeable):\n def __init__(self, value: Any):\n self.value = value\n self.refs = 1\n \n def add_ref(self):\n self.refs += 1\n \n def del_ref(self):\n self.refs -= 1\n return self.value\n \n def is_referenced(self):\n return self.refs > 0\n \n def get_size(self) -> int:\n if isinstance(self.value, (tuple, list)):\n sz: int = 0\n for t in self.value:\n if t is None: continue\n if isinstance(t, (float, int, bool)): sz += 1\n elif isinstance(t, Sizeable): sz += t.get_size()\n else: \n LOG.warning(\"Impossible to compute the size of %s. Set to 0.\" %t)\n return max(sz, 1)\n elif isinstance(self.value, Sizeable):\n return self.value.get_size()\n elif isinstance(self.value, (float, int, bool)):\n return 1\n else:\n LOG.warning(\"Impossible to compute the size of %s. 
Set to 0.\" %self.value)\n return 0\n\n\nclass Message(Sizeable):\n def __init__(self,\n timestamp: int,\n sender: int,\n receiver: int,\n type: MessageType,\n value: Tuple[Any, ...]):\n self.timestamp = timestamp\n self.sender = sender\n self.receiver = receiver\n self.type = type\n self.value = value\n \n def get_size(self) -> int:\n if self.value is None: return 1\n if isinstance(self.value, (tuple, list)):\n sz: int = 0\n for t in self.value:\n if t is None: continue\n if isinstance(t, (float, int, bool)): sz += 1\n elif isinstance(t, Sizeable): sz += t.get_size()\n else: raise TypeError(\"Cannot compute the size of the payload!\")\n return max(sz, 1)\n elif isinstance(self.value, Sizeable):\n return self.value.get_size()\n elif isinstance(self.value, (float, int, bool)):\n return 1\n else:\n raise TypeError(\"Cannot compute the size of the payload!\")\n \n def __str__(self) -> str:\n s: str = \"T%d [%d -> %d] {%s}: \" %(self.timestamp,\n self.sender,\n self.receiver,\n self.type.name)\n s += \"ACK\" if self.value is None else str(self.value)\n return s","sub_path":"gossipy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"506900145","text":"from keras.models import load_model\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, LSTM, Activation\nfrom keras.utils import to_categorical\nimport wandb\nfrom wandb.keras import WandbCallback\nimport matplotlib.pyplot as plt\nimport sklearn.metrics as metrics\nimport librosa\nimport numpy as np\nimport traceback\nimport os\nimport json\n\ndef classify_file( audio_file ) :\n # load the models\n print(\"loading the data\")\n all_models = [ load_model(\"snaw-backend/model/anthro/ant_cnn_model.h5\"),\n load_model(\"snaw-backend/model/bio/bio_cnn_model.h5\"),\n load_model(\"snaw-backend/model/geo/geo_cnn_model.h5\") ]\n\n print(\"loaded data\")\n all_labels = [ [\"AAT\", \"AHV\", \"AMA\", \"ART\", \"ASI\", \"AVH\", \"AVT\"],\n [\"BRA\", \"BAM\", \"BBI\", \"BMA\", \"BIN\"],\n [\"GOC\", \"GRA\", \"GST\",\"GWG\", \"GWC\"] ]\n\n classify_dict = [ {'name' : 'Anthrophony',\n 'color' : '#0088FE',\n 'data' : [] },\n {'name': 'Biophony',\n 'color': '#00C49F',\n 'data': [] },\n {'name': 'Geophony',\n 'color': '#FFBB28',\n 'data': [] } ]\n\n ## Running the models\n\n n_mfcc = 128 # bucket size !!SUBJECT TO CHANGE!!\n max_len = 32 # max_len size !!SUBJECT TO CHANGE!!\n channels = 1 # channels !!SUBJECT TO CHANGE!!\n\n # convert file to wav2mfcc\n # Mel-frequency cepstral coefficients\n file_path = audio_file\n big_wave, sr = librosa.load(file_path, mono=True, sr=None)\n #print(wave.shape, sr)\n\n for sec_index in range( int(big_wave.shape[0] / sr) ) :\n start_sec = sec_index\n end_sec = sec_index + 1\n\n sec_to_trim = np.array( [ float(start_sec), float(end_sec) ] )\n sec_to_trim = np.ceil( sec_to_trim * sr )\n\n wave = big_wave[int(sec_to_trim[0]) : int(sec_to_trim[1])]\n\n wave = np.asfortranarray(wave[::3])\n mfcc = librosa.feature.mfcc(wave, sr=16000, n_mfcc=n_mfcc)\n\n # If maximum length exceeds mfcc lengths then pad the remaining ones\n if (max_len > mfcc.shape[1]):\n 
pad_width = max_len - mfcc.shape[1]\n mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')\n\n # Else cutoff the remaining parts\n else:\n mfcc = mfcc[:, :max_len]\n\n # Convert wav to MFCC\n #prediction_data = wav2mfcc('./prediction/nature_sc.wav')\n prediction_data = mfcc\n\n # Reshape to 4 dimensions\n prediction_data = prediction_data.reshape(1, n_mfcc, max_len, channels)\n\n # Run the model on the inputted file\n\n all_predicted = [ model.predict(prediction_data) for model in all_models ]\n\n for labels, predicted, classification in zip( all_labels, all_predicted, classify_dict ) :\n # Output the prediction values for each class\n print ('PREDICTED VALUES')\n labels_indices = range(len(labels))\n max_value = 0\n max_value_index = 0\n for index in labels_indices:\n print(\"\\n\", labels[index], \": \", '%.08f' % predicted[0,index])\n if predicted[0,index] > max_value:\n max_value_index = index\n max_value = predicted[0,index]\n\n # Output the prediction\n if max_value < 0.5:\n print(\"GUESS: Nothing\")\n classification['data'].append( { \"category\" : \"NO\", \"time\" : start_sec } )\n else:\n print('\\n\\nGUESS: ', labels[max_value_index])\n classification['data'].append( { \"category\" : labels[max_value_index], \"time\" : start_sec } )\n\n print(classify_dict)\n return classify_dict\n\n#classify_file( \"instance/upload/hand_saw_27.wav\" )\n\n# driver function\ndef runScript():\n\n print(\"[WORKING] Attempting to run CNN classification calculator - classification_svm.py\")\n # Create dictionary for storing return information\n # Create a counter for files\n finalResult = {}\n fileCount = 0\n\n try:\n # Retrieve File\n for filename in os.listdir('instance/upload/'):\n audiofile = \"instance/upload/\" + filename\n print(audiofile)\n result = classify_file( audiofile )\n\n # Create list to store information\n #result = []\n #print(\"[WORKING] Attempting to run anthrophony classification - classification.py\")\n #result.append( classify_file( 
audiofile, anthro_model(), 'Anthrophony', '#0088FE' ) )\n #print(\"[WORKING] Attempting to run geophony classification - classification.py\")\n #result.append( classify_file(audiofile, bio_model(), 'Biophony', '#00C49F' ) )\n #print(\"[WORKING] Attempting to run biophony classification - classification.py\")\n #result.append( classify_file(audiofile, geo_model(), 'Geophony', '#FFBB28' ) )\n\n # Add result list to finalResult dictionary with filecounter as the key\n finalResult[fileCount] = result\n fileCount += 1\n\n except Exception as e:\n track = traceback.format_exc()\n print(track)\n #except:\n # print('[FAILURE -- Classification 1] File upload unsuccessful, or not file uploaded.')\n\n print(json.dumps(finalResult))\n\n print(\"[SUCCESS] Classification was successful - classification.py\")\n return finalResult","sub_path":"snaw-backend/classification_cnn.py","file_name":"classification_cnn.py","file_ext":"py","file_size_in_byte":5480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"542004325","text":"import os\nimport csv\n\ndef get_marchands(marchands_file):\n \"\"\" recuperer les éléments necessaire contenu dans le fichier initial\"\"\"\n liste_data_marchands= {}\n\n with open(marchands_file, newline='',encoding=\"utf-8\") as csvfile:\n spamreader = csv.reader(csvfile,delimiter=\";\")\n for row in spamreader:\n liste_data_marchands[row[4]]={\n \"nom\":row[0].strip(), \n \"ville\":row[1],\n \"email\":row[4]\n }\n return liste_data_marchands\n\ndef email_in(email,liste_marchands):\n \"\"\" verifier si un email de la boite de reception correspond à un email de la liste des marchands\"\"\"\n return liste_marchands.get(email)\n\ndef creation_dossier(marchand):\n \"\"\"créer un dossier recevant les pièces jointes d'un marchants\"\"\"\n chemin=\"marchands\\{ville}\\{nom_marchand}\".format(ville=marchand[\"ville\"],nom_marchand=marchand[\"nom\"])\n try:\n os.makedirs(chemin)\n except FileExistsError:\n pass\n\n return chemin\n\ndef gestion_redondance(pj_name,chemin,offset=0):\n \"\"\"gérer la redondance du nom des pièces jointes par ajout de surfixe numérique ordonné\"\"\"\n for root, dirs, files in os.walk(chemin, topdown=False):\n if pj_name in files:\n c = files.count(pj_name)\n if c >= 1:\n pj_name_temp = format_pj_name(pj_name,c)\n while pj_name_temp in files:\n c += 1\n pj_name_temp = format_pj_name(pj_name,c)\n pj_name = pj_name_temp\n \n return os.path.join(chemin,pj_name)\n \n return os.path.join(chemin,pj_name)\n\ndef format_pj_name(pj_name,nbr_occurences):\n \"\"\"déterminer le nom surfixé d'une pièce jointe en fonction de son nombre de redondances dans son dossier de reception\"\"\"\n elems = pj_name.split('.')\n basename = elems[:len(elems) - 1][0]\n extension = elems[-1]\n pj_name = \"{}-{}.{}\".format(basename,nbr_occurences+1,extension)\n return 
pj_name","sub_path":"func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"28663382","text":"def encode(msg, rails):\n ret = [str() for _ in range(rails)]\n change = 1\n current = 0\n for c in msg:\n ret[current] += c\n current += change\n if not current or current == rails - 1:\n change *= -1\n return ''.join(ret)\n\n\ndef decode(msg, rails):\n jump = 2 * (rails - 1)\n ret = list('c' for c in msg)\n msgindex = 0\n startindex = [0 for _ in range(jump)]\n for offset in range(jump):\n if offset == 0 or offset == rails - 1:\n skip = 1\n else:\n skip = 2\n\n if msgindex + jump == len(msg):\n msgindex -= 1\n\n if offset >= rails:\n msgindex = startindex[(rails - 1) - (offset - (rails - 1))] + 1\n\n start = offset\n startindex[offset] = msgindex\n\n for x in range(start, len(msg), jump):\n if msgindex >= len(msg):\n break\n ret[x] = msg[msgindex]\n msgindex += skip\n return ''.join(ret)\n","sub_path":"python/rail-fence-cipher/rail_fence_cipher.py","file_name":"rail_fence_cipher.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"268401350","text":"from __future__ import print_function\r\n\r\nmenu = [[\"Nasi Goreng\", 10000], [\"Nasi Kuning\", 13000], [\"Nasi Pecel\", 5000]]\r\n\r\ndef menu_makanan():\r\n print(\"#-------------------------------------#\")\r\n print(\"# No | Nama Makanan | Harga #\")\r\n print(\"#-------------------------------------#\")\r\n i = 1\r\n for item in menu:\r\n print(\"# \" + str(i) + \" | \" + item[0] + \" | \" + str(item[1]) + \" #\")\r\n i += 1\r\n print(\"#-------------------------------------#\")\r\n print(\"# 0 | Checkout #\")\r\n print(\"#-------------------------------------#\")\r\n return\r\n\r\n\r\nmenu_makanan()\r\njawaban = \"\"\r\ncatatan_pilihan = []\r\nwhile jawaban != \"0\":\r\n jawaban = input(\"Pilih menu makanan \") \r\n menu_makanan()\r\n if jawaban != \"0\":\r\n catatan_pilihan.append(int(jawaban)-1)\r\n\r\nno = 1\r\nprint(\"Pesanan anda : \")\r\ntotal = 0\r\nfor pilihan in catatan_pilihan:\r\n print(\"Makanan ke-\" + str(no) + \" = \" + menu[pilihan][0] + \" Harga + \" + str(menu[pilihan][1]))\r\n no += 1\r\n total = total + menu[pilihan][1]\r\n\r\nprint(\"Total pembayaran \" + str(total))\r\n","sub_path":"coba.py","file_name":"coba.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"91417067","text":"import items\nfrom items import allItems, inventory\n#This is where the magic happens, the while loop is always true but in the case that the player dies or wins, using the ded() function they can leave the game, breaking the while loop.\n\ndef Play_Zork():\n print(\"---------------------------------------------------------\")\n print(\"Welcome to Zork - The Unofficial Python Version.\")\n print(\"Enter 'show inventory' at any time to see what items you have\")\n room = 4\n while True:\n user_input= get_user_input(room)\n \n if room == 1:\n room = room1(user_input)\n elif room == 2:\n room = room2(user_input)\n elif room == 3:\n room = room3(user_input)\n elif room == 4:\n room = room4(user_input)\n elif room == 5:\n room = room5(user_input)\n elif room == 6:\n room = room6(user_input)\n elif room == 7:\n room = room7(user_input)\n elif room == 8:\n room = room8(user_input)\n elif room == 9:\n room = room9(user_input)\n elif room == 10:\n room = room10(user_input)\n elif room == 11:\n room = room11(user_input)\n elif room == 12:\n room = room12(user_input)\n\n\n#This function helps clean up some redundant code and deals with resetting/ quitting the game when they win or die \ndef ded():\n ded_input=input('Do you want to continue? 
Y/N ')\n if ded_input.lower() == ('n'):\n exit()\n elif ded_input.lower() == ('y'):\n Play_Zork()\n\n#This gives the user output for each function\ndef get_user_input(room):\n print(\"---------------------------------------------------------\")\n if room == 1:\n print(\"You find yourself at the edge of a beautiful lake aside rolling hills.\")\n print(\"A small pier juts out into the lake.\")\n print(\"A fishing rod rests on the pier.\")\n print(\"(You can see a white house in the distance to the south.)\")\n \n elif room == 2:\n print(\"You find yourself behind the house, unkempt ivy creeps up the beautiful old estate.\")\n print(\"Going west towards, the house there is a rickety window, but it's open!\")\n print(\"Going south will take you back to the front of the house.\")\n \n elif room == 3:\n print(\"You find yourself in a dimly lit kitchen with dust covering the floor.\")\n print(\"A lantern rests on the kitchen island.\")\n print(\"The window is to the east.\")\n print(\"A set of stairs go up to another room.\")\n \n elif room == 4:\n print(\"You are standing in an open field west of a white house, with a boarded front door.\")\n print(\"You can see a small lake to the north.\")\n print(\"Going east around the house there is a gate that leads to the back of the home.\")\n print(\"(A secret path leads southwest into the forest.)\")\n print(\"There is a Small Mailbox.\")\n \n elif room == 5:\n print(\"You come up the stairs to a dusty old attic.\")\n print(\"There is a chest in the corner.\")\n print(\"The staircase descends behind you.\")\n \n elif room == 6:\n print(\"Venturing into the next chamber you find yourself at the entrance to an ancient maze.\")\n print(\"You can see the cave entrance back to the North.\")\n print(\"The Maze looms to your south\")\n \n elif room == 7:\n print(\"The stone walls grow close around you, there is a fork in the path before you but all ways are dark.\")\n print(\"A foul stench reaches you, something lives in here.\")\n 
print(\"You can stil flee, the entrance is to the north!\")\n \n \n elif room == 8:\n print(\"This is a forest, with trees in all directions. To the east, there appears to be sunlight.\")\n print(\"You see a mountain range poking above the trees to the west.\")\n \n elif room == 9:\n print(\"You are in a clearing, with a forest surrounding you on all sides. A path leads south.\")\n print(\"There is an open grating, descending into darkness.\")\n\n elif room == 10:\n print(\"You are in a tiny cave with a dark, forbidding staircase leading down.\")\n print(\"There is a skeleton of a human male in one corner.\")\n print(\"To the south there is something in the depths of the cave, but you cant make it out.\")\n\n elif room == 11:\n print(\"You have entered a mud-floored room.\")\n print(\"Lying half buried in the mud is an old trunk, bulging with jewels.\")\n elif room == 12:\n print(\"You come to the feet of the Nandic Mountans! Their towering, snowcapped peakes loom to the west,.\")\n print(\"A powerful river rages to the north and the evil lands lie to the south.\")\n\n user_input= input(\"What do you do? 
\").lower()\n\n if user_input == 'show inventory':\n print(inventory)\n\n \n return user_input\n\n\n##################################################################\n########These are the room functions##############################\n##################################################################\ndef room1(user_input):\n room = 1\n print(\"---------------------------------------------------------\")\n if user_input == (\"go south\"):\n room = 4\n elif user_input == (\"swim\"):\n print(\"You don't have a change of clothes and you aren't here on vacation.\")\n elif user_input == (\"fish\"):\n print(\"---------------------------------------------------------\")\n print(\"You spend some time fishing but nothing seems to bite.\")\n elif user_input == (\"kick the bucket\"):\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n ded()\n else:\n print(\"---------------------------------------------------------\")\n return room\n\ndef room2(user_input):\n room = 2\n print(\"---------------------------------------------------------\")\n if user_input == (\"go south\"):\n room = 4\n elif user_input == (\"go west\"):\n room = 3\n print(\"Opening a rickety window you climb into the house\")\n elif user_input == (\"kick the bucket\"):\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n\n ded()\n else:\n print(\"---------------------------------------------------------\")\n return room\n\ndef room3(user_input):\n room = 3\n print(\"---------------------------------------------------------\")\n if user_input == (\"go up staircase\"):\n print(\"You climb the creaking stairs...\")\n room = 5\n elif user_input == (\"take lantern\"):\n items.pick_up('lantern',room)\n elif user_input == (\"go east\"):\n print(\"You hop out the window\")\n room = 2\n elif user_input == (\"kick the bucket\"):\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n\n ded()\n 
else:\n print(\"---------------------------------------------------------\")\n return room \ndef room4(user_input):\n room = 4\n print(\"---------------------------------------------------------\")\n if user_input == (\"take mailbox\"):\n print(\"It is securely anchored.\")\n elif user_input == (\"open mailbox\"):\n print(\"Opening the small mailbox reveals a leaflet.\")\n elif user_input == (\"go east\"):\n room = 2\n elif user_input == (\"go north\"):\n room = 1\n elif user_input == (\"fuckem\"):\n room = 11\n print(\"RULE #1 FUCKEM\")\n print(\"THE GODS OF THE UNIVERSE RECOGNIZE YOUR WISEDOM! YOU'RE TRANSPORTED TO THE FINAL ROOM\")\n elif user_input == (\"open door\"):\n print(\"The door cannot be opened.\")\n elif user_input == (\"take boards\"):\n print(\"The boards are securely fastened.\")\n elif user_input == (\"look at house\"):\n print(\"The house is a beautiful colonial house which is painted white. It is clear that the owners must have been extremely wealthy.\")\n elif user_input == (\"go southwest\"):\n room = 8\n elif user_input == (\"read leaflet\"):\n print(\"Welcome to the Unofficial Python Version of Zork. 
Your mission is to find a Jade Statue.\")\n elif user_input == (\"kick the bucket\"):\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n\n ded()\n else:\n print(\"---------------------------------------------------------\")\n return room\n\ndef room5(user_input):\n room = 5\n print(\"---------------------------------------------------------\")\n if user_input == (\"descend staircase\"):\n room =3\n elif user_input == (\"open chest\"):\n print(\"A sword lies inside the chest!\")\n elif user_input == (\"take sword\"):\n items.pick_up(\"sword\",room)\n elif user_input == (\"kick the bucket\"):\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n ded()\n else:\n print(\"---------------------------------------------------------\")\n return room\n\ndef room6(user_input):\n room = 6\n print(\"---------------------------------------------------------\")\n if user_input == (\"go north\"):\n room = 10\n print(\"You head back to the first chamber in the cave.\")\n elif user_input == (\"go south\"):\n room = 7\n print(\"You enter the maze and are greeted by the stench of death.\")\n elif user_input == (\"kick the bucket\"):\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n ded()\n else:\n print(\"---------------------------------------------------------\")\n return room\n\ndef room7(user_input):\n room = 7\n print(\"---------------------------------------------------------\")\n if user_input == (\"go north\"):\n \n print(\"You escaped the maze, something was off about that place.\")\n room = 6\n else:\n if items.useItem('lantern',inventory):\n print('You see the grue! 
Wanting to avoid trouble you leave the maze.')\n room = 6\n elif items.useItem('gold ring', inventory):\n print('The little gold ring you were fingering in your pocket slips onto your finger!\\n You see the grue in the distance, wanting to avoid conflict you leave the maze.')\n room = 6\n else:\n \n print(\"You were eaten by the grue! You should've left the maze while you had the chance.\")\n print(\"---------------------------------------------------------\")\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n ded()\n return room\ndef room8(user_input):\n room = 8\n print(\"---------------------------------------------------------\")\n if user_input == (\"go west\"):\n \n print(\"You begin a long trek towards the mountains...\")\n room = 12\n elif user_input == (\"go north\"):\n print(\"---------------------------------------------------------\")\n print(\"The forest becomes impenetrable to the North.\")\n elif user_input == (\"go northeast\"):\n room = 4\n elif user_input == (\"go south\"):\n print(\"---------------------------------------------------------\")\n print(\"Storm-tossed trees block your way.\")\n elif user_input == (\"go east\"):\n room = 9\n elif user_input == (\"kick the bucket\"):\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n ded()\n else:\n print(\"---------------------------------------------------------\")\n return room\n\ndef room9(user_input):\n room = 9\n print(\"---------------------------------------------------------\")\n if user_input == (\"go south\"):\n print(\"You see a large ogre and turn around.\")\n elif user_input == (\"descend grating\"):\n room = 10\n elif user_input == (\"kick the bucket\"):\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n\n ded()\n else:\n print(\"---------------------------------------------------------\")\n return room \ndef room10(user_input):\n room = 10\n 
print(\"---------------------------------------------------------\")\n if user_input == (\"descend staircase\"):\n room = 11\n elif user_input == (\"go south\"):\n print(\"You walk deeper into the cave...\")\n room = 6\n elif user_input == (\"take skeleton\"):\n print(\"Why would you do that? Are you some sort of sicko?\")\n elif user_input == (\"smash skeleton\"):\n print(\"Sick person. Have some respect mate.\")\n elif user_input == (\"light up room\"):\n print(\"You would need a torch or lamp to do that.\")\n elif user_input == (\"break skeleton\"):\n print(\"I have two questions: Why and With What?\")\n elif user_input == (\"go down staircase\"):\n room = 11\n elif user_input == (\"scale staircase\"):\n room = 11\n elif user_input == (\"kick the bucket\"):\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n ded()\n else:\n print(\"---------------------------------------------------------\")\n return room \ndef room11(user_input):\n room = 11\n print(\"---------------------------------------------------------\")\n if user_input == (\"open trunk\"):\n print(\"You have found the Jade Statue and have completed your quest!\")\n ded()\n \n elif user_input == (\"kick the bucket\"):\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n ded()\n else:\n print(\"---------------------------------------------------------\")\ndef room12(user_input):\n room = 12\n print(\"---------------------------------------------------------\")\n if user_input == (\"go east\"):\n print(\"---------------------------------------------------------\")\n print (\"You return to the forest.\")\n room = 8\n elif user_input == (\"go west\"):\n print(\"The mountains are impassiblble this time of year.\")\n elif user_input == (\"go north\"):\n print(\"A roaring river blocks your way.\")\n elif user_input == (\"go south\"):\n if items.useItem('sword', inventory):\n print('You used your sword to kill the uruks! 
You found a small gold ring, but more uruks are coming!\\n\\n You run back to the forest')\n items.pick_up('gold ring',12)\n room = 8\n else:\n print(\"You wander into the evil lands and are killed by uruks\")\n ded()\n elif user_input == (\"kick the bucket\"):\n print(\"You die.\")\n print(\"---------------------------------------------------------\")\n ded()\n else:\n print(\"---------------------------------------------------------\")\n return room \n \n \n","sub_path":"src/zork.py","file_name":"zork.py","file_ext":"py","file_size_in_byte":16251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"467482538","text":"class Tree: # create a class for the tree\n def __init__(self,data): # constructor\n self.left=None\n self.right=None\n self.val=data\n\nclass Minmax:\n def solution(self,root,h,curheight): # to find the solution using minmax algo\n if not root:\n return\n self.solution(root.left,h,curheight+1)\n self.solution(root.right,h,curheight+1)\n if(curheight 0:\n\n volume = schedule * water_lpd/ 1000 # m3/h\n massflow = volume * Pwater/3600 # in kg/s\n\n else:\n volume = 0\n massflow = 0\n\n return massflow, volume\n\n# final hot water demand calculation\n\ndef calc_Qwwf(Lcww_dis, Lsww_dis, Lvww_c, Lvww_dis, T_ext, Ta, Tww_re, Tww_sup_0, Y, gv, schedules, bpr):\n # Refactored from CalcThermalLoads\n \"\"\"\n This function calculates the distribution heat loss and final energy consumption of domestic hot water.\n Final energy consumption of dhw includes dhw demand, sensible heat loss in hot water storage tank, and heat loss in the distribution network.\n :param Lcww_dis: Length of dhw usage circulation pipeline in m.\n :param Lsww_dis: Length of dhw usage distribution pipeline in m.\n :param Lvww_c: Length of dhw heating circulation pipeline in m.\n :param Lvww_dis: Length of dhw heating distribution pipeline in m.\n :param T_ext: Ambient temperature in C.\n :param Ta: Room temperature in C.\n :param Tww_re: Domestic hot water tank return temperature in C, this temperature is the ground water temperature, set according to norm.\n :param Tww_sup_0: Domestic hot water supply set point temperature in C.\n :param vw: specific fresh water consumption in m3/hr*m2.\n :param vww: specific domestic hot water consumption in m3/hr*m2.\n :param Y: linear trasmissivity coefficients of piping in W/m*K\n :return:\n\n \"\"\"\n\n # calc end-use demand\n Vww = schedules['Vww'] * bpr.internal_loads['Vww_lpd'] * bpr.rc_model['Af'] / 1000 # m3/h\n Vw = schedules['Vw'] * bpr.internal_loads['Vw_lpd'] * bpr.rc_model['Af'] / 1000 # m3/h\n mww = Vww * gv.Pwater /3600 # 
kg/s\n\n Qww = np.vectorize(calc_Qww)(mww, Tww_sup_0, Tww_re, gv.Cpw)\n Qww_0 = Qww.max()\n\n # distribution and circulation losses\n Vol_ls = Lsww_dis * ((gv.D / 1000)/2) ** 2 * pi # m3, volume inside distribution pipe\n Qww_dis_ls_r = np.vectorize(calc_Qww_dis_ls_r)(Ta, Qww, Vww, Lsww_dis, Lcww_dis, Y[1], Qww_0, Vol_ls, gv.Flowtap,\n Tww_sup_0, gv.Cpw, gv.Pwater, gv)\n Qww_dis_ls_nr = np.vectorize(calc_Qww_dis_ls_nr)(Ta, Qww, Vww, Lvww_dis, Lvww_c, Y[0], Qww_0, Vol_ls, gv.Flowtap,\n Tww_sup_0, gv.Cpw, gv.Pwater, gv.Bf, T_ext, gv)\n # storage losses\n Qww_st_ls, Tww_st, Qwwf = calc_Qww_st_ls(T_ext, Ta, Qww, Vww, Qww_dis_ls_r, Qww_dis_ls_nr, gv)\n\n # final demand\n Qwwf_0 = Qwwf.max()\n mcpwwf = Qwwf / abs(Tww_st - Tww_re)\n\n return mww, Qww, Qww_st_ls, Qwwf, Qwwf_0, Tww_st, Vww, Vw, mcpwwf\n\n# end-use hot water demand calculation\n\ndef calc_Qww(mww, Tww_sup_0, Tww_re, Cpw):\n \"\"\"\n Calculates the DHW demand according to the supply temperature and flow rate.\n :param mww: required DHW flow rate in [kg/s]\n :param Tww_sup_0: Domestic hot water supply set point temperature.\n :param Tww_re: Domestic hot water tank return temperature in C, this temperature is the ground water temperature, set according to norm.\n :param Cpw: heat capacity of water [kJ/kgK]\n :return Qww: Heat demand for DHW in [Wh]\n \"\"\"\n mcpww = mww * Cpw * 1000 # W/K\n Qww = mcpww * (Tww_sup_0 - Tww_re) # heating for dhw in Wh\n return Qww\n\n# losess hot water demand calculation\n\ndef calc_Qww_dis_ls_r(Tair, Qww, Vww, Lsww_dis, Lcww_dis, Y, Qww_0, V, Flowtap, twws, Cpw, Pwater, gv):\n\n if Qww > 0:\n # Calculate tamb in basement according to EN\n tamb = Tair\n\n # Circulation circuit losses\n circ_ls = (twws - tamb) * Y * Lcww_dis * (Qww / Qww_0)\n\n # Distribtution circuit losses\n dis_ls = calc_disls(tamb, Qww, Flowtap, V, twws, Lsww_dis, Pwater, Cpw, Y, gv)\n\n Qww_d_ls_r = circ_ls + dis_ls\n else:\n Qww_d_ls_r = 0\n return Qww_d_ls_r\n\n\ndef calc_Qww_dis_ls_nr(tair, Qww, Vww, 
Lvww_dis, Lvww_c, Y, Qww_0, V, Flowtap, twws, Cpw, Pwater, Bf, te, gv):\n if Qww > 0:\n # Calculate tamb in basement according to EN\n tamb = tair - Bf * (tair - te)\n\n # Circulation losses\n d_circ_ls = (twws - tamb) * Y * (Lvww_c) * (Qww / Qww_0)\n\n # Distribution losses\n d_dis_ls = calc_disls(tamb, Qww, Flowtap, V, twws, Lvww_dis, Pwater, Cpw, Y, gv)\n Qww_d_ls_nr = d_dis_ls + d_circ_ls\n else:\n Qww_d_ls_nr = 0\n return Qww_d_ls_nr\n\n\ndef calc_disls(tamb, Vww, Flowtap, V, twws, Lsww_dis, p, cpw, Y, gv):\n \"\"\"\n Calculates distribution losses in Wh according to Fonseca & Schlueter (2015) Eq. 24, which is in turn based\n on Annex A of ISO EN 15316 with pipe mass m_p,dis = 0.\n \n :param tamb: Room temperature in C\n :param Vww: volumetric flow rate of hot water demand (in m3)\n :param Flowtap: volumetric flow rate of tapping in m3 ( == 12 L/min for 3 min)\n :param V: volume of water accumulated in the distribution network in m3\n :param twws: Domestic hot water supply set point temperature in C\n :param Lsww_dis: length of circulation/distribution pipeline in m\n :param p: water density kg/m3\n :param cpw: heat capacity of water in kJ/kgK\n :param Y: linear trasmissivity coefficient of piping in distribution network in W/m*K\n :param gv: globalvar.py\n\n :return losses: recoverable/non-recoverable losses due to distribution of DHW\n \"\"\"\n if Vww > 0:\n TR = 3600 / ((Vww / 1000) / Flowtap) # Thermal response of insulated piping\n if TR > 3600: TR = 3600\n try:\n exponential = scipy.exp(-(Y * Lsww_dis * TR) / (p * cpw * V * 1000))\n except ZeroDivisionError:\n gv.log('twws: %(twws).2f, tamb: %(tamb).2f, p: %(p).2f, cpw: %(cpw).2f, V: %(V).2f',\n twws=twws, tamb=tamb, p=p, cpw=cpw, V=V)\n raise ZeroDivisionError\n\n tamb = tamb + (twws - tamb) * exponential\n\n losses = (twws - tamb) * V * cpw * p / 3.6 # in Wh\n else:\n losses = 0\n return losses\n\n\ndef calc_Qww_st_ls(T_ext, Ta, Qww, Vww, Qww_dis_ls_r, Qww_dis_ls_nr, gv):\n \"\"\"\n Calculates the heat 
flows within a fully mixed water storage tank for 8760 time-steps.\n :param T_ext: external temperature in [C]\n :param Ta: room temperature in [C]\n :param Qww: hourly DHW demand in [Wh]\n :param Vww: hourly DHW demand in [m3]\n :param Qww_dis_ls_r: recoverable loss in distribution in [Wh]\n :param Qww_dis_ls_nr: non-recoverable loss in distribution in [Wh]\n :param gv: globalvar.py\n\n :type T_ext: ndarray\n :type Ta: ndarray\n :type Qww: ndarray\n :type Vww: ndarray\n :type Qww_dis_ls_r: ndarray\n :type Qww_dis_ls_nr: ndarray\n :return:\n \"\"\"\n Qwwf = np.zeros(8760)\n Qww_st_ls = np.zeros(8760)\n Tww_st = np.zeros(8760)\n Qd = np.zeros(8760)\n # calculate DHW tank size [in m3] based on the peak DHW demand in the building\n Vww_0 = Vww.max()\n Tww_st_0 = gv.Tww_setpoint\n\n if Vww_0 > 0:\n for k in range(8760):\n Qww_st_ls[k], Qd[k], Qwwf[k] = sto_m.calc_Qww_ls_st(Ta[k], T_ext[k], Tww_st_0, Vww_0, Qww[k], Qww_dis_ls_r[k],\n Qww_dis_ls_nr[k], gv)\n Tww_st[k] = sto_m.solve_ode_storage(Tww_st_0, Qww_st_ls[k], Qd[k], Qwwf[k], Vww_0, gv)\n Tww_st_0 = Tww_st[k]\n else:\n for k in range(8760):\n Tww_st[k] = np.nan\n return Qww_st_ls, Tww_st, Qwwf","sub_path":"cea/demand/hotwater_loads.py","file_name":"hotwater_loads.py","file_ext":"py","file_size_in_byte":8146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"370749626","text":"import torch\n\nemb_dim = 10\nhidden_dim = 20\nnum_layers = 1\nlstm = torch.nn.LSTM(emb_dim, hidden_dim, num_layers)\n\nseq_len = 5 # 序列长度,即一句话又几个词\nbatch_size = 3 # 批处理大小\ninput_data = torch.randn(seq_len, batch_size, emb_dim) # [5, 3, 10]\n\n# 初始化的隐藏元和记忆元,通常它们的维度是一样的\n# 2个LSTM层,batch_size=3,隐藏元维度20\nh0 = torch.randn(num_layers, batch_size, hidden_dim) # [2, 3, 20]\nc0 = torch.randn(num_layers, batch_size, hidden_dim) # [2, 3, 20]\n\noutput, (hn, cn) = lstm(input_data, (h0, c0))\n\nprint(output.size(), hn.size(), cn.size())\n# torch.Size([5, 3, 20]) torch.Size([2, 3, 20]) torch.Size([2, 3, 20])\n","sub_path":"LSTMBeginner.py","file_name":"LSTMBeginner.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"526724536","text":"# -*- coding: utf-8 -*-\n\nimport flask\n\nimport anitya\nimport anitya.lib.plugins\nimport anitya.lib.model\n\nfrom anitya.app import APP, SESSION\n\n\n@APP.route('/api/projects/')\n@APP.route('/api/projects')\ndef api_projects():\n\n pattern = flask.request.args.get('pattern', None)\n\n if pattern and '*' not in pattern:\n pattern += '*'\n\n if pattern:\n project_objs = anitya.lib.model.Project.search(\n SESSION, pattern=pattern)\n else:\n project_objs = anitya.lib.model.Project.all(SESSION)\n\n projects = [project.__json__() for project in project_objs]\n\n output = {\n 'total': len(projects),\n 'projects': projects\n }\n\n jsonout = flask.jsonify(output)\n jsonout.status_code = 200\n return jsonout\n\n\n@APP.route('/api/projects/wiki/')\n@APP.route('/api/projects/wiki')\ndef api_projects_list():\n\n project_objs = anitya.lib.model.Project.all(SESSION)\n\n projects = []\n for project in project_objs:\n for package in project.packages:\n tmp = '* {name} {regex} {version_url}'.format(\n name=package.package_name,\n regex=project.regex,\n version_url=project.version_url)\n projects.append(tmp)\n\n return flask.Response(\n \"\\n\".join(projects),\n content_type=\"text/plain;charset=UTF-8\"\n )\n\n\n@APP.route('/api/projects/names/')\n@APP.route('/api/projects/names')\ndef api_projects_names():\n\n pattern = flask.request.args.get('pattern', None)\n\n if pattern and '*' not in pattern:\n pattern += '*'\n\n if pattern:\n project_objs = anitya.lib.model.Project.search(\n SESSION, pattern=pattern)\n else:\n project_objs = anitya.lib.model.Project.all(SESSION)\n\n projects = [project.name for project in project_objs]\n\n output = {\n 'total': len(projects),\n 'projects': projects\n }\n\n jsonout = flask.jsonify(output)\n jsonout.status_code = 200\n return jsonout\n\n\n@APP.route('/api/version/', methods=['POST'])\ndef api_get_version():\n\n project_id = flask.request.form.get('id', None)\n httpcode = 200\n\n if not project_id:\n errors = 
[]\n if not project_id:\n errors.append('No project id specified')\n output = {'output': 'notok', 'error': errors}\n httpcode = 400\n else:\n\n project = anitya.lib.model.Project.get(\n SESSION, project_id=project_id)\n\n if not project:\n output = {'output': 'notok', 'error': 'No such project'}\n httpcode = 404\n else:\n try:\n anitya.check_release(project, SESSION)\n output = project.__json__()\n except anitya.lib.exceptions.AnityaException as err:\n output = {'output': 'notok', 'error': [str(err)]}\n httpcode = 400\n\n jsonout = flask.jsonify(output)\n jsonout.status_code = httpcode\n return jsonout\n\n\n@APP.route('/api/project//', methods=['GET'])\n@APP.route('/api/project/', methods=['GET'])\ndef api_get_project(project_id):\n\n project = anitya.lib.model.Project.get(SESSION, project_id=project_id)\n\n if not project:\n output = {'output': 'notok', 'error': 'no such project'}\n httpcode = 404\n else:\n output = project.__json__()\n httpcode = 200\n\n jsonout = flask.jsonify(output)\n jsonout.status_code = httpcode\n return jsonout\n\n\n@APP.route('/api/project///', methods=['GET'])\n@APP.route('/api/project//', methods=['GET'])\ndef api_get_project_distro(distro, package_name):\n\n package = anitya.lib.model.Packages.by_package_name_distro(\n SESSION, package_name, distro)\n\n if not package:\n output = {\n 'output': 'notok',\n 'error': 'No package \"%s\" found in distro \"%s\"' % (\n package_name, distro)}\n httpcode = 404\n\n else:\n project = anitya.lib.model.Project.get(\n SESSION, project_id=package.project.id)\n\n output = project.__json__()\n httpcode = 200\n\n jsonout = flask.jsonify(output)\n jsonout.status_code = httpcode\n return jsonout\n","sub_path":"anitya/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"66631220","text":"#!/usr/bin/python2\nimport Tkinter\nimport tkFileDialog\nimport tkFont\nimport random\nfrom datetime import datetime\n\nimport doctest\n\nclass vector2: # class to hold x and y coordinates\n\tdef __init__(self, x, y):\n\t\t\"\"\"This function initiates the x and y\n\t\t>>> test = vector2(5,5)\n\t\t>>> type(test)\n\t\t\n\t\t>>> test.x + test.y\n\t\t10\n\t\t\"\"\"\n\t\tself.x = x\n\t\tself.y = y\n\n\n#functio that creates empty field Nx9\ndef clearfield(n):\n\t\"\"\"This function returns a list of n lists with 9 zeros\n\t>>> clearfield(1)\n\t[[0, 0, 0, 0, 0, 0, 0, 0, 0]]\n\t>>> type(clearfield(2))\n\t\n\t>>> len(clearfield(3))\n\t3\n\t\"\"\"\n\tgrid = []\n\tfor i in range(n):\n\t\tgrid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])\n\treturn grid\n\n\ndef generatetop():\n\t\"\"\"This function randomly generates the \n\t middle and bottom left side \n\t\t>>> type(generatetop())\n\t\t\n\t \"\"\"\n # create grid with 3 rows and 9 cols\n\tgrid = clearfield(3)\n\tfor i in range(3): # rows\n\t\tfor j in range(9): # column\n\t\t\t# reset the number list each iteration\n\t\t\tnumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n\t\t\t# remove numbers that already exist in the column\n\t\t\tfor k in range(3):\n\t\t\t\tif grid[k][j] in numbers:\n\t\t\t\t\tnumbers.remove(grid[k][j])\n\n\t\t\t# remove numbers that already exist in the row\n\t\t\tfor l in range(9):\n\t\t\t\tif grid[i][l] in numbers:\n\t\t\t\t\tnumbers.remove(grid[i][l])\n\n\t\t\t# remove numbers that already exist in the 3x3 grid\n\t\t\tfor m in range(3): # rows\n\t\t\t\tif j < 3:\n\t\t\t\t\tfor n in range(3): # cols\n\t\t\t\t\t\tif grid[m][n] in numbers:\n\t\t\t\t\t\t\tif grid[m][n] in numbers:\n\t\t\t\t\t\t\t\tnumbers.remove(grid[m][n])\n\t\t\t\telif j > 2 and j < 6:\n\t\t\t\t\tfor n in range(3, 6):\n\t\t\t\t\t\tif grid[m][n] in numbers:\n\t\t\t\t\t\t\tif grid[m][n] in numbers:\n\t\t\t\t\t\t\t\tnumbers.remove(grid[m][n])\n\t\t\t\telif j > 5:\n\t\t\t\t\tfor n in range(6, 9):\n\t\t\t\t\t\tif grid[m][n] in 
numbers:\n\t\t\t\t\t\t\tif grid[m][n] in numbers:\n\t\t\t\t\t\t\t\tnumbers.remove(grid[m][n])\n\n\t\t\t# randomly select a number from available numbers\n\t\t\tif len(numbers) > 0:\n\t\t\t\tindex = random.choice(numbers)\n\t\t\t\tgrid[i][j] = index\n\t\t\telse:\n\t\t\t\t# if we ran out of numbers but still have empty field, return false\n\t\t\t\treturn []\n\t# if successful return the grid\n\treturn grid\n\n\ndef generateleft(grid):\n\t\"\"\"This function randomly generates the \n\t middle and bottom left side \n\t >>> type(generateleft(clearfield(9)))\n\t \n\t \"\"\"\n\ttemp = clearfield(6)\n\tgrid += temp\n\t\t\n\tfor i in range(3,9): # rows\n\t\tfor j in range(3): # column\n\t\t\t# reset the number list each iteration\n\t\t\tnumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\t\t\t\n\t\t\t# remove numbers that already exist in the column\n\t\t\tfor k in range(9):\n\t\t\t\tif grid[k][j] in numbers:\n\t\t\t\t\tnumbers.remove(grid[k][j])\n\n\t\t\t# remove numbers that already exist in the row\n\t\t\tfor l in range(3):\n\t\t\t\tif grid[i][l] in numbers:\n\t\t\t\t\tnumbers.remove(grid[i][l])\n\t\t\t\t\t\n\t\t\t# remove numbers that already exist in the 3x3 grid\t\n\t\t\tfor m in range(3):\n\t\t\t\tif i > 2 and i < 6:\n\t\t\t\t\tfor n in range(3, 6):\n\t\t\t\t\t\tif grid[n][m] in numbers:\n\t\t\t\t\t\t\tnumbers.remove(grid[n][m])\n\n\t\t\t\telif i > 5:\n\t\t\t\t\tfor n in range(6, 9):\n\t\t\t\t\t\tif grid[n][m] in numbers:\n\t\t\t\t\t\t\tnumbers.remove(grid[n][m])\n\t\t\t\t\t\t\t\n\t\t\t# randomly select a number from available numbers\n\t\t\tif len(numbers) > 0:\n\t\t\t\tindex = random.choice(numbers)\n\t\t\t\tgrid[i][j] = index\n\t\t\telse:\n\t\t\t\t# if we ran out of numbers but still have empty field, return false\n\t\t\t\t#return grid\n\t\t\t\treturn []\n\t# if successful return the grid\n\treturn grid\n\n\n\n\n\ndef generatebottom(grid):\n\t\"\"\"This function randomly generates the bottom side\n\t\t>>> type(generatebottom(clearfield(9)))\n\t\t\n\t\t\"\"\"\n\t\t\n\tfor i in 
range(6,9): # rows\n\t\tfor j in range(3,9): # column\n\t\t\t# reset the number list each iteration\n\t\t\tnumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\t\t\t\n\t\t\t# remove numbers that already exist in the column\n\t\t\tfor k in range(9):\n\t\t\t\tif grid[k][j] in numbers:\n\t\t\t\t\tnumbers.remove(grid[k][j])\n\n\t\t\t# remove numbers that already exist in the row\n\t\t\tfor l in range(9):\n\t\t\t\tif grid[i][l] in numbers:\n\t\t\t\t\tnumbers.remove(grid[i][l])\n\t\t\t\t\t\n\t\t\t# remove numbers that already exist in the 3x3 grid\t\n\t\t\tfor m in range(6,9):\n\t\t\t\tif j > 2 and j < 6:\n\t\t\t\t\tfor n in range(3, 6):\n\t\t\t\t\t\tif grid[m][n] in numbers:\n\t\t\t\t\t\t\tnumbers.remove(grid[m][n])\n\n\t\t\t\telif j > 5:\n\t\t\t\t\tfor n in range(6, 9):\n\t\t\t\t\t\tif grid[m][n] in numbers:\n\t\t\t\t\t\t\tnumbers.remove(grid[m][n])\n\t\t\t\t\t\t\t\n\t\t\t# randomly select a number from available numbers\n\t\t\tif len(numbers) > 0:\n\t\t\t\tindex = random.choice(numbers)\n\t\t\t\tgrid[i][j] = index\n\t\t\telse:\n\t\t\t\t# if we ran out of numbers but still have empty field, return false\n\t\t\t\t#return grid\n\t\t\t\treturn []\n\t# if successful return the grid\n\treturn grid\n\n\n\n\n\ndef generateright(grid):\n\t\"\"\"This function randomly generates the right side \n\t\t>>> type(generateright(clearfield(9)))\n\t\t\n\t\t\"\"\"\n\tfor i in range(3,6): # rows\n\t\tfor j in range(3,9): # column\n\t\t\t# reset the number list each iteration\n\t\t\tnumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\t\t\t\n\t\t\t# remove numbers that already exist in the column\n\t\t\tfor k in range(9):\n\t\t\t\tif grid[k][j] in numbers:\n\t\t\t\t\tnumbers.remove(grid[k][j])\n\n\t\t\t# remove numbers that already exist in the row\n\t\t\tfor l in range(9):\n\t\t\t\tif grid[i][l] in numbers:\n\t\t\t\t\tnumbers.remove(grid[i][l])\n\t\t\t\t\t\n\t\t\t# remove numbers that already exist in the 3x3 grid\t\n\t\t\tfor m in range(3,6):\n\t\t\t\tif j > 2 and j < 6:\n\t\t\t\t\tfor n in range(3, 
6):\n\t\t\t\t\t\tif grid[m][n] in numbers:\n\t\t\t\t\t\t\tnumbers.remove(grid[m][n])\n\n\t\t\t\telif j > 5:\n\t\t\t\t\tfor n in range(6, 9):\n\t\t\t\t\t\tif grid[m][n] in numbers:\n\t\t\t\t\t\t\tnumbers.remove(grid[m][n])\n\t\t\t\t\t\t\t\n\t\t\t# randomly select a number from available numbers\n\t\t\tif len(numbers) > 0:\n\t\t\t\tindex = random.choice(numbers)\n\t\t\t\tgrid[i][j] = index\n\t\t\telse:\n\t\t\t\t# if we ran out of numbers but still have empty field, return false\n\t\t\t\treturn []\n\t# if successful return the grid\n\treturn grid\n\n\n\n\n\n\n\n\ndef genGrid(win):\n\t\"\"\"This function creates a canvas\n\t and draws lines to create a grid \n\t >>> win = Tkinter.Tk()\n\t >>> type(genGrid(win))\n\t \n\t >>> win.destroy()\n\t \"\"\"\n\t# recreate the canvas\n\tcanvas = Tkinter.Canvas(win, bg=\"white\", height=500, width=500)\n\n\t# vertical data\n\tvStart = vector2(25, 25)\n\tvEnd = vector2(25, 475)\n\n\t# horizontal data\n\thStart = vector2(25, 25)\n\thEnd = vector2(475, 25)\n\n\t# grid creation\n\tfor i in range(10):\n\t\tif i == 3 or i == 6:\n\t\t\tcanvas.create_line(vStart.x, vStart.y, vEnd.x, vEnd.y, fill=\"red\")\n\t\t\tcanvas.create_line(hStart.x, hStart.y, hEnd.x, hEnd.y, fill=\"red\")\n\t\telse:\n\t\t\tcanvas.create_line(vStart.x, vStart.y, vEnd.x, vEnd.y)\n\t\t\tcanvas.create_line(hStart.x, hStart.y, hEnd.x, hEnd.y)\n\t\tvStart.x += 50\n\t\tvEnd.x += 50\n\t\thStart.y += 50\n\t\thEnd.y += 50\n\n\tcanvas.place(relx=0, rely=0)\n\treturn canvas\n\n\n\ndef genPuzzle(): # function that creates the puzzle\n\t\"\"\"This function calls other functions to create the puzzle\n\t\t>>> type(genPuzzle())\n\t\t\n\t\t>>> len(genPuzzle())\n\t\t9\n\t\t\"\"\"\n\t# initiate every field to zero\n\tfinish = False\n\tstageOne = True\n\tstageTwo = False\n\tstageThree = False\n\n\twhile finish == False:\n\t\twhile stageOne == True:\n\t\t\ttop = []\n\t\t\twhile len(top) == 0:\n\t\t\t\ttop = generatetop()\n\n\n\t\t\tleft = []\n\t\t\tcounter = 0\n\t\t\twhile len(left) == 
0:\n\t\t\t\tleft = generateleft(top)\n\t\t\t\tcounter += 1\n\t\t\t\tif len(left) > 0:\n\t\t\t\t\tstageOne = False\n\t\t\t\t\tstageTwo = True\n\t\t\t\tif counter > 200: # if the function failes 200 times to find the left field break, and start over\n\t\t\t\t\tbreak\n\n\n\t\tbottom = []\n\t\tcounter = 0\n\t\twhile stageTwo == True:\n\t\t\tbottom = generatebottom(left)\n\t\t\tcounter += 1\n\t\t\tif len(bottom) > 0:\n\t\t\t\tstageTwo = False\n\t\t\t\tstageThree = True\n\t\t\t\t#finish = True\n\t\t\tif counter > 200: # if the function failes 200 times to find the left field break, and start over\n\t\t\t\tstageTwo = False\n\t\t\t\tstageOne = True\n\t\t\t\tbreak\n\n\n\t\tright = []\n\t\tcounter = 0\n\t\twhile stageThree == True:\n\t\t\tright = generateright(bottom)\n\t\t\tcounter += 1\n\t\t\tif len(right) > 0:\n\t\t\t\tstageThree = False\n\t\t\t\tfinish = True\n\t\t\tif counter > 200: # if the function failes 200 times to find the left field break, and start over\n\t\t\t\tstageThree = False\n\t\t\t\tstageOne = True\n\t\t\t\tbreak\n\treturn right\n\n\n\n\n\ndef removenumbers(puzzle):\n\tfor i in range(len(puzzle)):\n\t\tfor j in range(12):\n\t\t\tnum = range(9)\n\t\t\tk = random.choice(num)\n\t\t\tpuzzle[i][k] = 0\n\t\t\tdel num[k]\n\treturn puzzle\n\n\n\n\n\n\ndef drawPuzzle(puzzle, canvas):\n\t\"\"\"This function draws the numbers on te grid\n\t>>> win = Tkinter.Tk()\n\t>>> type(drawPuzzle([],genGrid(win)))\n\t\n\t\n\t>>> drawPuzzle([],genGrid(window))\n\t\n\t>>> win.destroy() # Here we do not expect anything\n\t\n\t\n\t \"\"\"\n\t# create the font size 20\n\tm_font = tkFont.Font(size=20)\n\tm_font.config(weight='bold')\n\t\n\t# draw the numbers on the grid\n\trowPos = 50\n\tfor i in range(len(puzzle)):\n\t\tcolPos = 50\n\t\tfor j in range(len(puzzle[i])):\n\t\t\tif puzzle[i][j] != 0:\n\t\t\t\t# draw the number to canvas\n\t\t\t\tcanvas.create_text(colPos, rowPos, text=puzzle[i][j], font=m_font)\n # move to next column\n\t\t\tcolPos += 50\n\t\t# move to next 
row\n\t\trowPos += 50\n\n\n\ndef create(win):\n\t\"\"\"This function is called when the Gererate button is pressed\n\t>>> create(window)\n\t\n\t\"\"\"\n\tglobal puzzleField\n\tglobal canvas\n\t# generate a puzzle\n\tpuzzleField = genPuzzle()\n\tfinalPuzzle = removenumbers(puzzleField)\n\t# generate a canvas with the grid\n\tcanvas = genGrid(win)\n\t# draw the puzzle\n\tdrawPuzzle(finalPuzzle, canvas)\n\treturn finalPuzzle\n\n\ndef save(puzzle): \n\t\"\"\" This function saves the puzzle to txt file\n\t>>> save(genPuzzle())\n\t\n\tNothing is expected as a return\n\t\"\"\"\n\tftype = [(\"Image\", \"*.ps\"), (\"Text files\", \"*.txt\")]\n\tfname = \"sudoku\"+str(datetime.now().strftime(' %Y-%m-%d %H:%M:%S'))\n\tfilename = tkFileDialog.asksaveasfilename(filetypes=ftype, initialfile=fname)\n\n\tif filename.endswith(\".txt\"):\n\t\ttext_file = open(filename, \"w\")\n\t\tfor num in range(len(puzzle)):\n\t\t\ttext_file.write(\" %s \" % puzzle[num] + \"\\n\")\n\t\ttext_file.close()\n\n\telif filename.endswith(\".ps\"):\n\t\tcanvas.postscript(file=fname, colormode='color')\n\n\n\n\n# window creating\nwindow = Tkinter.Tk()\nwindow.title(\"Sudoku Creator\")\nwindow.geometry(\"500x550\")\nwindow.resizable(False, False)\n\n# create whole puzzle\npuzzleField = []\ncanvas = Tkinter.Canvas(window, bg=\"white\", height=500, width=500)\ncreate(window)\n\n\n# create buttons\nbutton_generate = Tkinter.Button(window, text=\"Generate\", command=lambda: create(window), bg=\"white\")\nbutton_save = Tkinter.Button(window, text=\"Save\", command=lambda: save(puzzleField), bg=\"white\")\n\n\n# place everything on the screen\nbutton_generate.place(relx=0.2, rely=0.93)\nbutton_save.place(relx=0.6, rely=0.93)\nwindow.mainloop()\n","sub_path":"Sudoku_Creator.py","file_name":"Sudoku_Creator.py","file_ext":"py","file_size_in_byte":10380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"407122455","text":"#!/usr/bin/python\n#-- Content-Encoding: UTF-8 --\n\"\"\"\nHTTP Service demo for Pelix / iPOPO : the servlet bundle\n\n:author: Thomas Calmant\n:license: GPLv3\n\"\"\"\n\n# ------------------------------------------------------------------------------\n\n# iPOPO decorators\nfrom pelix.ipopo.decorators import ComponentFactory, Provides, Validate, \\\n Invalidate, Property, Instantiate, Requires\n\n# ------------------------------------------------------------------------------\n\n@ComponentFactory(name=\"HelloWorldFactory\")\n@Instantiate(name=\"HelloWorld\")\n@Property(\"path\", \"servlet.path\", \"/hello\")\n@Provides(specifications=\"demo.HttpServlet\")\n# The component requires an extra information service, if available\n@Requires(\"extra_info\", \"demo.ExtraInfoService\", optional=True)\nclass HelloWorldServlet(object):\n \"\"\"\n A simple hello world servlet for HTTP service\n \"\"\"\n def __init__(self):\n \"\"\"\n Sets up members\n \"\"\"\n self.extra_info = None\n\n\n def do_GET(self, handler):\n \"\"\"\n HTTP Servlet API : handle a GET request\n \n :param handler: The request handler associated to the call\n \"\"\"\n # Prepare extra info, if available\n if self.extra_info is None:\n info = \"No extra information available.
\"\n\n else:\n info = \"\"\"\n- Time : {time}
\n- Platform : {platform}
\n- PID : {pid}
\"\"\".format(time=self.extra_info.get_time(),\n platform=self.extra_info.get_platform(),\n pid=self.extra_info.get_pid())\n\n # Generate the page\n page = \"\"\"\n\nHello, World !\n\n\nHello, World !
\nExtra information
\n{extra_info}\n\n\"\"\".format(extra_info=info)\n\n # Send the page\n handler.send_data(page)\n","sub_path":"website-ipopo/source/_static/httpsvc/hello_servlet.py","file_name":"hello_servlet.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"41330783","text":"# coding:utf-8\nfrom pyftpdlib.authorizers import DummyAuthorizer\nfrom pyftpdlib.handlers import FTPHandler\nfrom pyftpdlib.servers import FTPServer\n\nauthorizer = DummyAuthorizer()\nauthorizer.add_user(\"qian\",\"123456\",\"F:/\",perm=\"elr\")\nauthorizer.add_anonymous(\"F:/\")\n\nhandler =FTPHandler\nhandler.authorizer = authorizer\n\nserver = FTPServer((\"127.0.0.1\",21),handler)\nserver.serve_forever()","sub_path":"crawling/cnblogSpider/cnblogSpider/ftp-test/ftp001.py","file_name":"ftp001.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"372702167","text":"import os\n\n\n# We need a function that creates the paths to the Nifti images\n# of every subject for every input channel. So some assumptions must be\n# made here.\n\n# Approach 1:\n# root_file_folder->(train,test)->subject_folders\n# we need key words to find the images in the subject_folder\n# the output will be a dictionary with the inputs, outputs and ROI(optional):\n# \"input X\": list of paths to channel (1-X) input\n# \"output Y\": list of paths to channel (1-Y) output\n# \"ROI Z\": list of paths to (1-Z) ROI\n\ndef create_experiment_from_folder(path_subjects, key_words_in, key_words_out, key_words_roi):\n # This function create a dictionary used to feed a tfrecord creator used in a Deep Learning\n # experiment in Tensorflow.\n # path_subjects -> root folder that contains the subjects\n # key_words_* -> A list of keywords used to find the files used as input, output or roi in the\n # neural network\n\n NN_IN = 'network_in'\n NN_OUT = 'network_out'\n NN_ROI = 'network_roi'\n subjects = os.listdir(path_subjects)\n\n channels = dict()\n channels[NN_IN] = len(key_words_in)\n channels[NN_OUT] = len(key_words_out)\n\n\n record_dict = dict()\n\n record_dict[NN_IN] = {}\n record_dict[NN_OUT] = {}\n\n key_words = dict()\n\n key_words[NN_IN] = key_words_in\n key_words[NN_OUT] = key_words_out\n if len(key_words_roi):\n record_dict[NN_ROI] = {}\n channels[NN_ROI] = len(key_words_roi)\n key_words[NN_ROI] = key_words_roi\n\n for subject in subjects:\n\n files = os.listdir(os.path.join(path_subjects, subject))\n\n # lists to track the keys, every key must point one file and only one.\n track_keys = dict()\n track_keys[NN_IN] = key_words_in.copy()\n track_keys[NN_OUT] = key_words_out.copy()\n track_keys[NN_ROI] = key_words_roi.copy()\n\n for file in files:\n\n for network_side in record_dict.keys():\n\n for key_word, channel in zip(key_words[network_side], range(1, channels[network_side] + 1)):\n\n if key_word in file:\n\n if key_word in 
track_keys[network_side]:\n track_keys[network_side].remove(key_word)\n else:\n raise ValueError(\n 'ERROR: Key word \"%s\" was used in various files, each key must point an unique file.' % key_word)\n\n record_dict[network_side].setdefault(channel, {}).update({subject:\n os.path.join(path_subjects,\n subject,\n file)})\n\n error_keys = []\n for network_side in record_dict.keys():\n error_keys += track_keys[network_side]\n\n if error_keys:\n for key in error_keys:\n print('ERROR: Key word \"%s\" was NOT used.' % key)\n\n raise ValueError(' ERROR: Unused keywords.')\n\n\n return record_dict\n\n\ndef create_experiment(path_experiment, key_words_in, key_words_out, key_words_roi=[]):\n # This function create a dictionary used to feed a tfrecord creator used in a Deep Learning\n # experiment in Tensorflow.\n # path_subjects -> root folder that contains the experiments.\n # key_words_* -> A list of keywords used to find the files used as input, output or roi in the\n # nerual network.\n\n folders_experiment = os.listdir(path_experiment)\n\n experiment_dict = dict()\n\n for folder_experiment in folders_experiment:\n folder_dictionary = create_experiment_from_folder(os.path.join(path_experiment, folder_experiment),\n key_words_in,\n key_words_out, key_words_roi)\n\n experiment_dict[folder_experiment] = folder_dictionary\n\n return experiment_dict\n","sub_path":"gen_paths_script.py","file_name":"gen_paths_script.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"477137114","text":"\n# coding: utf-8\n\nimport pandas as pd\nimport numpy as np\nfrom fbprophet import Prophet\ndf_plot = pd.read_csv('new-old_LTV.csv',usecols=[0,1,2,3]).iloc[:-4]\ndf_plot['subsite_id'] = np.nan_to_num(df_plot['subsite_id']).astype(int)\ndf_plot.tail()\n#df_plot.plot(figsize=(25,5),title='SVOD purchase on Samsung',kind='area',color = ['r','b']);\n#stacked=True,subplots=True,\n\ntable = pd.pivot_table(df_plot,aggfunc=np.sum,columns=['subsite_id'], values=[\"NewUsers\",\"ReturnUsers\"], index=['date'])\ntable.loc[:, ([\"NewUsers\",\"ReturnUsers\"], 10)]\n\n#table[\"ReturnUsers\"]\n#table.dtypes.index\n#.loc[:, (slice(None), ('11', '353'))]\n#.iloc[:,1]\n\ndf = pd.read_csv('svod_tot.csv').sort_values(by='ds',ascending=True,kind='mergesort')\ndf['y'] = np.log(df['old_users'].replace(0, np.NaN))\n#df['y'] = np.log(df['new_users'])\ndf.pop('new_users')\ndf.pop('old_users')\n\n#df['cap'] = 8.5\nbasic_holy = pd.DataFrame({\n 'holiday': 'basic',\n 'ds': pd.to_datetime(['2017-01-14'\n #, '2016-01-14', '2015-01-14',\n]),\n 'lower_window': -1,\n 'upper_window': 1,\n})\nshort_holy=pd.DataFrame({\n 'holiday': 'short',\n 'ds': pd.to_datetime(['2017-05-09','2016-05-09', '2015-05-09',\n '2017-03-08', '2016-03-08', '2015-03-08',\n '2017-02-23', '2016-02-23', '2015-02-23',\n '2017-01-01','2016-01-01', '2015-01-01']),\n 'lower_window': 0,\n 'upper_window': 2,\n})\nholy = pd.concat((basic_holy, short_holy))\ndf\n\ndf_plot.corr(method= 'pearson')\n\n#df['cap'] = 0\nm = Prophet(holidays=basic_holy).fit(df)\n'''holidays=holy\n#changepoints=['2016-12-20'],\n seasonality_prior_scale=0.98\n ,holidays_prior_scale=6.9\n ,changepoint_prior_scale=0.0076\n #,interval_width=0.98\n #,mcmc_samples=100'''\n \n'''\nlg\nholidays=holy,\n #changepoints=['2016-12-20'],\n seasonality_prior_scale=0.98\n ,holidays_prior_scale=6.9\n ,changepoint_prior_scale=0.0076\n #,interval_width=0.98\n 
#,mcmc_samples=100\n********************************************************************\nparameters_samsung\n#changepoints=['2016-12-20']\n seasonality_prior_scale=0.1\n ,holidays_prior_scale=1,\n changepoint_prior_scale=0.055\n #,interval_width=0.9''' \n\n'''future = m.make_future_dataframe(periods=365)\nfuture['cap'] = 0\nfcst = m.predict(future)\nm.plot(fcst);'''\nfuture = m.make_future_dataframe(periods=365,freq='D')\nfuture.tail()\n\nforecast = m.predict(future)\nm.plot(forecast);\n\nfrom math import exp\nforecast = m.predict(future)\nyy=forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]\nyy['yhat'] = np.exp(yy['yhat'])\nyy['yhat_lower'] = np.exp(yy['yhat_lower'])\nyy['yhat_upper'] = np.exp(yy['yhat_upper'])\nyy.tail(367)\n\nm.plot_components(forecast);\n\nyy.plot(figsize=(15,6));\n\ndf2 = pd.read_csv('svod_tot.csv').sort_values(by='ds',ascending=True,kind='mergesort')\ndf2['y'] = np.log(df2['new_users'].replace(0, np.NaN))\n#df['y'] = np.log(df['new_users'])\ndf2.pop('new_users')\ndf2.pop('old_users')\n\n#df['cap'] = 8.5\nbasic_holy = pd.DataFrame({\n 'holiday': 'basic',\n 'ds': pd.to_datetime(['2017-01-14'\n #, '2016-01-14', '2015-01-14',\n]),\n 'lower_window': -1,\n 'upper_window': 1,\n})\nshort_holy=pd.DataFrame({\n 'holiday': 'short',\n 'ds': pd.to_datetime(['2017-05-09','2016-05-09', '2015-05-09',\n '2017-03-08', '2016-03-08', '2015-03-08',\n '2017-02-23', '2016-02-23', '2015-02-23',\n '2017-01-01','2016-01-01', '2015-01-01']),\n 'lower_window': 0,\n 'upper_window': 2,\n})\nholy = pd.concat((basic_holy, short_holy))\nmm = Prophet(holidays=basic_holy).fit(df2)\nfuture1 = mm.make_future_dataframe(periods=365,freq='D')\nforecast1 = mm.predict(future1)\nmm.plot(forecast1);\n\nyyy=forecast1[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]\nyyy['yhat'] = np.exp(yyy['yhat'])\nyyy['yhat_lower'] = np.exp(yyy['yhat_lower'])\nyyy['yhat_upper'] = np.exp(yyy['yhat_upper'])\nyyy.tail(367)\n\n\nadd=yy.join(yyy, lsuffix='_old',rsuffix='_new', 
how='outer')\nadd\n\nadd.plot(figsize=(15,6));\n\nmm.plot_components(forecast1);\n","sub_path":"TS_autopredict_prophet.py","file_name":"TS_autopredict_prophet.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"156338040","text":"from os import system\nimport requests\nimport time\nimport subprocess\n\ndef login():\n url = 'http://192.168.0.1/LoginCheck'\n headers = {\n 'Host': '192.168.0.1',\n 'Connection': 'keep-alive',\n 'Content-Length': '34',\n 'Cache-Control': 'max-age=0',\n 'Origin': 'http://192.168.0.1',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Referer': 'http://192.168.0.1/login.asp',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Cookie': 'language=en'\n }\n data = {\n 'Username':'admin',\n 'Password':'YWRtaW4='\n }\n\n r = requests.post(url, data=data, headers=headers)\n print(r.headers)\n print(r.status_code,'\\n')\n #print(r.text)\n\ndef advanced():\n url = 'http://192.168.0.1/advance.asp'\n headers={\n 'Host': '192.168.0.1',\n 'Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Referer': 'http://192.168.0.1/index.asp',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Cookie': 'language=en; ecos_pw=YWRtaW4=tgb:language=en'\n }\n r = requests.get(url,headers=headers)\n print(r.headers)\n print(r.status_code,'\\n')\n\ndef ipt_account():\n url = 'http://192.168.0.1/sys_iptAccount.asp'\n headers = {\n 'Host': '192.168.0.1',\n 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2787.0 Safari/537.36',\n 'Accept': 
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Referer': 'http://192.168.0.1/advance.asp',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Cookie': 'language=en; ecos_pw=YWRtaW4=tgb:language=en'\n }\n r = requests.get(url,headers=headers)\n print(r.headers)\n print(r.status_code)\n\ndef ajax_calls(url,headers,payload,stats):\n r = requests.post(url,headers=headers,data=payload)\n call_response = r.text\n data = call_response.split('\\n')\n for node in data:\n machine = node.split(';')\n if(machine[0] != ''):\n stats[machine[0]] = [machine[1],machine[2]]\n\ndef Fire_Watch(IP_ADDRESSES):\n\t# SYSTEM CONFIGS\n ##############################################################################################################################\n url = 'http://192.168.0.1/goform/updateIptAccount'\n headers = {\n 'Host': '192.168.0.1',\n 'Connection': 'keep-alive',\n 'Content-Length': '9',\n 'Origin': 'http://192.168.0.1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',\n 'Content-Type': 'text/plain;charset=UTF-8',\n 'Accept': '*/*',\n 'Referer': 'http://192.168.0.1/sys_iptAccount.asp',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Cookie': 'ecos_pw=YWRtaW4=cvb:language=en'\n }\n payload = {'something':''}\n # {'IP':['UPLOAD_RATE','DOWNLOAD_RATE']}\n stats = dict()\n # ENSURES NO INSTANCE OF EVENT IS ALREADY RUNNING\n # OTHERWISE THE EVENT WILL BE SHOWN RUNNING BY DEFAULT IN POLL() CALL\n # HOWEVER SUBPROCESS COULDN'T EXTEND CONTROL ON IT AS IT WASN'T STARTED BY IT\n system(\"TASKKILL /F /IM qbittorrent.exe\")\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n event = subprocess.Popen(r\"C:\\\\Program Files (x86)\\\\qBittorrent\\\\qbittorrent.exe\",startupinfo=startupinfo)\n print()\n while(event.poll() == 1):\n \tprint(\"WAITING TO 
START\")\n print(\"PROCESS STARTED\")\n down_time=0\n up_time=time.time()\n last_usage=0\n ##############################################################################################################################\n\n while(True):\n \tajax_calls(url,headers,payload,stats)\n \tfor IP in IP_ADDRESSES:\n \t\tnode = stats.get(IP)\n \t\tif(node is not None):\n \t\t\t# STOPS EVENT ONLY IF USAGE INCREASES MENTIONED LIMITS\n \t\t\t# AND THE PROCESS WAS ACTIVE\n \t\t\tif((float(node[1]) > 4.0)):\n \t\t\t\tif((event.poll() is None)):\n\t \t\t\t\tevent = stop_event(event)\n\t \t\t\t\tdown_time = time.time()\n\t \t\t\t\tprint(\"TOTAL UP TIME:\",round(down_time-up_time,2),\"sec\")\n\t \t\t\t# UPDATES LAST USAGE EVEN WHEN PROCESS IS CLOSED\n\t \t\t\t# THIS ENSURES THAT PROCESS IS STARTED ONLY IF THERE WAS NO USAGE\n\t \t\t\t# MAKING IT INDEPENDANT OF DOWN_TIME. THIS IS NECESSARY BECUASE DOWN_TIME IS\n\t \t\t\t# UPDATED ONLY WHEN PROCESS IS KILLED. THUS FIREWATCH MAKES INTELLIGENT DECISIONS\n\t \t\t\t# BASED ON LAST USAGE OF USERS. 
AND NOT LAST DOWN TIME OF APPLICATION\n\t \t\t\tlast_usage = time.time()\n\t \t\t\tbreak\n\n \tif((event.poll() == 1)):\n \t\t# AS MENTIONED DECISION BASED ON LAST USAGE AND NOT DOWN TIME\n \t\tif((time.time()-last_usage > 16.0)):\n\t \t\tevent = start_event(event)\n\t \t\tup_time = time.time()\n\t \t\tprint(\"TOTAL DOWN TIME:\",round(up_time-down_time,2),\"sec\")\n\t \telse:\n\t \t\tprint(\"THIS WAIT SHOULD REACH 16 sec, LAST USAGE:\",round(time.time()-last_usage,2),\"sec AGO\")\n \telse:\n \t\tprint(\"\\t--> 142 (RUNNING)\")\n \ttime.sleep(2)\n\n\ndef start_event(event):\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n event = subprocess.Popen(r\"C:\\\\Program Files (x86)\\\\qBittorrent\\\\qbittorrent.exe\",startupinfo=startupinfo)\n while(event.poll() == 1):\n \tprint(\"WAITING TO START\")\n print(\"PROCESS STARTED\")\n return event\n\ndef stop_event(event):\n event.terminate()\n while(event.poll() is None):\n \tprint(\"WAITING TO KILL\")\n print(\"PROCESS KILLED\")\n return event\n\nlogin()\n#advanced()\n#ipt_account()\n\n\nIP_ADDRESSES = ['192.168.0.100','192.168.0.104']\nFire_Watch(IP_ADDRESSES)\n","sub_path":"Fire_Watch.py","file_name":"Fire_Watch.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"196428233","text":"from __future__ import absolute_import\n\nfrom rq.decorators import job\n\nfrom .base import Senderable, value_in\n\nATTEMPTS = [0, 1, 2, 3]\n\n\ndef sender(wrapped, dkwargs, hash_value=None, *args, **kwargs):\n\n senderobj = Senderable(\n wrapped, dkwargs, hash_value, ATTEMPTS, *args, **kwargs\n )\n\n connection = value_in(\"url\", dkwargs, kwargs)\n connection = value_in(\"connection\", dkwargs, kwargs)\n\n @job('default', connection=connection)\n def worker(senderobj):\n\n return senderobj.send()\n\n return worker(senderobj)\n","sub_path":"webhooks/senders/async_redis.py","file_name":"async_redis.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"43967112","text":"'''\nプロトコル\n\nply_read(model_path)\nplyファイルのpathを与えてプロパティ・頂点リスト・面リストをnumpy形式で返す.\n与えるplyファイルは,頂点を含む必要がある.列数は任意なため,色情報やその他値の有無は問わない.\n面リストが含まれない場合は面リストを空で返す.\n\nply_read2(model_path)\n上記の簡易版で,頂点リスト(numpy形式)のみを返す.\nmain_rawにて使われている.\n\nwrite_csv(property_data, vertice_dataset, faces_dataset)\nply_readで得たプロパティ,頂点リスト,面リストをcsv形式で保存.\n保存したファイルは予測結果の図示などで新しいplyモデルを作成するときに用いられる.\n\nface_cell_scale(vertice_dataset, faces_dataset)\n*属性:検証\n面をなす三角形の1辺の長さのリストを返す.\n\nmain_mesh(model_index, path_now)\nmodel_indexのディレクトリへの移動,ply_read, write_csv, face_cell_scaleの行程を実行する.\nメッシュモデルを対象\n\nmain_raw(model_index, path_now)\n点群モデルを対象として,ply_read2, write_csv\n★(注意)全頂点に対して法線が計算されている必要がある.\n\ncolor_designer(terrain_variable, cmap=\"viridis\")\nterrain_variableのベクトル1列を与え,cmapに合わせて色を合わせる(viridisしか対応していない)\n'''\n\n#-------\n#read the ply file and split the data into property, vertice, and faces\ndef ply_read(model_path):\n import numpy as np\n test_data=open(model_path, \"r\")\n property_data=[]\n vertex_data=[]\n face_data=[]\n vertex_number=0\n\n flag=\"p\"\n #read line by line\n for i,line in enumerate(test_data):\n #read faces\n if(flag==\"f\"):\n face_data.append(line.split(\" \"))\n\n #read vertice\n if(flag==\"v\"):\n vertex_data.append(line.split(\" \"))\n if(i==len(property_data)+vertex_number-1):\n flag=\"f\"\n \n #read header(property)\n if(flag==\"p\"):\n property_data.append(line)\n if(line==\"end_header\\n\"):\n flag=\"v\"\n #read the number of vertice data from header(property)\n if('element vertex' in line):\n vertex_number=int(str(line)[str(line).rfind(\"vertex \")+7:str(line).find(\"\\\\n\")])\n #read the number of faces data from header(property)\n if('element face' in line):\n face_number=int(str(line)[str(line).rfind(\"face \")+5:str(line).find(\"\\\\n\")]) \n #convert lists into np.array()\n vertex_data=np.array(vertex_data)\n face_data=np.array(face_data)\n return(property_data, vertex_data, face_data)\n\n# save the csv file of the vertice 
coordinates(vertice_dataset.csv), vertice normal(vertice_normal.csv), vertice color(vertice_color.csv)\n# \n#座標,法線,色データのcsvデータ化(保存)\ndef write_csv(property_data, vertice_dataset, faces_dataset):\n import numpy as np\n import pandas as pd\n vertice_dataset2=pd.DataFrame(vertice_dataset)\n vertice_dataset2.iloc[:,0:3].to_csv('ply_parts/vertice_dataset.csv',header=False, index=False)\n vertice_dataset2.iloc[:,3:6].to_csv('ply_parts/vertice_normal.csv',header=False, index=False)\n vertice_dataset2.iloc[:,6:10].to_csv('ply_parts/vertice_color.csv',header=False, index=False)\n\n faces_dataset2=pd.DataFrame(faces_dataset)\n faces_dataset2.iloc[:,1:4].to_csv('ply_parts/faces_dataset.csv',header=False, index=False)\n faces_dataset2.iloc[:,4:7].to_csv('ply_parts/faces_color.csv',header=False, index=False)\n \n #write the property data(Need for reconstructing the new ply file after processing)\n pd.DataFrame(property_data).to_csv(\"ply_parts/property_data.csv\", header=False, index=False) \n \n #write the minimum property data: the numbers of vertices and faces\n vertex_number=len(vertice_dataset2)\n faces_number=len(faces_dataset2)\n path_property='ply_parts/property.d'\n with open(path_property, mode='w') as f:\n f.write(str(vertex_number)+\",\")\n f.write(str(faces_number))\n \n#モデル解像度の出力\n#faceの辺の長さを知りたい.どれくらいのスケールのモデルを用いたかを知るために.\ndef analysis_mesh(model_index, path_now): \n import numpy as np\n model_path=path_now+\"\\model\"+model_index+\"\\model\"+model_index+\".ply\"#メッシュモデル\n property_data, vertice_dataset, faces_dataset=ply_read(model_path)\n v1=vertice_dataset[faces_dataset[:,1].astype(int),0:3].astype(float)\n v2=vertice_dataset[faces_dataset[:,2].astype(int),0:3].astype(float)\n v3=vertice_dataset[faces_dataset[:,3].astype(int),0:3].astype(float)\n d=np.hstack((np.linalg.norm(v1-v2,axis=1),np.linalg.norm(v2-v3,axis=1),np.linalg.norm(v3-v1,axis=1)))\n data=[np.mean(d), np.max(d), np.min(d), np.std(d), len(d)] #Maximum, Mean, Minimum, Std. 
of edges of faces, and the number of faces\n S=np.sqrt(np.abs(np.sum((v1-v3)**2,axis=1)*np.sum((v2-v3)**2,axis=1)-np.sum((v1-v3)*(v2-v3),axis=1))**2)/2\n return(d, data)\n#Category: Main Processing\n\ndef main_mesh(model_path, path_now):\n import os\n import numpy as np\n #メッシュモデル\n os.chdir(path_now+\"\\model\"+model_index)\n #ファイルの読み込み\n property_data, vertice_dataset, faces_dataset=ply_read(model_path)\n #ファイルの書き出し\n write_csv(property_data, vertice_dataset, faces_dataset)\n #faceの大きさなどの出力\n #d=face_cell_scale(vertice_dataset, faces_dataset)\n #print(\"average face side length: \", np.mean(d))\n\n#Category: Main Processing\ndef main_raw(model_index, path_now):\n import os\n import pandas as pd\n model_path=path_now+\"\\model\"+model_index+\"\\model\"+model_index+\"_raw.ply\"#解像度0.05のモデル\n os.chdir(path_now+\"\\model\"+model_index)\n p, vertice_dataset, f=ply_read(model_path)\n vertice_dataset2=pd.DataFrame(vertice_dataset)\n vertice_dataset2.iloc[:,0:3].to_csv('ply_parts/vertice_dataset_raw.csv',header=False, index=False)\n vertice_dataset2.iloc[:,3:6].to_csv('ply_parts/vertice_normal_raw.csv',header=False, index=False)\n\n\n#----------------------------------------------------------------------#\n#与えられたパラメータの赤~青(にかけて上昇する)の色データに変換して返します.\n\n#外れ値がありうる場合\ndef color_designer5(terrain_variable, cmap=\"viridis\", mode=\"1\"):\n import numpy as np\n import matplotlib.pyplot as plt\n cm = plt.get_cmap(cmap)\n if(mode==\"1\"):\n minval, maxval = np.percentile(terrain_variable, [1,99])\n elif(mode==\"2\"):\n minval, maxval = 1,256\n elif(mode==\"3\"):\n minval, maxval= np.min(terrain_variable), np.max(terrain_variable)\n \n para_height=(terrain_variable-minval)/(maxval-minval)*256\n para_height=np.clip(para_height, 0.0, 256.0)\n #para_height=para_height.astype(int)\n para_height=np.array(para_height, dtype=\"int\")\n color_vec=cm(para_height)[:,0:3]*255\n color_vec=color_vec.astype(int)\n \n #色凡例準備用\n midval = minval+0.5*(maxval-minval)\n color_data=[0, minval, 256, 
maxval, 128, midval]\n return(color_vec, color_data)\n\n#---------------------------------------------------------#\n#Category:outply-tool\ndef color_designer4(terrain_variable, cmap=\"viridis\"):\n import numpy as np\n import matplotlib.pyplot as plt\n cm = plt.get_cmap(cmap)\n minval, maxval= np.min(terrain_variable), np.max(terrain_variable)\n para_height=(terrain_variable-minval)/(maxval-minval)*256\n para_height=np.clip(para_height, 0.0, 256.0)\n para_height=para_height.astype(int)\n color_vec=cm(para_height)[:,0:3]*255\n color_vec=color_vec.astype(int)\n return(color_vec)\n\ndef color_to_variables(model_path):\n #色から曲率データを出力する\n import numpy as np\n import matplotlib\n property_data, vertice_dataset, faces_dataset=ply_read(model_path)\n faces_color=np.array(faces_dataset[:,4:7],dtype=\"int\")\n #RGBからHSVに変換\n color_vec=matplotlib.colors.rgb_to_hsv(faces_color/255)\n variable=color_vec[:,0]\n return(variable)\n\ndef array_to_ply(property_data, vertice_data_all, vertice_color, faces_data, faces_color):\n vertice_data2=[]\n for i in range(0,len(vertice_data_all)):\n line1=map(str, vertice_data_all.iloc[i,:])\n #line2=map(str, vertice_norm.iloc[i,:])\n line3=map(str,vertice_color.iloc[i,:])\n vertice_data2.append(' '.join(line1)+\" \"+' '.join(line3)+\"\\n\")\n\n faces_data2=[]\n for i in range(0,len(faces_data)):\n line4=map(str,faces_data.iloc[i,:])\n line5=map(str,faces_color.iloc[i,:])\n faces_data2.append(\"3 \"+' '.join(line4)+\" \"+' '.join(line5)+\" 255\"+\"\\n\")\n plydata=property_data+vertice_data2+faces_data2\n return(plydata)\n\ndef save_ply(model_path, plydata):\n with open(model_path, mode='w') as f:\n f.writelines(plydata)\n#---------------------------------------------------------#\n\n\n\ndef extract_data(faces_dataset, path_params):\n faces_color=np.array(faces_dataset[:,4:7],dtype=\"int\")\n color_focus=np.array([0,0,0])*np.ones((len(faces_color),3))\n bool_vec=np.sum(faces_color-color_focus,axis=1)\n ext=np.where(bool_vec==0)\n 
return(np.array(ter_variables)[ext])\n\n#---------------------------------------------------------#\n#Category:outply-main\ndef main_ply(model_index, dir_name, param_kernel_labels, path_now, file_name=\"mydata\"):\n #os.chdir(path_now+\"\\model\"+model_index)\n import pandas as pd\n import numpy as np\n import os\n new_dir_path_recursive=path_now+\"/model\"+model_index+\"/\"+dir_name+\"/\"\n os.makedirs(new_dir_path_recursive, exist_ok=True)\n \n #パラメータの読み込み\n ter_variables=pd.read_csv(path_now+\"\\model\"+model_index+\"\\\\\"+file_name+model_index+\".csv\", header=None)\n ter_variables=ter_variables.astype(float)\n \n #点の座標,法線,色の読み込み\n vertice_data=pd.read_csv(path_now+\"\\model\"+model_index+r\"\\ply_parts\\vertice_dataset.csv\", header=None)\n vertice_data=vertice_data.round(6)\n vertice_vector=pd.read_csv(path_now+\"\\model\"+model_index+r\"\\ply_parts\\vertice_normal.csv\", header=None)\n vertice_vector=vertice_vector.round(6)\n vertice_color=pd.read_csv(path_now+\"\\model\"+model_index+r\"\\ply_parts\\vertice_color.csv\", header=None)\n vertice_color=vertice_color.astype(int)\n vertice_data_all=pd.concat([vertice_data, vertice_vector], axis=1)\n \n #面のデータ\n faces_data=pd.read_csv(path_now+\"\\model\"+model_index+r\"\\ply_parts\\faces_dataset.csv\", header=None)\n #プロパティ\n p_file=open(path_now+\"\\model\"+model_index+r\"\\ply_parts\\property_data.csv\")\n property_data=[]\n for line in p_file:\n property_data.append(line[1:])\n property_data=property_data[::2]\n \n for (i,param) in enumerate(param_kernel_labels):\n ter_var=ter_variables.iloc[:,i]\n color_data=[]\n try:\n color_vec, color_data = color_designer5(ter_var, cmap=\"viridis\", mode=\"1\")\n color_vec=pd.DataFrame(color_vec)\n except ValueError:\n color_vec=pd.DataFrame(np.zeros_like(ter_var))\n print(param,\"<- this parameter is all-zero or flat\")\n #色の情報をコメントに記録する\n import copy\n property_data2=copy.copy(property_data)\n property_data2.insert(2, \"comment \"+str(color_data)+\"\\n\")\n 
plydata=array_to_ply(property_data2, vertice_data_all, vertice_color, faces_data, faces_color=color_vec)\n #record.append(param)\n save_ply(path_now+\"\\model\"+model_index+\"\\\\\"+dir_name+r\"/\"+param+\"f.ply\", plydata)\n \n#----------------------------------------------------------------------#\n#結果の表示\n#生物の出現データの生成(occurrenceデータ)\ndef make_occurrence_data(species_name, model_codes, path_now, path_dataset):\n import numpy as np\n import pandas as pd\n import os\n \n occurrence=pd.DataFrame([])\n occurrence_monthspecies=pd.DataFrame([])\n mesh_num=0\n \n for (i, model_index) in enumerate(model_codes):\n path_data=path_dataset+\"/\"+model_index\n listsp=[path for path in os.listdir(path_data) if species_name in path]\n\n for path in listsp:\n #print(path)\n #調査月データ\n month_data=path[-8:-4]\n #位置情報読み込み\n property_data, vertice_dataset, faces_dataset=ply_read(path_data+r\"/\"+path)\n #出現インデックスの取得\n try:\n faces_color=np.array(faces_dataset[:,4:7],dtype=\"int\")\n except:\n print(path)\n \n color_focus=np.array([255,255,0])*np.ones((len(faces_color),3))\n bool_vec=np.sum(faces_color-color_focus,axis=1)\n ext=np.where(bool_vec==0)[0]\n\n #モデル番号に応じてメッシュ番号を繰り上げる(maxentで読み込むときに必要な処置)\n ext=ext+mesh_num\n ext=np.array(ext, dtype=\"int\")\n #種名の配列を生成\n species=np.array([species_name]*len(ext),dtype=\"str\")\n species_month=np.array([species_name+\"_\"+month_data]*len(ext),dtype=\"str\")\n #latitude=np.array([model_index[2]]*len(ext), dtype=\"int\")\n #latitude=np.array([i]*len(ext), dtype=\"int\")\n latitude=np.array([1]*len(ext), dtype=\"int\")\n month_data=np.array([str(month_data)]*len(ext))\n #出現データの生成,保存\n occurrence_tmp=pd.DataFrame([species,ext,latitude])\n occurrence_tmp=occurrence_tmp.T\n occurrence_tmp.columns=[\"species\", \"dd long\", \"dd lat\"]\n #speciesに種名,ddlongにメッシュ番号,ddlatにモデル番号を適用する.ddlongは繰り上げする.\n #本当は経緯データではないので注意,MaxEntに適用するために,疑似経緯データを割り振る\n occurrence=pd.concat([occurrence, occurrence_tmp])\n\n 
occurrence_monthspecies_tmp=pd.DataFrame([species_month,ext,latitude, month_data])\n occurrence_monthspecies_tmp=occurrence_monthspecies_tmp.T\n occurrence_monthspecies_tmp.columns=[\"species_month\", \"dd long\", \"dd lat\", \"month\"]\n occurrence_monthspecies=pd.concat([occurrence_monthspecies, occurrence_monthspecies_tmp])\n\n property_data, vertice_dataset, faces_dataset=ply_read(path_data+r\"/model\"+model_index+\".ply\")\n mesh_num+=len(faces_dataset)\n return(occurrence, occurrence_monthspecies)\n\n\n#make_env_dataset2(param_kernel_labels_all, train_model, path_now, mode=\"train\")\ndef make_env_dataset2(param_kernel_labels, model_codes, path_now, path_out, mode, filename):\n import numpy as np\n import pandas as pd \n env_data_main=pd.DataFrame([])\n for model_index in model_codes:\n ter_variables=pd.read_csv(path_now+\"/model\"+model_index+\"/\"+mode+model_index+\".csv\", header=None)\n ter_variables.columns=param_kernel_labels\n ter_variables[\"model_index\"]=[model_index]*len(ter_variables)\n env_data_main=pd.concat([env_data_main,ter_variables])\n #print(len(ter_variables), end=\" \")\n env_data_main.to_csv(path_out+\"/envdata\"+filename+\".csv\", index=False)\n\ndef make_occurrence_dataset2(species, model_file_train, path_now, path_out, path_data, filename):\n import pandas as pd\n import numpy as np \n data_species=pd.DataFrame([])\n data_species_month=pd.DataFrame([])\n for specie in species:\n data_specie, data_specie_month=make_occurrence_data(species_name=specie, model_codes=model_file_train, path_now=path_now, path_dataset=path_data)#, save=False)\n data_species=pd.concat([data_species, data_specie])\n data_species_month=pd.concat([data_species_month, data_specie_month])\n data_species.to_csv(path_out+\"/occurrence\"+filename+\".csv\", index=False, header=False)\n data_species_month.to_csv(path_out+\"/occurrence_month\"+filename+\".csv\",index=False, header=False)\n\ndef param_to_asc(param_name, param_vec, path_now):\n import pandas as pd\n 
import numpy as np\n #xがlongitude,yがlatitude\n asc_header=[\"ncols \"+str(len(param_vec))+\"\\n\",\"nrows 1\\n\", \"xllcorner 1\\n\",\"yllcorner 1\\n\",\"cellsize 1\\n\",\"NODATA_value -9999999\\n\"]\n for j in range(len(param_vec)):\n if(np.isnan(param_vec.iloc[j])):\n asc_header.append(\"-9999999 \")\n else:\n if(param_vec.iloc[j]*1000<-9999999):\n print(\"NODATA_value warning!\")\n asc_header.append(str(int(round(param_vec.iloc[j]*1000,4)))+\" \")\n with open(path_now+param_name+\".asc\", mode='w') as f:\n f.writelines(asc_header)\n\n\n\n#occurrence.csv(presence-vertice)の準備\n#maxent用のasciiファイル準備\ndef data_prepmaxent(species, asc, param_kernel_labels, model_file, path_now, file):\n import pandas as pd\n import numpy as np \n data_species=pd.DataFrame([])\n for specie in species:\n data_specie=make_occurrence_data(species_name=specie, model_codes=model_file)\n data_species=pd.concat([data_species, data_specie])\n #print(data_species)\n data_species.to_csv(path_now+r\"/\"+file+\"_occurrence.csv\", index=False)\n \n env_data_main=make_env_dataset(param_kernel_labels, model_file, path_now)\n if(asc==True):\n for i, param_name in enumerate(param_kernel_labels):\n param_to_asc(param_name, env_data_main.iloc[:,i], path_now)\n\n#ascファイルはallで生成する.\ndef data_prepmaxent2(path_now, env_data_all, species, param, model_file_train, model_file_test=\"Nothing\"):\n import pandas as pd\n import numpy as np \n \n #train:種と緯度,経度のデータを出力する.\n data_species=pd.DataFrame([])\n for specie in species:\n data_specie, data2=make_occurrence_data(species_name=specie, model_codes=model_file_train)\n data_species=pd.concat([data_species, data_specie])\n data_species.to_csv(path_now+r\"/train_occurrence.csv\", index=False)\n \n \n if(model_file_test!=\"Nothing\"):\n #test\n data_species_test=pd.DataFrame([])\n for specie in species:\n data_specie, data2=make_occurrence_data(species_name=specie, model_codes=model_file_test)\n data_species_test=pd.concat([data_species_test, data_specie])\n 
data_species_test.to_csv(path_now+r\"/test_occurrence.csv\", index=False) \n \n #all\n data_species_all=pd.DataFrame([])\n for specie in species:\n data_specie, data2=make_occurrence_data(species_name=specie, model_codes=model_file_train+model_file_test)\n data_species_all=pd.concat([data_species_all, data_specie])\n data_species_all.to_csv(path_now+r\"/all_occurrence.csv\", index=False) \n \n #環境データのascファイル出力, ascファイルはallで出力してよい.\n for i, param_name in enumerate(param):\n param_to_asc(param_name, env_data_all.iloc[:,i], path_now)\n \n#2章用のpreparation\ndef data_prepmain3(species, asc, param_kernel_labels, model_file_train, path_now):\n import pandas as pd\n import numpy as np \n\n data_species=pd.DataFrame([])\n for specie in species:\n data_specie=make_occurrence_data(species_name=specie, model_codes=model_file_train)\n data_species=pd.concat([data_species, data_specie])\n data_species.to_csv(path_now+r\"/train_occurrence.csv\", index=False) \n env_data_train=make_env_dataset2(param_kernels_labels_all, model_file_train, path_now, mode=\"raw_add\")\n env_data_train=make_env_dataset2(param_kernels_labels_all, model_file_train, path_now, mode=\"mesh_add\")\n\n\n#----------------------------------------------------------------------#\n#calc_variables\ndef set_orient(vertice_normal, v3):\n import numpy as np\n normal_mean=np.mean(vertice_normal,axis=0)\n v32=np.array(v3)*np.sign(normal_mean[0])*np.sign(v3[0])\n return(v32)\n\n\ndef calc_pca(i, ext_index, vertice_matrix, vertice_normal, face_normal, normal_correct_mode=\"face\"):\n import numpy as np\n from sklearn.decomposition import PCA\n vertice_matrix2=vertice_matrix[:,2]-np.min(vertice_matrix[:,2])+0.1\n vertice_ext=vertice_matrix[np.nonzero(vertice_matrix2*ext_index)]\n normal_ext=vertice_normal[np.nonzero(vertice_matrix2*ext_index)]\n pca=PCA()\n pca.fit(vertice_ext)\n v1, v2, v3 = pca.components_\n #v3は近似平面の法線を表す(符号は不正確)\n if(normal_correct_mode==\"vertice\"):\n standard_normal=np.mean(normal_ext, axis=0)\n 
standard_index=np.argmax(np.abs(standard_normal))#0近くの値を拾わないようにする\n vector_sign=np.sign(standard_normal[standard_index])*np.sign(v3[standard_index]) #v3と真の法線の方向が一致すれば1, 異なれば-1をかける\n elif(normal_correct_mode==\"face\"): \n standard_index=np.argmax(np.abs(face_normal))\n vector_sign=np.sign(face_normal[standard_index])*np.sign(v3[standard_index])\n \n v32=np.array(v3)*vector_sign#符号の修正\n X0=pca.mean_\n d=-np.dot(v32, X0)\n plane_params=np.array([v1[0], v1[1], v1[2], v2[0], v2[1], v2[2], v32[0], v32[1], v32[2], d])\n return(plane_params) \n\n\ndef calc_distances(v3, d, vertice):\n import numpy as np\n distance=(v3[0]*vertice[:,0]+v3[1]*vertice[:,1]+v3[2]*vertice[:,2]+d)/np.sqrt(v3[0]**2+v3[1]**2+v3[2]**2)\n return(distance)\n\n\ndef calc_metaorientation(vertice_matrix):\n import numpy as np\n from sklearn.decomposition import PCA\n pca=PCA()\n pca.fit(vertice_matrix)\n v1, v2, v3 = pca.components_\n #第一主成分のうち,z方向が正になるもの\n orix,oriy,slope= v1/np.linalg.norm(v1)\n #符号の調整(slope>0になるように)\n orix=orix*np.sign(slope)\n oriy=oriy*np.sign(slope)\n northness=np.arccos(orix/(orix**2+oriy**2)**0.5)/np.pi*180\n westness= np.arccos(oriy/(orix**2+oriy**2)**0.5)/np.pi*180 \n northness=90-northness\n westness=90-westness\n eastness=-westness\n orientation_2pi=180-np.sign(eastness)*(90+northness)\n #record.append([orix,oriy,northness,westness,eastness,orientation_2pi])\n return(orientation_2pi)\n\ndef calc_PI(X_G, ext_index, vertice_matrix):\n import numpy as np\n z_ext=vertice_matrix[np.nonzero(vertice_matrix[:,2]*ext_index),2]\n rugosity=X_G[2]-np.min(z_ext)\n BPI=X_G[2]-np.mean(z_ext)\n return(rugosity, BPI)\n\ndef calc_orientations_and_slope(v3, meta_orientation):\n import numpy as np\n #v3=v3+np.array([0.0001,0.0001,0.0001])#完全に0の場合は計算エラーが生じるので摂動を加えて防ぐ\n orix, oriy, slope = v3/np.linalg.norm(v3)\n if(orix==0 and oriy==0):\n northness=np.nan\n eastness=np.nan\n orientation_2pi=np.nan\n shoreside=np.nan\n else: \n northness=np.arccos(orix/(orix**2+oriy**2)**0.5)/np.pi*180\n 
westness=np.arccos(oriy/(orix**2+oriy**2)**0.5)/np.pi*180\n northness=90-northness\n westness=90-westness\n eastness=-westness\n orientation_2pi=180-np.sign(eastness)*(90+northness)\n #岸向き方位と面方位のなす角度\n shoreside=np.min([np.abs(meta_orientation-orientation_2pi),360-np.abs(meta_orientation-orientation_2pi)])\n #180>-1, 90>0, 0>1\n shoreside=-shoreside+90\n #shoreside=np.cos(shoreside/180*np.pi)\n\n if(v3[0]==0 and v3[1]==0):\n slope=0\n else:\n slope=np.arctan(v3[2]/(v3[0]**2+v3[1]**2)**0.5)\n slope=(np.pi/2)-slope\n slope=slope/np.pi*180\n return(northness, eastness, orientation_2pi, shoreside, slope)\n\ndef calc_ruggedness(ext_index, v3, d, vertice_matrix):\n import numpy as np\n vertice_extract=vertice_matrix[ext_index]\n distance=calc_distances(v3, d, vertice_extract) \n #ruggedness_maxheight=np.max(distance)-np.min(distance)\n ruggedness_stdheight=np.std(distance)\n return(ruggedness_stdheight)\n #return(ruggedness_maxheight, np.max(distance), np.min(distance), ruggedness_stdheight)#, ruggedness_stdheight) \n#最大高さ,最大高さ,最大深さ,標準偏差\n\n\ndef calc_lightmap(bins1, bins2, sp1, sp2):\n import numpy as np\n distance=np.zeros((bins1,bins2))\n bins=[np.linspace(0, np.pi, bins1), np.linspace(-np.pi, np.pi, bins2)]\n\n for i in range(0,bins1,1):\n for j in range(0, bins2,1):\n theta1=i/bins1*2*np.pi\n theta2=j/bins2*np.pi\n a=[np.cos(theta2)*np.cos(theta1), np.cos(theta2)*np.sin(theta1), 1-np.sin(theta2)]\n theta_s1=sp1*np.pi\n theta_s2=sp2*np.pi\n b=[np.cos(theta_s2)*np.cos(theta_s1), np.cos(theta_s2)*np.sin(theta_s1), 1-np.sin(theta_s2)]\n #print(a,b)\n distance[i,j]=np.linalg.norm((np.array(a)-np.array(b)))\n light_map=1-distance/2 #最大値は2になるはず\n return(light_map)\n\ndef main_calc_variables2(model_index, param_kernel_labels, kernel_size, path_now, depth_correction, mode1, mode2=\"notna\", mydata=\"mydata\"):\n import numpy as np\n import pandas as pd\n import os\n vertice_matrix_mesh=np.array(pd.read_csv(path_now+\"model\"+model_index+r\"/ply_parts/vertice_dataset.csv\", 
header=None))\n vertice_matrix=np.array(pd.read_csv(path_now+\"model\"+model_index+r\"/ply_parts/vertice_dataset_raw.csv\", header=None))\n vertice_normal=np.array(pd.read_csv(path_now+\"model\"+model_index+r\"/ply_parts/vertice_normal_raw.csv\", header=None)) \n vertice_normal_mesh=np.array(pd.read_csv(path_now+\"model\"+model_index+r\"/ply_parts/vertice_normal.csv\", header=None))\n #face_normal_mesh=np.array(pd.read_csv(path_now+r\"/model\"+model_index+r\"/faces_normal.csv\", header=None))\n faces_data=np.array(pd.read_csv(path_now+\"model\"+model_index+r\"/ply_parts/faces_dataset.csv\", header=None))\n\n bottom=np.min(vertice_matrix[:,2])\n meta_orientation=calc_metaorientation(vertice_matrix_mesh)\n terrain_variables=np.zeros((len(faces_data), len(param_kernel_labels)))\n #単純パラメータの出力(カーネル関係なし)\n #depth, height\n terrain_variables[:,0]=(vertice_matrix_mesh[faces_data[:,0],2]+vertice_matrix_mesh[faces_data[:,1],2]+vertice_matrix_mesh[faces_data[:,2],2])/3+depth_correction\n terrain_variables[:,1]=terrain_variables[:,0]-(bottom+depth_correction)\n \n for i in range(len(faces_data)):\n face_normal=np.mean(vertice_normal_mesh[faces_data[i,:],0:3], axis=0)#面の向きを決めるために必要\n #faceの重心座標計算\n X_G=np.mean(vertice_matrix_mesh[faces_data[i,:],0:3], axis=0)\n \n distance_raw=np.sum((X_G-vertice_matrix)**2, axis=1)\n distance_plane_raw=np.sum((X_G[0:2]-vertice_matrix[:,0:2])**2, axis=1)\n distance_mesh=np.sum((X_G-vertice_matrix_mesh)**2, axis=1)\n distance_plane_mesh=np.sum((X_G[0:2]-vertice_matrix_mesh[:,0:2])**2, axis=1)\n \n for (kcount, k) in enumerate(kernel_size):\n if(mode1[kcount]==\"raw\"):\n ext_index=np.array(distance_rawup_value)]=np.nan\n q2[np.where(q21):\n for kernel in kernels:\n param_all.append(params_list[param]+str(kernel).replace(\".\",\"_\"))\n return(param_all)\n","sub_path":"laughsheet.py","file_name":"laughsheet.py","file_ext":"py","file_size_in_byte":40514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"563842176","text":"# python imports\nimport re\n\n# local imports\nfrom crawler.source.utils import geocode\nfrom crawler.source.basesource import BaseSource\n\nproperties = {\n\t\"identifier\": \"//a[@title='Advocate Digital Media'][@class='agency']\",\n\t\"parent_name\": \"Texas Community Media\",\n\t\"parent_url\":\"http://www.mrobertsmedia.com/index.php/our-portfolio/\",\n\t\"parent_sources_xpath\":\"//div[@class='one-third column lower-footer-content']/p/a/@href\",\n\t\"software_name\":\"Advocate Digital Media\",\n\t\"source_name\": \"//meta[@property='og:title']/@content\",\n\t\"address\": \"//*[@id='copyright']//following-sibling::text()\",\n\t\"email\": \"//a[contains(@href, 'mailto:')]/@href\",\n\t\"phone\": re.compile(r'\\d{3}-\\d{3}-\\d{4}|\\(\\d{3}\\)\\s*\\d{3}-\\d{4}'),\n\t\"source_type\": \"NP\",\n\t\"has_rss\": False,\n\t\"is_popular\": False,\n\t\"crawl_type\": \"S\",\n\t\"contact_page\": \"/contact\",\n\t\"rss_path\": \"\",\n\t\"articles_path\": \"/\",\n\t\"articles_xpath\": \"(//div[contains(@class, 'headline-grid')])\"\n\t \"[1]//a[@class='comment-count']/@href\",\n\t\"icon_xpath\": \"//link[@rel='apple-touch-icon'][@sizes='180x180']/@href\",\n\t\"twitter\": \"//a[@class='first-item'][@title='Twitter']/@href\",\n\t\"facebook\": \"//a[@class='first-item'][@title='Facebook']/@href\"\n}\n\n\nclass Source(BaseSource):\n\n\tdef __init__(self, url, props=properties, page_tree=None):\n\t\tsuper().__init__(url, props, page_tree)\n\n\t@classmethod\n\tdef build_sources(cls, props=properties):\n\t\treturn super().build_sources(props)\n\n\tdef _get_contact_details(self):\n\t\t\"\"\"\n\t\tAddress can be gotten by checking exceptions\n\t\tor extracting address from the contact page. 
Exceptions\n\t\tare pages which their address wasn't found anywhere\n\t\t\"\"\"\n\t\tif not self.name:\n\t\t\traise AttributeError('Can\\'t get address without paper name')\n\n\t\taddress_list = self.page_tree.xpath(self.props['address'])\n\t\textract = address_list[0].strip().split(\" - \")[0]\n\t\taddress = geocode(extract)\n\t\t\"\"\"\n\t\tsearch address list for phone number, if none found\n\t\tparse the contact page and look for a phone number\n\t\t\"\"\"\n\t\tfind_phone = self.page_tree.xpath(\"//a[starts-with(@href, 'tel:')]/@href\")\n\t\tif find_phone:\n\t\t\tself.phone = self.props['phone'].search(find_phone[0].strip()).group()\n\n\t\t# email has the format news@hostname.com\n\t\tself.email = 'newsroom@{}'.format(self.hostname)\n\n\t\treturn address\n","sub_path":"source/np/tx/texas_community.py","file_name":"texas_community.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"424270629","text":"#!/usr/bin/env python\n#\n# License: BSD\n# https://raw.githubusercontent.com/stonier/py_trees/devel/LICENSE\n#\n##############################################################################\n# Documentation\n##############################################################################\n\n\"\"\"\n.. argparse::\n :module: py_trees.demos.stewardship\n :func: command_line_argument_parser\n :prog: py-trees-demo-tree-stewardship\n\n.. graphviz:: dot/stewardship.dot\n\n.. image:: images/tree_stewardship.gif\n\"\"\"\n\n##############################################################################\n# Imports\n##############################################################################\n\nimport argparse\nimport functools\nimport py_trees\nimport sys\nimport time\n\nimport py_trees.console as console\n\n##############################################################################\n# Classes\n##############################################################################\n\n\ndef description(root):\n content = \"A demonstration of tree stewardship.\\n\\n\"\n content += \"A slightly less trivial tree that uses a simple stdout pre-tick handler\\n\"\n content += \"and both the debug and snapshot visitors for logging and displaying\\n\"\n content += \"the state of the tree.\\n\"\n content += \"\\n\"\n content += \"EVENTS\\n\"\n content += \"\\n\"\n content += \" - 3 : sequence switches from running to success\\n\"\n content += \" - 4 : selector's first child flicks to success once only\\n\"\n content += \" - 8 : the fallback idler kicks in as everything else fails\\n\"\n content += \" - 14 : the first child kicks in again, aborting a running sequence behind it\\n\"\n content += \"\\n\"\n if py_trees.console.has_colours:\n banner_line = console.green + \"*\" * 79 + \"\\n\" + console.reset\n s = \"\\n\"\n s += banner_line\n s += console.bold_white + \"Trees\".center(79) + \"\\n\" + console.reset\n s += banner_line\n s += \"\\n\"\n s += content\n 
s += \"\\n\"\n s += banner_line\n else:\n s = content\n return s\n\n\ndef epilog():\n if py_trees.console.has_colours:\n return console.cyan + \"And his noodly appendage reached forth to tickle the blessed...\\n\" + console.reset\n else:\n return None\n\n\ndef command_line_argument_parser():\n parser = argparse.ArgumentParser(description=description(create_tree()),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-r', '--render', action='store_true', help='render dot tree to file')\n group.add_argument('-i', '--interactive', action='store_true', help='pause and wait for keypress at each tick')\n return parser\n\n\ndef pre_tick_handler(behaviour_tree):\n \"\"\"\n This prints a banner will run immediately before every tick of the tree.\n\n Args:\n behaviour_tree (:class:`~py_trees.trees.BehaviourTree`): the tree custodian\n\n \"\"\"\n print(\"\\n--------- Run %s ---------\\n\" % behaviour_tree.count)\n\n\ndef post_tick_handler(snapshot_visitor, behaviour_tree):\n \"\"\"\n Prints an ascii tree with the current snapshot status.\n \"\"\"\n print(\"\\n\" + py_trees.display.ascii_tree(behaviour_tree.root,\n snapshot_information=snapshot_visitor))\n\n\ndef create_tree():\n every_n_success = py_trees.behaviours.SuccessEveryN(\"EveryN\", 5)\n sequence = py_trees.Sequence(name=\"Sequence\")\n guard = py_trees.behaviours.Success(\"Guard\")\n periodic_success = py_trees.behaviours.Periodic(\"Periodic\", 3)\n finisher = py_trees.behaviours.Success(\"Finisher\")\n sequence.add_child(guard)\n sequence.add_child(periodic_success)\n sequence.add_child(finisher)\n sequence.blackbox_level = py_trees.common.BlackBoxLevel.COMPONENT\n idle = py_trees.behaviours.Success(\"Idle\")\n root = py_trees.Selector(name=\"Demo Tree\")\n root.add_child(every_n_success)\n root.add_child(sequence)\n root.add_child(idle)\n return 
root\n\n\n##############################################################################\n# Main\n##############################################################################\n\ndef main():\n \"\"\"\n Entry point for the demo script.\n \"\"\"\n args = command_line_argument_parser().parse_args()\n py_trees.logging.level = py_trees.logging.Level.DEBUG\n tree = create_tree()\n print(description(tree))\n\n ####################\n # Rendering\n ####################\n if args.render:\n py_trees.display.render_dot_tree(tree)\n sys.exit()\n\n ####################\n # Tree Stewardship\n ####################\n behaviour_tree = py_trees.trees.BehaviourTree(tree)\n behaviour_tree.add_pre_tick_handler(pre_tick_handler)\n behaviour_tree.visitors.append(py_trees.visitors.DebugVisitor())\n snapshot_visitor = py_trees.visitors.SnapshotVisitor()\n behaviour_tree.add_post_tick_handler(functools.partial(post_tick_handler, snapshot_visitor))\n behaviour_tree.visitors.append(snapshot_visitor)\n behaviour_tree.setup(timeout=15)\n\n ####################\n # Tick Tock\n ####################\n if args.interactive:\n unused_result = py_trees.console.read_single_keypress()\n while True:\n try:\n behaviour_tree.tick()\n if args.interactive:\n unused_result = py_trees.console.read_single_keypress()\n else:\n time.sleep(0.5)\n except KeyboardInterrupt:\n break\n print(\"\\n\")\n","sub_path":"py_trees/demos/stewardship.py","file_name":"stewardship.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"103111503","text":"import numpy as np\nimport os\nimport pickle\nimport pandas as pd\n\n\nimage_train_path = '../Data/img_data_train.pkl'\nimage_val_path = '../Data/img_data_val.pkl'\nimage_test_path = '../Data/img_data_test.pkl'\nwith open(image_train_path, 'rb') as file:\n image_train_feats = pickle.load(file)\nwith open(image_val_path, 'rb') as file:\n image_val_feats = pickle.load(file)\nwith open(image_test_path, 'rb') as file:\n image_test_feats = pickle.load(file)\nlabels = open('../classes.txt','r').read().split('\\n')\nlabels = labels[:-1]\n\nprint('loaded...')\n\nwith open('../Data/spk_ibm_train.pkl', 'rb') as fp:\n spk_ibm_train = pickle.load(fp)\n\nwith open('../Data/spk_google_train.pkl', 'rb') as fp:\n spk_google_train = pickle.load(fp)\n\nwith open('../Data/spk_microsoft_train.pkl', 'rb') as fp:\n spk_ms_train = pickle.load(fp)\n\nwith open('../Data/spk_ibm_val.pkl', 'rb') as fp:\n spk_ibm_val = pickle.load(fp)\n\nwith open('../Data/spk_google_val.pkl', 'rb') as fp:\n spk_google_val = pickle.load(fp)\n\nwith open('../Data/spk_microsoft_val.pkl', 'rb') as fp:\n spk_ms_val = pickle.load(fp)\n\n\nspk_ibm_train += 14\nspk_ms_train += 14 + 3\nspk_train = np.concatenate((spk_google_train, spk_ibm_train, spk_ms_train))\nspk_train = spk_train.astype(int)\nspk_ibm_val += 14\nspk_ms_val += 14 + 3\nspk_val = np.concatenate((spk_google_val, spk_ibm_val, spk_ms_val))\nspk_val = spk_val.astype(int)\n\n\ntrain_set = []\nfor label in labels:\n for image_idx in range(len(img_data_train[label])):\n train_set.append([label, image_idx])\n\ndf_train = pd.DataFrame(train_set)\nprint(df_train)\ndf_train.to_csv('../Data/train_data_proxy_image.csv')\n\n\nspk = spk_train\ntrain_set = []\nfor label in labels:\n for speech_idx in spk:\n train_set.append([label, speech_idx])\n\ndf_train = pd.DataFrame(train_set)\nprint(df_train)\ndf_train.to_csv('../Data/train_data_proxy_audio.csv')\n\n\nspk = spk_val\nval_set = []\nfor label in labels:\n\n # Image anchor\n grounding 
= 0\n for image_idx in range(len(img_data_val[label])):\n val_set.append([grounding, label, image_idx])\n \n # Audio anchor\n grounding = 1\n for speech_idx in spk: \n val_set.append([grounding, label, speech_idx])\n\n\ndf_val = pd.DataFrame(val_set)\nprint(df_val)\ndf_val.to_csv('../Data/val_data_proxy.csv')\n","sub_path":"Preprocessing/create_csv.py","file_name":"create_csv.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"82522117","text":"# The Craftr build system\n# Copyright (C) 2016 Niklas Rosenstein\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nfrom pip.req import parse_requirements\nfrom setuptools import setup, find_packages\n\nimport functools\nimport os\nimport pip\nimport sys\n\nif sys.version < '3.4' or sys.version >= '3.6':\n print('-----------------------------------------------------------------')\n print(\"WARNING: Craftr officially supports Python 3.4, 3.5\")\n print(\"WARNING: Your current version is Python {}\".format(sys.version[:5]))\n print('-----------------------------------------------------------------')\n\n# parse_requirements() interface has changed in Pip 6.0\nif pip.__version__ >= '6.0':\n parse_requirements = functools.partial(\n parse_requirements, session=pip.download.PipSession())\n\n\ndef readme():\n if os.path.isfile('README.md') and any('dist' in x for x in sys.argv[1:]):\n if os.system('pandoc -s README.md -o README.rst') != 0:\n print('-----------------------------------------------------------------')\n print('WARNING: README.rst could not be generated, pandoc command failed')\n print('-----------------------------------------------------------------')\n if sys.stdout.isatty():\n input(\"Enter to continue... 
\")\n else:\n print(\"Generated README.rst with Pandoc\")\n\n if os.path.isfile('README.rst'):\n with open('README.rst') as fp:\n return fp.read()\n return ''\n\n\ndef find_files(directory, strip):\n \"\"\"\n Using glob patterns in ``package_data`` that matches a directory can\n result in setuptools trying to install that directory as a file and\n the installation to fail.\n\n This function walks over the contents of *directory* and returns a list\n of only filenames found. The filenames will be stripped of the *strip*\n directory part.\n \"\"\"\n\n result = []\n for root, dirs, files in os.walk(directory):\n for filename in files:\n filename = os.path.join(root, filename)\n result.append(os.path.relpath(filename, strip))\n return result\n\n\nsetup(\n name = 'craftr-build',\n version = '2.0.0',\n author = 'Niklas Rosenstein',\n author_email = 'rosensteinniklas@gmail.com',\n description = 'Meta build system based on Ninja and Python',\n long_description = readme(),\n url = 'https://gitlab.niklasrosenstein.com/niklas/craftr',\n install_requires = [str(x.req) for x in parse_requirements('requirements.txt')],\n entry_points = dict(\n console_scripts = [\n 'craftr = craftr.__main__:main_and_exit'\n ]\n ),\n packages = find_packages(),\n package_data = {\n 'craftr': find_files('craftr/stl', strip='craftr') + find_files('craftr/stl_auxiliary', strip='craftr')\n },\n license = 'GNU GPL v3',\n classifiers = [\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Build Tools\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"License :: OSI Approved :: MIT License\"\n ],\n)\n","sub_path":"pypi_install_script/craftr-build-2.0.0/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"645900295","text":"#!/usr/bin/env python\nimport StateMachine.const\nfrom StateMachine.sub import *\nfrom StateMachine.sub import rospy\nfrom StateMachine.sub import smach\n\nimport math\n\nclass Launch_Error(Exception):\n '''Base class for exceptions in this module. Indicates a failure to launch.'''\n pass\n\nclass Launcher_Ready_Error(Launch_Error):\n '''Indicates that launchers are failing to present as READY.'''\n pass\n\nclass Interact_Torpedo(Sub):\n '''Executes interaction state for a task requiring launching torpedoes.\n \n Assumptions:\n - Torpedo launch is mapped to 1 or 2 joystick buttons (left and right\n launchers would need different windage caluclations applied, based on\n distance to target).\n - State is entered from a track_torpedo_target\n such that the desired target is aligned and centered.\n '''\n def __init__(self):\n smach.State.__init__(self, outcomes=['TORPEDO_SUCCESS',\n 'TORPEDO_FAILURE'])\n\n def execute(self, userdata):\n '''Executes the INTERACT_TORPEDO state's primariy action.'''\n #initialization\n self.init_state()\n self.last_seen = rospy.get_time()\n rospy.loginfo('Executing state INTERACT_TORPEDO')\n\n # Start the front network\n self.use_front_network(True)\n\n if not self.active_launcher:\n rospy.loginfo('[INTERACT_TORPEDO] - %s' % ('No available launch tubes'))\n return 'torpedo_failed'\n try:\n self.launch(self.active_launcher)\n return 'torpedo_launched'\n except Launch_Error as e:\n #Issues with launchers themselves - failure to ready, failure to\n #fire, etc.\n rospy.loginfo('[INTERACT_TORPEDO] - %s' % (e.message))\n return 'torpedo_failed'\n except Exception as e: \n #Some other thing is broken, likely this very code.\n rospy.logwarn('[INTERACT_TORPEDO] - %s' % (e.message))\n return 'torpedo_failed'\n\n def launch(self, launcher):\n '''Launches a torpedo at the target.\n\n Args:\n launcher: string, id'd launcher, assumed armed and ready to fire.\n Raises:\n Launch_Error: if there is an issue with the 
launcher(s) preventing\n torpedo launch.\n '''\n try:\n jmsg = self.init_joy_msg()\n jmsg.buttons[const.BUTTONS[const.JOY_MAP[launcher]]]=1\n self.publish_joy(jmsg)\n rospy.sleep(1)\n jmsg.buttons[const.BUTTONS[const.JOY_MAP[launcher]]]=0\n self.publish_joy(jmsg)\n # Activates next tube\n self.set_active_launcher()\n except Exception as e:\n raise Launch_Error(e)\n\n\n\n\n\n\n","sub_path":"src/subdriver/src/StateMachine/interact/interact_torpedo.py","file_name":"interact_torpedo.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"610318599","text":"#!/usr/bin/env python\nimport pytest\nfrom pytest import approx\nfrom time import time\nimport numpy as np\nfrom pandas import DataFrame\nimport signal_subspace as subs\nimport subspace\n\nx = np.random.randn(4096).astype(np.complex128)\n\nf0 = 12345.6\nfs = 48e3\nsnr = 50. # dB\nNtone = 2\n\nt = np.arange(0, 0.01, 1/fs)\n\nnvar = 10**(-snr/10.)\n\nxr = (np.exp(1j*2*np.pi*f0*t) + np.sqrt(nvar)*(np.random.randn(t.size))).real\nxc = np.exp(1j*2*np.pi*f0*t) + np.sqrt(nvar)*(np.random.randn(t.size) + 1j*np.random.randn(t.size))\n\n\ndef test_corrmtx():\n M = 5\n subs.corrmtx(x.real, M)\n\n\ndef test_autocov():\n \"\"\"2x extra speedup from casting correct type\"\"\"\n M = 5\n tic = time()\n C = subs.compute_autocovariance(x, M)\n tocpy = time()-tic\n print(C)\n# %%\n tic = time()\n Cc = subspace.covariance.autocov_c(x, M)\n tocfortcmpl = time()-tic\n\n tic = time()\n Cr = subspace.covariance.autocov_r(x.real, M)\n tocfortreal = time() - tic\n\n print('autocovariance: Complex: Fortran faster than Python by factor:', tocpy/tocfortcmpl)\n print('autocovariance: Real: Fortran faster than Python by factor:', tocpy/tocfortreal)\n\n assert C == approx(Cc, rel=1)\n assert C.real == approx(Cr, rel=1)\n\n\ndef test_music():\n fest, sigma = subs.rootmusic(x, L=2, M=200, fs=fs)\n\n\ndef test_esprit():\n \"\"\"\n ESPRIT TEST PYTHON\n It appears that this PYTHON implementation of ESPRIT scales by O(N^3.25)\n 0.0588 sec for N=480,fs=48e3,Ntone=1, M=N/2\n 11.2199 sec for N=2400, .. .. . .\n\n FORTRAN results seem to scale by O(N^2.825)\n 0.170 sec for N=480, fs=48e3, Ntone=1, M=N/2\n 16.615 sec. for N=2400, ... .. 
.\n\n later found literature stating ESPRIT is O(M^3) (or was it N^3?)\n \"\"\"\n# %% measure signal\n M = [100] # iterating over block length\n\n py = DataFrame(index=M, columns=['err', 'sigma', 'time'])\n fortreal = DataFrame(index=M, columns=['err', 'sigma', 'time'])\n fortcmpl = DataFrame(index=M, columns=['err', 'sigma', 'time'])\n\n for m in M:\n # %% python\n tic = time()\n fest, sigma = subs.esprit(xc, Ntone//2, M=m, fs=fs, verbose=False)\n toc = time()-tic\n py.loc[m, :] = [fest-f0, sigma, toc]\n\n assert fest == approx(f0, rel=1e-3)\n assert sigma[0] > 50, f'too small sigma {sigma[0]}'\n # print(f'PYTHON time signal N= {xc.size} M={m} freq {fest} Hz, sigma {sigma}, time {toc:.4f} sec')\n# %% fortran\n\n tic = time()\n fest, sigma = subspace.subspace.esprit_c(xc, Ntone, m, fs)\n\n assert fest[0] == approx(f0, rel=1e-3)\n assert sigma[0] > 50, f'too small sigma {sigma[0]}'\n fortcmpl.loc[m, :] = [fest-f0, sigma, time()-tic]\n\n fest, sigma = subspace.subspace.esprit_r(xr, Ntone, m, fs)\n\n assert fest[0] == approx(f0, rel=1e-3)\n assert sigma[0] > 20, f'too small sigma {sigma[0]}'\n fortreal.loc[m, :] = [fest-f0, sigma, time()-tic]\n\n # print('FORTRAN time signal N= {} M={} freq {} Hz, sigma {}, time {:.4f} sec'.format(x.size,m,fest,sigma,toc))\n\n print('python complex: sec.', py[\"time\"].values[0])\n\n print('Fortran complex: sec.', fortcmpl[\"time\"].values[0])\n\n print('Fortran real: sec.', fortreal[\"time\"].values[0])\n\n print('fESPRIT: Fortran faster than Python by factor:', py[\"time\"].values[0] / fortcmpl[\"time\"].values[0])\n\n\nif __name__ == '__main__':\n pytest.main(['-x', __file__])\n","sub_path":"tests/test_all.py","file_name":"test_all.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"581403399","text":"import os\nimport secrets\nimport random\nfrom app import app\nfrom xray import model_predict\nfrom flask import render_template, url_for, flash, redirect, request, Response, session\nfrom app.forms import ImageForm, LoginForm, SignupForm, AddMemberForm, DetailsForm\nfrom flask_mysqldb import MySQL\nimport MySQLdb.cursors\n\n\napp.config['SECRET_KEY'] = 'TeamBeta30'\napp.config['MYSQL_HOST'] = 'sql12.freemysqlhosting.net'\napp.config['MYSQL_USER'] = 'sql12373645'\napp.config['MYSQL_PASSWORD'] = 'J7i89uAyM8'\napp.config['MYSQL_DB'] = 'sql12373645'\nmysql = MySQL(app)\n\n\n@app.route('/')\ndef home():\n return render_template('home.html', title=\"Home\")\n\n\ndef save_picture(form_picture):\n random_hex = secrets.token_hex(8)\n _, f_ext = os.path.splitext(form_picture.filename)\n picture_name = random_hex + f_ext\n picture_path = os.path.join(\n app.root_path, 'static/assets/img/xray', picture_name)\n\n form_picture.save(picture_path)\n return picture_name\n\n\n@app.route('/patient/dashboard', methods=['GET', 'POST'])\ndef patient_dashboard():\n if \"email\" not in session:\n flash(\"You must log in first\")\n return redirect(url_for(\"login\"))\n\n form = ImageForm()\n if form.validate_on_submit():\n f_name = save_picture(form.picture.data)\n covid_prediction = model_predict(os.path.join(\n app.root_path, 'static/assets/img/xray', f_name), )\n if covid_prediction is True:\n email = session[\"email\"]\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'UPDATE DETAILS SET xraystatus = \"Threatened\", status = \"Threatened\", disease = \"Covid-19\" WHERE email = %s', (email,))\n mysql.connection.commit()\n flash('You have high chances of Covid-19, please see a doctor.')\n else:\n flash('Congratulations! 
You have low chances of Covid-19.')\n return render_template('patient-dashboard.html', form=form, title=\"Patient Dashboard\")\n\n\n@app.route('/patient/')\ndef disease(disease):\n if disease:\n email = session[\"email\"]\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'UPDATE DETAILS SET disease = %s, status = \"Threatened\" WHERE email = %s', (disease, email))\n mysql.connection.commit()\n return redirect(url_for('patient_dashboard'))\n\n@app.route('/patient/profile', methods=['GET', 'POST'])\ndef profile():\n form = DetailsForm()\n if request.method == \"POST\":\n age = request.form[\"age\"]\n height = request.form[\"height\"]\n weight = request.form[\"weight\"]\n bloodgrp = request.form[\"bloodgrp\"]\n\n email = session[\"email\"]\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'UPDATE DETAILS SET age = %s, height = %s,weight = %s,blood_grp = %s WHERE email = %s', (age, height, weight, bloodgrp, email))\n mysql.connection.commit()\n return redirect(url_for('patient_dashboard'))\n return render_template('profile.html', form=form, title=\"SignUp\")\n\n\n@app.route('/patient/family', methods=['GET', 'POST'])\ndef family():\n form = AddMemberForm()\n if \"email\" in session and request.method == \"GET\":\n email = session['email']\n fid = session['fid']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM DETAILS WHERE familyid = %s', (fid,))\n members = cursor.fetchall()\n return render_template('family.html', members=members, form=form, title=\"Family\")\n\n elif \"email\" in session and request.method == \"POST\":\n if \"new_email\" in request.form and request.method == \"POST\":\n cur_email = session['email']\n cur_fid = session['fid']\n new_email = request.form['new_email']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'SELECT familyid FROM DETAILS WHERE email = %s', (new_email,))\n member = cursor.fetchone()\n if 
member:\n new_fid = member['familyid']\n cursor.execute(\n 'UPDATE USERS SET familyid = %s WHERE familyid = %s', (cur_fid, new_fid))\n mysql.connection.commit()\n cursor.execute(\n 'UPDATE DETAILS SET familyid = %s WHERE familyid = %s', (cur_fid, new_fid))\n mysql.connection.commit()\n else:\n flash(\"No such user!\")\n return redirect(url_for('family'))\n else:\n return redirect(url_for('login'))\n\n\n@app.route('/doctor')\ndef doctor():\n if \"email\" in session and request.method == \"GET\":\n email = session['email']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'SELECT * FROM DETAILS WHERE email NOT IN (\"doctor@gmail.com\")')\n members = cursor.fetchall()\n return render_template('doctor.html', members=members, title=\"Doctor\")\n\n return render_template('doctor.html', title=\"Doctor\")\n\n\n@app.route('/pharmacy')\ndef pharmacy():\n return render_template('pharmacy.html', title=\"Pharmacy\")\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if \"email\" in session:\n return redirect(url_for('patient_dashboard'))\n\n if request.method == 'POST' and 'email' in request.form and 'password' in request.form:\n email = request.form['email']\n password = request.form['password']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\n 'SELECT * FROM USERS WHERE email = %s AND password = %s', (email, password))\n patient = cursor.fetchone()\n if patient:\n session['email'] = patient['email']\n session['name'] = patient['name']\n cursor.execute(\n 'SELECT * FROM USERS WHERE email = %s', (email,))\n patient = cursor.fetchone()\n session['fid'] = patient[\"familyid\"]\n\n if session[\"email\"] == 'doctor@gmail.com':\n return redirect(url_for('doctor'))\n else:\n return redirect(url_for('patient_dashboard'))\n else:\n flash('Incorrect username or password')\n return redirect(url_for('login'))\n return render_template('login.html', form=form, 
title=\"Login\")\n\n\n@app.route('/signup', methods=['GET', 'POST'])\ndef signup():\n form = SignupForm()\n if \"email\" in session:\n return redirect(url_for('profile'))\n\n if request.method == 'POST' and 'name' in request.form and 'password' in request.form and 'email' in request.form:\n name = request.form['name']\n password = request.form['password']\n email = request.form['email']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('SELECT * FROM USERS WHERE email = %s', (email,))\n account = cursor.fetchone()\n if account:\n flash('Email already Exists!')\n else:\n fid = random.randint(1, 9999)\n cursor.execute(\n '''INSERT INTO USERS VALUES (%s, %s, %s,%s,'1234567890',-1,-1,-1,'NA')''', (name, email, password, fid))\n cursor.execute(\n '''INSERT INTO DETAILS VALUES (%s, %s,'NA',-1,%s,'1234567890',-1,-1,-1,'NA','NA','NA')''', (name, email, fid))\n mysql.connection.commit()\n flash('You have succesfully registered!')\n return redirect(url_for('profile'))\n elif request.method == 'POST':\n flash('Please fill out the form!')\n return render_template('signup.html', form=form, title=\"Signup\")\n\n\n@app.route('/logout')\ndef logout():\n if \"email\" in session:\n session.pop(\"email\", None)\n session.pop(\"name\", None)\n return redirect(url_for('login'))\n return redirect(url_for('login'))\n\n\n@app.route('/find')\ndef find():\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute('''SELECT * from USERS''')\n results = cursor.fetchall()\n print(results)\n return \"Done\"","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":8115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"69579838","text":"from jnius import autoclass, cast\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom .utils import Utils\nfrom .index import Indexer\nfrom .transformer import TransformerBase, Symbol\nfrom tqdm import tqdm\nimport deprecation\n\n# import time\n\ndef importProps():\n from . import properties as props\n # Make import global\n globals()[\"props\"] = props\nprops = None\n\n_matchops = [\"#combine\", \"#uw\", \"#1\", \"#tag\", \"#prefix\", \"#band\", \"#base64\", \"#syn\"]\ndef _matchop(query):\n for m in _matchops:\n if m in query:\n return True\n return False\n\ndef parse_index_like(index_location):\n JIR = autoclass('org.terrier.querying.IndexRef')\n JI = autoclass('org.terrier.structures.Index')\n\n if isinstance(index_location, JIR):\n return index_location\n if isinstance(index_location, JI):\n return cast('org.terrier.structures.Index', index_location).getIndexRef()\n if isinstance(index_location, str) or issubclass(type(index_location), Indexer):\n if issubclass(type(index_location), Indexer):\n return JIR.of(index_location.path)\n return JIR.of(index_location)\n\n raise ValueError(\n f'''index_location is current a {type(index_location)},\n while it needs to be an Index, an IndexRef, a string that can be\n resolved to an index location (e.g. 
path/to/index/data.properties),\n or an pyterrier.Indexer object'''\n )\n\nclass BatchRetrieveBase(TransformerBase, Symbol):\n \"\"\"\n A base class for retrieval\n\n Attributes:\n verbose(bool): If True transform method will display progress\n \"\"\"\n def __init__(self, verbose=0, **kwargs):\n super().__init__(**kwargs)\n self.verbose = verbose\n\nclass BatchRetrieve(BatchRetrieveBase):\n \"\"\"\n Use this class for retrieval by Terrier\n\n Attributes:\n default_controls(dict): stores the default controls\n default_properties(dict): stores the default properties\n IndexRef: stores the index reference object\n appSetup: stores the Terrier ApplicationSetup object\n verbose(bool): If True transform method will display progress\n properties(dict): Current properties\n controls(dict): Current controls\n num_results(int): Number of results to retrieve. \n \"\"\"\n default_controls = {\n \"terrierql\": \"on\",\n \"parsecontrols\": \"on\",\n \"parseql\": \"on\",\n \"applypipeline\": \"on\",\n \"localmatching\": \"on\",\n \"filters\": \"on\",\n \"decorate\": \"on\",\n \"wmodel\": \"DPH\",\n }\n\n default_properties = {\n \"querying.processes\": \"terrierql:TerrierQLParser,parsecontrols:TerrierQLToControls,parseql:TerrierQLToMatchingQueryTerms,matchopql:MatchingOpQLParser,applypipeline:ApplyTermPipeline,localmatching:LocalManager$ApplyLocalMatching,qe:QueryExpansion,labels:org.terrier.learning.LabelDecorator,filters:LocalManager$PostFilterProcess\",\n \"querying.postfilters\": \"decorate:SimpleDecorate,site:SiteFilter,scope:Scope\",\n \"querying.default.controls\": \"wmodel:DPH,parsecontrols:on,parseql:on,applypipeline:on,terrierql:on,localmatching:on,filters:on,decorate:on\",\n \"querying.allowed.controls\": \"scope,qe,qemodel,start,end,site,scope,applypipeline\",\n \"termpipelines\": \"Stopwords,PorterStemmer\"\n }\n\n \"\"\"\n Init method\n\n Args:\n index_location: An index-like object - An Index, an IndexRef, or a String that can be resolved to an IndexRef\n 
controls(dict): A dictionary with with the control names and values\n properties(dict): A dictionary with with the property keys and values\n verbose(bool): If True transform method will display progress\n num_results(int): Number of results to retrieve. \n metadata(list): What metadata to retrieve\n \"\"\"\n def __init__(self, index_location, controls=None, properties=None, metadata=[\"docno\"], num_results=None, wmodel=None, **kwargs):\n super().__init__(**kwargs)\n \n self.indexref = parse_index_like(index_location)\n self.appSetup = autoclass('org.terrier.utility.ApplicationSetup')\n self.properties = _mergeDicts(BatchRetrieve.default_properties, properties)\n self.metadata = metadata\n\n if props is None:\n importProps()\n for key, value in self.properties.items():\n self.appSetup.setProperty(key, str(value))\n \n self.controls = _mergeDicts(BatchRetrieve.default_controls, controls)\n if wmodel is not None:\n self.controls[\"wmodel\"] = wmodel\n\n if num_results is not None:\n if num_results > 0:\n self.controls[\"end\"] = str(num_results -1)\n #self.appSetup.setProperty(\"matching.retrieved_set_size\", str(num_results))\n elif num_results == 0:\n del self.controls[\"end\"]\n else: \n raise ValueError(\"num_results must be None, 0 or positive\")\n\n\n MF = autoclass('org.terrier.querying.ManagerFactory')\n self.manager = MF._from_(self.indexref)\n \n def set_parameter(self,name,value):\n if name in self.controls:\n self.controls[name] = value\n elif name in self.properties:\n self.properties[name] = value\n else:\n super().set_parameter(name,value)\n\n def transform(self, queries):\n \"\"\"\n Performs the retrieval\n\n Args:\n queries: String for a single query, list of queries, or a pandas.Dataframe with columns=['qid', 'query']\n\n Returns:\n pandas.Dataframe with columns=['qid', 'docno', 'rank', 'score']\n \"\"\"\n results=[]\n if not isinstance(queries, pd.DataFrame):\n queries=Utils.form_dataframe(queries)\n docno_provided = \"docno\" in queries.columns\n 
docid_provided = \"docid\" in queries.columns\n scores_provided = \"scores\" in queries.columns\n if docno_provided or docid_provided:\n from . import check_version\n assert check_version(5.3)\n input_results = queries\n\n # query is optional, and functionally dependent on qid.\n # Hence as long as one row has the query for each qid, \n # the rest can be None\n queries = input_results[[\"qid\", \"query\"]].dropna(axis=0, subset=[\"query\"]).drop_duplicates()\n RequestContextMatching = autoclass(\"org.terrier.python.RequestContextMatching\")\n\n # make sure queries are a String\n if queries[\"qid\"].dtype == np.int64:\n queries['qid'] = queries['qid'].astype(str)\n\n\n for index,row in tqdm(queries.iterrows(), desc=str(self), total=queries.shape[0], unit=\"q\") if self.verbose else queries.iterrows():\n rank = 0\n qid = str(row['qid'])\n query = row['query']\n srq = self.manager.newSearchRequest(qid, query)\n \n for control, value in self.controls.items():\n srq.setControl(control, value)\n\n # this is needed until terrier-core issue #106 lands\n if \"applypipeline:off\" in query:\n srq.setControl(\"applypipeline\", \"off\")\n srq.setOriginalQuery(query.replace(\"applypipeline:off\", \"\"))\n\n # transparently detect matchop queries\n if _matchop(query):\n srq.setControl(\"terrierql\", \"off\")\n srq.setControl(\"parsecontrols\", \"off\")\n srq.setControl(\"parseql\", \"off\")\n srq.setControl(\"matchopql\", \"on\")\n\n # this handles the case that a candidate set of documents has been set. \n if docno_provided or docid_provided:\n # we use RequestContextMatching to make a ResultSet from the \n # documents in the candidate set. 
\n matching_config_factory = RequestContextMatching.of(srq)\n input_query_results = input_results[input_results[\"qid\"] == qid]\n if docid_provided:\n matching_config_factory.fromDocids(input_query_results[\"docid\"].values.tolist())\n elif docno_provided:\n matching_config_factory.fromDocnos(input_query_results[\"docno\"].values.tolist())\n if scores_provided:\n matching_config_factory.withScores(input_query_results[\"scores\"].values.tolist())\n matching_config_factory.build()\n srq.setControl(\"matching\", \"org.terrier.matching.ScoringMatching\" + \",\" + srq.getControl(\"matching\"))\n\n # now ask Terrier to run the request\n self.manager.runSearchRequest(srq)\n result = srq.getResults()\n\n # check we got all of the expected metadata (if the resultset has a size at all)\n if len(result) > 0 and len(set(self.metadata) & set(result.getMetaKeys())) != len(self.metadata):\n raise KeyError(\"Requested metadata: %s, obtained metadata %s\" % (str(self.metadata), str(result.getMetaKeys()))) \n\n # prepare the dataframe for the results of the query\n for item in result:\n metadata_list = []\n for meta_column in self.metadata:\n metadata_list.append(item.getMetadata(meta_column))\n res = [qid, item.getDocid()] + metadata_list + [rank, item.getScore()]\n rank += 1\n results.append(res)\n res_dt = pd.DataFrame(results, columns=['qid', 'docid' ] + self.metadata + ['rank', 'score'])\n # ensure to return the query\n res_dt = res_dt.merge(queries[[\"qid\", \"query\"]], on=[\"qid\"])\n return res_dt\n\n def __repr__(self):\n return \"BR(\" + \",\".join([\n self.indexref.toString(),\n str(self.controls),\n str(self.properties)\n ]) + \")\"\n\n def __str__(self):\n return \"BR(\" + self.controls[\"wmodel\"] + \")\"\n\n @deprecation.deprecated(deprecated_in=\"0.3.0\",\n details=\"Please use pt.Utils.write_results_trec()\")\n def saveResult(self, result, path, run_name=None):\n if run_name is None:\n run_name = self.controls[\"wmodel\"]\n res_copy = result.copy()[[\"qid\", 
\"docno\", \"rank\", \"score\"]]\n res_copy.insert(1, \"Q0\", \"Q0\")\n res_copy.insert(5, \"run_name\", run_name)\n res_copy.to_csv(path, sep=\" \", header=False, index=False)\n\n def setControls(self, controls):\n for key, value in controls.items():\n self.controls[key] = value\n\n def setControl(self, control, value):\n self.controls[control] = value\n\ndef _mergeDicts(defaults, settings):\n KV = defaults.copy()\n if settings is not None and len(settings) > 0:\n KV.update(settings)\n return KV\n\n\nclass TextScorer(TransformerBase):\n\n def __init__(self, body_attr=\"body\", background_index=None, **kwargs):\n #super().__init__(**kwargs)\n self.body_attr = body_attr\n if background_index is not None:\n self.background_indexref = parse_index_like(background_index)\n else:\n self.background_indexref = None\n self.kwargs = kwargs\n\n def transform(self, topics_and_res):\n from . import DFIndexer, autoclass, IndexFactory\n from .index import IndexingType\n documents = topics_and_res[[\"docno\", self.body_attr]].drop_duplicates()\n indexref = DFIndexer(None, type=IndexingType.MEMORY).index(documents[self.body_attr], documents[\"docno\"])\n index_docs = IndexFactory.of(indexref)\n \n # if a background index is set, we create an \"IndexWithBackground\" using both that and our new index\n if self.background_indexref is None:\n index = index_docs\n else:\n index_background = IndexFactory.of(self.background_indexref)\n index = autoclass(\"org.terrier.python.IndexWithBackground\")(index_docs, index_background) \n\n # we have provided the documents, so we dont need a docno or docid column that will confuse \n # BR and think it is re-ranking. 
In fact, we only need qid and query\n topics = topics_and_res[[\"qid\", \"query\"]].dropna(axis=0, subset=[\"query\"]).drop_duplicates()\n\n # and then just instantiate BR using the our new index \n # we take all other arguments as arguments for BR\n inner = BatchRetrieve(index, **(self.kwargs))\n inner_res = inner.transform(topics)\n if len(inner_res) < len(topics_and_res):\n inner_res = topics_and_res[[\"qid\", \"docno\"]].merge(inner_res, on=[\"qid\", \"docno\"], how=\"left\")\n inner_res[\"score\"] = inner_res[\"score\"].fillna(value=0)\n return inner_res\n\nclass FeaturesBatchRetrieve(BatchRetrieve):\n \"\"\"\n Use this class for retrieval with multiple features\n\n Attributes:\n default_controls(dict): stores the default controls\n default_properties(dict): stores the default properties\n IndexRef: stores the index reference object\n appSetup: stores the Terrier ApplicationSetup object\n verbose(bool): If True transform method will display progress\n properties(dict): Current properties\n controls(dict): Current controls\n \"\"\"\n FBR_default_controls = BatchRetrieve.default_controls.copy()\n FBR_default_controls[\"matching\"] = \"FatFeaturedScoringMatching,org.terrier.matching.daat.FatFull\"\n FBR_default_properties = BatchRetrieve.default_properties.copy()\n\n def __init__(self, index_location, features, controls=None, properties=None, **kwargs):\n \"\"\"\n Init method\n\n Args:\n index_location: An index-like object - An Index, an IndexRef, or a String that can be resolved to an IndexRef\n features(list): List of features to use\n controls(dict): A dictionary with with the control names and values\n properties(dict): A dictionary with with the control names and values\n verbose(bool): If True transform method will display progress\n num_results(int): Number of results to retrieve. 
\n \"\"\"\n # if props==None:\n # importProps()\n controls = _mergeDicts(FeaturesBatchRetrieve.FBR_default_controls, controls)\n properties = _mergeDicts(FeaturesBatchRetrieve.FBR_default_properties, properties)\n self.features = features\n properties[\"fat.featured.scoring.matching.features\"] = \";\".join(features)\n super().__init__(index_location, controls, properties, **kwargs)\n\n def transform(self, topics):\n \"\"\"\n Performs the retrieval with multiple features\n\n Args:\n topics: String for a single query, list of queries, or a pandas.Dataframe with columns=['qid', 'query']\n\n Returns:\n pandas.Dataframe with columns=['qid', 'docno', 'score', 'features']\n \"\"\"\n results = []\n queries = Utils.form_dataframe(topics)\n\n docno_provided = \"docno\" in queries.columns\n docid_provided = \"docid\" in queries.columns\n scores_provided = \"scores\" in queries.columns\n if docno_provided or docid_provided:\n from . import check_version\n assert check_version(5.3)\n input_results = queries\n\n # query is optional, and functionally dependent on qid.\n # Hence as long as one row has the query for each qid, \n # the rest can be None\n queries = input_results[[\"qid\", \"query\"]].dropna(axis=0, subset=[\"query\"]).drop_duplicates()\n RequestContextMatching = autoclass(\"org.terrier.python.RequestContextMatching\")\n\n if queries[\"qid\"].dtype == np.int64:\n queries['qid'] = queries['qid'].astype(str)\n\n for index, row in tqdm(queries.iterrows(), desc=str(self), total=queries.shape[0], unit=\"q\") if self.verbose else queries.iterrows():\n qid = str(row['qid'])\n query = row['query']\n\n srq = self.manager.newSearchRequest(qid, query)\n\n for control, value in self.controls.items():\n srq.setControl(control, value)\n\n # this is needed until terrier-core issue #106 lands\n if \"applypipeline:off\" in query:\n srq.setControl(\"applypipeline\", \"off\")\n srq.setOriginalQuery(query.replace(\"applypipeline:off\", \"\"))\n\n # transparently detect matchop queries\n 
if _matchop(query):\n srq.setControl(\"terrierql\", \"off\")\n srq.setControl(\"parsecontrols\", \"off\")\n srq.setControl(\"parseql\", \"off\")\n srq.setControl(\"matchopql\", \"on\")\n\n # this handles the case that a candidate set of documents has been set. \n if docno_provided or docid_provided:\n # we use RequestContextMatching to make a ResultSet from the \n # documents in the candidate set. \n matching_config_factory = RequestContextMatching.of(srq)\n input_query_results = input_results[input_results[\"qid\"] == qid]\n if docid_provided:\n matching_config_factory.fromDocids(input_query_results[\"docid\"].values.tolist())\n elif docno_provided:\n matching_config_factory.fromDocnos(input_query_results[\"docno\"].values.tolist())\n if scores_provided:\n matching_config_factory.withScores(input_query_results[\"scores\"].values.tolist())\n matching_config_factory.build()\n srq.setControl(\"matching\", \",\".join([\"FatFeaturedScoringMatching\",\"ScoringMatchingWithFat\", srq.getControl(\"matching\")]))\n \n self.manager.runSearchRequest(srq)\n srq = cast('org.terrier.querying.Request', srq)\n fres = cast('org.terrier.learning.FeaturedResultSet', srq.getResultSet())\n feat_names = fres.getFeatureNames()\n\n docids=fres.getDocids()\n scores= fres.getScores()\n metadata_list = []\n for meta_column in self.metadata:\n metadata_list.append(fres.getMetaItems(\"docno\"))\n feats_values = [] \n for feat in feat_names:\n feats_values.append(fres.getFeatureScores(feat))\n rank = 0\n for i in range(fres.getResultSize()):\n \n feats_array = []\n for j in range(len(feats_values)):\n feats_array.append(feats_values[j][i])\n feats_array = np.array(feats_array)\n meta=[]\n for meta_idx, meta_column in enumerate(self.metadata):\n meta.append( metadata_list[meta_idx][i] )\n\n results.append( [qid, docids[i], rank ] + meta + [ scores[i], feats_array] )\n rank += 1\n\n res_dt = pd.DataFrame(results, columns=[\"qid\", \"docid\", \"rank\", \"docno\", \"score\", \"features\"])\n return 
res_dt\n\n def __repr__(self):\n return \"FBR(\" + \",\".join([\n self.indexref.toString(),\n str(self.features),\n str(self.controls),\n str(self.properties)\n ]) + \")\"\n\n def __str__(self):\n return \"FBR(\" + self.controls[\"wmodel\"] + \" and \" + str(len(self.features)) + \" features)\"\n","sub_path":"pyterrier/batchretrieve.py","file_name":"batchretrieve.py","file_ext":"py","file_size_in_byte":19215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"9359299","text":"import wx\n#from Model import Model\nfrom StandardValsPanel import StandardValsPanel\nfrom ChannelValsPanel import ChannelValsPanel\nfrom AxisValsPanel import AxisValsPanel\nfrom RangeValsPanel import RangeValsPanel\nfrom OkCancelPanel import OkCancelPanel \nfrom NumberTextControl import NumberTextControl\nfrom Components.SettingsHelpers import *\n\nclass SpotChartDialog(wx.Dialog):\n\n def __init__(self, parent, title, model, component=None):\n super(SpotChartDialog, self).__init__(parent=parent, \n title=title)\n \n self.model = model\n self.component = component\n\n if component ==None:\n self.settings={}\n else:\n self.settings=component.getSettings()\n \n if not component==None:\n self.componentID = component.getID()\n else:\n self.componentID = None\n \n innerMinimum=getFloatSetting(self.settings,\"innerMinimum\",0.5)\n innerMaximum=getFloatSetting(self.settings,\"innerMaximum\",0.9)\n outerMinimum=getFloatSetting(self.settings,\"outerMinimum\",0.9)\n \n self.panel = StandardValsPanel(self,\"Spot Chart\",component)\n self.panel2 = ChannelValsPanel(self,component=component,oneOnly=True) \n self.panel3 = AxisValsPanel(self,component=component)\n self.panel4 = RangeValsPanel(self,component=component)\n self.panel5= wx.Panel(self)\n\n sb = wx.StaticBox(self.panel5, label='Spot Size')\n vbox = wx.StaticBoxSizer(sb,orient=wx.VERTICAL) \n\n hbox = wx.BoxSizer(wx.HORIZONTAL) \n self.spotInnerMinimum = NumberTextControl(self.panel5, value=str(innerMinimum),allowDecimal=True,allowNegative=False)\n self.spotInnerMaximum = NumberTextControl(self.panel5, value=str(innerMaximum),allowDecimal=True,allowNegative=False)\n\n hbox.Add(wx.StaticText(self.panel5,-1, \"Inner Size - Min:\"),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)\n hbox.Add(self.spotInnerMinimum)\n hbox.Add(wx.StaticText(self.panel5,-1, \" Max:\"),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)\n hbox.Add(self.spotInnerMaximum)\n \n vbox.Add(hbox,flag=wx.ALL,border=1)\n\n hbox = 
wx.BoxSizer(wx.HORIZONTAL) \n self.spotOuterMinimum = NumberTextControl(self.panel5, value=str(outerMinimum),allowDecimal=True,allowNegative=False)\n hbox.Add(wx.StaticText(self.panel5,-1, \" Outer size - Min:\"),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)\n hbox.Add(self.spotOuterMinimum)\n hbox.Add(wx.StaticText(self.panel5,-1, \" Max: Always 1.0 \"),flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)\n vbox.Add(hbox,flag=wx.ALL|wx.EXPAND,border=1)\n\n self.panel5.SetSizer(vbox)\n\n \n \n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add(self.panel, proportion=0.75, flag=wx.ALL|wx.EXPAND, border=5)\n vbox.Add(self.panel2, proportion=0.75, flag=wx.ALL|wx.EXPAND, border=5)\n vbox.Add(self.panel3, proportion=0.75, flag=wx.ALL|wx.EXPAND, border=5)\n vbox.Add(self.panel4, proportion=0.75, flag=wx.ALL|wx.EXPAND, border=5)\n vbox.Add(self.panel5, proportion=0.75, flag=wx.ALL|wx.EXPAND, border=5)\n self.okCancel=OkCancelPanel(self,self.OnOK,self.OnClose)\n vbox.Add(self.okCancel, flag= wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=10)\n\n self.SetSizerAndFit(vbox)\n \n def killConsumer(self,host,port):\n c = self.model.getConsumer(host,port)\n if not c==None:\n self.model.removeConsumer(c)\n \n \n def OnClose(self, e): \n self.Destroy()\n \n def OnOK(self, e):\n if len(self.panel2.getHostList())==0:\n mdlg = wx.MessageDialog(self, 'No Channels Selected', 'Warning', wx.OK|wx.ICON_WARNING)\n mdlg.ShowModal()\n mdlg.Destroy()\n return\n self.panel.saveSettings(self.settings)\n self.panel2.saveSettings(self.settings)\n self.panel3.saveSettings(self.settings)\n self.panel4.saveSettings(self.settings)\n self.settings[\"outerMinimum\"]=self.spotOuterMinimum.GetValue()\n self.settings[\"innerMinimum\"]=self.spotInnerMinimum.GetValue()\n self.settings[\"innerMaximum\"]=self.spotInnerMaximum.GetValue()\n\n self.model.addComponent(\"SpotChart\",self.settings,self.componentID)\n \n if not self.panel2.dlg == None:\n self.panel2.dlg.Destroy()\n if not self.panel3.dlg == None:\n 
self.panel3.dlg.Destroy()\n self.Destroy()\n \n \n \n \n","sub_path":"Vicarious/Vicarious/Application/Components/Dialogs/SpotChartDialog.py","file_name":"SpotChartDialog.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"387542867","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Save data statistics and get pandas boxplot\ndata_path = r'C:\\Users\\GRBH.COWI.001\\Desktop\\desktop projects\\7 Siasconset'\ndata = pd.read_excel(data_path + r'\\data_pd_friendly.xlsx')\ndata = data.set_index(keys=[data.columns[0], data.index])\ndata = data.transpose()\ndata_stats = data.describe(percentiles=[0.25, 0.50, 0.75])\ndata_stats.to_excel(data_path + r'\\data_statistics.xlsx')\ndata_stats.boxplot()\n\n# Plot boxplots\ndata = pd.read_excel(data_path + r'\\data_pd_friendly.xlsx')\ndel data[data.columns[0]]\nvalues = [x[~np.isnan(x)] for x in data.values]\nlabels = np.array(data.index)\n\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nwith plt.style.context('bmh'):\n fig = plt.figure(1, figsize=(18, 8))\n ax = fig.add_subplot(1, 1, 1)\n bp = plt.boxplot(\n values,\n patch_artist=True,\n whis=1.5,\n boxprops=dict(color='#009cde', linewidth=2, facecolor='#d1d9dd'),\n whiskerprops=dict(color='#435a69', linewidth=1, linestyle='dashed'),\n capprops=dict(color='#435a69', linewidth=1),\n medianprops=dict(color='#F04E23', linewidth=2),\n flierprops=dict(marker='o', markerfacecolor='#435a69', markeredgecolor='#435a69', alpha=0.5)\n )\n ax.set_xticklabels(labels=labels, rotation=90)\n ax.set_xlabel('Profile')\n ax.set_ylabel('Volume change [$yd^3/ft$]')\n fig.savefig(data_path + r'\\Boxplot', bbox_inches='tight')\n","sub_path":"Archive/Siasconset/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"261402703","text":"# -*- coding:utf-8 -*-\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\ndef show_images(images): # 定义画图工具\n images = np.reshape(images, [images.shape[0], -1])\n sqrtn = int(np.ceil(np.sqrt(images.shape[0])))\n sqrtimg = int(np.ceil(np.sqrt(images.shape[1])))\n\n fig = plt.figure(figsize=(sqrtn, sqrtn))\n gs = gridspec.GridSpec(sqrtn, sqrtn)\n gs.update(wspace=0.05, hspace=0.05)\n\n for i, img in enumerate(images):\n ax = plt.subplot(gs[i])\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n img = img.reshape((28, 28, 3))\n #img = img.transpose(1, 2, 0)\n plt.imshow(img.astype(np.uint8))\n return\n\ndef preprocess(img):\n # crop face area\n img_shape = tf.shape(img)\n im_h = img_shape[0]\n im_w = img_shape[1]\n face_width = face_height = 108\n i = (im_h - face_height) // 2\n j = (im_w - face_width) // 2\n crop = img[i: i + face_height, j: j + face_width]\n \n # resize\n resize = tf.image.resize_images(crop, (28, 28))\n \n # normalize to [0, 1]\n normalize = resize / 255\n \n # normalize to [-1, 1]\n preprocessd = (normalize - 0.5) / 0.5\n \n return preprocessd\n \ndef deprocess(img):\n img = (img + 1.0) / 2.0 * 255\n return tf.clip_by_value(img, 0, 255)\n\ndef read(imgs_folder, batch_size=1, shuffle=True, epoch=None):\n def _read_name(name):\n img_content = tf.read_file(name)\n img_decoded = tf.image.decode_jpeg(img_content, channels=3)\n img_float = tf.cast(img_decoded, tf.float32)\n \n return img_float\n \n imgs_name = [os.path.join(imgs_folder, name) for name in os.listdir(imgs_folder)]\n \n imgs_name_dataset = tf.data.Dataset.from_tensor_slices(imgs_name)\n \n imgs_dataset = imgs_name_dataset.map(_read_name)\n \n imgs_dataset = imgs_dataset.map(preprocess)\n \n if epoch is not None:\n imgs_dataset = imgs_dataset.repeat(epoch)\n \n if shuffle:\n imgs_dataset = imgs_dataset.shuffle(100)\n \n imgs_dataset = 
imgs_dataset.batch(batch_size)\n \n iterator = imgs_dataset.make_one_shot_iterator()\n \n imgs = iterator.get_next()\n \n return imgs\n","sub_path":"tensorflow-proj/生成对抗网络GAN/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"620077686","text":"\"\"\"\r\n合并数据\r\n\"\"\"\r\nimport os\r\nimport csv\r\nimport xlwt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\ndef make_data(all_data):\r\n table_head = ['申请号','名称','主分类号','分类号','申请(专利权)人',\t'发明(设计)人','公开(公告)日'\t,'公开(公告)号',\t'专利代理机构','代理人','申请日','地址','摘要'\t,'国省代码']\r\n workbook = xlwt.Workbook(encoding = 'utf-8')\r\n xlsheet = workbook.add_sheet(\"excel写入练习\",cell_overwrite_ok=True)\r\n # 写表头\r\n headlen = len(table_head)\r\n for i in range(headlen):\r\n xlsheet.write(0, i, table_head[i])\r\n # 获取有多少条数据\r\n all_data_num = len(all_data)\r\n for row in range(all_data_num):\r\n for col in range(14):\r\n xlsheet.write(row+1,col,all_data[row][col])\r\n\r\n workbook.save('D:/挖掘/实验一:数据及数据预处理/电子信息产业原始数据/7电子计算机数据处理及应用/六年数据_1.xls')\r\n\r\n\r\n\r\ntable_info_1 = pd.read_excel('D:/挖掘/实验一:数据及数据预处理/电子信息产业原始数据/7电子计算机数据处理及应用/2009_1.xls')\r\ntable_info_2 = pd.read_excel('D:/挖掘/实验一:数据及数据预处理/电子信息产业原始数据/7电子计算机数据处理及应用/2010_1.xls')\r\ntable_info_3 = pd.read_excel('D:/挖掘/实验一:数据及数据预处理/电子信息产业原始数据/7电子计算机数据处理及应用/2011_1.xls')\r\ntable_info_4 = pd.read_excel('D:/挖掘/实验一:数据及数据预处理/电子信息产业原始数据/7电子计算机数据处理及应用/2012_1.xls')\r\ntable_info_5 = pd.read_excel('D:/挖掘/实验一:数据及数据预处理/电子信息产业原始数据/7电子计算机数据处理及应用/2013_1.xls')\r\ntable_info_6 = pd.read_excel('D:/挖掘/实验一:数据及数据预处理/电子信息产业原始数据/7电子计算机数据处理及应用/2014_1.xls')\r\n\r\ntable_info = pd.concat((table_info_1,table_info_2,table_info_3,table_info_4,table_info_5,table_info_6))\r\ntable_info_x = np.array(table_info)\r\nall_data = table_info_x.tolist()\r\nmake_data(all_data)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# 写入数据\r\n# workbook = xlsxwriter.Workbook(target_xls) # 创建了一个名字叫做3.xlsx , Excel表格文件\r\n# worksheet = workbook.add_worksheet() # 建立sheet,\r\n# font = workbook.add_format({\"font_size\": 14}) # 表格中值(字体)的大小\r\n# for i in range(len(data)): # 从data列表中读取数据\r\n# for j in range(len(data[i])):\r\n# worksheet.write(i, j, data[i][j], font)\r\n# # 关闭文件流\r\n# workbook.close()\r\n\r\n\r\n# table_info = pd.read_excel(source_path)\r\n# 
table_info_1 = pd.read_excel(source_path_1)","sub_path":"ch01max.py","file_name":"ch01max.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"397488838","text":"from model.model import Model\nfrom view.view import View\nfrom datetime import date\n\nclass Controller:\n \"\"\"\n *******************************\n * A controller for a store DB *\n *******************************\n \"\"\"\n\n def __init__(self):\n self.model = Model()\n self.view = View()\n \n def start(self):\n self.view.start()\n self.main_menu()\n \n \"\"\"\n ***********************\n * General controllers *\n ***********************\n \"\"\" \n\n def main_menu(self):\n o = '0'\n while o != '6':\n self.view.main_menu()\n self.view.option('6')\n o = input()\n if o == '1':\n self.dir_menu()\n elif o == '2':\n self.autor_menu()\n elif o == '3':\n self.libro_menu()\n elif o == '4':\n self.usuarios_menu()\n elif o == '5':\n self.prestamo_menu()\n elif o == '6':\n self.view.end()\n else:\n self.view.not_valid_option()\n return\n \n def update_lists(self, fs , vs):\n fields = []\n vals = []\n for f,v in zip(fs,vs):\n if v != '':\n fields.append(f+' = %s')\n vals.append(v)\n return fields, vals\n \n \"\"\"\n ********************\n * General for dir's *\n ********************\n \"\"\"\n\n def dir_menu(self):\n o = '0'\n while o != '13':\n self.view.dir_menu()\n self.view.option('13')\n o = input()\n if o == '1':\n self.create_zip()\n elif o == '2':\n self.read_zip() \n elif o == '3':\n self.read_all_zips()\n elif o == '4':\n self.read_zips_city()\n elif o == '5':\n self.update_zip()\n elif o == '6':\n self.delete_zip()\n ############ DIRECCIONES ############3\n elif o == '7':\n self.create_dir()\n elif o == '8':\n self.read_a_dir()\n elif o == '9':\n self.read_all_dirs()\n elif o == '10':\n self.read_dirs_cp()\n elif o == '11':\n self.update_dir()\n elif o == '12':\n self.delete_dir()\n elif o == '13':\n return\n else:\n self.view.not_valid_option()\n return\n \n def ask_zip(self):\n self.view.ask('Ciudad: ')\n city = input()\n self.view.ask('Estado: ')\n state = input()\n return [city,state] \n \n def create_zip(self):\n 
self.view.ask('CP: ')\n cp = input()\n ciudad, estado = self.ask_zip()\n out = self.model.create_zip(cp,ciudad,estado)\n if out == True:\n self.view.ok(cp, 'agrego')\n else:\n if out.errno == 1062:\n self.view.error('El CP esta repetido')\n else:\n self.view.error('No se pudo agregar el CP')\n return\n \n def read_zip(self):\n self.view.ask('CP: ')\n i_cp = input()\n cp = self.model.read_zip(i_cp)\n if type(cp) == tuple:\n self.view.show_zip_header('Datos del CP '+i_cp+' ')\n self.view.show_a_zip(cp)\n self.view.show_zip_midder()\n self.view.show_zip_footer()\n else:\n if cp == None:\n self.view.error('El CP no existe')\n else:\n self.view.error('Hay un problema al leer el CP')\n return\n \n def read_all_zips(self):\n cps = self.model.read_all_zips()\n if type(cps) == list:\n self.view.show_zip_header(' Todos los CPs ')\n for cp in cps:\n self.view.show_a_zip(cp)\n self.view.show_zip_midder()\n self.view.show_zip_footer()\n else:\n self.view.error('Hay un problema al leer los CPs ')\n \n \n def read_zips_city(self):\n self.view.ask('Ciudad: ')\n ciudad = input()\n cps = self.model.read_zips_city(ciudad)\n if type(cps) == list:\n self.view.show_zip_header('CP para la ciudad de '+ciudad+' ')\n for cp in cps:\n self.view.show_a_zip(cp)\n self.view.show_zip_midder()\n self.view.show_zip_footer()\n else:\n self.view.error('Hay un problema al leer los CPs ')\n return\n \n\n def update_zip(self):\n self.view.ask('CP a modificar: ')\n i_cp = input()\n cp = self.model.read_zip(i_cp)\n if type(cp) == tuple:\n self.view.show_zip_header(' Datos del CP '+i_cp+' ')\n self.view.show_a_zip(cp)\n self.view.show_zip_midder()\n self.view.show_zip_footer()\n else:\n if cp == None:\n self.view.error('El CP no existe')\n else:\n self.view.error('Hay un problema al leer el CP')\n return\n self.view.msg(' Ingresa los valores a modificar (vacio para dejarlo igual): ')\n whole_vals =self.ask_zip()\n fields, vals = self.update_lists(['ciudad','estado'], whole_vals)\n vals.append(i_cp)\n vals 
= tuple(vals)\n out = self.model.update_zip(fields,vals)\n if out == True:\n self.view.ok(i_cp, 'actualizo')\n else: \n self.view.error('No se pudo actualizar el CP')\n return\n\n def delete_zip(self):\n self.view.ask('CP a borrar: ')\n i_cp = input()\n count = self.model.delete_zip(i_cp)\n if count != 0:\n self.view.ok(i_cp, 'borro')\n else:\n if count == 0:\n self.view.error('El CP no exite')\n else:\n self.view.error('Problema al borrar el CP')\n return\n\n \"\"\" \n ***************************\n * Controllers for dir's *\n ***************************\n \"\"\"\n \n def ask_dir(self):\n self.view.ask('Calle: ')\n calle = input()\n self.view.ask('Colonia: ')\n colonia = input()\n self.view.ask('Num Ext: ')\n n_ext = input()\n self.view.ask('Num Int: ')\n n_int = input()\n self.view.ask('CP: ')\n cp = input()\n return [calle, colonia, n_ext, n_int, cp] \n\n \n def create_dir(self):\n calle, colonia, n_ext, n_int, cp = self.ask_dir()\n out = self.model.create_dir(calle, colonia, n_ext, n_int, cp)\n if out == True:\n self.view.ok(calle+' #'+n_ext +' Colonia: '+ colonia+' ',' agrego')\n else:\n self.view.error('No se pudo agregar el producto')\n return\n \n def read_a_dir(self):\n self.view.ask('ID Dir: ')\n i_dir = input()\n dir = self.model.read_dir(i_dir)\n if type(dir) == tuple:\n self.view.show_dir_header('Datos de la dirección '+i_dir+' ')\n self.view.show_a_dir(dir)\n self.view.show_dir_midder()\n self.view.show_dir_footer()\n else:\n if dir == None:\n self.view.error('El ID de la dirección no existe')\n else:\n self.view.error('Hay un problema al leer la dirección')\n return\n\n\n def read_all_dirs(self):\n dirs = self.model.read_all_dir()\n if type(dirs) == list:\n self.view.show_dir_header(' Todas las direcciones ')\n for dir in dirs:\n self.view.show_a_dir(dir)\n self.view.show_dir_midder()\n self.view.show_dir_footer()\n else:\n self.view.error('Hay un problema al leer las direcciones ')\n \n\n def read_dirs_cp(self):\n self.view.ask('Cp: ')\n cp = 
input()\n dirs = self.model.read_dir_cps(cp)\n if type(dirs) == list:\n self.view.show_dir_header('Ciudades con el código postal: '+cp+' ')\n for dir in dirs:\n self.view.show_a_dir(dir)\n self.view.show_dir_midder()\n self.view.show_dir_footer()\n else:\n self.view.error('Hay un problema al leer las direcciones con ese CP ')\n return\n\n \n def update_dir(self):\n self.view.ask('Dirección a modificar: ')\n i_dir = input()\n dir = self.model.read_dir(i_dir)\n if type(dir) == tuple:\n self.view.show_dir_header(' Datos de la dirección '+i_dir+' ')\n self.view.show_a_dir(dir)\n self.view.show_dir_midder()\n self.view.show_dir_footer()\n else:\n if dir == None:\n self.view.error('La id con esa dirección no existe')\n else:\n self.view.error('Hay un problema al leer esa dirección')\n return\n self.view.msg(' Ingresa los valores a modificar (vacio para dejarlo igual): ')\n whole_vals =self.ask_dir()\n fields, vals = self.update_lists(['d_calle','d_col','d_numExt', 'd_numInt', 'd_cp'], whole_vals)\n vals.append(i_dir)\n vals = tuple(vals)\n out = self.model.update_dir(fields,vals)\n if out == True:\n self.view.ok(i_dir, 'actualizo')\n else: \n self.view.error('No se pudo actualizar la dirección')\n return\n\n def delete_dir(self):\n self.view.ask('Dirección a borrar: ')\n i_dir = input()\n count = self.model.delete_dir(i_dir)\n if count != 0:\n self.view.ok(i_dir, 'borro')\n else:\n if count == 0:\n self.view.error('La dirección no existe')\n else:\n self.view.error('Problema al borrar la dirección')\n return\n\n \"\"\" \n ***************************\n * Controllers for author's *\n ***************************\n \"\"\"\n\n\n def autor_menu(self):\n o = '0'\n while o != '7':\n self.view.autor_menu()\n self.view.option('7')\n o = input()\n if o == '1':\n self.create_autor()\n elif o == '2':\n self.read_a_autor()\n elif o == '3':\n self.read_all_autors()\n elif o == '4':\n self.read_author_ap()\n elif o == '5':\n self.update_autor()\n elif o == '6':\n self.delete_autor()\n 
elif o == '7':\n return\n else:\n self.view.not_valid_option()\n return\n \n\n def ask_autor(self):\n self.view.ask('Nombre: ')\n a_nombre = input()\n self.view.ask('Apellido paterno: ')\n a_apellidoPat = input()\n self.view.ask('Apellido Materno: ')\n a_apellidoMat = input()\n return(a_nombre,a_apellidoPat,a_apellidoMat)\n \n def create_autor(self):\n a_nombre,a_apellidoPat,a_apellidoMat = self.ask_autor()\n out = self.model.create_autor(a_nombre,a_apellidoPat,a_apellidoMat)\n if out == True:\n self.view.ok(a_nombre+' '+a_apellidoPat+' '+a_apellidoMat, 'agrego')\n else:\n self.view.error('No se pudo agregar el autor')\n return\n \n def read_a_autor(self):\n self.view.ask('ID autor: ') \n id_autor = input()\n autor = self.model.read_autor(id_autor)\n if type(autor) == tuple:\n self.view.show_autor_header('Datos del autor '+id_autor+' ')\n self.view.show_a_autor(autor)\n self.view.show_autor_midder()\n self.view.show_autor_footer()\n else:\n if autor == None:\n self.view.error('El autor no existe')\n else:\n self.view.error('Hay un problema al leer el autor')\n return\n \n def read_all_autors(self):\n autors = self.model.read_all_autors()\n if type(autors) == list:\n self.view.show_autor_header(' Todos los autores ')\n for autor in autors:\n self.view.show_a_autor(autor)\n self.view.show_autor_midder()\n self.view.show_autor_footer()\n else:\n self.view.error('Problema al leer los autores')\n return\n \n\n def read_author_ap(self):\n self.view.ask('Apellido Paterno: ')\n ap = input()\n autores = self.model.read_author_ap(ap)\n if type(autores) == list:\n self.view.show_autor_header('Autores con el apellido paterno: '+ap+' ')\n for autor in autores:\n self.view.show_a_autor(autor)\n self.view.show_autor_midder()\n self.view.show_autor_footer()\n else:\n self.view.error('Problema al leer los autores')\n return\n \n\n def update_autor(self):\n self.view.ask('ID del autor a modificar: ')\n id_autor = input()\n autor = self.model.read_autor(id_autor)\n if type(autor) == 
tuple:\n self.view.show_autor_header(' Datos del autor '+id_autor+ ' ')\n self.view.show_a_autor(autor)\n self.view.show_autor_midder()\n self.view.show_autor_footer()\n else:\n if autor == None:\n self.view.error('El autor no existe')\n else:\n self.view.error('Problema al leer el autor')\n self.view.msg('Ingresa los valores a modificar (vacio para dejarlo igual): ')\n whole_vals = self.ask_autor()\n fields, vals = self.update_lists(['a_nombre','a_apellidoPat','a_apellidoMat'], whole_vals)\n vals.append(id_autor)\n vals = tuple(vals)\n out = self.model.update_autor(fields,vals)\n if out == True:\n self.view.ok(id_autor, 'actualizo')\n else:\n self.view.error('Error no se pudo actualizar el autor')\n return\n \n def delete_autor(self):\n self.view.ask('ID del autor a borrar: ')\n id_autor = input()\n count = self.model.delete_autor(id_autor)\n if count != 0:\n self.view.ok(id_autor, 'Borro')\n else:\n if count == 0:\n self.view.error('El autor no exite')\n else:\n self.view.error('Prblema al borrar el autor')\n return\n \n \"\"\" \n ***************************\n * Controllers for libros *\n ***************************\n \"\"\"\n\n def libro_menu(self):\n o = '0'\n while o != '7':\n self.view.libro_menu()\n self.view.option('7')\n o = input()\n if o == '1':\n self.create_libro()\n elif o == '2':\n self.read_a_libro()\n elif o == '3':\n self.read_all_libros()\n elif o == '4':\n self.read_libro_nombre()\n elif o == '5':\n self.update_libro()\n elif o == '6':\n self.delete_libro()\n elif o == '7':\n return\n else:\n self.view.not_valid_option()\n return\n \n def ask_libro(self):\n self.view.ask('Nombre: ')\n l_nombre = input()\n self.view.ask('Cantidad: ')\n l_cantidad = input()\n self.view.ask('Edición: ')\n l_edicion = input()\n self.view.ask('Autor: ')\n l_id_autor = input()\n return [l_nombre, l_cantidad,l_edicion,l_id_autor]\n \n def create_libro(self):\n l_nombre, l_cantidad, l_edicion, l_id_autor = self.ask_libro()\n out = self.model.create_libro(l_nombre, 
l_cantidad, l_edicion, l_id_autor)\n if out == True:\n self.view.ok(l_nombre+' '+' Edición: '+l_edicion,'agrego')\n else:\n self.view.error('No se pudo agregar el libro')\n return\n \n def read_a_libro(self):\n self.view.ask('ID libro: ')\n id_libro = input()\n libro = self.model.read_book(id_libro)\n if type(libro) == tuple:\n self.view.show_libro_header('Datos del libro '+id_libro+' ')\n self.view.show_a_libro(libro)\n self.view.show_libro_midder()\n self.view.show_libro_footer()\n else:\n if libro == None:\n self.view.error('El libro no existe')\n else:\n self.view.error('Hay un problema al leer el libro')\n return\n \n def read_all_libros(self):\n libros = self.model.read_all_books()\n if type(libros) == list:\n self.view.show_libro_header(' Todos los libros ')\n for libro in libros:\n self.view.show_a_libro(libro)\n self.view.show_libro_midder()\n self.view.show_libro_footer()\n else:\n self.view.error('Hay un problema al leer los libros ')\n \n \n def read_libro_nombre(self):\n self.view.ask('Nombre: ')\n nombre = input()\n libros = self.model.read_book_name(nombre)\n if type(libros) == list:\n self.view.show_libro_header('Libros con el nombre '+nombre+' ')\n for libro in libros:\n self.view.show_a_libro(libro)\n self.view.show_libro_midder()\n self.view.show_libro_footer()\n else:\n self.view.error('Problema al leer los libros')\n return\n \n def update_libro(self):\n self.view.ask('Libro a modificar: ')\n id_libro = input()\n libro = self.model.read_book(id_libro)\n if type(libro) == tuple:\n self.view.show_libro_header(' Datos del libro '+id_libro+ ' ')\n self.view.show_a_libro(libro)\n self.view.show_libro_midder()\n self.view.show_libro_footer()\n else:\n if libro == None:\n self.view.error('El libro no existe')\n else:\n self.view.error('Problema al leer el libro')\n self.view.msg('Ingresa los valores a modificar (vacio para dejarlo igual): ')\n whole_vals = self.ask_libro()\n fields, vals = 
self.update_lists(['l_nombre','l_cantidad','l_edicion','l_id_autor'], whole_vals)\n vals.append(id_libro)\n vals = tuple(vals)\n out = self.model.update_book(fields,vals)\n if out == True:\n self.view.ok(id_libro, 'actualizo')\n else:\n self.view.error('Error no se pudo actualizar el libro')\n return\n \n def delete_libro(self):\n self.view.ask('ID de libro a borrar: ')\n id_libro = input()\n count = self.model.delete_book(id_libro)\n if count != 0:\n self.view.ok(id_libro, 'Borro')\n else:\n if count == 0:\n self.view.error('El libro no exite')\n else:\n self.view.error('Prblema al borrar el libro')\n return\n \n \"\"\" \n *****************************\n * Controllers for usuarios *\n *****************************\n \"\"\"\n\n def usuarios_menu(self):\n o = '0'\n while o != '7':\n self.view.usuarios_menu()\n self.view.option('7')\n o = input()\n if o == '1':\n self.create_user()\n elif o == '2':\n self.read_a_user()\n elif o == '3':\n self.read_all_users()\n elif o == '4':\n self.read_user_ap()\n elif o == '5':\n self.update_user()\n elif o == '6':\n self.delete_user()\n elif o == '7':\n return\n else:\n self.view.not_valid_option()\n return\n \n def ask_user(self):\n self.view.ask('Nombre: ')\n name = input()\n self.view.ask('Apellido paterno: ')\n sname1 = input()\n self.view.ask('Apellido Materno: ')\n sname2 = input()\n self.view.ask('Telefono: ')\n telefono = input()\n self.view.ask('Correo: ')\n correo = input() \n self.view.ask('ID de la dirección: ')\n id_dir = input() \n self.view.ask('CP: ')\n cp = input() \n return(name,sname1,sname2,telefono,correo,id_dir,cp)\n \n\n def create_user(self):\n name,sname1,sname2,telefono,correo,id_dir,cp = self.ask_user()\n out = self.model.create_user(name,sname1,sname2,telefono,correo,id_dir,cp)\n if out == True:\n self.view.ok(name+' '+sname1+' '+sname2, 'agrego')\n else:\n self.view.error('No se pudo agregar el usuario')\n return\n \n def read_a_user(self):\n self.view.ask('ID usuario: ') \n id_usuario = input()\n 
usuario = self.model.read_user(id_usuario)\n if type(usuario) == tuple:\n self.view.show_user_header('Datos del cliente '+id_usuario+' ')\n self.view.show_a_user(usuario)\n self.view.show_user_midder()\n self.view.show_user_footer()\n else:\n if usuario == None:\n self.view.error('El usuario no existe')\n else:\n self.view.error('Hay un problema al leer el usuario')\n return\n \n\n def read_all_users(self):\n users = self.model.read_all_users()\n if type(users) == list:\n self.view.show_user_header(' Todos los usuarios ')\n for user in users:\n self.view.show_a_user(user)\n self.view.show_user_midder()\n self.view.show_user_footer()\n else:\n self.view.error('Problema al leer los usuarios')\n return\n \n def read_user_ap(self):\n self.view.ask('Apellido paterno: ')\n ap = input()\n users = self.model.read_user_ap(ap)\n if type(users) == list:\n self.view.show_user_header('Usuarios con el apellido paterno '+ap+' ')\n for user in users:\n self.view.show_a_user(user)\n self.view.show_user_midder()\n self.view.show_user_footer()\n else:\n self.view.error('Problema al leer los usuarios')\n return\n \n def update_user(self):\n self.view.ask('ID del usuario a modificar: ')\n id_user = input()\n user = self.model.read_user(id_user)\n if type(user) == tuple:\n self.view.show_user_header(' Datos del usuario '+id_user+ ' ')\n self.view.show_a_user(user)\n self.view.show_user_midder()\n self.view.show_user_footer()\n else:\n if user == None:\n self.view.error('El usuario no existe')\n else:\n self.view.error('Problema al leer el usuario')\n self.view.msg('Ingresa los valores a modificar (vacio para dejarlo igual): ')\n whole_vals = self.ask_user()\n fields, vals = self.update_lists(['u_nombre','u_apellidopat','u_apellidomat','u_tel','correo','u_id_dir','u_cp'], whole_vals)\n vals.append(id_user)\n vals = tuple(vals)\n out = self.model.update_user(fields,vals)\n if out == True:\n self.view.ok(id_user, 'actualizo')\n else:\n self.view.error('Error no se pudo actualizar el 
usuario')\n return\n \n def delete_user(self):\n self.view.ask('ID del usuario a borrar: ')\n id_user = input()\n count = self.model.delete_user(id_user)\n if count != 0:\n self.view.ok(id_user, 'Borro')\n else:\n if count == 0:\n self.view.error('El usuario no exite')\n else:\n self.view.error('Prblema al borrar el usuario')\n return\n \n \"\"\" \n ****************************\n * Controllers for prestamo *\n ****************************\n \"\"\" \n\n def prestamo_menu(self):\n o = '0'\n while o != '14':\n self.view.prestamo_menu()\n self.view.option('14')\n o = input()\n if o == '1':\n self.create_prestamo()\n elif o == '2':\n self.read_prestamo()\n elif o == '3':\n self.read_all_prestamos()\n elif o == '4':\n self.read_prestamo_user()\n elif o == '5':\n self.update_prestamo()\n elif o == '6':\n self.delete_prestamo()\n elif o == '7':\n self.create_details_prestamo()\n elif o == '8':\n self.add_prestamo_details()\n elif o == '9':\n self.read_detalle_prestamo()\n elif o == '10':\n self.read_all_details_prestamos()\n elif o == '11':\n self.read_details_prestamo_book()\n elif o == '12':\n self.update_details_prestamo()\n elif o == '13':\n self.delete_details_prestamo()\n \n elif o == '14':\n return\n else:\n self.view.not_valid_option()\n return\n \n def ask_prestamo(self):\n self.view.ask('ID usuario: ')\n id_user = input()\n self.view.ask('Fecha de devolución: ')\n d_date = input()\n self.view.ask('Adeudo: ')\n adeudo = input()\n return('',d_date,adeudo,id_user)\n\n\n\n def create_prestamo(self):\n self.view.ask('ID usuario: ')\n id_user = input()\n self.view.ask('Fecha de devolución: ')\n d_date = input()\n p_adeudo = 0.0\n today = date.today()\n p_date = today.strftime('%y-%m-%d')\n id_prestamo = self.model.create_prestamo(p_date, d_date, p_adeudo, id_user)\n if type(id_prestamo) == int:\n self.view.ok(id_prestamo,'agrego')\n prestamo = self.model.read_prestamo(id_prestamo)\n self.view.show_a_prestamo(prestamo)\n return prestamo\n else:\n self.view.error('No se 
pudo crear el prestamo')\n return\n\n \n def read_prestamo(self):\n self.view.ask('ID prestamo: ') \n id_prestamo = input()\n prestamo = self.model.read_prestamo(id_prestamo)\n if type(prestamo) == tuple:\n self.view.show_prestamo_header('Datos del prestamo '+id_prestamo+' ')\n self.view.show_a_prestamo(prestamo)\n self.view.show_prestamo_adeudo(prestamo)\n self.view.show_prestamo_midder()\n self.view.show_prestamo_footer()\n else:\n if prestamo == None:\n self.view.error('El prestamo no existe')\n else:\n self.view.error('Hay un problema al leer el prestamo')\n return\n \n def read_all_prestamos(self):\n prestamos = self.model.read_all_prestamos()\n if type(prestamos) == list:\n self.view.show_prestamo_header(' Todos los prestamos ')\n for prestamo in prestamos:\n self.view.show_a_prestamo(prestamo)\n self.view.show_prestamo_midder()\n self.view.show_prestamo_footer()\n else:\n self.view.error('Problema al leer los prestamos')\n return\n \n def read_prestamo_user(self):\n self.view.ask('ID del usuario: ')\n id_user = input()\n prestamos = self.model.read_prestamo_user(id_user)\n if type(prestamos) == list:\n self.view.show_prestamo_header('Prestamos del usario '+id_user+' ')\n for prestamo in prestamos:\n self.view.show_a_prestamo(prestamo)\n self.view.show_prestamo_midder()\n self.view.show_prestamo_footer()\n else:\n self.view.error('Problema al leer los prestamos')\n return\n \n\n def update_prestamo(self):\n self.view.ask('ID del prestamo a modificar: ')\n id_prestamo = input()\n prestamo = self.model.read_prestamo(id_prestamo)\n if type(prestamo) == tuple:\n self.view.show_prestamo_header(' Datos del prestamo '+id_prestamo+ ' ')\n self.view.show_a_prestamo(prestamo)\n self.view.show_prestamo_midder()\n self.view.show_prestamo_footer()\n else:\n if prestamo == None:\n self.view.error('El prestamo no existe')\n else:\n self.view.error('Problema al leer el prestamo')\n self.view.msg('Ingresa los valores a modificar (vacio para dejarlo igual): ')\n whole_vals = 
self.ask_prestamo()\n fields, vals = self.update_lists(['pF_prestamo','pF_devolu','p_adeudo','p_id_usuario'], whole_vals)\n vals.append(id_prestamo)\n vals = tuple(vals)\n out = self.model.update_prestamo(fields,vals)\n if out == True:\n self.view.ok(id_prestamo, 'actualizo')\n else:\n self.view.error('Error no se pudo actualizar el prestamo')\n return\n \n def delete_prestamo(self):\n self.view.ask('ID del prestamo a borrar: ')\n id_prestamo = input()\n count = self.model.delete_prestamo(id_prestamo)\n if count != 0:\n self.view.ok(id_prestamo, 'Borro')\n else:\n if count == 0:\n self.view.error('El prestamo no exite')\n else:\n self.view.error('Problema al borrar el prestamo')\n return\n\n \"\"\" \n ************************************\n * Controllers for prestamo details *\n ************************************\n \"\"\" \n def ask_dprestamo(self):\n self.view.ask('ID del libro: ')\n did_libro = input()\n self.view.ask('Cantidad: ')\n d_cantidad = input()\n self.view.ask('Adeudo total: ')\n d_adeudoTotal = input()\n return(did_libro, d_cantidad, d_adeudoTotal)\n \n\n\n def create_details_prestamo(self):\n pd_total = 0.0\n self.view.ask('ID libro: ')\n id_libro = input()\n self.view.ask('ID prestamo: ')\n id_prestamo = input()\n if id_libro != '':\n libro = self.model.read_book(id_libro)\n if type(libro) == tuple:\n self.view.show_libro_header(' Datos del libro '+id_libro+' ')\n self.view.show_a_libro(libro)\n self.view.show_libro_footer()\n self.view.ask('Cantidad: ')\n pd_amount = int(input())\n self.view.ask('Dias de prestamo: ')\n pd_dias = int(input())\n\n pd_total = pd_amount*pd_dias*5\n out = self.model.create_details_prestamo(id_prestamo, id_libro,pd_amount, pd_total)\n if out == True:\n self.view.ok(libro[1]+' Edicion: '+libro[3], 'agrego al prestamo')\n else:\n if out.errno == 1062:\n self.view.error(' El libro ya esta en el prestamo')\n else:\n self.view.error('No se pudo agregar el libro')\n od_total = 0.0\n else:\n if libro == None:\n 
self.view.error('El libro no existe')\n else:\n self.view.error('Problema al leer el libro')\n return id_libro, pd_total\n \n\n def add_prestamo_details(self):\n prestamo = self.read_prestamo()\n if type(prestamo) == tuple:\n id_prestamo = prestamo[0]\n p_total = prestamo[3]\n id_libro = ' '\n while id_libro != '':\n self.view.msg('--- Agrega libros a la orden (deja vacio el id del producto para salir) ---')\n id_libro, pd_total = self.create_details_prestamo()\n p_total += pd_total\n self.model.update_prestamo(('p_total = %s',),(p_total,id_prestamo))\n return\n \n\n def read_detalle_prestamo(self):\n self.view.ask('ID detalle prestamo: ') \n id_dPrestamo = input()\n dprestamo = self.model.read_detallesprestamo(id_dPrestamo)\n if type(dprestamo) == tuple:\n self.view.show_prestamo_details_header()\n self.view.show_a_prestamo_details(dprestamo)\n self.view.show_prestamo_details_footer()\n else:\n if dprestamo == None:\n self.view.error('El detalle del prestamo no existe')\n else:\n self.view.error('Hay un problema al leer el detalle del prestamo')\n return\n \n\n def read_all_details_prestamos(self):\n dprestamos = self.model.read_all_detallesprestamo()\n if type(dprestamos) == list:\n self.view.show_prestamo_header(' Todos los detalles de los prestamos ')\n for dprestamo in dprestamos:\n self.view.show_a_prestamo_details(dprestamo)\n self.view.show_prestamo_midder()\n self.view.show_prestamo_details_footer()\n else:\n self.view.error('Problema al leer los detalles de prestamos')\n return\n \n\n def read_details_prestamo_book(self):\n self.view.ask('ID del libro: ')\n id_libro = input()\n dprestamos = self.model.read_detalleprestamo_book(id_libro)\n if type(dprestamos) == list:\n self.view.show_prestamo_header('Detalles del prestamo del libro'+id_libro+' ')\n for dprestamo in dprestamos:\n self.view.show_a_prestamo_details(dprestamo)\n self.view.show_prestamo_midder()\n self.view.show_prestamo_footer()\n else:\n self.view.error('Problema al leer los detalles de 
prestamos')\n return\n \n\n def update_details_prestamo(self):\n self.view.ask('ID del detalle de prestamo a modificar: ')\n id_dprestamo = input()\n dprestamo = self.model.read_detallesprestamo(id_dprestamo)\n if type(dprestamo) == tuple:\n self.view.show_prestamo_header(' Datos del detalle de prestamo '+id_dprestamo+ ' ')\n self.view.show_a_prestamo(dprestamo)\n self.view.show_prestamo_midder()\n self.view.show_prestamo_footer()\n else:\n if dprestamo == None:\n self.view.error('El detalle del prestamo no existe')\n else:\n self.view.error('Problema al leer el detalle del prestamo')\n self.view.msg('Ingresa los valores a modificar (vacio para dejarlo igual): ')\n whole_vals = self.ask_dprestamo()\n fields, vals = self.update_lists(['d_id_libro','d_cantidad','d_adeudoTotal'], whole_vals)\n vals.append(id_dprestamo)\n vals = tuple(vals)\n out = self.model.update_detalleprestamo(fields,vals)\n if out == True:\n self.view.ok(id_dprestamo, 'actualizo')\n else:\n self.view.error('Error no se pudo actualizar el detalle de prestamo')\n return\n \n def delete_details_prestamo(self):\n self.view.ask('ID del detalle de prestamo a borrar: ')\n id_dprestamo = input()\n count = self.model.delete_detalleprestamo(id_dprestamo)\n if count != 0:\n self.view.ok(id_dprestamo, 'Borro')\n else:\n if count == 0:\n self.view.error('El detalle de prestamo no exite')\n else:\n self.view.error('Problema al borrar el detalle de prestamo')\n return","sub_path":"mvc_libreria_db/controller/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":34924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"313109646","text":"#using numpy arrays\\\r\nimport os\r\nimport PySpin\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nfrom PIL import Image\r\nfrom numpy import array, empty, ravel, where, ones, reshape, arctan2\r\nfrom matplotlib.pyplot import plot, draw, show, ion\r\nfrom matplotlib import animation\r\nfrom IPython.display import HTML #can't use this...\r\n\r\n\r\n#User input of which Data folder they desire\r\nFolder_Name = input(\"Enter the name of the file you wish to retrieve the images from: \") #Folder_Name in some previous save_aquire (versions newer than 191216)\r\n\r\n#adding automation\r\n#Find number of numpy array files within 'Folder_Name'\r\nData_dir = 'C:/Users/localadmin/Documents/pjflab/HetPhaseCam/Data/'\r\ndata_path = os.path.join(Data_dir,Folder_Name)\r\nNUM_IMAGES_list = os.listdir(data_path) #Num(instensity images)=Num(numpy arrays), not the number of phase maps\r\nNUM_IMAGES = len(NUM_IMAGES_list)\r\nprint(\"Number of intensity image numpy arrays present in \" + Folder_Name + \" is: \", NUM_IMAGES)\r\n\r\n\r\npicList = []\r\n\r\n#open image arrays\r\nfor i in range(NUM_IMAGES):\r\n\t#imgName = 'C:/Users/localadmin/Desktop/Phase_Camera_Images/Data/' + File_Name + '-%d.npy' % i\r\n\timgName = data_path +\"/\" + Folder_Name + '_%d.npy' % i\r\n\tIMG = np.load(imgName)\r\n\tpicList.append(IMG)\r\n\r\ncmap='gray'\r\ninterval=25\r\ncbar_lim=None\r\n\r\nfig = plt.gcf()\r\nim = plt.imshow(np.real(picList[0]), cmap=cmap)\r\nfig.colorbar(im)\r\n\r\nif cbar_lim != None:\r\n plt.clim(vmin=0,vmax=cbar_lim)\r\n\r\nplt.close()\r\n\r\ndef animate(i):\r\n im.set_data(np.real(picList[i]))\r\n return im,\r\n\r\nanim = animation.FuncAnimation(fig, animate, frames=range(0,len(picList)), interval=interval, 
repeat=True)\r\ndisplay(HTML(anim.to_jshtml()))\r\n","sub_path":"HetPhaseCam/View_intensity.py","file_name":"View_intensity.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"608796772","text":"from flask import Flask, request, jsonify\nimport xgboost as xgb\nimport numpy as np\nfrom datetime import datetime as dt\nfrom flask_logs import LogSetup\nfrom play_handler import PlayProcess\nimport os\nimport logging\nimport pandas as pd\nimport json\n\napp = Flask(__name__)\napp.config[\"LOG_TYPE\"] = os.environ.get(\"LOG_TYPE\", \"stream\")\napp.config[\"LOG_LEVEL\"] = os.environ.get(\"LOG_LEVEL\", \"INFO\")\n\nlogs = LogSetup()\nlogs.init_app(app)\n\nep_model = xgb.Booster({'nthread': 4}) # init model\nep_model.load_model('models/ep_model.model')\n\nwp_model = xgb.Booster({'nthread': 4}) # init model\nwp_model.load_model('models/wp_spread.model')\n\n@app.after_request\ndef after_request(response):\n logger = logging.getLogger(\"app.access\")\n logger.info(\n \"[python] %s [%s] %s %s %s\",\n request.remote_addr,\n dt.utcnow().strftime(\"%d/%b/%Y:%H:%M:%S.%f\")[:-3],\n request.method,\n request.path,\n response.status\n )\n return response\n\n@app.route('/cfb/process', methods=['POST'])\ndef process():\n base_data = request.get_json(force=True)['data']\n drives_data = request.get_json(force=True)['drivesData']\n boxScore = request.get_json(force=True)['boxScore']\n spread = request.get_json(force=True)['homeTeamSpread']\n homeTeam = request.get_json(force=True)['homeTeamId']\n awayTeam = request.get_json(force=True)['awayTeamId']\n firstHalfKickoffTeam = request.get_json(force=True)['firstHalfKickoffTeamId']\n\n processed_data = PlayProcess(logger = logging.getLogger(\"root\"), json_data=base_data, drives_data=drives_data, boxScore = boxScore, spread=spread, homeTeam=homeTeam, awayTeam=awayTeam, firstHalfKickoffTeam=firstHalfKickoffTeam)\n processed_data.run_processing_pipeline()\n tmp_json = processed_data.plays_json.to_json(orient=\"records\")\n jsonified_df = json.loads(tmp_json)\n\n box = processed_data.create_box_score()\n \n bad_cols = [\n 'start.distance', 'start.yardLine', 'start.team.id', 'start.down', 
'start.yardsToEndzone', 'start.posTeamTimeouts', 'start.defTeamTimeouts', \n 'start.shortDownDistanceText', 'start.possessionText', 'start.downDistanceText',\n 'clock.displayValue', \n 'type.id', 'type.text', 'type.abbreviation'\n 'end.shortDownDistanceText', 'end.possessionText', 'end.downDistanceText', 'end.distance', 'end.yardLine', 'end.team.id','end.down', 'end.yardsToEndzone', 'end.posTeamTimeouts','end.defTeamTimeouts', \n 'expectedPoints.before', 'expectedPoints.after', 'expectedPoints.added', \n 'winProbability.before', 'winProbability.after', 'winProbability.added', \n 'scoringType.displayName', 'scoringType.name', 'scoringType.abbreviation'\n ]\n # clean records back into ESPN format\n for record in jsonified_df:\n record[\"clock\"] = {\n \"displayValue\" : record[\"clock.displayValue\"]\n }\n\n record[\"type\"] = {\n \"id\" : record[\"type.id\"],\n \"text\" : record[\"type.text\"],\n \"abbreviation\" : record[\"type.abbreviation\"],\n }\n\n record[\"expectedPoints\"] = {\n \"before\" : record[\"EP_start\"],\n \"after\" : record[\"EP_end\"],\n \"added\" : record[\"EPA\"]\n }\n\n record[\"winProbability\"] = {\n \"before\" : record[\"wp_before\"],\n \"after\" : record[\"wp_after\"],\n \"added\" : record[\"wpa\"]\n }\n\n record[\"start\"] = {\n \"team\" : {\n \"id\" : record[\"start.team.id\"],\n },\n \"distance\" : record[\"start.distance\"],\n \"yardLine\" : record[\"start.yardLine\"],\n \"down\" : record[\"start.down\"],\n \"yardsToEndzone\" : record[\"start.yardsToEndzone\"],\n \"posTeamTimeouts\" : record[\"start.posTeamTimeouts\"],\n \"defTeamTimeouts\" : record[\"start.defTeamTimeouts\"],\n \"shortDownDistanceText\" : record[\"start.shortDownDistanceText\"],\n \"possessionText\" : record[\"start.possessionText\"],\n \"downDistanceText\" : record[\"start.downDistanceText\"]\n }\n\n record[\"end\"] = {\n \"team\" : {\n \"id\" : record[\"end.team.id\"],\n },\n \"distance\" : record[\"end.distance\"],\n \"yardLine\" : record[\"end.yardLine\"],\n 
\"down\" : record[\"end.down\"],\n \"yardsToEndzone\" : record[\"end.yardsToEndzone\"],\n \"posTeamTimeouts\" : record[\"end.posTeamTimeouts\"],\n \"defTeamTimeouts\" : record[\"end.defTeamTimeouts\"],\n \"shortDownDistanceText\" : record[\"end.shortDownDistanceText\"],\n \"possessionText\" : record[\"end.possessionText\"],\n \"downDistanceText\" : record[\"end.downDistanceText\"]\n }\n\n # remove added columns\n for col in bad_cols:\n record.pop(col, None)\n\n result = {\n \"count\" : len(jsonified_df),\n \"records\" : jsonified_df,\n \"box_score\" : box,\n \"boxScore\": boxScore\n }\n # logging.getLogger(\"root\").info(result)\n return jsonify(result)\n\n@app.route('/healthcheck', methods=['GET'])\ndef healthcheck():\n return jsonify({\n \"status\": \"ok\"\n })\n\nif __name__ == '__main__':\n app.run(port=7000, debug=False, host='0.0.0.0')\n\n","sub_path":"python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"348151129","text":"from django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom django.shortcuts import render\nfrom miniBond.models import *\nfrom django.template.loader import render_to_string\nimport os\nimport uuid\nfrom datetime import datetime\n\n\ndef hello(request):\n message = 'hello'\n return HttpResponse(\"Hello world\")\n\n\ndef myname(request, offset):\n name = \"wqfaa\"\n return render(request, 'miniBond/sample.html', locals())\n\n\ndef index(request):\n # allpfs = Platform.objects.filter(isValid=True)\n # pfs = [p for p in allpfs if p.promotioninfo_set.filter(\n # isValid=True).count() > 0 or (hasattr(p, 'linktowx') and p.linktowx.isValid)]\n # contextDict = {'pfs': pfs}\n templateName = \"index.html\"\n # staticView(contextDict, templateName, templateName)\n response = render(request, \"static/\"+templateName)\n if \"cookieId\" not in request.COOKIES:\n response.set_cookie(\"cookieId\", uuid.uuid4())\n return response\n\n\ndef staticView(contextDict, templateName, staticFileName):\n static_html = 'templates/static/' + staticFileName\n if not os.path.exists(static_html):\n content = render_to_string('miniBond/' + templateName, contextDict)\n with open(static_html, 'w', encoding=\"utf-8\") as static_file:\n static_file.write(content)\n\n\ndef logTrace(request, areaType, target, propertyData):\n trace = ClickTrace()\n trace.areaType = areaType\n trace.target = target\n trace.propertyData = propertyData\n trace.clickTime = datetime.now()\n cookieId = request.COOKIES[\n \"cookieId\"] if \"cookieId\" in request.COOKIES else uuid.uuid4()\n trace.cookieId = cookieId\n trace.save()\n\n response = HttpResponse('')\n if \"cookieId\" not in request.COOKIES:\n response.set_cookie(\"cookieId\", cookieId)\n return response\n\n\ndef toWx(request, uuid):\n staticFileName = str(uuid)+\".html\"\n return render(request, \"static/\" + 
staticFileName)\n\n\ndef strongStaticView(contextDict, templateName, staticFileName):\n static_html = 'templates/static/' + staticFileName\n content = render_to_string('miniBond/' + templateName, contextDict)\n with open(static_html, 'w', encoding=\"utf-8\") as static_file:\n static_file.write(content)\n\n\ndef refreshCache(request):\n allpfs = Platform.objects.all()\n pfs = [p for p in allpfs if\n p.promotioninfo_set.filter(isValid=True).count() > 0 or (hasattr(p, 'linktowx') and p.linktowx.isValid)]\n\n contextDict = {'pfs': pfs}\n templateName = \"index.html\"\n strongStaticView(contextDict, templateName, templateName)\n\n allpfs = Platform.objects.all()\n for pf in allpfs:\n if hasattr(pf, 'linktowx'):\n toWxItem = LinkToWx.objects.filter(platForm__id=pf.id).first()\n contextDict = {'wxText': toWxItem.wxText,\n 'imgName': toWxItem.image, 'pfName': toWxItem.platForm.name}\n\n staticFileName = str(toWxItem.platForm.id) + \".html\"\n strongStaticView(contextDict, \"linkToWx.html\", staticFileName)\n\n response = HttpResponse('well done!')\n return response\n","sub_path":"miniBond/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"224704537","text":"import pandas as pd\nimport numpy as np\nimport glob, os, re\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nfrom tabulate import tabulate \nfrom regressors import stats\nimport sympy\nimport copy\nfrom scipy.stats import norm\nfrom DeepEstimator import *\nfrom summary import *\n\n\nclass doubly_robust:\n def __init__(self, df_Xy_Z, y_idx, XZ_start_idx):\n self.df_Xy_Z = df_Xy_Z\n self.y_idx = y_idx\n self.XZ_start_idx = XZ_start_idx\n \n def est_ate_continuous(self, t_name, cols, t_level):\n \n t = self.df_Xy_Z[t_name].values # retrieve treatments\n XZ = self.df_Xy_Z.iloc[:, self.XZ_start_idx:][cols].values # retrieve confounders and other treatments\n y = self.df_Xy_Z.iloc[:, self.y_idx].values\n \n \n \n # summary statistics\n s = Summary(XZ, y, t, 'c', t_name, cols)\n s.basic_statistics()\n \n \n \n est_con = est_continuous(XZ, y, t, t_name[0])\n est_con.ps_est_fit()\n est_con.outcome_est_fit()\n \n print(est_con.sigma)\n\n res = [est_con.outcome_est_pred(i) for i in t_level]\n \n #return est_con.t_mu\n \n return res\n \n \n def est_ate_discrete(self, t_name, cols):\n #estimate average treatment effects\n #t_idx = [self.df_Xy_Z.columns.get_loc(t) for t in t_name]\n M = len(t_name)\n if M == 1: # binary \n t = self.df_Xy_Z[t_name].values # retrieve treatments\n t_label = t.ravel()\n XZ = self.df_Xy_Z.iloc[:, self.XZ_start_idx:][cols].values # retrieve confounders and other treatments\n y = self.df_Xy_Z.iloc[:, self.y_idx].values\n \n est_bin = est_discrete(XZ, y, t, 'b', t_name[0]) # initialization\n \n \n # summary statistics\n s = Summary(XZ, y, t, 'b', t_name[0], cols)\n s.basic_statistics()\n \n \n # propensity score\n est_bin.ps_est_fit()\n ps = est_bin.ps_est_pred()\n ps = ps.ravel() # flattent to one-dimension array\n \n \n # predict outcomes\n model = est_bin.outcome_est_fit()\n y_1 = est_bin.outcome_est_pred(model, np.ones_like(t))\n y_0 = 
est_bin.outcome_est_pred(model, np.zeros_like(t))\n y_1 = y_1.ravel()\n y_0 = y_0.ravel()\n \n \n # doubly robust\n idx_1 = np.where(t_label==1)\n idx_0 = np.where(t_label==0)\n dr1 = np.zeros(len(y))\n dr0 = np.zeros(len(y))\n \n dr1[idx_1] = y[idx_1]/ps[idx_1] - y_1[idx_1]*(1-ps[idx_1])/ps[idx_1]\n dr1[idx_0] = y_1[idx_0]\n dr0[idx_1] = y_0[idx_1]\n dr0[idx_0] = y[idx_0]/(1-ps[idx_0]) - y_0[idx_0]*ps[idx_0]/(1-ps[idx_0]) \n \n ate = np.nanmean(dr1) - np.nanmean(dr0)\n \n return ate\n \n elif M > 1: # multi-valued treatments \n t = self.df_Xy_Z[t_name].values # retrieve treatments\n t_label = np.argmax(t, axis=1)\n XZ = self.df_Xy_Z.iloc[:, self.XZ_start_idx:][cols].values # retrieve confounders and other treatments\n y = self.df_Xy_Z.iloc[:, self.y_idx].values\n \n est_mult = est_discrete(XZ, y, t, 'm', t_name[0]) # initialization\n \n \n # summary statistics\n s = Summary(XZ, y, t, 'm', t_name, cols)\n s.basic_statistics()\n \n \n # propensity score\n est_mult.ps_est_fit()\n ps = est_mult.ps_est_pred()\n \n \n \n # predict outcomes and doubly robust \n \n dr = np.zeros((len(y), M))\n \n for i in range(M):\n \n model = est_mult.outcome_est_fit(t_level=i)\n \n t_hat = np.zeros((len(y), M))\n t_hat[:,i] = 1\n y_hat = est_mult.outcome_est_pred(model, t_hat)\n y_hat = y_hat.ravel()\n \n idx_1 = np.where(t[:,i]==1)\n idx_0 = np.where(t[:,i]==0)\n \n dr[idx_1,i] = y[idx_1]/ps[idx_1,i] - (1-ps[idx_1,i])*y_hat[idx_1]/ps[idx_1,i]\n #dr[idx_1,i] = y[idx_1]\n dr[idx_0,i] = y_hat[idx_0]\n \n \n # average treatment effects \n ate = np.zeros(M) \n for i in range(M):\n ate[i] = np.nanmean(dr[:,i])\n \n return ate\n \n \n ","sub_path":"Code/double_robust_simulation_DL.py","file_name":"double_robust_simulation_DL.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"571603787","text":"def explore_website(url, banlist):\n import bs4\n import urllib.request\n import json\n import time as t\n i = 0\n url_list = [url]\n visited_list = []\n initialt = t.time()\n while url_list != []:\n actual_url = url_list.pop(0)\n visited_list.append(actual_url)\n print(i, actual_url, t.time()-initialt, \"s\")\n initialt = t.time()\n i += 1\n cpass = False\n for x in banlist:\n if actual_url.find(x) != -1:\n cpass = True\n if not cpass:\n try:\n req = urllib.request.Request(\n actual_url, headers={'User-Agent': \"Magic Browser\"})\n con = urllib.request.urlopen(req)\n webpage = str(con.read().decode(\"utf-8\"))\n soup = bs4.BeautifulSoup(webpage, \"lxml\")\n article = {\"title\": soup.title.get_text(), \"text\": \"\",\n \"link\": actual_url}\n for poccur in soup.find_all('p'):\n article['text'] += \" \" + poccur.get_text()\n file = open('./articles/' + alpha_string(url) + '.txt', 'a')\n json.dump(article, file)\n file.write('\\n')\n file.close()\n link_list = []\n for link in soup.find_all('a'):\n if link.get(\"href\") and not link.img:\n link_list.append(link.get('href'))\n link_list = evaluate(url, link_list, visited_list, url_list)\n url_list += link_list\n\n except:\n print('fail')\n continue\n\n return 0\n\n\ndef evaluate(url, link_list, visited_list, url_list):\n new_link_list = []\n for x in link_list:\n if url_check(x, url):\n if not x in visited_list and not x in new_link_list and not x in url_list:\n new_link_list.append(x)\n return new_link_list\n\n\ndef url_check(target_url, url):\n if target_url.startswith(\"http://\"):\n target_url = target_url[7:]\n elif target_url.startswith('https://'):\n target_url = target_url[8:]\n if url.startswith(\"http://\"):\n url = url[7:]\n elif url.startswith('https://'):\n url = url[8:]\n return target_url.startswith(url)\n\n\ndef alpha_string(string):\n res = ''\n for x in string:\n if x.isalpha():\n res += x\n return res\n\n\ndef spclabel(text):\n import spacy\n nlp = 
spacy.load('fr_core_news_md')\n doc = nlp(text)\n return [ent.label_ for ent in doc.ents]\n\n\ndef blog_preprocess(blogpath, location=False, clustering=False, random=1):\n import time as t\n import json\n from tqdm import tqdm\n import random as r\n import spacy\n nlp = spacy.load('fr_core_news_md')\n r.seed(t.time())\n article_content = []\n blog = open(blogpath, 'r')\n lines = blog.readlines()\n n = len(lines)\n tag_list = []\n link_list = []\n passing_tag = []\n article_list = []\n print('Pre-process' + ' ' + blogpath)\n pbar = tqdm(total=n)\n for x in range(n):\n json_text = json.loads(lines[x])\n doc = nlp(json_text['title'] +\n ' ' + json_text['text'])\n if len(doc) > 400 and r.random() < random:\n article_list.append(json_text['title'])\n link_list.append(json_text['link'])\n remove_location = \" \".join(\n [ent.text for ent in doc.ents if ent.label_ != 'LOC' and ent.text.isalpha()])\n tag_list.append(\n [ent.text for ent in doc.ents if ent.label_ == 'LOC' and ent.text.isalpha()])\n passing_tag.append(\" \".join([element for element in tag_list[-1]]))\n doc = nlp(remove_location)\n article_content.append(\n [token.lemma_ for token in doc if not token.is_stop and not token.is_punct and token.text.isalpha()])\n pbar.update(1)\n pbar.close()\n if not location:\n return article_content\n elif clustering:\n return article_content, article_list\n else:\n return passing_tag, article_list\n\n\ndef blog_preprocess2(blogpath, location=False, clustering=False, random=1):\n import time as t\n import json\n from tqdm import tqdm\n import random as r\n import spacy\n import stanfordnlp\n import spacy\n if not location or clustering:\n nlp = stanfordnlp.Pipeline(\n lang=\"fr\", processors='tokenize,pos,lemma')\n else:\n nlpSpacy = spacy.load('fr_core_news_md')\n r.seed(t.time())\n article_content = []\n blog = open(blogpath, 'r')\n lines = blog.readlines()\n blog.close()\n n = len(lines)\n tag_list = []\n link_list = []\n passing_tag = []\n article_list = []\n 
print('Pre-process' + ' ' + blogpath)\n pbar = tqdm(total=n)\n for x in range(n):\n if r.random() < random:\n json_text = json.loads(lines[x])\n if not location or clustering:\n doc = nlp(json_text['title'] +\n ' ' + json_text['text'])\n else:\n docSpacy = nlpSpacy(json_text['title'] +\n ' ' + json_text['text'])\n if len([word for sent in doc.sentences for word in sent.words]) > 400:\n article_list.append(json_text['title'])\n link_list.append(json_text['link'])\n if location and not clustering:\n tag_list.append(\n [ent.text for ent in docSpacy.ents if ent.label_ == 'LOC' and ent.text.isalpha()])\n passing_tag.append(\n \" \".join([element for element in tag_list[-1]]))\n else:\n article_content.append([word.lemma for sent in doc.sentences for word in sent.words if word.upos in [\n 'NOUN']])\n pbar.update(1)\n pbar.close()\n if not location:\n return article_content\n elif clustering:\n return article_content, article_list\n else:\n return passing_tag, article_list\n\n\ndef serialize_pre_process(blogpaths, location=False, clustering=False, random=[]):\n if len(random) < len(blogpaths):\n to_delete = []\n for i in range(len(random), len(blogpaths)):\n blog = open(blogpaths[i], 'r')\n lines = blog.readlines()\n if (len(lines) < 100):\n to_delete.append(i)\n else:\n random.append(len(lines))\n blog.close()\n for i, x in enumerate(to_delete):\n del blogpaths[x-i]\n minimal_article = 20 # min([x for x in random if x > 1])\n for i in range(len(random)):\n if random[i] > 1:\n random[i] = minimal_article/random[i]\n if not location and not clustering:\n article_content_agglo = []\n for index, x in enumerate(blogpaths):\n article_content_agglo += blog_preprocess(x, random[index])\n return article_content_agglo\n else:\n res, article_list_agglo = [], []\n for index, x in enumerate(blogpaths):\n res_temp, article_list_agglo_temp = blog_preprocess(\n x, location=location, clustering=clustering, random=random[index])\n res += res_temp\n article_list_agglo += 
article_list_agglo_temp\n return res, article_list_agglo\n\n\ndef serialize_pre_process2(blogpaths, location=False, clustering=False, random=[]):\n if len(random) < len(blogpaths):\n to_delete = []\n for i in range(len(random), len(blogpaths)):\n blog = open(blogpaths[i], 'r')\n lines = blog.readlines()\n if (len(lines) < 100):\n to_delete.append(i)\n else:\n random.append(len(lines))\n blog.close()\n for i, x in enumerate(to_delete):\n del blogpaths[x-i]\n minimal_article = 20 # min([x for x in random if x > 1])\n for i in range(len(random)):\n if random[i] > 1:\n random[i] = minimal_article/random[i]\n if not location and not clustering:\n article_content_agglo = []\n for index, x in enumerate(blogpaths):\n try:\n article_content_agglo += blog_preprocess2(\n x, random=random[index])\n except:\n continue\n return article_content_agglo\n else:\n res, article_list_agglo = [], []\n for index, x in enumerate(blogpaths):\n try:\n res_temp, article_list_agglo_temp = blog_preprocess2(\n x, location=location, clustering=clustering, random=random[index])\n res += res_temp\n article_list_agglo += article_list_agglo_temp\n except:\n continue\n return res, article_list_agglo\n\n\ndef location_tag(passing_tag, article_list):\n from sklearn.feature_extraction.text import CountVectorizer\n import pandas\n vec = CountVectorizer(binary=False)\n vec.fit(passing_tag)\n res = pandas.DataFrame(vec.transform(passing_tag).toarray())\n maxidx = res.idxmax(axis=1)\n maximum = res.max(axis=1)\n tags = sorted(vec.vocabulary_.keys())\n for i in range(len(res)):\n print(article_list[i], tags[maxidx[i]], maximum[i])\n\n\ndef topic_modeling(article_content, model_name):\n import gensim\n dictionnary = gensim.corpora.dictionary.Dictionary(article_content)\n corpus = [dictionnary.doc2bow(text) for text in article_content]\n lda = gensim.models.ldamodel.LdaModel(\n corpus, num_topics=15, id2word=dictionnary)\n lda.save(model_name + '.gensim')\n topics = lda.print_topics(num_words=4)\n for topic in 
topics:\n print(topic)\n\n\ndef updateModelWith(other_article_content, model_to_update):\n import gensim\n dictionnary = gensim.corpora.dictionary.Dictionary(other_article_content)\n corpus = [dictionnary.doc2bow(text) for text in other_article_content]\n\n lda = gensim.models.ldamodel.LdaModel.load(\n model_to_update + '.gensim', mmap='r')\n lda.update(corpus)\n topics = lda.print_topics(num_words=4)\n for topic in topics:\n print(topic)\n\n\ndef clustering(article_content, article_list, n_clusters):\n from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n from sklearn.cluster import MiniBatchKMeans\n for i, x in enumerate(article_content):\n article_content[i] = \" \".join(x)\n vec2 = TfidfVectorizer()\n vec2.fit(article_content)\n features = vec2.transform(article_content)\n\n cls = MiniBatchKMeans(n_clusters=n_clusters)\n cls.fit(features)\n cls.predict(features)\n clusters = cls.labels_\n for i in range(n_clusters):\n print(i)\n index_list = [j for j, x in enumerate(clusters) if x == i]\n for article in index_list:\n print(article_list[article])\n print('\\n\\n\\n\\n\\n')\n\n\ndef label_data(article_list, data_amount, target_name):\n import random as r\n import json\n import spacy\n import re\n n = len(article_list)\n for article_index, article_path in enumerate(article_list):\n blog = open(article_path, 'r')\n lines = blog.readlines()\n article_length = len(lines)\n for line in range(article_length):\n if r.random() < (data_amount/n)/article_length:\n text = json.loads(lines[line])\n mark = ''\n while not mark in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n print(text[\"title\"])\n print(text['link'])\n mark = input(str(article_index + 1) + '/' + str(n) + ' - ' + str(line) +\n '/' + str(article_length) + ' article\\'s category: ')\n article_content = text['title'] + ' ' + text['text']\n file = open(target_name + '.csv', 'a')\n file.write(article_content + ';' + mark)\n file.write('\\n')\n 
file.close()\n\n\ndef stanfordize_data(csv_path, target_name):\n import stanfordnlp\n from tqdm import tqdm\n nlp = stanfordnlp.Pipeline(\n lang=\"fr\", processors='tokenize,pos,lemma')\n X, y = getDataFromCSV(csv_path)\n pbar = tqdm(total=len(X))\n for i, x in enumerate(X):\n doc = nlp(x)\n temp = \" \".join([word.lemma for sent in doc.sentences for word in sent.words if word.upos in [\n 'NOUN']])\n if len(temp) > 20:\n file = open(target_name + '.csv', 'a')\n file.write(temp + ';' + y[i])\n file.write('\\n')\n file.close()\n pbar.update(1)\n\n\ndef getDataFromCSV(csv_path):\n import pandas\n articles = pandas.read_csv(\n csv_path, sep=\";\", header=None, names=[\"article\", \"tag\"])\n return articles[\"article\"].to_numpy(), articles[\"tag\"].to_numpy()\n\n\ndef tdIdfSplitForML(csv_path):\n from sklearn.model_selection import train_test_split\n from sklearn.feature_extraction.text import TfidfVectorizer\n from sklearn.utils import resample\n import numpy as np\n X, y = getDataFromCSV(csv_path)\n tfidfconverter = TfidfVectorizer(max_features=3000)\n X = tfidfconverter.fit_transform(X).toarray()\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=0)\n temp = [{'data': X_train[i], 'score': y_train[i]} for i in range(len(X_train))]\n score_0_res = [x for x in temp if x['score'] == 0]\n score_1_res = [x for x in temp if x['score'] == 1]\n maximum = 0\n for i in range(2,9):\n maximum = max(maximum,len([x for x in temp if x['score'] == i]))\n score_0_res_undersampled = resample(score_0_res,replace = True, n_samples = maximum)\n score_1_res_undersampled = resample(score_1_res,replace = True, n_samples = maximum)\n other_scores = [x for x in temp if x['score'] != 1 and x['score'] != 0]\n X_train = np.array([x['data'] for x in score_0_res_undersampled] + [x['data'] for x in score_1_res_undersampled] + [x['data'] for x in other_scores])\n y_train = np.array([x['score'] for x in score_0_res_undersampled] + [x['score'] for x in 
score_1_res_undersampled] + [x['score'] for x in other_scores])\n return X_train, X_test, y_train, y_test\n\ndef Word2VecSplitForML(csv_path):\n from sklearn.model_selection import train_test_split\n from gensim.models import Word2Vec\n from sklearn.utils import resample\n import numpy as np\n model = Word2Vec.load('./fr/fr.bin')\n X, y = getDataFromCSV(csv_path)\n for i,x in enumerate(X):\n temp = []\n for word in x.split():\n try:\n vector = np.array(model[word])\n if len(temp) == 0:\n temp = vector\n else:\n temp += vector\n except:\n continue\n X[i] = temp / len(temp)\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=0)\n temp = [{'data': X_train[i], 'score': y_train[i]} for i in range(len(X_train))]\n score_0_res = [x for x in temp if x['score'] == 0]\n score_1_res = [x for x in temp if x['score'] == 1]\n maximum = 0\n for i in range(2,9):\n maximum = max(maximum,len([x for x in temp if x['score'] == i]))\n score_0_res_undersampled = resample(score_0_res,replace = True, n_samples = maximum)\n score_1_res_undersampled = resample(score_1_res,replace = True, n_samples = maximum)\n other_scores = [x for x in temp if x['score'] != 1 and x['score'] != 0]\n X_train = np.array([x['data'] for x in score_0_res_undersampled] + [x['data'] for x in score_1_res_undersampled] + [x['data'] for x in other_scores])\n y_train = np.array([x['score'] for x in score_0_res_undersampled] + [x['score'] for x in score_1_res_undersampled] + [x['score'] for x in other_scores])\n return X_train, X_test, y_train, y_test\n\ndef random_forest(csv_path, word_embedding):\n from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n from sklearn.ensemble import RandomForestClassifier\n import pickle\n if word_embedding == 'tdIdf':\n X_train, X_test, y_train, y_test = tdIdfSplitForML(csv_path)\n else:\n X_train, X_test, y_train, y_test = Word2VecSplitForML(csv_path)\n classifier = RandomForestClassifier(n_estimators=1000, 
random_state=0)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict([list(x) for x in X_test])\n print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n print(accuracy_score(y_test, y_pred))\n with open('random_forest_text_classifier', 'wb') as picklefile:\n pickle.dump(classifier, picklefile)\n\n\ndef naive_bayes(csv_path, word_embedding):\n from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n from sklearn.naive_bayes import MultinomialNB\n import numpy as np\n import pickle\n if word_embedding == 'tdIdf':\n X_train, X_test, y_train, y_test = tdIdfSplitForML(csv_path)\n classifier = MultinomialNB()\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict([list(x) for x in X_test])\n print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n print(accuracy_score(y_test, y_pred))\n with open('naive_bayes_text_classifier', 'wb') as picklefile:\n pickle.dump(classifier, picklefile)\n else:\n return 'can not use word2vec for naive bayes classifier'\n\n\ndef SVM(csv_path, word_embedding):\n from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n from sklearn.linear_model import SGDClassifier\n import pickle\n if word_embedding == 'tdIdf':\n X_train, X_test, y_train, y_test = tdIdfSplitForML(csv_path)\n else:\n X_train, X_test, y_train, y_test = Word2VecSplitForML(csv_path)\n classifier = SGDClassifier(\n loss='hinge', penalty='l2', alpha=1e-3, random_state=0)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict([list(x) for x in X_test])\n print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n print(accuracy_score(y_test, y_pred))\n with open('svm_text_classifier', 'wb') as picklefile:\n pickle.dump(classifier, 
picklefile)\n","sub_path":"auxFuncs.py","file_name":"auxFuncs.py","file_ext":"py","file_size_in_byte":18201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"569812530","text":"lower_letters = \"abcdefghijklmnopqrstuvwxyz\"\nvect1 = [x for x in lower_letters]\nvect2 = [x for x in lower_letters.upper()]\n\n\ndef encrypt(message, key):\n vect = []\n for x in message:\n if x in vect1:\n index = vect1.index(x)\n x = vect1[((index + key) % 26)]\n elif x in vect2:\n index = vect2.index(x)\n x = vect2[((index + key) % 26)]\n vect.append(x)\n return \"\".join(vect)\n\ndef decrypt(message, key):\n return encrypt(message, 26 - key)\n\n\nif __name__ == \"__main__\":\n while True:\n\n question = \"Want to encrypt or decrypt a message? Type [e] to encrypt, [d] to decrypt, [n] to go back\\n\"\n answer = input(question).lower()\n not_exit = answer.startswith(\"e\") or answer.startswith(\"d\")\n if not_exit:\n\n message = input(\"Type the message: \")\n try:\n key = int(input(\"Type the key: \"))\n except:\n print(\"Error! Please type a digit between 1 and 26\")\n continue\n\n if answer.startswith(\"e\"):\n print(\"Result: \" + encrypt(message, key))\n elif answer.startswith(\"d\"):\n print(\"Result: \" + decrypt(message, key))\n else:\n break\n","sub_path":"ciphers/_caesar_cipher/cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"345905588","text":"# Author : nadavschwalb@mail.tau.ac.il\n# main scrip running the puzzlebox \n# tests camera and motor on startup\n# creates session log file\n# apon door opening the door closes after the delay set in door_delay\n# captures a video untill the door is closed and saves the video with a timestamp\n\n#imports\nfrom picamera import PiCamera\nimport time\nimport os\nimport RPi.GPIO as GPIO\nfrom RpiMotorLib import RpiMotorLib\nimport threading\nimport sys\nimport asyncio\n\n#functions\nasync def arrived(delay, timestamp):\n await capture(timestamp,delay)\n print(\"departed\")\n \n\nasync def close_door(delay):\n await asyncio.sleep(delay)\n doorMotor.motor_run(DM_Pins,0.001,fullRotation/4,False,False,\"half\",.001)\n time.sleep(0.5)\n doorMotor.motor_run(DM_Pins,0.001,fullRotation/4,True, False,\"half\",.001)\n print(\"door closed\")\n \nasync def capture(timestamp,delay):\n print(\"capture started\")\n camera.start_recording(capture_dir+\"/\"+timestamp+\".h264\",format='h264')\n count = 0\n await close_door(delay)\n while GPIO.input(IR_L_pin) or GPIO.input(IR_R_pin) or count > 50:\n time.sleep(0.3)\n count += 1\n time.sleep(1)\n camera.stop_recording()\n print(\"capture finnished\")\n \n \n\n#objects\nuser = os.environ[\"USER\"]\ncamera = PiCamera()\ndoorMotor = RpiMotorLib.BYJMotor(\"TestMotor\", \"28BYJ\")\ncapture_dir = \"/home/\"+ user + \"/puzzlebox_data/captures\"\ntimeStamp = time.strftime(\"%a-%d-%m-%y-%H-%M-%S\",time.localtime())\nlog_dir = \"/home/\" + user + \"/puzzlebox_data/log\"\nIR_L_pin = 21\nIR_R_pin = 26\ndoor_delay = 3\n\n#objects setups\ncamera.resolution = (1024,768)\nlogFileName = log_dir+\"/\"+\"log-file \"+timeStamp+\".txt\"\nlogFile = open(logFileName,'w')\nDM_Pins = [4, 17 ,23, 24]\nfullRotation = 512\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(IR_L_pin,GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(IR_R_pin,GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n#log file\nlogFile.write(\"log session started at \"+ timeStamp 
+\"\\n\")\n\n\n#test camera\ncamera.start_preview()\ntime.sleep(0.5)\nsave_str = capture_dir +\"/\"+\"cam-test \"+timeStamp+\".jpg\"\ncamera.capture(save_str)\ncamera.stop_preview()\nlogFile.write(\"camera test finished\\n\")\n\n\n#test mototrs\ndoorMotor.motor_run(DM_Pins,0.001,fullRotation/4,False,False,\"half\",.001)\ntime.sleep(0.5)\ndoorMotor.motor_run(DM_Pins,0.001,fullRotation/4,True, False,\"half\",.001)\nlogFile.write(\"motor test finished\\n\")\n\n\n#main loop\ntry:\n loop = asyncio.get_event_loop()\n print(\"loop started\")\n while True:\n if GPIO.input(IR_R_pin):\n print(\"right\")\n timeStamp = timeStamp = time.strftime(\"%a-%d-%m-%y-%H-%M-%S\",time.localtime())\n logFile.write(\"opened right \" + timeStamp +\"\\n\")\n loop.run_until_complete(arrived(door_delay,timeStamp))\n \n\n elif GPIO.input(IR_L_pin):\n print(\"left\")\n timeStamp = timeStamp = time.strftime(\"%a-%d-%m-%y-%H-%M-%S\",time.localtime())\n logFile.write(\"opened left \" + timeStamp+\"\\n\")\n loop.run_until_complete(arrived(door_delay,timeStamp))\n \n else:\n pass\nexcept KeyboardInterrupt:\n print(\"keyboard interrupt caught\")\n \n#cleanup\n logFile.write(\"log session closed\")\n logFile.close()\n GPIO.cleanup()\n \n \n\n","sub_path":"puzzlebox_main.py","file_name":"puzzlebox_main.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"260904760","text":"import torch\nfrom torch import nn\nimport torchvision\nimport numpy as np\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass Encoder(nn.Module):\n \n \"\"\"\n Encoder\n \"\"\"\n \n def __init__(self, encoder_size = 14):\n super(Encoder, self).__init__()\n self.enc_image_size = encoder_size\n resnet = torchvision.models.resnet101(pretrained=True)\n \n modules = list(resnet.children())[:-2]\n self.resnet = nn.Sequential(*modules)\n \n self.adaptive_pool = nn.AdaptiveAvgPool2d((encoder_size, encoder_size))\n \n self.fine_tune()\n \n \n def forward(self, images):\n out = self.resnet(images) # (batch_size, 2048, image_size/32, image_size/32)\n out = self.adaptive_pool(out) # (batch_size, 2048, encoded_image_size, encoded_image_size)\n out = out.permute(0, 2, 3, 1) # (batch_size, encoded_image_size, encoded_image_size, 2048)\n \n return out\n \n \n def fine_tune(self, fine_tune = True):\n \n for p in self.resnet.parameters():\n p.requires_grad = False\n \n for c in list(self.resnet.children())[5:]:\n for p in c.parameters():\n p.requires_grad = fine_tune\n\n\nclass Attention(nn.Module):\n \"\"\"\n Attention\n \"\"\"\n \n def __init__(self, encoder_dim, decoder_dim, attention_dim):\n \"\"\"\n param encoder_dim: feature size of encoded images\n param decoder_dim: size of decoder's RNN\n param attention_dim: size of the attention_dim\n \"\"\"\n super(Attention, self).__init__()\n \n self.encoder_att = nn.Linear(encoder_dim, attention_dim) # linear layer to transform encoded_image\n self.decoder_att = nn.Linear(decoder_dim, attention_dim) # linear layer to transform decoder's output\n self.full_att = nn.Linear(attention_dim, 1) # linear layer to calculate values to be softmaxed\n self.relu = nn.ReLU()\n self.softmax = nn.Softmax(dim = 1) # softmax layer to calculate weights\n \n \n def forward(self, encoder_out, decoder_hidden):\n \"\"\"\n Forward propogation\n \n param encoder_out: encoded_images, tensor 
with shape: (batch_size, num_pixels, encoder_dim)\n param decoder_hidden: previous decoder output, tensor with shape: (batch_size, decoder_dim)\n :returns: attention weighted encoding, weights\n \"\"\"\n att1 = self.encoder_att(encoder_out) # (batch_size, num_pixels, attention_dim)\n att2 = self.decoder_att(decoder_hidden) # (batch_size, attention_dim)\n att = self.full_att(self.relu(att1 + att2.unsqueeze(1))).squeeze(2) #(batch_size, num_pixels)\n alpha = self.softmax(att) #(batch_size, num_pixels)\n attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(dim = 1)\n \n return attention_weighted_encoding, alpha\n\n\nclass DecoderWithAttention(nn.Module):\n \"\"\"\n Decoder.\n \"\"\"\n \n def __init__(self, attention_dim, embed_dim, decoder_dim, vocab_size, encoder_dim = 2048, dropout = 0.5):\n \"\"\"\n param attention_dim: size of attention network\n param embed_dim: embedding size\n param decoder_dim: size of decoder's RNN\n param vocab_size: size of vocabulary\n param encoder_dim: feature size of encoded images\n param dropout: dropout\n \"\"\"\n super(DecoderWithAttention, self).__init__()\n \n self.encoder_dim = encoder_dim\n self.attention_dim = attention_dim\n self.embed_dim = embed_dim\n self.vocab_size = vocab_size\n self.decoder_dim = decoder_dim\n self.dropout = dropout\n \n self.Attention = Attention(encoder_dim, decoder_dim, attention_dim)\n \n self.embedding = nn.Embedding(vocab_size, embed_dim) # embedding layer\n self.dropout = nn.Dropout(self.dropout)\n self.decode_step = nn.LSTMCell(embed_dim + encoder_dim, decoder_dim, bias = True) #decoding LSTM cell\n self.init_h = nn.Linear(encoder_dim, decoder_dim) # initial hidden state of LSTM\n self.init_c = nn.Linear(encoder_dim, decoder_dim) # initial cell state of LSTM\n self.f_beta = nn.Linear(decoder_dim, encoder_dim) # layer to create sigmoid-activated gate\n self.sigmoid = nn.Sigmoid()\n self.fc = nn.Linear(decoder_dim, vocab_size) # linear layer to find scores over vocabulary\n \n 
self.init_weights()\n \n \n def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n \n \n def load_pretrained_weights(self, embeddings):\n \"\"\"\n Loads layer with pretrained embeddings\n \"\"\"\n self.embedding.weight = nn.Parameter(embeddings)\n \n \n def fine_tune_embedding(self, fine_tune = True):\n \"\"\"\n Allow fine tuning of embedding layer? Only makes sense not to allow when using pretrained\n embeddings\n \"\"\"\n for p in self.embedding.parameters():\n p.requires_grad = fine_tune\n \n \n def init_hidden_state(self, encoder_out):\n \"\"\"\n Creates the initial hidden and cell states for the decoder's LSTM based on the encoded images.\n :param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)\n :returns: hidden state, cell state\n \"\"\"\n \n mean_encoder_out = encoder_out.mean(dim = 1)\n h = self.init_h(mean_encoder_out) #(batch_size, decoder_dim)\n c = self.init_c(mean_encoder_out) #(batch_size, decoder_dim)\n \n return h, c\n \n \n def forward(self, encoder_out, encoded_captions, caption_lengths):\n \"\"\"\n Forward propagation\n \n param encoder_out: encoded images, tensor with dimensions (batch_size, enc_image_size, enc_image_size, encoder_dim)\n param encoded_captions: encoded_captions: tensor with dimensions (batch_size, max_caption_length)\n param caption_lengths: length of captions, tensor with shape (batch_size, 1)\n :returns: scores for vocabulary, sorted encoded captions, decode lengths, weights, sort indices\n \"\"\"\n \n batch_size = encoder_out.size(0)\n encoder_dim = encoder_out.size(3)\n vocab_size = self.vocab_size\n \n # Flatten image:\n encoder_out = encoder_out.view(batch_size, -1, encoder_dim)\n num_pixels = encoder_out.size(1)\n \n # Sort inputs by decreasing dimension:\n caption_lengths, sort_ind = caption_lengths.squeeze(1).sort(dim = 0, descending = True)\n encoder_out = encoder_out[sort_ind]\n 
encoded_captions = encoded_captions[sort_ind]\n \n # Embedding: \n # self.embedding = nn.Embedding(vocab_size, embed_dim)\n # encoded_captions: (batch_size, max_caption_length)\n embeddings = self.embedding(encoded_captions) # (batch_size, max_caption_length, embed_dim)\n \n #Initalize LSTM state\n h, c = self.init_hidden_state(encoder_out) #(batch_size, decoder_dim)\n \n # We won't decode at the position, since we've finished generating as soon \n # as we generate . So, decoding lengths are actual lengths - 1\n decode_lengths = (caption_lengths - 1).tolist()\n \n # Create tensors to hold word predicion scores and alphas\n predictions = torch.zeros(batch_size, max(decode_lengths), vocab_size).to(device)\n alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels).to(device)\n \n # At each time-step, decode by\n # attention-weighing the encoder's output based on the decoder's previous hidden state output\n # then generate a new word in the decoder with the previous word and the attention weighted encoding\n for t in range(max(decode_lengths)):\n batch_size_t = sum([l > t for l in decode_lengths])\n attention_weighted_encoding, alpha = self.Attention(encoder_out[:batch_size_t],\n h[:batch_size_t])\n gate = self.sigmoid(self.f_beta(h[:batch_size_t])) # gating scalar, (batch_size_t, encoder_dim)\n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings[:batch_size_t, t, :], attention_weighted_encoding], dim=1),\n (h[:batch_size_t], c[:batch_size_t])) # (batch_size_t, decoder_dim)\n preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n predictions[:batch_size_t, t, :] = preds\n alphas[:batch_size_t, t, :] = alpha\n \n return predictions, encoded_captions, decode_lengths, alphas, sort_ind\n\n","sub_path":"Scripts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"272931981","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function\n\nimport cv2\nfrom ImageAnalysisFuncs.base import ImageAnalysisError\nfrom ImageAnalysisFuncs import target_detection_contours, target_detection_otsu\nfrom target_detection_otsu import OtsuTargetFindingError\n\n# version number for analysis algorithm\n# (each different result for the same data\n# should yield a version number increase)\n\nMETROLOGY_ANALYSIS_ALGORITHM_VERSION = (1,0,0)\n\nCONTOUR_ALGORITHM = \"contour\"\nOTSU_ALGORITHM = \"otsu\"\n\n\n# exceptions which are raised if image analysis functions fail\n\n\nclass MetrologyAnalysisTargetError(ImageAnalysisError):\n pass\n\n\nclass MetrologyAnalysisFibreError(ImageAnalysisError):\n pass\n\n\ndef metcalTargetCoordinates(image_path, pars=None):\n \"\"\" Reads the image and analyse the location and quality of the targets\n using the chosen algorithm\n\n\n :return: A tuple length 6 containing the x,y coordinate and quality factor for the small and large targets\n Where quality is measured by 4 * pi * (area / (perimeter * perimeter)).\n (small_x, small_y, small_qual, big_x, big_y, big_qual)\n \"\"\"\n\n if pars.MET_CAL_TARGET_DETECTION_ALGORITHM == CONTOUR_ALGORITHM:\n analysis_func = target_detection_contours.targetCoordinates\n func_pars = pars.MET_CAL_TARGET_DETECTION_CONTOUR_PARS\n elif pars.MET_CAL_TARGET_DETECTION_ALGORITHM == OTSU_ALGORITHM:\n analysis_func = target_detection_otsu.targetCoordinates\n func_pars = pars.MET_CAL_TARGET_DETECTION_OTSU_PARS\n else:\n raise MetrologyAnalysisTargetError(\n \"MET_CAL_ALORITHM ({}) does not match an algorithm.\".format(\n pars.POS_REP_AlGORITHM\n )\n )\n\n func_pars.display = pars.display\n func_pars.verbosity = pars.verbosity\n func_pars.loglevel = pars.loglevel\n func_pars.PLATESCALE = pars.PLATESCALE\n\n try:\n positions = analysis_func(image_path, func_pars)\n except OtsuTargetFindingError as err:\n raise MetrologyAnalysisTargetError(\n err.message + \" from 
Image {}\".format(image_path)\n )\n\n return positions\n\n\ndef metcalFibreCoordinates(image_path, pars=None): # configurable parameters\n\n MET_CAL_PLATESCALE = pars.MET_CAL_PLATESCALE\n MET_CAL_QUALITY_METRIC = pars.MET_CAL_QUALITY_METRIC\n verbosity = pars.verbosity\n display = pars.display\n\n \"\"\"reads an image from the metrology calibration camera and returns the\n XY coordinates and Gaussian fit quality of the backlit fibre in mm\"\"\"\n\n # Authors: Stephen Watson (initial algorithm March 4, 2019)\n # Johannes Nix (code imported and re-formatted)\n\n # pylint: disable=no-member\n image = cv2.imread(image_path)\n\n # image processing\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n metcal_fibre_x = 0\n metcal_fibre_y = 0\n metcal_fibre_quality = 0\n\n # exceptions: MetrologyAnalysisFibreError()\n\n return metcal_fibre_x, metcal_fibre_y, metcal_fibre_quality\n","sub_path":"ImageAnalysisFuncs/analyze_metrology_calibration.py","file_name":"analyze_metrology_calibration.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"619778609","text":"# Generic Parser\nimport os\nimport sys\nfrom akrr.parsers.akrrappkeroutputparser import AppKerOutputParser, total_seconds\n\n\ndef process_appker_output(appstdout=None, stdout=None, stderr=None, geninfo=None, proclog=None, \n resource_appker_vars=None):\n # set App Kernel Description\n if resource_appker_vars is not None and 'app' in resource_appker_vars and 'name' in resource_appker_vars['app']:\n ak_name = resource_appker_vars['app']['name']\n else:\n ak_name = 'unknown'\n\n # initiate parser\n parser = AppKerOutputParser(\n name=ak_name\n )\n # set obligatory parameters and statistics\n # set common parameters and statistics (App:ExeBinSignature and RunEnv:Nodes)\n parser.add_common_must_have_params_and_stats()\n # set app kernel custom sets\n # parser.add_must_have_parameter('App:Version')\n\n parser.add_must_have_statistic('Wall Clock Time')\n # parse common parameters and statistics\n parser.parse_common_params_and_stats(appstdout, stdout, stderr, geninfo, resource_appker_vars)\n\n if hasattr(parser, 'appKerWallClockTime'):\n parser.set_statistic(\"Wall Clock Time\", total_seconds(parser.appKerWallClockTime), \"Second\")\n\n # Here can be custom output parsing\n # #read output\n # lines=[]\n # if os.path.isfile(appstdout):\n # fin=open(appstdout,\"rt\")\n # lines=fin.readlines()\n # fin.close()\n #\n # #process the output\n # parser.successfulRun=False\n # j=0\n # while j ajout de (u,v) cree un cycle donc erreur\r\n if sommetV.aChemin(sommetU):\r\n return False\r\n else: # non => chemin u à v?\r\n # si chemin alors on a déjà la précédence par transitivité\r\n # sinon:\r\n if not sommetU.aChemin(sommetV):\r\n # ajout u -> v\r\n sommetU.sortants.append(sommetV)\r\n sommetV.entrants.append(sommetU)\r\n return True\r\n\r\n # BONUS 1 partie 2\r\n def messageErreurPrecedence(t1, t2, type):\r\n \"\"\"arrete le programme et affiche l'erreur dans la console\r\n \"\"\"\r\n if type == \"explicite\":\r\n raise Exception(\r\n \"Impossible 
d'établir une relation de précédence entre {} et {}.\".format(t1.name, t2.name) +\r\n \" Veuillez vérifier vos préférences de précédence.\\n\" +\r\n \" Un cycle explicite a été detecté dans le dictionnaire de préférences que vous avez rentré ie. {}:[{}] et {}:[{}] \".format(t1.name, t2.name, t2.name, t1.name))\r\n if type == \"implicite\":\r\n raise Exception(\r\n \"Impossible d'établir une relation de précédence entre {} et {}.\".format(t1.name, t2.name) +\r\n \"Veuillez vérifier vos préférences de précédence. Un cycle implicite a été detecté dans le dictionnaire de préférences que vous avez rentré.\")\r\n if type == \"noInfo\":\r\n raise Exception(\r\n \"Impossible d'établir une relation de précédence entre %s et %s. Veuillez vérifier vos préférences de précédence. Nous ne disposons pas d'informations suffisantes pour construire le graph de précédence\" % (t1.name, t2.name))\r\n\r\n def makeGraph(self):\r\n \"\"\"construit le graph de precedence du systeme de parallelisme maximal\r\n \"\"\"\r\n self.initGraph()\r\n interferences = self.getInterferences()\r\n aTraiter = []\r\n # e de la forme (t1,t2) (interference tache t1 et tache t2)\r\n for e in interferences:\r\n # on verifie les preferences de precedence\r\n t1 = e[0]\r\n t2 = e[1]\r\n # si t1 precede t2\r\n if (t1.name in self.dic[t2.name]):\r\n # si t2 precede t1 aussi alors erreur\r\n if(t2.name in self.dic[t1.name]):\r\n TaskSystem.messageErreurPrecedence(t1, t2, \"explicite\")\r\n else: # t2 ne precede pas t1 dans les préférences\r\n if not self.ajout(t1, t2):\r\n TaskSystem.messageErreurPrecedence(t1, t2, \"implicite\")\r\n elif (t2.name in self.dic[t1.name]): # si t2 precede t1\r\n if not self.ajout(t2, t1):\r\n TaskSystem.messageErreurPrecedence(t2, t1, \"implicite\")\r\n else: # aucune preference à été communiquée explicitement\r\n # on garde les taches à part\r\n aTraiter.append(e)\r\n for a in aTraiter: # a de la forme (tache1,tache2)\r\n t1 = a[0]\r\n t2 = a[1]\r\n # on recupere les sommets 
associes à ces taches\r\n sommetU = self.graph[t1.name]\r\n sommetV = self.graph[t2.name]\r\n # a t'on (t1,t2) ou (t2,t1) par transitivité?\r\n # si oui alors les interferences ont déjà été traitées\r\n # sinon, on ne dispose pas d'informations suffisantes pour construire le graph de précédence,\r\n # (l'utilisateur prefere-t-il t1 avant t2 ou t2 avant t1?)\r\n if not (sommetU.aChemin(sommetV) or sommetV.aChemin(sommetU)):\r\n TaskSystem.messageErreurPrecedence(t1, t2, \"noInfo\")\r\n self.trimGraph()\r\n\r\n def trimGraph(self):\r\n \"\"\" enleve les aretes \"en trop\"\r\n pour une arete (u,v) si on l'enleve et qu'on a toujours un chemin entre u et v alors on n'a pas besoin de cette arete\r\n \"\"\"\r\n for u in self.graph.values():\r\n # les sommets accessibles directement\r\n accesDirect = u.sortants.copy()\r\n for v in accesDirect : # soit l'arete u -> v\r\n # on enleve le sommet v\r\n u.sortants.remove(v)\r\n # s'il existe un chemin u à v on peut enlever cette arete\r\n # s'il n'existe pas de chemin de u à v on doit garder cette arete\r\n if not u.aChemin(v):\r\n u.sortants.append(v)\r\n\r\n\r\n\r\n def getInterferences(self):\r\n \"\"\"retourne les interferences entre les taches\r\n sous la forme d'une liste de tuples (T1, T2)\r\n \"\"\"\r\n # la liste à retourner\r\n interferences = []\r\n # calcul combinatoire, 2 parmi n taches\r\n for i in range(0, len(self.tasks)):\r\n for j in range(i+1, len(self.tasks)):\r\n t1 = self.tasks[i]\r\n t2 = self.tasks[j]\r\n # si t1 et t2 interferents on ajoute le tuple (t1,t2) à interferences\r\n if(TaskSystem.isInterferent(t1, t2)):\r\n interferences.append((t1, t2))\r\n return interferences\r\n\r\n def estDisjoint(l1, l2):\r\n \"\"\"retourne vrai si l1 et l2 sont disjoints, faux sinon\r\n \"\"\"\r\n # si un element est dans l1 et l2 alors l1 et l2 ne sont pas disjoints\r\n for e1 in l1:\r\n if e1 in l2:\r\n return False\r\n return True\r\n\r\n def isInterferent(task1, task2):\r\n \"\"\"verifie si 2 taches sont 
interferentes en fonction des conditions de Bernstein\r\n\r\n return True si interference False sinon.\r\n 2 taches sont non interferentes si E1&E2=EnsVide et E1&L2=EnsVide et L1&E2=EnsVide\r\n \"\"\"\r\n # e1 et e2 sont disjoints si leur intersection est vide (E1&E2=EnsVide => E1 et E2 disjoints)\r\n return not(TaskSystem.estDisjoint(task1.reads, task2.writes)\r\n and TaskSystem.estDisjoint(task1.writes, task2.reads)\r\n and TaskSystem.estDisjoint(task1.writes, task2.writes)\r\n )\r\n\r\n # BONUS2 affichage du système de parallélisme maximal\r\n # utilisation graphviz https://pypi.org/project/graphviz/\r\n\r\n def draw(self):\r\n \"\"\"permet d'afficher graphiquement le graphe de précédence du système de parallélisme maximal construit\r\n le fichier .pdf est placé dans le repertoire Graphs\r\n \"\"\"\r\n systeme = Digraph()\r\n # pour ajoute chaque sommet du systeme au graphe et on ajoute aussi chaque arete (sortant de ce sommet)\r\n for sommet in self.graph.values():\r\n systeme.node(sommet.task.name, label=sommet.task.name)\r\n for s in sommet.sortants:\r\n systeme.edge(sommet.task.name, s.task.name)\r\n systeme.render('Graphs/GrapheTaskSystem', view=True)\r\n\r\n\r\n########################################################################################################\r\n###EXEMPLES D'EXECUTION###\r\n\"\"\"\r\nfrom time import sleep\r\nfrom random import randint\r\n\r\nX = None\r\nY = None\r\nZ = None\r\n\r\n\r\ndef runT1():\r\n global X\r\n print(\"running T1\")\r\n sleep(randint(1,10))\r\n X = 1\r\n print(\"finished T1\")\r\n\r\n\r\ndef runT2():\r\n global Y\r\n print(\"running T2\")\r\n sleep(randint(1,10))\r\n Y = 2\r\n print(\"finished T2\")\r\n\r\n\r\ndef runTsomme():\r\n global X, Y, Z\r\n print(\"running Tsomme\")\r\n Z = X+Y\r\n print(\"finished Tsomme\")\r\n\r\n\r\nt1 = Task(\"T1\", [], [\"X\"], runT1)\r\nt2 = Task(\"T2\", [], [\"Y\"], runT2)\r\ntsomme = Task(\"Tsomme\", [\"Y\", \"X\"], [\"Z\"], runTsomme)\r\ntasksystem = TaskSystem([t1, t2, tsomme], 
{\"T1\": [], \"T2\": [\r\n \"T1\"], \"Tsomme\": [\"T1\", \"T2\"]})\r\nprint(tasksystem.getDependencies(\"Tsomme\"))\r\ntasksystem.draw()\r\ntasksystem.run()\r\n\"\"\"","sub_path":"maxpar.py","file_name":"maxpar.py","file_ext":"py","file_size_in_byte":16268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"123604805","text":"from django.utils import timezone\nfrom django_extensions.management.jobs import HourlyJob\n\nfrom shared.oidc.models import EAuthorizationProfile, OIDCProfile\nfrom shared.oidc.services import clear_eauthorization_profiles, clear_oidc_profiles\n\n\nclass Job(HourlyJob):\n help = \"Clear e-authorization sessions over 2h old. Clear oidc profiles that have expired refresh tokens.\"\n\n def execute(self):\n eauthorization_profiles = EAuthorizationProfile.objects.filter(\n refresh_token_expires__lt=timezone.now(),\n # modified_at__gt=timezone.now() - datetime.timedelta(hours=2)\n )\n clear_eauthorization_profiles(eauthorization_profiles)\n\n oidc_profiles = OIDCProfile.objects.filter(\n refresh_token_expires__lt=timezone.now(),\n )\n clear_oidc_profiles(oidc_profiles)\n","sub_path":"backend/shared/shared/oidc/jobs/hourly/clear_user_sessions.py","file_name":"clear_user_sessions.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"616080491","text":"import logging\nfrom typing import Iterable, Optional, Sequence\n\nfrom .core import Base, Session\nfrom .models import Author, Book\n\n\ndef delete_book_or_all_db(\n session: Session,\n primary_key: Optional[int] = None,\n all_flag: Optional[bool] = False,\n) -> int:\n \"\"\"Delete a book in a database if not all_flag else Flush whole db\"\"\"\n\n counter = 0\n if not all_flag:\n logging.debug(\"Searching book in db\")\n book = session.query(Book).filter(Book.id == primary_key).first()\n\n if book is None:\n logging.info(\"Book Not Found\")\n return counter\n\n logging.debug(f\"Deleting book: {book.as_dict}\")\n session.delete(book)\n counter += 1\n else:\n logging.debug(\"Deleting all books\")\n counter += session.query(Book).delete()\n\n logging.debug(\"Deleting all authors\")\n counter += session.query(Author).delete()\n session.commit()\n return counter\n\n\ndef get_or_create(session: Session, model: Base, **kwargs) -> (Base, bool):\n \"\"\"Find instance with such kwarg in db or Create if not exists\"\"\"\n instance = session.query(model).filter_by(**kwargs).first()\n if instance:\n return instance, True\n instance = model(**kwargs)\n session.add(instance)\n session.commit()\n return instance, False\n\n\ndef find_books_and_authors(\n session: Session,\n book_name: str,\n author_first_name: Optional[str],\n author_last_name: Optional[str],\n book_year: Optional[int],\n) -> (Sequence[Book], Optional[Author]):\n \"\"\"Find Books and Author of books in DB\"\"\"\n query_param = [\n Book.name == book_name,\n (Book.year == book_year) if book_year is not None else None,\n Book.author.has(\n Author.first_name == author_first_name\n and Author.last_name == author_last_name\n )\n if author_first_name is not None and author_last_name is not None\n else None,\n ]\n query_param = filter(lambda x: x is not None, query_param)\n return session.query(Book).filter(*query_param).all()\n\n\ndef get_books_from_db(\n session: Session,\n book_name: 
str,\n author_first_name: Optional[str],\n author_last_name: Optional[str],\n book_year: Optional[int],\n primary_key: bool,\n):\n \"\"\"Find Books in DB and convert it into dict\"\"\"\n\n logging.debug(\"Searching book in db\")\n books = find_books_and_authors(\n session, book_name, author_first_name, author_last_name, book_year\n )\n if primary_key:\n return [book.id for book in books]\n return [book.as_dict for book in books]\n\n\ndef create_books_and_authors(session: Session, data: Sequence[dict], update_flag: bool):\n \"\"\"Creating Books and Authors if they do not exists in DB\"\"\"\n\n authors = create_all_authors(session, data)\n create_all_books(session, data, authors, update_flag)\n\n\ndef create_all_authors(\n session: Session, data: Sequence[dict]\n) -> Iterable[Optional[Author]]:\n \"\"\"Creating Authors if they do not exists in DB\"\"\"\n\n def get_author(book_info):\n if book_info[\"author_first_name\"] is None:\n return None\n kwargs = {\n \"first_name\": book_info[\"author_first_name\"],\n \"last_name\": book_info[\"author_last_name\"],\n }\n return get_or_create(session, Author, **kwargs)[0]\n\n return map(get_author, data)\n\n\ndef create_all_books(\n session: Session,\n data: Sequence[dict],\n authors: Iterable[Optional[Author]],\n update_flag: bool,\n):\n \"\"\"Creating Books if they do not exists in DB else update it, if update_flag is True\"\"\"\n cr, up = 0, 0\n\n for book_data, author in zip(data, authors):\n book_kwargs = {\n \"name\": book_data[\"name\"],\n \"year\": book_data[\"year\"],\n \"author_id\": author.id if author is not None else None,\n }\n book, not_created = get_or_create(session, Book, **book_kwargs)\n if not_created:\n up += update_if_update_flag(session, book, book_data, update_flag)\n else:\n cr += 1\n logging.debug(f\"Saving info about book: {book.as_dict}\")\n logging.info(f\"Created: {cr} books AND Updated: {up} books\")\n\n\ndef update_if_update_flag(\n session: Session, book: Book, book_data: dict, update_flag: 
bool\n) -> bool:\n \"\"\"Update Book if update_flag is True\"\"\"\n\n logging.debug(f\"Book already exists: {book.as_dict}\")\n if update_flag:\n logging.debug(f\"Updating info about book: {book.as_dict}\")\n book.name = book_data[\"name\"]\n book.year = book_data[\"year\"]\n session.commit()\n return True\n return False\n","sub_path":"src/db/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"36053474","text":"\"\"\"\nJosh Powell\nLidar Cal Python Test\nfizz_rnd_list class \n\"\"\"\n\nimport random\nimport numpy as np\n\nclass fizz_rnd_list():\n\n def __init__(self,N):\n # @param N is a floating point number range 0.0-1.0\n # mylist is a list of N random floats from 0.0 to 1.0\n self.mylist = [random.uniform(0.0,1.0) for i in range(0,N)]\n \n def get_stats(self,X):\n # @param X is a floating point number range 0.0-1.0\n # @return the value Y in the list closest to X\n # @return the index M of Y in the list\n # @return the mean of the values in the list for indices [0:M]\n # (including M)\n # @return the standard deviation of the values in the list for\n # indices [0:M] (including M)\n \n # instead of calling a helper function, I could have used:\n # min(self.mylist,key=lambda x:abs(x-X))\n # both the helper function and the min() functions are O(n) time\n Y = getClosestToX(self.mylist,X)\n \n M = self.mylist.index(Y)\n \n # I am not sure if using numpy to find the mean and standard deviation\n # is more efficient than looping through the list to do so,\n # but it is at least cleaner.\n mean = np.mean(self.mylist[:M+1])\n sd = np.std(self.mylist[:M+1])\n \n return Y,M,mean,sd \n \ndef getClosestToX(L,X):\n # helper function for get_stats()\n # @param L is the list that stats are being taken from\n # @param X is a floating point number range 0.0-1.0\n # @return the closest value to X in the list L\n \n # initialize closest value to X to None\n closest = None\n \n # initialize the difference between |closest and X| to -1\n closestDifference = -1\n for i in range(len(L)):\n diff = abs(X - L[i])\n \n if L[i] == X:\n # if L[i] is X, do not update closest\n pass\n \n elif (closestDifference == -1) or (diff < closestDifference):\n # if closestDifference has not been changed\n # or L[i] is closer to X than closest\n closestDifference = diff\n closest = L[i]\n\n return closest\n\nif __name__ == '__main__':\n# test1 = fizz_rnd_list(0)\n# 
print(test1.mylist)\n# \n# test2 = fizz_rnd_list(2)\n# print(test2.mylist)\n\n test3 = fizz_rnd_list(5)\n print(test3.mylist)\n \n Y,M,mean,sd = test3.get_stats(0.5)\n print(Y)\n print(M)\n print(mean)\n print(sd)\n \n Y,M,mean,sd = test3.get_stats(1.0)\n print(Y)\n print(M)\n print(mean)\n print(sd)\n\n \n","sub_path":"fizz_rnd_list_class.py","file_name":"fizz_rnd_list_class.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"628977802","text":"seq = [\n [1, 2, 3, 4, 5], # 5\n [2, 1, 2, 3, 2, 4, 2, 5], # 8\n [3, 3, 1, 1, 2, 2, 4, 4, 5, 5] # 10\n]\nscores = [0 for x in range(3)]\nseq_len = [5, 8, 10]\n\ndef checkNext(who, ans, idx):\n target_seq = seq[who]\n t_len = seq_len[who]\n\n t_idx = idx % t_len\n \n return target_seq[t_idx] == ans\n\ndef solution(answers):\n answer = []\n \n for idx, ans in enumerate(answers):\n for who in range(3):\n\n if checkNext(who, ans, idx):\n scores[who] += 1\n\n\n max_score = max(scores)\n \n answer = [ idx + 1 for idx, x in enumerate(scores) if x == max_score ]\n return answer\n\nif __name__ == '__main__':\n print(solution([1,2,3,4,5]))\n ","sub_path":"휴지통/2019_흔적/PROGRAMMERS/level1/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"515899260","text":"from utils import *\n\n# sorts array A in place, returns no value\ndef quick_sort(A, comp=less_than_equal):\n # student code here\n\tquick(A,0,len(A)-1,comp)\ndef swap(A,b,c):\n\tsw=A[b]\n\tA[b]=A[c]\n\tA[c]=sw\ndef quick(A,start,end,comp):\n\tif start!=end:\n\t\tpivot=partition(A,start,end,comp)\n\t\tquick(A,start,pivot,comp)\n\t\tquick(A,pivot+1,end,comp)\ndef partition(A,start,end,comp):\n\tp=A[end-1]\n\ti=start-1\n\tfor j in range(start,end-1):\n\t\tif comp(A[j],p):\n\t\t\ti+=1\n\t\t\tswap(A,i,j)\n\tswap(A,i+1,end-1)\n\treturn i+1\nif __name__ == \"__main__\":\n # student unit tests here\n pass\n","sub_path":"hw8/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"158192689","text":"from math import exp\n\nfrom numpy import (\n asarray,\n atleast_2d,\n concatenate,\n dot,\n full,\n log,\n maximum,\n sum as npsum,\n zeros,\n)\nfrom numpy.linalg import inv, lstsq, slogdet\nfrom numpy_sugar import epsilon\n\nfrom glimix_core._util import cache, log2pi\nfrom optimix import Function, Scalar\n\nfrom .._util import economic_qs_zeros, numbers\nfrom ._lmm_scan import FastScanner\n\n\nclass LMM(Function):\n r\"\"\"\n Fast Linear Mixed Models inference via maximum likelihood.\n\n Examples\n --------\n .. doctest::\n\n >>> from numpy import array\n >>> from numpy_sugar.linalg import economic_qs_linear\n >>> from glimix_core.lmm import LMM\n >>>\n >>> X = array([[1, 2], [3, -1]], float)\n >>> QS = economic_qs_linear(X)\n >>> covariates = array([[1], [1]])\n >>> y = array([-1, 2], float)\n >>> lmm = LMM(y, covariates, QS)\n >>> lmm.fit(verbose=False)\n >>> print('%.3f' % lmm.lml())\n -3.649\n\n One can also specify which parameters should be fitted:\n\n .. doctest::\n\n >>> from numpy import array\n >>> from numpy_sugar.linalg import economic_qs_linear\n >>> from glimix_core.lmm import LMM\n >>>\n >>> X = array([[1, 2], [3, -1]], float)\n >>> QS = economic_qs_linear(X)\n >>> covariates = array([[1], [1]])\n >>> y = array([-1, 2], float)\n >>> lmm = LMM(y, covariates, QS)\n >>> lmm.fix('delta')\n >>> lmm.fix('scale')\n >>> lmm.delta = 0.5\n >>> lmm.scale = 1\n >>> lmm.fit(verbose=False)\n >>> print('%.3f' % lmm.lml())\n -3.832\n >>> lmm.unfix('delta')\n >>> lmm.fit(verbose=False)\n >>> print('%.3f' % lmm.lml())\n -3.713\n\n Notes\n -----\n The LMM model can be equivalently written as ::\n\n 𝐲 ∼ 𝓝(X𝜷, s((1-𝛿)K + 𝛿I)),\n\n and we thus have v₀ = s (1 - 𝛿) and v₁ = s 𝛿.\n Consider the economic eigendecomposition of K:\n\n .. 
math::\n\n \\overbrace{[\\mathrm Q₀ \\quad \\mathrm Q₁]}^{\\mathrm Q}\n \\overbrace{\\left[\\begin{array}{cc}\n \\mathrm S₀ & 𝟎\\\\\n 𝟎 & 𝟎\n \\end{array}\\right]}^{\\mathrm S}\n \\left[\\begin{array}{c}\n \\mathrm Q₀ᵀ \\\\\n \\mathrm Q₁ᵀ\n \\end{array}\\right] = \\mathrm K\n\n and let\n\n .. math::\n\n \\mathrm D = \\left[\n \\begin{array}{cc}\n (1-𝛿)\\mathrm S₀ + 𝛿\\mathrm I & 𝟎\\\\\n 𝟎 & 𝛿\\mathrm I\n \\end{array}\n \\right].\n\n We thus have ::\n\n ((1-𝛿)K + 𝛿I)⁻¹ = QD⁻¹Qᵀ.\n\n A diagonal covariance-matrix can then be used to define an equivalent\n marginal likelihood::\n\n 𝓝(Qᵀ𝐲|QᵀX𝜷, sD).\n\n \"\"\"\n\n def __init__(self, y, X, QS=None, restricted=False):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n y : array_like\n Outcome.\n X : array_like\n Covariates as a two-dimensional array.\n QS : tuple\n Economic eigendecompositon in form of ``((Q0, Q1), S0)`` of a\n covariance matrix ``K``.\n restricted : bool\n ``True`` for restricted maximum likelihood optimization; ``False``\n otherwise. 
Defaults to ``False``.\n \"\"\"\n from numpy_sugar import is_all_finite\n from numpy_sugar.linalg import ddot, economic_svd\n\n logistic = Scalar(0.0)\n logistic.listen(self._delta_update)\n logistic.bounds = (-numbers.logmax, +numbers.logmax)\n Function.__init__(self, \"LMM\", logistic=logistic)\n self._logistic = logistic\n\n y = asarray(y, float).ravel()\n if not is_all_finite(y):\n raise ValueError(\"There are non-finite values in the outcome.\")\n\n if len(y) == 0:\n raise ValueError(\"The outcome array is empty.\")\n\n X = atleast_2d(asarray(X, float).T).T\n if not is_all_finite(X):\n raise ValueError(\"There are non-finite values in the covariates matrix.\")\n\n self._optimal = {\"beta\": False, \"scale\": False}\n if QS is None:\n QS = economic_qs_zeros(len(y))\n self.delta = 1.0\n logistic.fix()\n else:\n self.delta = 0.5\n\n if QS[0][0].shape[0] != len(y):\n msg = \"Sample size differs between outcome and covariance decomposition.\"\n raise ValueError(msg)\n\n if y.shape[0] != X.shape[0]:\n msg = \"Sample size differs between outcome and covariates.\"\n raise ValueError(msg)\n\n self._Darr = []\n n = y.shape[0]\n d = self.delta\n if QS[1].size > 0:\n self._Darr += [QS[1] * (1 - d) + d]\n if QS[1].size < n:\n self._Darr += [full(n - QS[1].size, d)]\n\n self._y = y\n self._QS = QS\n SVD = economic_svd(X)\n self._X = {\"X\": X, \"tX\": ddot(SVD[0], SVD[1]), \"VT\": SVD[2]}\n self._tbeta = zeros(len(SVD[1]))\n self._scale = 1.0\n self._fix = {\"beta\": False, \"scale\": False}\n self._restricted = restricted\n\n @property\n def beta(self):\n \"\"\"\n Fixed-effect sizes.\n\n Returns\n -------\n effect-sizes : numpy.ndarray\n Optimal fixed-effect sizes.\n\n Notes\n -----\n Setting the derivative of log(p(𝐲)) over effect sizes equal\n to zero leads to solutions 𝜷 from equation ::\n\n (QᵀX)ᵀD⁻¹(QᵀX)𝜷 = (QᵀX)ᵀD⁻¹(Qᵀ𝐲).\n \"\"\"\n from numpy_sugar.linalg import rsolve\n\n return rsolve(self._X[\"VT\"], rsolve(self._X[\"tX\"], self.mean()))\n\n @beta.setter\n def 
beta(self, beta):\n beta = asarray(beta, float).ravel()\n self._tbeta[:] = self._X[\"VT\"] @ beta\n self._optimal[\"beta\"] = False\n self._optimal[\"scale\"] = False\n\n @property\n def beta_covariance(self):\n \"\"\"\n Estimates the covariance-matrix of the optimal beta.\n\n Returns\n -------\n beta-covariance : ndarray\n (Xᵀ(s((1-𝛿)K + 𝛿I))⁻¹X)⁻¹.\n\n References\n ----------\n .. Rencher, A. C., & Schaalje, G. B. (2008). Linear models in statistics. John\n Wiley & Sons.\n \"\"\"\n from numpy_sugar.linalg import ddot\n\n tX = self._X[\"tX\"]\n Q = concatenate(self._QS[0], axis=1)\n S0 = self._QS[1]\n D = self.v0 * S0 + self.v1\n D = D.tolist() + [self.v1] * (len(self._y) - len(D))\n D = asarray(D)\n A = inv(tX.T @ (Q @ ddot(1 / D, Q.T @ tX)))\n VT = self._X[\"VT\"]\n H = lstsq(VT, A, rcond=None)[0]\n return lstsq(VT, H.T, rcond=None)[0]\n\n def fix(self, param):\n \"\"\"\n Disable parameter optimization.\n\n Parameters\n ----------\n param : str\n Possible values are ``\"delta\"``, ``\"beta\"``, and ``\"scale\"``.\n \"\"\"\n if param == \"delta\":\n super()._fix(\"logistic\")\n else:\n self._fix[param] = True\n\n def unfix(self, param):\n \"\"\"\n Enable parameter optimization.\n\n Parameters\n ----------\n param : str\n Possible values are ``\"delta\"``, ``\"beta\"``, and ``\"scale\"``.\n \"\"\"\n if param == \"delta\":\n self._unfix(\"logistic\")\n else:\n self._fix[param] = False\n\n @property\n def v0(self):\n \"\"\"\n First variance.\n\n Returns\n -------\n v0 : float\n s(1 - 𝛿).\n \"\"\"\n return self.scale * (1 - self.delta)\n\n @property\n def v1(self):\n \"\"\"\n Second variance.\n\n Returns\n -------\n v1 : float\n s𝛿.\n \"\"\"\n return self.scale * self.delta\n\n def fit(self, verbose=True):\n \"\"\"\n Maximise the marginal likelihood.\n\n Parameters\n ----------\n verbose : bool, optional\n ``True`` for progress output; ``False`` otherwise.\n Defaults to ``True``.\n \"\"\"\n if not self._isfixed(\"logistic\"):\n self._maximize_scalar(desc=\"LMM\", 
rtol=1e-6, atol=1e-6, verbose=verbose)\n\n if not self._fix[\"beta\"]:\n self._update_beta()\n\n if not self._fix[\"scale\"]:\n self._update_scale()\n\n def get_fast_scanner(self):\n \"\"\"\n Return :class:`.FastScanner` for association scan.\n\n Returns\n -------\n fast-scanner : :class:`.FastScanner`\n Instance of a class designed to perform very fast association scan.\n \"\"\"\n v0 = self.v0\n v1 = self.v1\n QS = (self._QS[0], v0 * self._QS[1])\n return FastScanner(self._y, self.X, QS, v1)\n\n def value(self):\n \"\"\"\n Internal use only.\n \"\"\"\n if not self._fix[\"beta\"]:\n self._update_beta()\n\n if not self._fix[\"scale\"]:\n self._update_scale()\n\n return self.lml()\n\n def gradient(self):\n \"\"\"\n Not implemented.\n \"\"\"\n raise NotImplementedError\n\n @property\n def nsamples(self):\n \"\"\"\n Number of samples, n.\n \"\"\"\n return len(self._y)\n\n @property\n def ncovariates(self):\n \"\"\"\n Number of covariates, c.\n \"\"\"\n return self._X[\"X\"].shape[1]\n\n def lml(self):\n \"\"\"\n Log of the marginal likelihood.\n\n Returns\n -------\n lml : float\n Log of the marginal likelihood.\n\n Notes\n -----\n The log of the marginal likelihood is given by ::\n\n 2⋅log(p(𝐲)) = -n⋅log(2π) - n⋅log(s) - log|D| - (Qᵀ𝐲)ᵀs⁻¹D⁻¹(Qᵀ𝐲)\n + (Qᵀ𝐲)ᵀs⁻¹D⁻¹(QᵀX𝜷)/2 - (QᵀX𝜷)ᵀs⁻¹D⁻¹(QᵀX𝜷).\n\n By using the optimal 𝜷, the log of the marginal likelihood can be rewritten\n as::\n\n 2⋅log(p(𝐲)) = -n⋅log(2π) - n⋅log(s) - log|D| + (Qᵀ𝐲)ᵀs⁻¹D⁻¹Qᵀ(X𝜷-𝐲).\n\n\n In the extreme case where 𝜷 is such that 𝐲 = X𝜷, the maximum is attained as\n s→0.\n\n For optimals 𝜷 and s, the log of the marginal likelihood can be further\n simplified to ::\n\n 2⋅log(p(𝐲; 𝜷, s)) = -n⋅log(2π) - n⋅log s - log|D| - n.\n \"\"\"\n reml = (self._logdetXX() - self._logdetH()) / 2\n if self._optimal[\"scale\"]:\n lml = self._lml_optimal_scale()\n else:\n lml = self._lml_arbitrary_scale()\n return lml + reml\n\n @property\n def X(self):\n \"\"\"\n Covariates matrix.\n\n Returns\n -------\n X : 
ndarray\n Covariates.\n \"\"\"\n return self._X[\"X\"]\n\n @property\n def delta(self):\n \"\"\"\n Variance ratio between ``K`` and ``I``.\n \"\"\"\n\n v = float(self._logistic.value)\n\n if v > 0.0:\n v = 1 / (1 + exp(-v))\n else:\n v = exp(v)\n v = v / (v + 1.0)\n\n return min(max(v, epsilon.tiny), 1 - epsilon.tiny)\n\n @delta.setter\n def delta(self, delta):\n delta = min(max(delta, epsilon.tiny), 1 - epsilon.tiny)\n self._logistic.value = log(delta / (1 - delta))\n self._optimal[\"beta\"] = False\n self._optimal[\"scale\"] = False\n\n @property\n def scale(self):\n \"\"\"\n Scaling factor.\n\n Returns\n -------\n scale : float\n Scaling factor.\n\n Notes\n -----\n Setting the derivative of log(p(𝐲; 𝜷)), for which 𝜷 is optimal, over\n scale equal to zero leads to the maximum ::\n\n s = n⁻¹(Qᵀ𝐲)ᵀD⁻¹ Qᵀ(𝐲-X𝜷).\n\n In the case of restricted marginal likelihood ::\n\n s = (n-c)⁻¹(Qᵀ𝐲)ᵀD⁻¹ Qᵀ(𝐲-X𝜷),\n\n where s is the number of covariates.\n \"\"\"\n return self._scale\n\n @scale.setter\n def scale(self, scale):\n self._scale = scale\n self._optimal[\"scale\"] = False\n\n def mean(self):\n \"\"\"\n Mean of the prior.\n\n Formally, 𝐦 = X𝜷.\n\n Returns\n -------\n mean : ndarray\n Mean of the prior.\n \"\"\"\n return self._X[\"tX\"] @ self._tbeta\n\n def covariance(self):\n \"\"\"\n Covariance of the prior.\n\n Returns\n -------\n covariance : ndarray\n v₀K + v₁I.\n \"\"\"\n from numpy_sugar.linalg import ddot, sum2diag\n\n Q0 = self._QS[0][0]\n S0 = self._QS[1]\n return sum2diag(dot(ddot(Q0, self.v0 * S0), Q0.T), self.v1)\n\n def _delta_update(self):\n self._optimal[\"beta\"] = False\n self._optimal[\"scale\"] = False\n self._Dcache = None\n\n @cache\n def _logdetXX(self):\n \"\"\"\n log(|XᵀX|).\n \"\"\"\n if not self._restricted:\n return 0.0\n\n ldet = slogdet(self._X[\"tX\"].T @ self._X[\"tX\"])\n if ldet[0] != 1.0:\n raise ValueError(\"The determinant of XᵀX should be positive.\")\n return ldet[1]\n\n def _logdetH(self):\n \"\"\"\n log(|H|) for H = s⁻¹XᵀQD⁻¹QᵀX.\n 
\"\"\"\n if not self._restricted:\n return 0.0\n ldet = slogdet(sum(self._XTQDiQTX) / self.scale)\n if ldet[0] != 1.0:\n raise ValueError(\"The determinant of H should be positive.\")\n return ldet[1]\n\n def _lml_optimal_scale(self):\n \"\"\"\n Log of the marginal likelihood for optimal scale.\n\n Implementation for unrestricted LML::\n\n Returns\n -------\n lml : float\n Log of the marginal likelihood.\n \"\"\"\n assert self._optimal[\"scale\"]\n\n n = len(self._y)\n lml = -self._df * log2pi - self._df - n * log(self.scale)\n lml -= sum(npsum(log(D)) for D in self._D)\n return lml / 2\n\n def _lml_arbitrary_scale(self):\n \"\"\"\n Log of the marginal likelihood for arbitrary scale.\n\n Returns\n -------\n lml : float\n Log of the marginal likelihood.\n \"\"\"\n s = self.scale\n D = self._D\n n = len(self._y)\n lml = -self._df * log2pi - n * log(s)\n lml -= sum(npsum(log(d)) for d in D)\n d = (mTQ - yTQ for (mTQ, yTQ) in zip(self._mTQ, self._yTQ))\n lml -= sum((i / j) @ i for (i, j) in zip(d, D)) / s\n\n return lml / 2\n\n @property\n def _df(self):\n \"\"\"\n Degrees of freedom.\n \"\"\"\n if not self._restricted:\n return self.nsamples\n return self.nsamples - self._X[\"tX\"].shape[1]\n\n def _optimal_scale_using_optimal_beta(self):\n from numpy_sugar import epsilon\n\n assert self._optimal[\"beta\"]\n\n yTQDiQTy = self._yTQDiQTy\n yTQDiQTm = self._yTQDiQTX\n s = sum(i - j @ self._tbeta for (i, j) in zip(yTQDiQTy, yTQDiQTm))\n return maximum(s / self._df, epsilon.small)\n\n def _update_beta(self):\n from numpy_sugar.linalg import rsolve\n\n assert not self._fix[\"beta\"]\n if self._optimal[\"beta\"]:\n return\n\n yTQDiQTm = list(self._yTQDiQTX)\n mTQDiQTm = list(self._XTQDiQTX)\n nominator = yTQDiQTm[0]\n denominator = mTQDiQTm[0]\n\n if len(yTQDiQTm) > 1:\n nominator += yTQDiQTm[1]\n denominator += mTQDiQTm[1]\n\n self._tbeta[:] = rsolve(denominator, nominator)\n self._optimal[\"beta\"] = True\n self._optimal[\"scale\"] = False\n\n def _update_scale(self):\n 
from numpy_sugar import epsilon\n\n if self._optimal[\"beta\"]:\n self._scale = self._optimal_scale_using_optimal_beta()\n else:\n yTQDiQTy = self._yTQDiQTy\n yTQDiQTm = self._yTQDiQTX\n b = self._tbeta\n p0 = sum(i - 2 * j @ b for (i, j) in zip(yTQDiQTy, yTQDiQTm))\n p1 = sum((b @ i) @ b for i in self._XTQDiQTX)\n self._scale = maximum((p0 + p1) / self._df, epsilon.small)\n\n self._optimal[\"scale\"] = True\n\n @property\n def _D(self):\n if self._Dcache is None:\n i = 0\n d = self.delta\n if self._QS[1].size > 0:\n self._Darr[i][:] = self._QS[1]\n self._Darr[i] *= 1 - d\n self._Darr[i] += d\n i += 1\n if self._QS[1].size < self._y.shape[0]:\n self._Darr[i][:] = d\n\n self._Dcache = self._Darr\n return self._Dcache\n\n @property\n def _XTQDiQTX(self):\n return (i / j @ i.T for (i, j) in zip(self._tXTQ, self._D))\n\n @property\n def _mTQ(self):\n return (self.mean().T @ Q for Q in self._QS[0] if Q.size > 0)\n\n @property\n def _tXTQ(self):\n return (self._X[\"tX\"].T @ Q for Q in self._QS[0] if Q.size > 0)\n\n @property\n def _XTQ(self):\n return (self._X[\"tX\"].T @ Q for Q in self._QS[0] if Q.size > 0)\n\n @property\n def _yTQ(self):\n return (self._y.T @ Q for Q in self._QS[0] if Q.size > 0)\n\n @property\n def _yTQQTy(self):\n return (yTQ ** 2 for yTQ in self._yTQ)\n\n @property\n def _yTQDiQTy(self):\n return (npsum(i / j) for (i, j) in zip(self._yTQQTy, self._D))\n\n @property\n def _yTQDiQTX(self):\n yTQ = self._yTQ\n D = self._D\n tXTQ = self._tXTQ\n return (i / j @ l.T for (i, j, l) in zip(yTQ, D, tXTQ))\n","sub_path":"glimix_core/lmm/_lmm.py","file_name":"_lmm.py","file_ext":"py","file_size_in_byte":17393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"73177006","text":"import sys\nimport numpy as np\nfrom scipy.io import wavfile\nimport scipy.signal as sig\nimport warnings\n\n\n\nthreshold = 155\n\nif not sys.argv[1]:\n print(\"Podaj plik do analizy\")\n exit(0)\n\ntry:\n warnings.filterwarnings('ignore')\n wav_fs, data = wavfile.read(sys.argv[1]) #read WAV file\nexcept ValueError:\n print(\"M\")\n exit(0)\n\n\n\n#Stereo\ndata = data.astype(float)\nif len(data.shape) > 1:\n data = data.sum(axis=1) / 2\n\n\ndata = data - np.mean(data)\ndata_autokor = sig.correlate(data, data)\ndata_autokor = data_autokor[len(data_autokor)//2:]\ndata_autokor = data_autokor / data_autokor[0]\n\nautocor = np.maximum(data_autokor, 0)\nbufer = np.zeros(len(autocor))\n\nx = len(bufer)\nn = len(autocor) // 2\nbufer[:x-1:2] = autocor[:n]\nbufer[1::2] = (autocor[:n] + autocor[1:n+1]) / 2\nautocor = np.maximum(autocor - bufer, 0)\nm = np.argmax(autocor)\n\nfreq = wav_fs / m\n\nif freq>threshold:\n print(\"K\")\nelse:\n print(\"M\")","sub_path":"cor.py","file_name":"cor.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"524796877","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='AreaInteresse',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nome', models.CharField(help_text='Nome da \\xc1rea de Interesse', max_length=64, verbose_name='Nome')),\n ('min_lat', models.FloatField(help_text='Latitude M\\xednima de Download', verbose_name='Latitude M\\xednima')),\n ('max_lat', models.FloatField(help_text='Latitude M\\xe1xima de Download', verbose_name='Latitude M\\xe1xima')),\n ('min_lon', models.FloatField(help_text='Longitude M\\xednima de Download', verbose_name='Longitude M\\xednima')),\n ('max_lon', models.FloatField(help_text='Longitude M\\xe1xima de Download', verbose_name='Longitude M\\xe1xima')),\n ('style', models.FileField(help_text='Arquivo de estilo de importa\\xe7\\xe3o.', upload_to=b'', null=True, verbose_name=b'Estilo Importa\\xc3\\xa7\\xc3\\xa3o', blank=True)),\n ],\n options={\n 'verbose_name': '\\xc1rea de Interesse',\n 'verbose_name_plural': '\\xc1reas de Interesse',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='AtualizacaoOpenStreetMaps',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('data_atualizacao', models.DateTimeField(help_text='Data/Hora da Atualiza\\xe7\\xe3o', verbose_name=b'Data Atualiza\\xc3\\xa7\\xc3\\xa3o', auto_now_add=True)),\n ('sucesso', models.BooleanField(default=False, help_text='Atualiza\\xe7\\xe3o foi bem sucedida?', verbose_name=b'Atualiza\\xc3\\xa7\\xc3\\xa3o bem sucedidade?')),\n ('area_interesse', models.ForeignKey(related_name='atualizacoes', verbose_name='\\xc1rea de Interesse', to='osm.AreaInteresse', help_text='\\xc1rea de interesse desta atualiza\\xe7\\xe3o')),\n ],\n options={\n 
'ordering': ['-data_atualizacao'],\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"src/speed/osm/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"44337714","text":"import matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\ndef impulse(t,shift = 0):\n '''Unit Impulse Function'''\n if (t+shift) == 0:\n # used 100 to mimic infinity\n return 100\n else:\n return 0\nl = []\nshift = int(input(\"Enter Shift\\n\"))\nt = range(-100,100)\nfor i in t:\n l.append(impulse(i,shift))\nplt.plot(t,l)\nplt.show()","sub_path":"DSP/elementary_cont_time_sig/unit_impulse.py","file_name":"unit_impulse.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"267927191","text":"import socket\nimport threading, pickle\nfrom conversation import Client_Server_Converse\nfrom soldier import Soldier\nfrom eleph import Elephant\nfrom camel import Camel\nfrom horse import Horse\nfrom queen import Queen\nfrom king import King\n# from . import soldier, eleph, camel, horse, queen, king\nsoc = socket.socket()\nsoc.bind(('localhost', 3690))\nsoc.listen(4)\n\nclass MetaLabel:\n def __init__(self, position, character):\n self.position = position\n self.character = character\nglobalTurn = {'YOUR_TURN':'YOUR_TURN', 'MY_TURN':'MY_TURN'}\nlocalTurn = 'MY_TURN'\nclass ReceiveRequest(threading.Thread):\n def __init__(self, client, turn):\n threading.Thread.__init__(self)\n self.client = client\n self.turn = turn\n \n def processAndSend(self, char, pos):\n position = []\n if char == 'white:soldier' or char == 'black:soldier':\n sold = Soldier(char, pos)\n position = sold.getPositions()\n\n elif char == 'white:eleph' or char == 'black:eleph':\n elephant = Elephant(char, pos)\n position = elephant.getPositions()\n \n elif char == 'white:camel' or char == 'black:camel':\n cam = Camel(char, pos)\n position = cam.getPositions()\n\n elif char == 'white:horse' or char == 'black:horse':\n hors = Horse(char, pos)\n position = hors.getPositions()\n \n elif char == 'white:queen' or char == 'black:queen':\n qun = Queen(char, pos)\n position = qun.getPositions()\n \n elif char == 'white:king' or char == 'black:king':\n kin = King(char, pos)\n position = kin.getPositions()\n dataString = pickle.dumps(position)\n self.client.send(dataString)\n \n def sendToOther(self, position):\n data = pickle.dumps(position)\n for connection in toAll:\n if connection != self.client:\n connection.send(data)\n def run(self):\n global localTurn\n while True:\n if self.turn is localTurn:\n pass\n else:\n print(\"Wating for \", globalTurn)\n print(\"Completed \", self.turn)\n data = self.client.recv(1024)\n positionString = pickle.loads(data)\n print(\"Server 
Received: \",positionString)\n if type(positionString) is not list: # if meta label object\n self.processAndSend(positionString.character, positionString.position)\n elif type(positionString) is list:\n self.sendToOther(positionString)\n localTurn = globalTurn[self.turn]\n self.client.close()\n\n def validate(self, color):\n global colors\n if color in colors:\n colors.remove(color)\n return color\n color = colors[0]\n colors.pop(0)\n return color\ntoAll = []\ncolors = ['white', 'black']\nturns = ['YOUR_TURN', 'MY_TURN']\nwhile True:\n client,addr = soc.accept()\n toAll.append(client)\n receiver = ReceiveRequest(client, turns[0])\n turns.pop(0)\n color = client.recv(1024).decode('utf-8')\n result = receiver.validate(color)\n client.send(str(result).encode('utf-8'))\n receiver.start()","sub_path":"server/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"4920946","text":"#!/usr/bin/env python\n\nimport feedparser\nimport hashlib\nimport pickle\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport smtplib\n\ndef sendEmail(fileName):\n emailTo = 'user@domain.com'\n msg = MIMEMultipart('alternative')\n\n emailFrom = \"your.gmail.user@gmail.com\"\n username = emailFrom\n password = \"your.gmail.pass\"\n smtpServer = 'smtp.gmail.com'\n\n msg['Subject'] = 'Craigslist Alert'\n msg['From'] = emailFrom\n msg['To'] = emailTo\n\n text = \"Email client cannot display HTML\"\n html = open(fileName,\"rb\").read()\n\n part1 = MIMEText(text, 'plain')\n part2 = MIMEText(html, 'html')\n\n msg.attach(part1)\n msg.attach(part2)\n\n server = smtplib.SMTP(smtpServer, 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(username, password)\n server.sendmail(emailFrom, [emailTo], msg.as_string())\n server.close()\n\nif __name__ == '__main__':\n\n try:\n with open('data.pickle', 'rb') as fp:\n data = pickle.load(fp)\n fp.close()\n except:\n data = dict()\n\n feed = feedparser.parse('http://denver.craigslist.org/search/cta?sort=date&autoMinYear=2012&maxAsk=23000&minAsk=10000&format=rss')\n\n entriesToEmail = []\n\n for entry in feed.entries:\n entryHash = hashlib.md5(str(entry)).hexdigest()\n if entryHash not in data.keys():\n data[entryHash] = entry\n entriesToEmail.append(entry)\n\n if len(entriesToEmail) > 0:\n with open('email.html', 'w') as fp:\n fp.write('' + '\\n')\n fp.write('' + '\\n')\n for entry in entriesToEmail:\n fp.write('' + entry.title + '
' + '\\n')\n fp.write('' + '\\n')\n fp.write('' + '\\n')\n fp.close()\n\n sendEmail('email.html')\n \n with open('data.pickle', 'wb') as fp:\n pickle.dump(data, fp)\n fp.close()\n","sub_path":"checkCraigslist.py","file_name":"checkCraigslist.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"283634247","text":"from itertools import combinations\n\nN = int(input())\nability_board = []\nfor _ in range(N):\n ability_board.append(list(map(int, input().split())))\n\nnum_list = [i for i in range(N)]\nres = float('inf')\n\n\ndef solve():\n global res\n\n # 조합을 이용하여 각 후보자를 생성함\n for cand in combinations(num_list, N // 2):\n # 선택된 후보와 나머지\n start_member = list(cand)\n link_member = list(set(num_list) - set(cand))\n\n # 점수 비교는 2명씩 이루어진다.\n start_comb = list(combinations(start_member, 2))\n link_comb = list(combinations(link_member, 2))\n\n # 점수 구하기\n start_sum = 0\n for x, y in start_comb:\n start_sum += (ability_board[x][y] + ability_board[y][x])\n\n link_sum = 0\n for x, y in link_comb:\n link_sum += (ability_board[x][y] + ability_board[y][x])\n\n # 차이를 구하는 것이므로 abs 사용\n if (res > abs(start_sum - link_sum)):\n res = abs(start_sum - link_sum)\n\n\nsolve()\nprint(res)","sub_path":"Python_Solutions/14889.py","file_name":"14889.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"310304209","text":"__author__ = 'Christian'\r\n\r\n#fname = 'test_a.txt'\r\n#fname = 'A-small-attempt0.in'\r\nfname = 'A-large.in'\r\n\r\nf = open(fname, 'r')\r\ndata = f.read().split('\\n')\r\nf.close()\r\n\r\nres_file = open(fname + '.res', 'w')\r\n\r\n\r\ndef compute_sleep(n):\r\n res = set()\r\n \r\n if n == 0:\r\n return \"INSOMNIA\"\r\n \r\n current = 0\r\n i = 0\r\n while (i<1000000):\r\n current += n\r\n res = res.union(set(str(current)))\r\n if len(res) == 10:\r\n return current\r\n return \"INSOMNIA\"\r\n\r\nT = int(data[0])\r\nfor i in range(T):\r\n print >> res_file, \"Case #%s: %s\" % (i+1, compute_sleep(int(data[i+1])))\r\n \r\nres_file.close()","sub_path":"solutions_5652388522229760_1/Python/ChrisViking/prob_a.py","file_name":"prob_a.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"205049768","text":"# encoding: utf8\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('seatalloc', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),\n ('name', models.CharField(max_length=128, unique=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Page',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),\n ('category', models.ForeignKey(to='seatalloc.Category', to_field='id')),\n ('title', models.CharField(max_length=128)),\n ('url', models.URLField()),\n ('views', models.IntegerField(default=0)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"seatalloc/migrations/0002_category_page.py","file_name":"0002_category_page.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"491986562","text":"from powerup.power_up import Power_Up\nimport powerup, helper, effects\nfrom tiles import Tile\nimport pygame\n\nclass Attack_PUP(Power_Up):\n \"\"\"\n Power-up that increases \n the damage done by unit by 2\n Does not affect Transport units, but still gets destroyed\n \"\"\"\n sprite = pygame.image.load(\"assets/Damage_PUP.png\")\n \n def __init__(self, **keywords):\n #load the base class\n super().__init__(**keywords)\n\n #load the image\n self.image = Attack_PUP.sprite\n\n #set unit specific things.\n self.type = \"Attack_PUP\"\n self.description = \"+2 Damage\"\n\n def use_PUP(self, unit):\n \"\"\"\n Gives a unit the PUP's buff\n \"\"\"\n if unit.type != \"Transport\": \n unit.damage += 2\n\n # a power-up is destroyed after use\n self.deactivate()\n\n \npowerup.PUP_types[\"Attack_PUP\"] = Attack_PUP\n","sub_path":"a2_game/powerup/attack_PUP.py","file_name":"attack_PUP.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"559719635","text":"# --------------------------------------------------------\n# Deep Iterative Matching Network\n# Licensed under The Apache-2.0 License [see LICENSE for details]\n# Written by Gu Wang, Yi Li\n# --------------------------------------------------------\n\"\"\"\nuse YCB trans to make multiple objects on the same plane as much as possible\n\"\"\"\nfrom __future__ import print_function, division\nimport numpy as np\nimport scipy.io as sio\nimport os\nimport sys\n\ncur_path = os.path.abspath(os.path.dirname(__file__))\nsys.path.insert(1, os.path.join(cur_path, \"..\"))\nfrom lib.pair_matching.RT_transform import *\nfrom lib.utils.mkdir_if_missing import mkdir_if_missing\nfrom six.moves import cPickle\nimport copy\nfrom tqdm import tqdm\nimport random\n\nrandom.seed(1234)\nnp.random.seed(2345)\n\ndata_dir = os.path.join(cur_path, \"..\", \"data\", \"render_v5\", \"data\", \"observed\")\n\ngt_observed_dir = os.path.join(\n cur_path, \"..\", \"data\", \"render_v5\", \"data\", \"gt_observed\"\n)\n\n\nK_lm = np.array([[572.4114, 0, 325.2611], [0, 573.57043, 242.04899], [0, 0, 1]])\nK_YCB_video = np.array([[1066.778, 0, 312.9869], [0, 1067.487, 241.3109], [0, 0, 1]])\n\n\npose_dict = {} # {observed_prefix: {class: euler angles, translations}}\neuler_stat = {}\ntrans_stat = {}\n\nobserved_indices = []\nvideos = [\"{:04d}\".format(i) for i in range(48)]\nfor video in videos:\n filelist = [fn for fn in os.listdir(os.path.join(data_dir, video)) if \"color\" in fn]\n for idx, fn in enumerate(filelist):\n if idx % 4 == 0:\n observed_indices.append(\"{}/{}\".format(video, fn.split(\"-\")[0]))\n\nprint(len(observed_indices))\n\nYCB_video_classes = [\n \"__background__\",\n \"002_master_chef_can\",\n \"003_cracker_box\",\n \"004_sugar_box\",\n \"005_tomato_soup_can\",\n \"006_mustard_bottle\",\n \"007_tuna_fish_can\",\n \"008_pudding_box\",\n \"009_gelatin_box\",\n \"010_potted_meat_can\",\n \"011_banana\",\n \"019_pitcher_base\",\n 
\"021_bleach_cleanser\",\n \"024_bowl\",\n \"025_mug\",\n \"035_power_drill\",\n \"036_wood_block\",\n \"037_scissors\",\n \"040_large_marker\",\n \"051_large_clamp\",\n \"052_extra_large_clamp\",\n \"061_foam_brick\",\n]\n\nidx2class = {\n 1: \"ape\",\n 5: \"can\",\n 6: \"cat\",\n 8: \"driller\",\n 9: \"duck\",\n 10: \"eggbox\",\n 11: \"glue\",\n 12: \"holepuncher\",\n}\n\n\ndef class2idx(class_name, idx2class=idx2class):\n for k, v in idx2class.items():\n if v == class_name:\n return k\n\n\nclasses = idx2class.values()\nclasses = sorted(classes)\n\n\ndef get_poses_from_meta(meta_path):\n meta_data = sio.loadmat(meta_path)\n cls_idxs = meta_data[\"cls_indexes\"]\n poses = []\n for class_idx in cls_idxs:\n inner_id = np.where(np.squeeze(meta_data[\"cls_indexes\"]) == class_idx)\n if len(meta_data[\"poses\"].shape) == 2:\n pose = meta_data[\"poses\"]\n else:\n pose = np.squeeze(meta_data[\"poses\"][:, :, inner_id])\n poses.append(pose)\n return poses\n\n\ndef stat_YCB_video():\n res_dir = os.path.join(cur_path, \"../data/LINEMOD_6D/pose_stat_v2\")\n mkdir_if_missing(res_dir)\n if os.path.exists(os.path.join(res_dir, \"trans_from_YCB_video.pkl\")):\n trans_dict = cPickle.load(\n open(os.path.join(res_dir, \"trans_from_YCB_video.pkl\"), \"rb\")\n )\n else:\n pose_dict = {}\n trans_list = []\n trans_lm_list = []\n trans_dict = {}\n for j, observed_idx in enumerate(tqdm(observed_indices)):\n meta_path = os.path.join(data_dir, observed_idx + \"-meta.mat\")\n poses = get_poses_from_meta(meta_path)\n tmp_pose = np.zeros((len(poses), 6), dtype=\"float32\")\n tmp_trans = np.zeros((len(poses), 3), dtype=\"float32\")\n for i, pose in enumerate(poses):\n rot_euler = mat2euler(pose[:3, :3])\n trans = pose[:3, 3]\n tmp_pose[i, :3] = rot_euler\n tmp_pose[i, 3:] = trans\n trans_list.append(trans)\n\n trans_lm = np.dot(\n np.dot(np.linalg.inv(K_lm), K_YCB_video), trans.reshape((3, 1))\n )\n trans_lm = trans_lm.reshape((3,))\n trans_lm_list.append(trans_lm)\n tmp_trans[i, :] = 
trans_lm\n pose_dict[\"{:06d}\".format(j)] = tmp_pose\n trans_dict[\"{:06d}\".format(j)] = tmp_trans\n\n trans_array = np.array(trans_list)\n trans_mean = np.mean(trans_array, 0)\n trans_std = np.std(trans_array, 0)\n print(\"trans, \", \"mean: \", trans_mean, \"std: \", trans_std)\n\n trans_lm_array = np.array(trans_lm_list)\n trans_lm_mean = np.mean(trans_lm_array, 0)\n trans_lm_std = np.std(trans_lm_array, 0)\n print(\"trans lm, \", \"mean: \", trans_lm_mean, \"std: \", trans_lm_std)\n\n print(len(pose_dict))\n\n # cPickle.dump(pose_dict, open(os.path.join(res_dir, 'YCB_video_pose_dict.pkl'), 'wb'), 2)\n # {prefix: array(num_posex7)}, num_pose is uncertain\n\n cPickle.dump(\n trans_dict, open(os.path.join(res_dir, \"trans_from_YCB_video.pkl\"), \"wb\"), 2\n )\n\n return trans_dict\n\n\n# # stat olm pose ------------------------------------------------------------\n\n\ndef angle(u, v):\n c = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)) # -> cosine of the angle\n rad = np.arccos(np.clip(c, -1, 1))\n deg = rad / np.pi * 180\n return deg\n\n\ndef stat_lm6d():\n observed_set_dir = os.path.join(\n cur_path,\n \"..\",\n \"data/LINEMOD_6D/LM6d_converted/LM6d_refine\",\n \"image_set/observed\",\n )\n gt_observed_dir = os.path.join(\n cur_path, \"../data/LINEMOD_6D/LM6d_converted/LM6d_rerefine/data/gt_observed\"\n )\n\n pz = np.array([0, 0, 1])\n new_points = {}\n pose_dict = {}\n trans_stat = {}\n quat_stat = {}\n for cls_idx, cls_name in enumerate(classes):\n # if cls_name != \"eggbox\":\n # continue\n new_points[cls_name] = {\"pz\": []}\n train_idx_file = os.path.join(observed_set_dir, \"{}_train.txt\".format(cls_name))\n with open(train_idx_file, \"r\") as f:\n observed_indices = [line.strip() for line in f.readlines()]\n\n num_observed = len(observed_indices)\n pose_dict[cls_name] = np.zeros((num_observed, 7)) # quat, translation\n trans_stat[cls_name] = {}\n quat_stat[cls_name] = {}\n for observed_i, observed_idx in enumerate(tqdm(observed_indices)):\n 
prefix = observed_idx.split(\"/\")[1]\n pose_path = os.path.join(\n gt_observed_dir, cls_name, \"{}-pose.txt\".format(prefix)\n )\n assert os.path.exists(pose_path), \"path {} not exists\".format(pose_path)\n pose = np.loadtxt(pose_path, skiprows=1)\n rot = pose[:3, :3]\n # print(rot)\n quat = np.squeeze(mat2quat(rot))\n src_trans = pose[:3, 3]\n pose_dict[cls_name][observed_i, :4] = quat\n pose_dict[cls_name][observed_i, 4:] = src_trans\n\n new_pz = np.dot(rot, pz.reshape((-1, 1))).reshape((3,))\n new_points[cls_name][\"pz\"].append(new_pz)\n new_points[cls_name][\"pz\"] = np.array(new_points[cls_name][\"pz\"])\n new_points[cls_name][\"pz_mean\"] = np.mean(new_points[cls_name][\"pz\"], 0)\n new_points[cls_name][\"pz_std\"] = np.std(new_points[cls_name][\"pz\"], 0)\n\n trans_mean = np.mean(pose_dict[cls_name][:, 4:], 0)\n trans_std = np.std(pose_dict[cls_name][:, 4:], 0)\n trans_max = np.max(pose_dict[cls_name][:, 4:], 0)\n print(\"trans mean: {}\".format(trans_mean))\n print(\"trans std: {}\".format(trans_std))\n print(\"trans max: {}\".format(trans_max))\n\n trans_stat[cls_name][\"trans_mean\"] = trans_mean\n trans_stat[cls_name][\"trans_std\"] = trans_std\n\n quat_mean = np.mean(pose_dict[cls_name][:, :4], 0)\n quat_std = np.std(pose_dict[cls_name][:, :4], 0)\n quat_stat[cls_name][\"quat_mean\"] = quat_mean\n quat_stat[cls_name][\"quat_std\"] = quat_std\n\n print(\n \"new z: \",\n \"mean: \",\n new_points[cls_name][\"pz_mean\"],\n \"std: \",\n new_points[cls_name][\"pz_std\"],\n )\n\n new_points[cls_name][\"angle\"] = [] # angle between mean vector and points\n pz_mean = new_points[cls_name][\"pz_mean\"]\n for p_i in range(new_points[cls_name][\"pz\"].shape[0]):\n deg = angle(pz_mean, new_points[cls_name][\"pz\"][p_i, :])\n new_points[cls_name][\"angle\"].append(deg)\n new_points[cls_name][\"angle\"] = np.array(new_points[cls_name][\"angle\"])\n\n print(\n \"angle mean: \",\n np.mean(new_points[cls_name][\"angle\"]),\n \"angle std: \",\n 
np.std(new_points[cls_name][\"angle\"]),\n \"angle max: \",\n np.max(new_points[cls_name][\"angle\"]),\n )\n new_points[cls_name][\"angle_max\"] = np.max(new_points[cls_name][\"angle\"])\n print()\n\n def vis_points():\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D # noqa:F401\n\n sel_p = \"pz\"\n ax = plt.figure().add_subplot(111, projection=\"3d\")\n ax.scatter(\n new_points[cls_name][sel_p][:, 0],\n new_points[cls_name][sel_p][:, 1],\n new_points[cls_name][sel_p][:, 2],\n c=\"r\",\n marker=\"^\",\n )\n ax.scatter(0, 0, 0, c=\"b\", marker=\"o\")\n ax.scatter(0, 0, 1, c=\"b\", marker=\"o\")\n ax.scatter(0, 1, 0, c=\"b\", marker=\"o\")\n ax.scatter(1, 0, 0, c=\"b\", marker=\"o\")\n ax.quiver(0, 0, 0, 0, 0, 1)\n pz_mean = new_points[cls_name][\"pz_mean\"]\n ax.quiver(0, 0, 0, pz_mean[0], pz_mean[1], pz_mean[2])\n\n ax.scatter(pz_mean[0], pz_mean[1], pz_mean[2], c=\"b\", marker=\"o\")\n ax.set_xlabel(\"X Label\")\n ax.set_ylabel(\"Y Label\")\n ax.set_zlabel(\"Z Label\")\n ax.set_xlim([-1.5, 1.5])\n ax.set_ylim([-1.5, 1.5])\n ax.set_zlim([-1.5, 1.5])\n plt.title(cls_name + \"-\" + sel_p)\n plt.show()\n\n # vis_points()\n return pose_dict, quat_stat, trans_stat, new_points\n\n\ndef stat_lm6d_occ_test():\n observed_set_dir = os.path.join(\n cur_path, \"../data/LINEMOD_6D/LM6d_converted/LM6d_refine/image_set/observed\"\n )\n gt_observed_dir = os.path.join(\n cur_path,\n \"../data/LINEMOD_6D/LM6d_converted/LM6d_refine/data/gt_observed/occ_test\",\n )\n\n pz = np.array([0, 0, 1])\n new_points = {}\n pose_dict = {}\n trans_stat = {}\n quat_stat = {}\n for cls_idx, cls_name in enumerate(classes):\n # if cls_name != \"eggbox\":\n # continue\n new_points[cls_name] = {\"pz\": []}\n test_idx_file = os.path.join(observed_set_dir, \"{}_val.txt\".format(cls_name))\n with open(test_idx_file, \"r\") as f:\n observed_indices = [line.strip() for line in f.readlines()]\n\n num_observed = len(observed_indices)\n pose_dict[cls_name] = np.zeros((num_observed, 
7)) # quat, translation\n trans_stat[cls_name] = {}\n quat_stat[cls_name] = {}\n for observed_i, observed_idx in enumerate(tqdm(observed_indices)):\n prefix = observed_idx.split(\"/\")[1]\n pose_path = os.path.join(\n gt_observed_dir, cls_name, \"{}-pose.txt\".format(prefix)\n )\n assert os.path.exists(pose_path), \"path {} not exists\".format(pose_path)\n pose = np.loadtxt(pose_path, skiprows=1)\n rot = pose[:3, :3]\n # print(rot)\n quat = np.squeeze(mat2quat(rot))\n src_trans = pose[:3, 3]\n pose_dict[cls_name][observed_i, :4] = quat\n pose_dict[cls_name][observed_i, 4:] = src_trans\n\n new_pz = np.dot(rot, pz.reshape((-1, 1))).reshape((3,))\n new_points[cls_name][\"pz\"].append(new_pz)\n new_points[cls_name][\"pz\"] = np.array(new_points[cls_name][\"pz\"])\n new_points[cls_name][\"pz_mean\"] = np.mean(new_points[cls_name][\"pz\"], 0)\n new_points[cls_name][\"pz_std\"] = np.std(new_points[cls_name][\"pz\"], 0)\n\n trans_mean = np.mean(pose_dict[cls_name][:, 4:], 0)\n trans_std = np.std(pose_dict[cls_name][:, 4:], 0)\n trans_max = np.max(pose_dict[cls_name][:, 4:], 0)\n print(\"trans mean: {}\".format(trans_mean))\n print(\"trans std: {}\".format(trans_std))\n print(\"trans max: {}\".format(trans_max))\n\n trans_stat[cls_name][\"trans_mean\"] = trans_mean\n trans_stat[cls_name][\"trans_std\"] = trans_std\n\n quat_mean = np.mean(pose_dict[cls_name][:, :4], 0)\n quat_std = np.std(pose_dict[cls_name][:, :4], 0)\n quat_stat[cls_name][\"quat_mean\"] = quat_mean\n quat_stat[cls_name][\"quat_std\"] = quat_std\n\n print(\n \"new z: \",\n \"mean: \",\n new_points[cls_name][\"pz_mean\"],\n \"std: \",\n new_points[cls_name][\"pz_std\"],\n )\n\n new_points[cls_name][\"angle\"] = [] # angle between mean vector and points\n pz_mean = new_points[cls_name][\"pz_mean\"]\n for p_i in range(new_points[cls_name][\"pz\"].shape[0]):\n deg = angle(pz_mean, new_points[cls_name][\"pz\"][p_i, :])\n new_points[cls_name][\"angle\"].append(deg)\n new_points[cls_name][\"angle\"] = 
np.array(new_points[cls_name][\"angle\"])\n\n print(\n \"angle mean: \",\n np.mean(new_points[cls_name][\"angle\"]),\n \"angle std: \",\n np.std(new_points[cls_name][\"angle\"]),\n \"angle max: \",\n np.max(new_points[cls_name][\"angle\"]),\n )\n new_points[cls_name][\"angle_max\"] = np.max(\n new_points[cls_name][\"angle\"]\n ) # ##############\n print()\n\n def vis_points():\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D # noqa:F401\n\n sel_p = \"pz\"\n ax = plt.figure().add_subplot(111, projection=\"3d\")\n ax.scatter(\n new_points[cls_name][sel_p][:, 0],\n new_points[cls_name][sel_p][:, 1],\n new_points[cls_name][sel_p][:, 2],\n c=\"r\",\n marker=\"^\",\n )\n ax.scatter(0, 0, 0, c=\"b\", marker=\"o\")\n ax.scatter(0, 0, 1, c=\"b\", marker=\"o\")\n ax.scatter(0, 1, 0, c=\"b\", marker=\"o\")\n ax.scatter(1, 0, 0, c=\"b\", marker=\"o\")\n ax.quiver(0, 0, 0, 0, 0, 1)\n pz_mean = new_points[cls_name][\"pz_mean\"]\n ax.quiver(0, 0, 0, pz_mean[0], pz_mean[1], pz_mean[2])\n\n ax.scatter(pz_mean[0], pz_mean[1], pz_mean[2], c=\"b\", marker=\"o\")\n ax.set_xlabel(\"X Label\")\n ax.set_ylabel(\"Y Label\")\n ax.set_zlabel(\"Z Label\")\n ax.set_xlim([-1.5, 1.5])\n ax.set_ylim([-1.5, 1.5])\n ax.set_zlim([-1.5, 1.5])\n plt.title(\"occ_test-\" + cls_name + \"-\" + sel_p)\n plt.show()\n\n # vis_points()\n return pose_dict, quat_stat, trans_stat, new_points\n\n\ndef gen_poses():\n NUM_IMAGES = 20000\n pz = np.array([0, 0, 1])\n pose_dict, quat_stat, trans_stat, new_points = stat_lm6d()\n trans_lm_dict = stat_YCB_video()\n\n observed_prefix_list = [\"{:06d}\".format(i + 1) for i in range(NUM_IMAGES)]\n sel_classes = copy.deepcopy(classes)\n # prefix: {} for prefix in observed_prefix_list} # store poses\n observed_pose_dict = {}\n\n syn_pose_dir = os.path.join(\n cur_path, \"..\", \"data/LINEMOD_6D/LM6d_converted/LM6d_occ_dsm/syn_poses_multi\"\n )\n mkdir_if_missing(syn_pose_dir)\n\n for i in tqdm(range(NUM_IMAGES)):\n observed_prefix = 
observed_prefix_list[i]\n\n # randomly choose a set of transes\n rand_k = random.randint(0, len(trans_lm_dict.keys()) - 1)\n sel_transes = trans_lm_dict[trans_lm_dict.keys()[rand_k]]\n num_pose = sel_transes.shape[0]\n if num_pose < 3:\n continue\n\n observed_pose_dict[observed_prefix] = {}\n random.shuffle(sel_classes)\n gen_classes = sel_classes[:num_pose]\n for cls_i, cls_name in enumerate(gen_classes):\n # if cls_name != 'driller':\n # continue\n # src_quat_mean = quat_stat[cls_name][\"quat_mean\"]\n # src_quat_std = quat_stat[cls_name][\"quat_std\"]\n # src_trans_mean = trans_stat[cls_name][\"trans_mean\"]\n # src_trans_std = trans_stat[cls_name][\"trans_std\"]\n deg_max = new_points[cls_name][\"angle_max\"] + 10\n\n gen_this_pose = True\n # generate trans ------------------------------------------------\n tgt_trans = sel_transes[cls_i].copy()\n # print('tgt_trans: ', tgt_trans)\n tgt_trans += np.random.normal(0, 0.05, 1)\n\n # r_dist, t_dist = calc_rt_dist_q(tgt_quat, src_quat, tgt_trans, src_trans)\n transform = np.matmul(K_lm, tgt_trans.reshape(3, 1))\n center_x = float(transform[0] / transform[2])\n center_y = float(transform[1] / transform[2])\n count = 0\n while not (0.1 < tgt_trans[2] < 1.2) or not (\n 48 < center_x < (640 - 48) and 48 < center_y < (480 - 48)\n ):\n # randomly generate a pose\n tgt_trans = sel_transes[cls_i].copy()\n tgt_trans += np.random.normal(0, 0.05, 1)\n\n transform = np.matmul(K_lm, tgt_trans.reshape(3, 1))\n center_x = float(transform[0] / transform[2])\n center_y = float(transform[1] / transform[2])\n count += 1\n if count % 500 == 0:\n print(\n observed_prefix,\n cls_name,\n count,\n \"48 < center_x < (640-48): {}, 48 < center_y < (480-48): {}\".format(\n 48 < center_x < (640 - 48), 48 < center_y < (480 - 48)\n ),\n )\n print(\n \"\\tcenter_x:{}, center_y:{}, tgt_trans: {}\".format(\n center_x, center_y, tgt_trans\n )\n )\n if count == 5000:\n gen_this_pose = False\n break\n\n # randomly generate a quat 
-------------------------------------------------\n tgt_quat = np.random.normal(0, 1, 4)\n tgt_quat = tgt_quat / np.linalg.norm(tgt_quat)\n if tgt_quat[0] < 0:\n tgt_quat *= -1\n\n tgt_rot_m = quat2mat(tgt_quat)\n new_pz = np.dot(tgt_rot_m, pz.reshape((-1, 1))).reshape((3,))\n pz_mean = new_points[cls_name][\"pz_mean\"]\n deg = angle(new_pz, pz_mean)\n count = 0\n while deg > deg_max:\n tgt_quat = np.random.normal(0, 1, 4)\n tgt_quat = tgt_quat / np.linalg.norm(tgt_quat)\n if tgt_quat[0] < 0:\n tgt_quat *= -1\n\n tgt_rot_m = quat2mat(tgt_quat)\n new_pz = np.dot(tgt_rot_m, pz.reshape((-1, 1))).reshape((3,))\n pz_mean = new_points[cls_name][\"pz_mean\"]\n deg = angle(new_pz, pz_mean)\n count += 1\n if count % 100 == 0:\n print(\n observed_prefix,\n cls_name,\n count,\n \"deg < deg_max={}: {}\".format(deg_max, deg <= deg_max),\n )\n print(\"\\tdeg:{}\".format(deg))\n if count == 5000:\n gen_this_pose = False\n break\n # ---------------------------------------------------------------------------------\n if gen_this_pose:\n tgt_pose_q = np.zeros((7,), dtype=\"float32\")\n tgt_pose_q[:4] = tgt_quat\n tgt_pose_q[4:] = tgt_trans\n observed_pose_dict[observed_prefix][cls_name] = tgt_pose_q\n\n i = 0\n for k, v in observed_pose_dict.items():\n if len(v.keys()) >= 2:\n i += 1\n print(\"{} indices are successfully generated.\".format(i))\n\n # write pose\n poses_file = os.path.join(syn_pose_dir, \"LM6d_occ_dsm_train_observed_pose_all.pkl\")\n with open(poses_file, \"wb\") as f:\n cPickle.dump(observed_pose_dict, f, 2)\n\n\nif __name__ == \"__main__\":\n # trans_lm_array = stat_YCB_video()\n # pose_dict, quat_stat, trans_stat, new_points = stat_lm6d()\n # pose_dict, quat_stat, trans_stat, new_points = stat_lm6d_occ_test()\n gen_poses()\n print(\"{} 
finished\".format(__file__))\n","sub_path":"toolkit/LM6d_occ_dsm_0_gen_observed_poses.py","file_name":"LM6d_occ_dsm_0_gen_observed_poses.py","file_ext":"py","file_size_in_byte":20661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"128831361","text":"my_list=[1,3,4,5,1,3,4,5,9]\nprint(my_list)\n\ndef frekans_tablosu(liste):\n frekans={}\n for i in liste:\n if(i in frekans):#eger i frekansta mevcutsa if'e gir\n frekans[i]=frekans[i]+1\n else:\n frekans[i]=1\n #print(i,frekans)\n return frekans\n\n#print(frekans_tablosu(my_list))\nfrekans_tablosu(my_list)\n\ndef frekans_tablosu_2(liste_2):\n frekans_liste=[]\n for i in range(len(liste_2)):\n s=False\n for j in range(len(frekans_liste)):\n if(liste_2[i]==frekans_liste[j][0]):#sayi frekans_listesine yollanmis mi?\n frekans_liste[j][1]=frekans_liste[j][1]+1\n s=True\n if(s==False):\n frekans_liste.append([liste_2[i],1])#ekleme\n return frekans_liste\n \nprint(frekans_tablosu_2(my_list))\n","sub_path":"hafta3.py","file_name":"hafta3.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"289871471","text":"# -*- coding: utf-8 -*-\nimport unittest\nfrom geeknote import config\nfrom geeknote.geeknote import User, Notebooks, Notes, GeekNote, GeekNoteConnector\nfrom geeknote.storage import Storage\nfrom geeknote.oauth import GeekNoteAuth\nfrom random import SystemRandom\nfrom string import hexdigits\n#from proxyenv.proxyenv import ProxyFactory\n\n\n# see https://docs.python.org/2.7/library/unittest.html §25.3.6\n# http://thecodeship.com/patterns/guide-to-python-function-decorators/\n# (decorator with empty argument list)\ndef skipUnlessDevMode():\n if config.DEV_MODE:\n return lambda x: x\n else:\n return unittest.skip(\"Test only active with DEV_MODE=True\")\n\n\nclass TestSandbox(unittest.TestCase):\n\n @classmethod\n @skipUnlessDevMode()\n def setUpClass(cls):\n storage = Storage()\n\n # start out with empty auth token. Save current token to restore it later.\n cls.token = storage.getUserToken()\n cls.info = storage.getUserInfo()\n storage.removeUser()\n\n # Force reconnection and re-authorization because it's part of our test suite\n GeekNoteAuth.cookies = {}\n GeekNoteConnector.evernote = None\n GeekNote.skipInitConnection = False\n cls.storage = Storage()\n cls.notes = set()\n cls.nbs = set()\n cls.notebook = (\"Geeknote test %s please delete\" %\n \"\".join(SystemRandom().choice(hexdigits) for x in range(12)))\n cls.Notes = Notes()\n cls.Notebooks = Notebooks()\n cls.Geeknote = cls.Notebooks.getEvernote()\n\n @classmethod\n def tearDownClass(cls):\n if cls.token:\n cls.storage.createUser(cls.token, cls.info)\n\n def setUp(self):\n self.user = User()\n self.tag = \"geeknote_unittest_1\"\n\n @skipUnlessDevMode()\n def test01_userLogin(self):\n # This is an implicit test. 
The GeekNote() call in setUp() will perform\n # an automatic login.\n self.assertTrue(self.Geeknote.checkAuth())\n\n @skipUnlessDevMode()\n def test10_createNotebook(self):\n self.assertTrue(self.Notebooks.create(self.notebook))\n\n @skipUnlessDevMode()\n def test15_findNotebook(self):\n all = self.Geeknote.findNotebooks()\n nb = [nb for nb in all if nb.name == self.notebook]\n self.assertTrue(len(nb) == 1)\n self.nbs.add(nb[0].guid)\n\n @skipUnlessDevMode()\n def test30_createNote(self):\n self.Notes.create(\"note title 01\",\n content=\"\"\"\\\n# Sample note 01\nThis is the note text.\n\"\"\",\n notebook=self.notebook,\n tags=self.tag)\n\n @skipUnlessDevMode()\n def test31_findNote(self):\n self.Notes.find(notebooks=self.notebook, tags=self.tag)\n result = self.storage.getSearch()\n self.assertTrue(len(result.notes) == 1)\n self.notes.add(result.notes[0].guid)\n\n @skipUnlessDevMode()\n def test90_removeNotes(self):\n while self.notes:\n self.assertTrue(self.Geeknote.removeNote(self.notes.pop()))\n\n # EXPECTED FAILURE\n # \"This function is generally not available to third party applications\"\n # https://dev.evernote.com/doc/reference/NoteStore.html#Fn_NoteStore_expungeNotebook\n @skipUnlessDevMode()\n def test95_removeNotebooks(self):\n while self.nbs:\n # self.assertTrue(self.Geeknote.removeNotebook(self.nbs.pop()))\n self.assertRaises(SystemExit, self.Geeknote.removeNotebook, self.nbs.pop())\n\n @skipUnlessDevMode()\n def test99_userLogout(self):\n self.user.logout(force=True)\n self.assertFalse(self.Geeknote.checkAuth())\n\n\nclass TestSandboxWithProxy(TestSandbox):\n\n @classmethod\n @skipUnlessDevMode()\n def setUpClass(cls):\n cls.proxy = ProxyFactory()()\n cls.proxy.start()\n cls.proxy.wait()\n cls.proxy.enter_environment()\n super(TestSandboxWithProxy, cls).setUpClass()\n\n @classmethod\n def tearDownClass(cls):\n super(TestSandboxWithProxy, cls).tearDownClass()\n cls.proxy.leave_environment()\n try:\n cls.proxy.stop()\n except:\n 
pass\n","sub_path":"tests/test_sandbox.py","file_name":"test_sandbox.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"461206331","text":"import numpy\n\nclass FastHisto():\n def __init__(\n self,\n nbins=256,\n ):\n self.nbins = nbins\n \n def predictZ0(self,value,weight):\n z0List = []\n halfBinWidth = 0.5*30./256.\n for ibatch in range(value.shape[0]):\n hist,bin_edges = numpy.histogram(value[ibatch],self.nbins,range=(-15,15),weights=weight[ibatch])\n hist = numpy.convolve(hist,[1,1,1],mode='same')\n z0Index= numpy.argmax(hist)\n z0 = -15.+30.*z0Index/self.nbins+halfBinWidth\n z0List.append([z0])\n return numpy.array(z0List,dtype=numpy.float32)\n","sub_path":"Train/vtx/FastHisto.py","file_name":"FastHisto.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"318458785","text":"import logging\n\nPANEL_VERSION = 'V3' # V2 or V3. V2 not support API\n\nAPI_URL = 'http://www.XXXXXX.com/includes/api.php'\nAPI_IDENTIFIER = 'identifier'\nAPI_SECRET = 'secret'\n\nNODE_ID = '1'\nCHECKTIME = 15\nSYNCTIME = 300\n\nSS_BIND_IP = '0.0.0.0'\nSS_METHOD = 'rc4-md5'\n\n#LOG CONFIG\nLOG_ENABLE = True\nLOG_LEVEL = logging.DEBUG\nLOG_FILE = '/var/log/shadowsocks.log'\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"597134571","text":"import pickle\nfrom data.data_generator.util import *\n\nworking_dir = '/home/zbh/Workspace/'\n\ntf.flags.DEFINE_string('input_dir', os.path.join(working_dir, 'out/DataConstruction/constructed_data'),\n 'The directory which contains the input file')\ntf.flags.DEFINE_string('current_channel', 'social', 'the channel of the news')\ntf.flags.DEFINE_string('output_dir', os.path.join(working_dir, 'out/CommentMLE/LdaTFRecord/'),\n \"output directory for generated tfrecords\")\n\nFLAGS = tf.flags.FLAGS\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\nclass DataManager:\n\n def __init__(self, input_data_dict):\n self.input_data = input_data_dict\n self.comment_str_dictionary = None\n self.comment_key_dictionary = None\n self.comment_key_type = ['noun']\n self.content_key_type = ['noun']\n\n @staticmethod\n def _apply_key_types(key_types, keys_dict):\n keys_buffer = list()\n for k, v in keys_dict.items():\n if k in key_types:\n keys_buffer.extend(v)\n return keys_buffer\n\n @staticmethod\n def _get_character_list(sentence):\n return list(sentence[:])\n\n def obtain_dictionary(self, store_path):\n if not os.path.exists(store_path):\n training_data = self.input_data['train']\n condition = {\n \"no_below\": 1,\n \"no_above\": 1,\n \"keep_n\": 19996,\n \"keep_tokens\": None\n }\n comment_str_corpus = [DataManager._get_character_list(''.join(item['comment_str'].split())) for item in\n training_data]\n comment_keys_corpus = [DataManager._apply_key_types(self.comment_key_type, item['comment_keys']) for item in\n training_data]\n self.comment_str_dictionary = Dictionary.construct_dictionary(comment_str_corpus,\n store_path + 'dictionary_comment_str',\n **condition)\n self.comment_key_dictionary = Dictionary.construct_dictionary(comment_keys_corpus,\n store_path + 'dictionary_comment_key',\n **condition)\n else:\n self.comment_str_dictionary = Dictionary.load_dictionary(store_path + 'dictionary_comment_str')\n self.comment_key_dictionary = 
Dictionary.load_dictionary(store_path + 'dictionary_comment_key')\n\n def generate_record(self):\n eos = Dictionary._EXTRA_VOCAB_ITEMS.index(\"\")\n bos = Dictionary._EXTRA_VOCAB_ITEMS.index(\"\")\n generated_records = dict()\n for split, data in self.input_data.items():\n buffer = list()\n for item in data:\n comment_str_ids = [bos] + self.comment_str_dictionary.tokens2ids(\n DataManager._get_character_list(''.join(item['comment_str'].split()))) + [eos]\n comment_keys_ids = self.comment_key_dictionary.tokens2ids(\n DataManager._apply_key_types(self.comment_key_type, item['comment_keys']))\n title_str_ids = [bos] + self.comment_str_dictionary.tokens2ids(\n DataManager._get_character_list(''.join(item['title_str'].split()))) + [eos]\n\n # convert the topic_distribution into indices, values and shape\n topic_distribution = item['content_topic_distribution']\n indices = [item[0] for item in topic_distribution]\n values = [item[1] for item in topic_distribution]\n if len(indices) == 0 or len(comment_keys_ids) == 0:\n continue\n buffer.append({\n 'content_topic_distribution_indices': indices,\n 'content_topic_distribution_values': values,\n 'comment_keys': list(set(comment_keys_ids)),\n 'comment_str': comment_str_ids,\n 'title_str': title_str_ids\n })\n generated_records[split] = buffer\n return generated_records\n\n\ndef main():\n # load data from input files\n input_data_dir = FLAGS.input_dir\n pickle_filenames = os.listdir(input_data_dir)\n data_buffer = list()\n for filename in pickle_filenames:\n with open(os.path.join(input_data_dir, filename), \"rb\") as f:\n data_buffer.extend(pickle.load(f))\n\n if FLAGS.current_channel is not None:\n data_buffer = list(filter(lambda item: item['channel'] == FLAGS.current_channel, data_buffer))\n\n split_pos = int(len(data_buffer) * 0.8)\n input_data_dict = {\n 'train': data_buffer[:split_pos],\n 'test': data_buffer[split_pos:]\n }\n\n # construct records\n manager = DataManager(input_data_dict)\n 
manager.obtain_dictionary(os.path.join(FLAGS.output_dir))\n records = manager.generate_record()\n\n # convert records into tfrecords\n for split, record_list in records.items():\n tf.logging.info('Processing {} set'.format(split))\n record_store_path = os.path.join(FLAGS.output_dir, split + '.tfrecord')\n tfrecord_generator = TFRecordGenerator(record_list, record_store_path)\n tfrecord_generator.generate_TFRecord()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"data/data_generator/LdaTFRecordGenerator.py","file_name":"LdaTFRecordGenerator.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"222757441","text":"from typing import List, Dict\n\nimport luigi\nfrom luigi.contrib.spark import PySparkTask\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import arrays_zip, when, col, to_json, lit\nfrom impc_etl.workflow.config import ImpcConfig\nfrom impc_etl.workflow.load import ObservationsMapper\n\n\nclass ApiMapper(PySparkTask):\n name = \"IMPC_Api_Mapper\"\n output_entity_name = \"\"\n primary_dependency: luigi.Task = None\n column_select: List[str] = []\n column_renaming: Dict = {}\n extra_transformations = {}\n output_path = luigi.Parameter()\n\n def requires(self):\n return []\n\n def output(self):\n return ImpcConfig().get_target(\n f\"{self.output_path}_{self.output_entity_name}_api_parquet\"\n )\n\n def app_options(self):\n return [\n self.input()[0].path,\n self.output().path,\n ]\n\n def main(self, sc, *args):\n spark = SparkSession(sc)\n input_parquet_path = args[0]\n output_path = args[1]\n\n source_df = spark.read.parquet(input_parquet_path)\n impc_api_df = source_df.select(self.column_select).distinct()\n\n for col_name, new_col_name in self.column_renaming.items():\n impc_api_df = impc_api_df.withColumnRenamed(col_name, new_col_name)\n\n for transformation in self.extra_transformations:\n impc_api_df = transformation(impc_api_df)\n\n impc_api_df.write.parquet(output_path)\n\n\nclass ApiSpecimenMapper(ApiMapper):\n output_entity_name = \"specimen\"\n column_select = [\n \"specimen_id\",\n \"external_sample_id\",\n \"colony_id\",\n \"strain_accession_id\",\n \"genetic_background\",\n \"strain_name\",\n \"zygosity\",\n \"production_center\",\n \"phenotyping_center\",\n \"project_name\",\n \"litter_id\",\n \"biological_sample_group\",\n \"sex\",\n \"pipeline_stable_id\",\n \"date_of_birth\",\n ]\n column_renaming = {\n \"external_sample_id\": \"source_id\",\n \"biological_sample_group\": \"sample_group\",\n }\n extra_transformations = [\n lambda df: df.withColumn(\n \"specimen_type\",\n 
when(col(\"date_of_birth\").isNull(), lit(\"embryo\")).otherwise(lit(\"mouse\")),\n )\n ]\n\n def requires(self):\n return [ObservationsMapper()]\n\n\nclass ApiExperimentMapper(ApiMapper):\n output_entity_name = \"experiment\"\n column_select = [\n \"experiment_id\",\n \"specimen_id\",\n \"experiment_source_id\",\n \"phenotyping_center\",\n \"pipeline_stable_id\",\n \"procedure_stable_id\",\n \"date_of_experiment\",\n \"metadata\",\n \"metadata_group\",\n \"procedure_sequence_id\",\n \"age_in_days\",\n \"age_in_weeks\",\n \"weight\",\n \"weight_date\",\n \"weight_days_old\",\n \"developmental_stage_name\",\n \"developmental_stage_acc\",\n ]\n column_renaming = {\n \"experiment_source_id\": \"source_id\",\n \"age_in_days\": \"specimen_age_in_days\",\n \"age_in_weeks\": \"specimen_age_in_weeks\",\n \"weight\": \"specimen_weight\",\n \"weight_date\": \"specimen_weight_date\",\n \"weight_days_old\": \"specimen_weight_age_in_days\",\n \"developmental_stage_name\": \"specimen_developmental_stage_name\",\n \"developmental_stage_acc\": \"specimen_developmental_stage_acc\",\n }\n\n def requires(self):\n return [ObservationsMapper()]\n\n\nclass ApiObservationMapper(ApiMapper):\n output_entity_name = \"observation\"\n column_select = [\n \"observation_id\",\n \"experiment_id\",\n \"parameter_stable_id\",\n \"observation_type\",\n \"sequence_id\",\n \"data_point\",\n \"category\",\n \"text_value\",\n \"time_point\",\n \"discrete_point\",\n \"increment_value\",\n \"sub_term_id\",\n \"sub_term_name\",\n \"sub_term_description\",\n ]\n column_renaming = {\n \"sequence_id\": \"parameter_sequence_id\",\n }\n extra_transformations = [\n lambda df: df.withColumn(\n \"ontology_terms\",\n to_json(\n arrays_zip(\n col(\"sub_term_id\").alias(\"ontology_term_id\"),\n col(\"sub_term_name\").alias(\"ontology_term_name\"),\n col(\"sub_term_description\").alias(\"ontology_term_description\"),\n )\n ),\n ).drop(\n \"sub_term_id\",\n \"sub_term_name\",\n \"sub_term_name\",\n )\n ]\n\n def 
requires(self):\n return [ObservationsMapper()]\n","sub_path":"impc_etl/jobs/load/impc_api/impc_api_mapper.py","file_name":"impc_api_mapper.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"386867041","text":"from datetime import timedelta\n\n# The DAG object; we'll need this to instantiate a DAG\nfrom airflow import DAG\n# Operators; we need this to operate!\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.dates import days_ago\n# These args will get passed on to each operator\n# You can override them on a per-task basis during operator initialization\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': days_ago(2),\n 'email': ['airflow@example.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n # 'queue': 'bash_queue',\n # 'pool': 'backfill',\n # 'priority_weight': 10,\n # 'end_date': datetime(2016, 1, 1),\n # 'wait_for_downstream': False,\n # 'dag': dag,\n # 'sla': timedelta(hours=2),\n # 'execution_timeout': timedelta(seconds=300),\n # 'on_failure_callback': some_function,\n # 'on_success_callback': some_other_function,\n # 'on_retry_callback': another_function,\n # 'sla_miss_callback': yet_another_function,\n # 'trigger_rule': 'all_success'\n}\ndag = DAG(\n 'tutorial',\n default_args=default_args,\n description='A simple tutorial DAG',\n schedule_interval=timedelta(days=1),\n)\n\n# t1, t2 and t3 are examples of tasks created by instantiating operators\nt1 = BashOperator(\n task_id='print_date',\n bash_command='date',\n dag=dag,\n)\n\nt2 = BashOperator(\n task_id='sleep',\n depends_on_past=False,\n bash_command='sleep 5',\n retries=3,\n dag=dag,\n)\ndag.doc_md = __doc__\n\nt1.doc_md = \"\"\"\\\n#### Task Documentation\nYou can document your task using the attributes `doc_md` (markdown),\n`doc` (plain text), `doc_rst`, `doc_json`, `doc_yaml` which gets\nrendered in the UI's Task Instance Details page.\n\n\"\"\"\ntemplated_command = \"\"\"\n{% for i in range(5) %}\n echo \"{{ ds }}\"\n echo \"{{ macros.ds_add(ds, 7)}}\"\n echo \"{{ params.my_param 
}}\"\n{% endfor %}\n\"\"\"\n\nt3 = BashOperator(\n task_id='templated',\n depends_on_past=False,\n bash_command=templated_command,\n params={'my_param': 'Parameter I passed in'},\n dag=dag,\n)\n\n\ndef print_sample_dataframe():\n \"\"\"Generate a dummy pandas dataframe and print it\"\"\"\n import pandas as pd\n \n data = {\n 'Product': ['ABC','DDD','XYZ','AAA','CCC','PPP','NNN','RRR'],\n 'Price': [630,790,250,370,880,1250,550,700],\n 'Discount': ['No','Yes','No','Yes','Yes','No','No','Yes']\n }\n\n df = pd.DataFrame(data, columns = ['Product','Price','Discount'])\n print(df)\n\nt4 = PythonOperator(\n task_id='print_sample_dataframe',\n python_callable=print_sample_dataframe,\n dag=dag,\n)\n\n\ndef contact_mysql():\n \"\"\"Tries to connect to a mysql instance\"\"\"\n import pymysql\n\n conn = pymysql.connect(\n host='mysql',\n user='airflow',\n password='pliplop',\n db='airflow',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor\n )\n\n try:\n with conn.cursor() as cursor:\n # Create a new record\n sql = \"SHOW TABLES;\"\n cursor.execute(sql)\n print(cursor.fetchall())\n finally:\n conn.close()\n\n\nt5 = PythonOperator(\n task_id='contact_mysql',\n python_callable=contact_mysql,\n dag=dag,\n)\n\n\n\nt1 >> [t2, t3, t4, t5]","sub_path":"data/dags/tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"182495189","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport random\nfrom PIL import Image\n\nSIZE = 32\nBATCH_SIZE = 32\nFILTERS = [3, 32, 64, 128, 64, 32, 3] # first and last of FILTERS is always 3 so dont even put it\nFILTER_SIZE = [3, 3, 3, 3, 3, 3, 3] # size of filters for each layer\nLEARNING_RATE = 0.0005\nEPOCHS = 1000\nS_PATH = \"./models\"\nLOG_PATH = \"/tmp/texture-synth/model1\"\nS_NAME = \"model-{}\"\nIM_DENOISE = \"./100.jpg\"\n\ndef read_in_image(name):\n raw_img = tf.image.decode_jpeg(tf.read_file(name), channels=3)\n shaped = tf.image.resize_images(raw_img, [SIZE, SIZE])\n noised = shaped + tf.random_normal([SIZE, SIZE, 3], mean=0.0, stddev=np.random.uniform(20.0, 30.0))\n return noised, shaped\n\ndef make_dataset(filenames):\n _dataset = tf.data.Dataset.from_tensor_slices(filenames)\n _dataset = _dataset.map(read_in_image)\n _dataset = _dataset.batch(BATCH_SIZE).repeat().shuffle(buffer_size=100)\n _iterator = _dataset.make_one_shot_iterator()\n return _iterator\n\ndef convolve_getter(name, shape):\n weight = tf.get_variable(name, shape=shape) # by default uses glorot_uniform initializer (pretty good)\n return weight\n\ndef convolve_once(input_tensor, convolver):\n # the most basic convolutional layer, including the convolution, batch norm, relu non-linearity\n conv = tf.nn.conv2d(input_tensor, convolver, strides=[1, 1, 1, 1], padding=\"SAME\")\n # conv_ = tf.layers.batch_normalization(conv, axis=-1, training=TRAINING, scale=False, center=True)\n return tf.nn.relu(conv)\n\n\ndef convolve_block(input_tensor, layer_name, idx):\n with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):\n with tf.name_scope(\"weights\"):\n convolve_matrix = convolve_getter(\"convolve\", [FILTER_SIZE[idx], FILTER_SIZE[idx], FILTERS[idx-1], FILTERS[idx]])\n with tf.name_scope(\"convolve_pool\"):\n convolved = convolve_once(input_tensor, convolve_matrix)\n return convolved\n\ndef remove_noise(some_imgs):\n next_img = some_imgs\n for i in 
range(1, len(FILTERS)):\n next_img = tf.identity(convolve_block(next_img, f\"layer-{i}\", i), name=f\"outpt_layer-{i}\")\n return next_img\n\ndef prep_img(name):\n im_read = Image.open(name)\n #im_shaped = im_read.resize((SIZE, SIZE), Image.ANTIALIAS)\n im_shaped = im_read\n im_shaped.show()\n arr_im = np.array(im_shaped).astype(np.float32)\n return np.expand_dims(arr_im, axis=0)\n\ndef display_img(np_arr):\n im_show = Image.fromarray(np_arr.astype(np.uint8), \"RGB\")\n im_show.show()\n\nall_names = os.listdir(\"./cifar-100-python\")\nrandom.shuffle(all_names)\ntrain_names = [f\"./cifar-100-python/{name}\" for name in all_names if 't' not in name]\ntest_names = [f\"./cifar-100-python/{name}\" for name in all_names if 't' in name]\nTR_SIZE = len(train_names)\nTE_SIZE = len(test_names)\ngst = tf.Variable(0, trainable=False)\n\ntr_iter = make_dataset(train_names)\nte_iter = make_dataset(test_names)\n\n#next_tr = tf.transpose(tr_iter.get_next(), [1, 0, 2, 3, 4])\n#next_te = tf.transpose(te_iter.get_next(), [1, 0, 2, 3, 4])\ntr_input, tr_outpt = tr_iter.get_next()\nte_input, te_outpt = te_iter.get_next()\n\n\n\n\nnoised = prep_img(IM_DENOISE) + np.random.normal( loc=0.0, scale=25.0, size=[SIZE, SIZE, 3]).astype(np.float32)\ndisplay_img(noised[0])\ndenoised = remove_noise(noised)\n#tr_bs = tf.shape(next_tr)[0]\n#tr_input = tf.squeeze(tf.slice(next_tr, [0, 0, 0, 0, 0], [1, tr_bs, SIZE, SIZE, 3]), axis=0)\n#tr_outpt = tf.squeeze(tf.slice(next_tr, [0, 0, 0, 0, 0], [1, tr_bs, SIZE, SIZE, 3]), axis=0)\n\n#te_input = tf.squeeze(tf.slice(next_te, [1, 0, 0, 0, 0], [1, -1, -1, -1, -1]), axis=0)\n#te_outpt = tf.squeeze(tf.slice(next_te, [1, 0, 0, 0, 0], [1, -1, -1, -1, -1]), axis=0)\n\n# would like to do next_tr[:, 0, :, :, :]\ntr_removed = remove_noise(tr_input)\nte_removed = remove_noise(te_input)\n\n\nloss = tf.reduce_mean(tf.square(tr_removed - tr_outpt))\ntloss = tf.reduce_mean(tf.square(te_removed - te_outpt))\nopt = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss, 
global_step=gst)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n loader = tf.train.Saver()\n result = sess.run(denoised)[0]\n print(np.mean(result))\n display_img(result*255)\n# try:\n# loader.restore(sess, tf.train.latest_checkpoint(S_PATH))\n# except ValueError:\n# print(\"No model found, initializing random model\")\n# grapher = tf.summary.FileWriter(LOG_PATH, sess.graph)\n# grapher.add_summary(tf.Summary(), 0)\n# pct_incr = 0.1\n# pct_track = 0.0\n# num_iters = TR_SIZE//BATCH_SIZE\n# for ep in range(EPOCHS):\n# tr_loss = 0\n# pct_track = 0.0\n# print(f\"Epoch {ep}/{EPOCHS}: \", end=\"\", flush=True)\n# for iteration in range(num_iters):\n# _useless__, training_loss = sess.run([opt, loss])\n# tr_loss += training_loss\n# if iteration/float(num_iters) >= pct_track:\n# print(\"#\", end=\"\", flush=True)\n# pct_track += pct_incr\n# print(f\"\\nTraining loss was {tr_loss/iteration} (did {iteration} episodes)\")\n# test_cost = 0\n# for iteration in range(TE_SIZE//BATCH_SIZE):\n# test_batch_cost = sess.run(tloss)\n# test_cost += test_batch_cost\n# print(f\"Testing loss was {test_cost/iteration} (did {iteration} episodes)\")\n# if ep % 5 == 0:\n# saver.save(sess, os.path.join(S_PATH, S_NAME.format(tf.train.global_step(sess, gst))))\n#\n\n\n\n\n\n\n\n","sub_path":"texture-synthesis/denoiser.py","file_name":"denoiser.py","file_ext":"py","file_size_in_byte":5455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"88972897","text":"import os\nimport glob\nfrom flask import Flask, flash, request, redirect, url_for, render_template, send_from_directory\nimport json\nimport wave\nimport contextlib\nimport re\n\nfrom werkzeug.utils import secure_filename\nfrom pyAudioAnalysis import webUtil\n\nTEMPLATES_AUTO_RELOAD = True\n\nUPLOAD_FOLDER = './uploads'\nALLOWED_EXTENSIONS = set(['wav'])\n\n# webdata.uploaded_files = []\n\napp = Flask(__name__)\napp.config.from_pyfile('config.py')\napp.use_reloader=False\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n# Data structure to store all webapp info. This will be passed to front end.\nwebdata = {\n \"uploaded_files\": [],\n \"img_src\": \"\",\n \"type\": \"none\",\n \"name\": \"\",\n \"m_time\": 0,\n \"f_time\": 0,\n \"m_ratio\": 0.0,\n \"f_ratio\": 0.0,\n \"total_time\": 0\n}\n\n# Store list of uploaded files. Called when web app is first run\ndef getFilesInFolder():\n for (dirpath, dirnames, filenames) in os.walk(UPLOAD_FOLDER):\n for file in filenames:\n if file not in webdata[\"uploaded_files\"] and file.endswith(\"-nosilence.wav\"):\n webdata[\"uploaded_files\"].append(file)\n break\n\n# Get length (in seconds) of audio file \ndef computeLengthOfFile(wavFile):\n with contextlib.closing(wave.open(wavFile,'r')) as f:\n frames = f.getnframes()\n rate = f.getframerate()\n duration = frames / float(rate)\n print('audio file length computed for', wavFile, ': ', duration)\n return duration\n\n# loop through records in JSON file and find the one to update\ndef findRecordAndUpdate(filename, updateMode, mf_data=[0,0,0,0]):\n\n # For debugging...\n print('file to find and update: ', filename)\n print('update mode: ', updateMode)\n print('mf_data: ', mf_data)\n\n with open(\"reports.json\", \"r\") as jsonFile:\n data = json.load(jsonFile)\n \n # Iterate through records in JSON file and find the right one to update.\n for record in data[\"individual_report_data\"]:\n if record[\"basename\"] == filename:\n # If desired action is get 
new length after silence removed, compute length.\n if updateMode == \"silenceRemoved\":\n # Get filename with -nosilence appended.\n filename = re.findall('.*[^.wav]', filename)[0] + '-nosilence.wav'\n # Get full file path.\n fullFilePath = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n record[\"lengthWithoutSilence\"] = computeLengthOfFile(fullFilePath)\n \n # TODO: Update m/f data in report record.\n # Right now, it takes an array as a parameter, so the mf_data values MUST BE IN THIS ORDER.\n # We could also use a dict, so that order wouldn't matter. Whatever works.\n elif updateMode == \"mfClassified\":\n print('setting times and ratios')\n print(mf_data[2] * record[\"lengthWithoutSilence\"])\n record[\"m_speakingTime\"] = mf_data[0] #* record[\"lengthWithoutSilence\"]\n record[\"f_speakingTime\"] = mf_data[1] #* record[\"lengthWithoutSilence\"]\n record[\"m_speakingRatio\"] = mf_data[2]\n record[\"f_speakingRatio\"] = mf_data[3]\n \n # Update webdata here\n webdata[\"m_time\"] = (mf_data[2] * record[\"lengthWithoutSilence\"])\n webdata[\"f_time\"] = (mf_data[3] * record[\"lengthWithoutSilence\"])\n webdata[\"total_time\"] = record[\"lengthWithoutSilence\"]\n \n # write resultant data to file\n with open(\"reports.json\", \"w\") as jsonFile:\n json.dump(data, jsonFile, indent=4)\n\ngetFilesInFolder()\n\n@app.route('/uploads')\ndef autoload(filename):\n if filename != '':\n return render_template('index.html', name = filename)\n\n@app.route('/uploads/')\ndef uploaded_file(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS and \\\n filename not in webdata[\"uploaded_files\"]\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has a file. 
Form submission will not have file.\n if 'file' not in request.files:\n \n print('--- Received form data:')\n print(request.form)\n print('---')\n \n # TODO: Process all records in reports.json to get aggregate stats\n if request.form['processaction'] == 'Statistics For All Files':\n \n print('Processing all files in reports.json...')\n \n # Possible way to do this:\n # Iterate through records in the JSON using method similar to in findRecordAndUpdate()\n m_total = 0\n f_total = 0\n total_time = 0\n m_total_ratio = 0\n f_total_ratio = 0\n \n with open(\"reports.json\", \"r\") as jsonFile:\n data = json.load(jsonFile)\n \n for record in data[\"individual_report_data\"]:\n m_total += record[\"m_speakingTime\"]\n f_total += record[\"f_speakingTime\"]\n total_time += record[\"lengthWithoutSilence\"]\n \n m_total_ratio = m_total/total_time\n f_total_ratio = f_total/total_time\n \n # Write values to reports.json similar to in findRecordAndUpdate(). \n \n with open(\"reports.json\", \"r\") as jsonFile:\n data = json.load(jsonFile)\n \n data[\"aggregate_report_data\"][\"m_total_ratio\"] = m_total_ratio \n \n data[\"aggregate_report_data\"][\"f_total_ratio\"] = f_total_ratio\n \n data[\"aggregate_report_data\"][\"m_total_time\"] = m_total\n \n data[\"aggregate_report_data\"][\"f_total_time\"] = f_total\n \n data[\"aggregate_report_data\"][\"total_time\"] = total_time\n \n # write resultant data to file\n with open(\"reports.json\", \"w\") as jsonFile:\n json.dump(data, jsonFile, indent=4)\n\n # update webdata object\n webdata[\"type\"] = \"aggregate\"\n webdata[\"name\"] = \"\"\n webdata[\"m_time\"] = m_total\n webdata[\"f_time\"] = f_total\n webdata[\"m_ratio\"] = m_total_ratio\n webdata[\"f_ratio\"] = f_total_ratio\n webdata[\"total_time\"] = total_time\n # Create visualization from ratios\n webdata[\"img_src\"] = webUtil.visualizeAggregateData(m_total, f_total)\n print(\"img_src\", webdata[\"img_src\"])\n \n # Refresh webpage with updated image\n return 
render_template('index.html', data=webdata)\n\n elif request.form['processaction'] == \"Classify Male/Female\":\n # Run mf_classification\n print('Male/Female Classification')\n fileToProcess = './uploads/' + request.form['fileToProcess']\n \n # Call to webUtil mf_classify function. Should return speaking times and \n # percentages of males and females.\n [m_ratio, f_ratio, unk_ratio, m_time, f_time, unk_time] = webUtil.mf_classify(fileToProcess)\n majorKeys = [m_ratio,f_ratio,unk_ratio,m_time,f_time,unk_time]\n # Write total times and ratios to reports.json file\n longname = request.form['fileToProcess']\n print(\"---longname\",longname)\n basename = longname.replace('-nosilence.wav', '.wav')\n print(\"---basename\", basename)\n \n \n findRecordAndUpdate(basename, updateMode=\"mfClassified\", mf_data=[m_time, f_time, m_ratio, f_ratio])\n\n # Create visualization with ratios\n webdata[\"img_src\"] = webUtil.produceVisuals(fileToProcess,majorKeys)\n print(\"img_src\", webdata[\"img_src\"])\n # Share visualization info with frontend.\n webdata[\"m_time\"] = m_time\n webdata[\"f_time\"] = f_time\n webdata[\"m_ratio\"] = m_ratio\n webdata[\"f_ratio\"] = f_ratio\n webdata[\"type\"] = \"individual\"\n webdata[\"name\"] = longname\n # render_template() is called to refresh the index.html page. It sends the\n # webdata object to index.html so that we can use its objects in the HTML\n # code. 
See my example of iterating through the list of uploaded files \n # on the frontend.\n return render_template('index.html', data=webdata)\n else:\n file = request.files['file']\n # Check if no file selected\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n fullFilePath = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n file.save(fullFilePath)\n \n with open(\"reports.json\", \"r\") as jsonFile:\n data = json.load(jsonFile)\n\n reports_length = len(data[\"individual_report_data\"])\n length = computeLengthOfFile(fullFilePath)\n \n # TODO: Get num_speakers from user input box on file upload\n num_speakers = 5\n \n new_file_data = {\n \"basename\": filename,\n \"length\": length,\n \"num_speakers\": num_speakers,\n \"lengthWithoutSilence\": 0,\n \"m_speakingTime\": 0,\n \"f_speakingTime\": 0,\n \"m_speakingRatio\": 0,\n \"f_speakingRatio\": 0\n }\n data[\"individual_report_data\"].append(new_file_data)\n\n with open(\"reports.json\", \"w\") as jsonFile:\n json.dump(data, jsonFile, indent=4)\n \n json.dumps(data, indent=4)\n getFilesInFolder()\n \n fileToProcess = './uploads/' + filename\n # Handle malformed user input\n if '.wav' not in fileToProcess:\n flash('ERROR: Please enter a .wav file from the list above as the file to process.')\n return render_template('index.html', data=webdata)\n if not os.path.isfile(fileToProcess):\n flash('ERROR: Please enter a .wav file from the list above as the file to process.')\n return render_template('index.html', data=webdata)\n \n print('Calling silenceUtil...')\n \n processedFile = webUtil.removeSilence(fileToProcess, 0.1, 0.1)\n \n # Find file record in records.json and update its lengthWithoutSilence\n basename = filename\n \n findRecordAndUpdate(basename, updateMode=\"silenceRemoved\")\n \n # Refresh file list after adding the -nosilence file\n if (processedFile):\n getFilesInFolder()\n return 
render_template('index.html', data=webdata)\n else:\n flash('ERROR: Silence removal failed.')\n return render_template('index.html', data=webdata)\n \n return render_template('index.html', data=webdata)\n # return redirect(url_for('uploaded_file',\n # filename=filename))\n\n\n return render_template('index.html', data=webdata)\n","sub_path":"pyAudioAnalysis/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":12227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"186922217","text":"from django.shortcuts import render, redirect, get_object_or_404\n\nfrom webapp.forms import BookForm\nfrom webapp.models import Book, choice_status\n\n\ndef index_view(request):\n books = Book.objects.filter(status='Active').order_by('-update_time')\n return render(request, 'index.html', context={\n 'books': books\n })\n\n\ndef book_create_view(request):\n if request.method == 'GET':\n form = BookForm()\n return render(request, 'create.html', context={\n 'form': form\n })\n elif request.method == 'POST':\n form = BookForm(data=request.POST)\n if form.is_valid():\n forms = Book.objects.create(author_name=form.cleaned_data['author_name'],\n author_email=form.cleaned_data['author_email'],\n record_text=form.cleaned_data['record_text'])\n\n return redirect('book_view', pk=forms.pk)\n else:\n return render(request, 'create.html', context={'form': form})\n\n\ndef book_update_view(request, pk):\n forms = get_object_or_404(Book, pk=pk)\n\n if request.method == 'GET':\n form = BookForm(data={'author_name': forms.author_name,\n 'author_email': forms.author_email,\n 'record_text': forms.record_text})\n return render(request, 'update.html', context={\n 'forms': forms,\n 'form': form})\n\n elif request.method == 'POST':\n form = BookForm(data=request.POST)\n\n if form.is_valid():\n forms.author_name = form.cleaned_data['author_name']\n forms.author_email = form.cleaned_data['author_email']\n forms.record_text = form.cleaned_data['record_text']\n forms.save()\n\n return redirect('book_view', pk=forms.pk)\n else:\n return render(request, 'update.html', context={\n 'form': form,\n 'forms': forms\n })\n\n\ndef book_view(request, pk):\n forms = get_object_or_404(Book, pk=pk)\n return render(request, 'book.html', context={\n 'forms': forms\n })\n\n\ndef book_delete_view(request, pk):\n form = get_object_or_404(Book, pk=pk)\n form.delete()\n return 
redirect('index')\n","sub_path":"exam_src/webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"181639431","text":"#!/usr/bin/env python3\n\nimport click\nimport requests\nimport re\nimport uuid\n\n\ndef get_json(uri, expected_status=200):\n r = requests.get(uri + \"?cacheBust=\" + str(uuid.uuid1()))\n\n if r.status_code != expected_status:\n click.echo(\n click.style(\n f\"Status code fail - expected '{expected_status}' but got '{r.status_code}'\",\n fg=\"red\",\n )\n )\n return {}\n\n return r.json()\n\n\ndef validate_id(infojson, expected):\n info_id = infojson.get(\"@id\", None)\n\n if info_id != expected:\n click.echo(\n click.style(\n f\"Id fail - expected '{expected}' but found '{info_id}'\", fg=\"red\"\n )\n )\n return False\n else:\n click.echo(click.style(f\"Found '{info_id}'\", fg=\"green\"))\n return True\n\n\ndef validate_auth(info_json, regex):\n p = re.compile(regex)\n\n def validate_service(json):\n if not json:\n return\n\n service_description = json.get(\"service\", [])\n if isinstance(service_description, dict):\n service_description = [service_description]\n\n for service in service_description:\n info_id = service.get(\"@id\", None)\n if not p.match(info_id):\n click.echo(\n click.style(\n f\"Id fail - expected '{regex}' but found '{info_id}'\", fg=\"red\"\n )\n )\n else:\n click.echo(click.style(f\"Found '{info_id}'\", fg=\"green\"))\n\n validate_service(service)\n\n validate_service(info_json)\n\n\ndef validate_redirect(uri, redirect_to):\n r = requests.get(uri + \"?cacheBust=\" + str(uuid.uuid1()), allow_redirects=False)\n\n if r.status_code != 302:\n click.echo(click.style(f\"Request to '{uri}' wasn't a redirect\", fg=\"red\"))\n return False\n else:\n click.echo(click.style(f\"Request to '{uri}' redirected.\", fg=\"green\"))\n\n location = r.headers.get(\"Location\", None)\n if location != redirect_to:\n click.echo(\n click.style(\n f\"Redirect location incorrect - expected '{redirect_to}' but found '{location}'\",\n fg=\"red\",\n )\n )\n return False\n\n return True\n\n\ndef validate_cors_headers(uri, origin):\n headers 
= {\"Origin\": origin} if origin else {}\n r = requests.get(uri + \"?ccc=asd\", headers=headers)\n\n if r.status_code != 200:\n click.echo(\n click.style(\n f\"Status code fail - expected 200 but got '{r.status_code}'\", fg=\"red\"\n )\n )\n return\n\n if origin:\n h = r.headers.get(\"Access-Control-Allow-Origin\", None)\n if not h:\n click.echo(\n click.style(\n f\"'Access-Control-Allow-Origin' header expected but not found\",\n fg=\"red\",\n )\n )\n return\n elif \"Access-Control-Allow-Origin\" in r.headers:\n click.echo(\n click.style(\n f\"'Access-Control-Allow-Origin' header not expected but found\", fg=\"red\"\n )\n )\n return\n\n click.echo(click.style(f\"Request to '{uri}' has expected CORS.\", fg=\"green\"))\n\n\ndef run_checks(env_suffix=\"\", dlcs_hostname=\"dlcs.io\"):\n space = 5 if env_suffix == \"\" else 6\n id_checks = {\n # miro\n # wellcome_images_dlcs_behaviours\n f\"https://iiif{env_suffix}.wellcomecollection.org/image/V0022459\": f\"https://iiif{env_suffix}.wellcomecollection.org/image/V0022459\", # miro root, wc.org\n f\"https://iiif{env_suffix}.wellcomecollection.org/image/V0022459/info.json\": f\"https://iiif{env_suffix}.wellcomecollection.org/image/V0022459\", # miro info.json, wc.org\n f\"https://{dlcs_hostname}/iiif-img/2/8/V0022459\": f\"https://{dlcs_hostname}/iiif-img/2/8/V0022459\", # miro root, dlcs.io\n f\"https://{dlcs_hostname}/iiif-img/2/8/V0022459/info.json\": f\"https://{dlcs_hostname}/iiif-img/2/8/V0022459\", # miro info.json, dlcs.io\n # non-miro images\n # dlcs_images_behaviours\n f\"https://iiif{env_suffix}.wellcomecollection.org/image/b31905560_0006.jp2\": f\"https://iiif{env_suffix}.wellcomecollection.org/image/b31905560_0006.jp2\", # non-miro root, wc.org\n f\"https://iiif{env_suffix}.wellcomecollection.org/image/b31905560_0006.jp2/info.json\": f\"https://iiif{env_suffix}.wellcomecollection.org/image/b31905560_0006.jp2\", # non-miro info.json, wc.org\n f\"https://{dlcs_hostname}/iiif-img/2/{space}/b31905560_0006.jp2\": 
f\"https://{dlcs_hostname}/iiif-img/2/{space}/b31905560_0006.jp2\", # non-miro root, dlcs.io\n f\"https://{dlcs_hostname}/iiif-img/2/{space}/b31905560_0006.jp2/info.json\": f\"https://{dlcs_hostname}/iiif-img/2/{space}/b31905560_0006.jp2\", # non-miro info.json, dlcs.io\n # video\n # av_behaviours\n f\"https://iiif{env_suffix}.wellcomecollection.org/av/b16756654_0055-0000-4202-0000-0-0000-0000-0.mpg\": f\"https://iiif{env_suffix}.wellcomecollection.org/av/b16756654_0055-0000-4202-0000-0-0000-0000-0.mpg\", # root, wc.org\n f\"https://iiif{env_suffix}.wellcomecollection.org/av/b16756654_0055-0000-4202-0000-0-0000-0000-0.mpg/info.json\": f\"https://iiif{env_suffix}.wellcomecollection.org/av/b16756654_0055-0000-4202-0000-0-0000-0000-0.mpg\", # info.json, wc.org\n f\"https://{dlcs_hostname}/iiif-av/2/{space}/b16756654_0055-0000-4202-0000-0-0000-0000-0.mpg\": f\"https://{dlcs_hostname}/iiif-av/2/{space}/b16756654_0055-0000-4202-0000-0-0000-0000-0.mpg\", # root, dlcs.io\n f\"https://{dlcs_hostname}/iiif-av/2/{space}/b16756654_0055-0000-4202-0000-0-0000-0000-0.mpg/info.json\": f\"https://{dlcs_hostname}/iiif-av/2/{space}/b16756654_0055-0000-4202-0000-0-0000-0000-0.mpg\", # info.json, dlcs.io\n # audio\n # av_behaviours\n f\"https://iiif{env_suffix}.wellcomecollection.org/av/b32496485_0001_0001.mp3\": f\"https://iiif{env_suffix}.wellcomecollection.org/av/b32496485_0001_0001.mp3\", # root, wc.org\n f\"https://iiif{env_suffix}.wellcomecollection.org/av/b32496485_0001_0001.mp3/info.json\": f\"https://iiif{env_suffix}.wellcomecollection.org/av/b32496485_0001_0001.mp3\", # info.json, wc.org\n f\"https://{dlcs_hostname}/iiif-av/2/{space}/b32496485_0001_0001.mp3\": f\"https://{dlcs_hostname}/iiif-av/2/{space}/b32496485_0001_0001.mp3\", # root, dlcs.io\n f\"https://{dlcs_hostname}/iiif-av/2/{space}/b32496485_0001_0001.mp3/info.json\": f\"https://{dlcs_hostname}/iiif-av/2/{space}/b32496485_0001_0001.mp3\", # info.json, dlcs.io\n }\n\n # validate info.json @id correct\n 
click.echo()\n click.echo(click.style(f\"Validating info.json @id correct\", fg=\"white\", bold=True))\n for url, expected in id_checks.items():\n click.echo(click.style(f\"Checking: {url}\", fg=\"white\", underline=True))\n info_json = get_json(url)\n validate_id(info_json, expected)\n\n # validate thumbs - reuse all images in id_checks but rewrite path for /thumbs/\n click.echo()\n click.echo(click.style(f\"Validating thumbs correct\", fg=\"white\", bold=True))\n for url, expected in (i for i in id_checks.items() if \"av/\" not in i[0]):\n url = url.replace(\"/image/\", \"/thumbs/\").replace(\"/iiif-img/\", \"/thumbs/\")\n expected = expected.replace(\"/image/\", \"/thumbs/\").replace(\n \"/iiif-img/\", \"/thumbs/\"\n )\n\n click.echo(click.style(f\"Checking: {url}\", fg=\"white\", underline=True))\n info_json = get_json(url)\n validate_id(info_json, expected)\n\n # dlcs_images_behaviours\n authed_images = {\n f\"https://iiif{env_suffix}.wellcomecollection.org/image/b19582183_RAMC_391_4_0001.jp2\": f\"https://iiif{env_suffix}.wellcomecollection.org/image/b19582183_RAMC_391_4_0001.jp2\", # non-miro root, wc.org\n f\"https://iiif{env_suffix}.wellcomecollection.org/image/b19582183_RAMC_391_4_0001.jp2/info.json\": f\"https://iiif{env_suffix}.wellcomecollection.org/image/b19582183_RAMC_391_4_0001.jp2\",\n # non-miro info.json, wc.org\n f\"https://dlcs.io/iiif-img/2/{space}/b19582183_RAMC_391_4_0001.jp2\": f\"https://dlcs.io/iiif-img/2/{space}/b19582183_RAMC_391_4_0001.jp2\", # non-miro root, dlcs.io\n f\"https://dlcs.io/iiif-img/2/{space}/b19582183_RAMC_391_4_0001.jp2/info.json\": f\"https://dlcs.io/iiif-img/2/{space}/b19582183_RAMC_391_4_0001.jp2\", # non-miro info.json, dlcs.io\n }\n\n # validate info.json @id correct and any authservices are correct\n click.echo()\n click.echo(\n click.style(\n f\"Validating info.json @id correct and any authservices are correct\",\n fg=\"white\",\n bold=True,\n )\n )\n for url, expected in authed_images.items():\n 
click.echo(click.style(f\"Checking: {url}\", fg=\"white\", underline=True))\n info_json = get_json(url, expected_status=401)\n validate_id(info_json, expected)\n\n auth_pattern = \"https://dlcs.io/auth/2/.*\"\n if url.startswith(\"https://iiif\"):\n auth_pattern = (\n f\"https://iiif{env_suffix}.wellcomecollection.org/auth/[ltc].*\"\n )\n\n validate_auth(info_json, auth_pattern)\n\n # validate login attempts redirect to correct location\n # auth_behaviours\n auth_redirects = {\n f\"https://iiif{env_suffix}.wellcomecollection.org/auth/clinicallogin\": f\"https://iiif{env_suffix}.wellcomecollection.org/roleprovider/dlcslogin\", # wc.org\n \"https://dlcs.io/auth/2/clinicallogin\": \"https://wellcomelibrary.org/iiif/dlcslogin\", # current\n }\n\n click.echo()\n click.echo(click.style(f\"Validating auth redirects\", fg=\"white\", bold=True))\n for url, expected in auth_redirects.items():\n click.echo(click.style(f\"Checking: {url}\", fg=\"white\", underline=True))\n validate_redirect(url, expected)\n\n click.echo()\n click.echo(click.style(f\"Validating CORS caching\", fg=\"white\", bold=True))\n origins = [\"\", \"https://test.example.com\"]\n for origin in origins:\n url = f\"https://iiif{env_suffix}.wellcomecollection.org/presentation/b19582183\"\n click.echo(\n click.style(\n f\"Checking: {url} with origin {origin}\", fg=\"white\", underline=True\n )\n )\n validate_cors_headers(url, origin)\n\n\n@click.command()\n@click.option(\"--env\", default=\"prod\", help=\"Environment to check (stage|test|prod)\")\ndef check_iiif(env):\n if env == \"stage\":\n run_checks(\"-stage\")\n elif env == \"test\":\n run_checks(\"-test\")\n else:\n run_checks()\n\n\nif __name__ == \"__main__\":\n check_iiif()\n","sub_path":"cloudfront/iiif.wellcomecollection.org/tests/rewrite_tests.py","file_name":"rewrite_tests.py","file_ext":"py","file_size_in_byte":10539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"489839582","text":"from nicos.devices.epics import EpicsReadable, EpicsMoveable, pvname, EpicsDevice\nfrom nicos.core import status, Param, Override, Attach, usermethod, HasLimits, Moveable, SIMULATION, Value, tupleof\n\n\nclass EpicsFloatMoveable(EpicsMoveable):\n \"\"\"\n Handles EPICS devices which can set and read a float value, but without limits.\n \"\"\"\n valuetype = float\n\n\nclass EpicsStringMoveable(EpicsMoveable):\n \"\"\"\n Handles EPICS devices which can set and read a string value.\n \"\"\"\n valuetype = str\n\n\nclass EpicsEnumMoveable(EpicsMoveable):\n \"\"\"\n Handles EPICS devices which can set and read an int value.\n \"\"\"\n valuetype = str\n enum_strs = []\n\n def doInit(self, mode):\n if mode != SIMULATION:\n self.enum_strs = list(self._get_pvctrl('writepv', 'enum_strs', []))\n\n def doStart(self, value):\n real_value = value\n if isinstance(value, str):\n real_value = self.enum_strs.index(value.lower())\n\n self._put_pv('writepv', real_value)\n\n def doRead(self, maxage=None):\n return self.enum_strs[self._get_pv('readpv')]\n\n\nclass EssChopper(Moveable):\n attached_devices = {\n 'speed': Attach('Speed of the chopper disc.', EpicsMoveable),\n 'phase': Attach('Phase of the chopper disc', EpicsMoveable),\n 'parkposition': Attach('Position in parked state', EpicsMoveable),\n 'state': Attach('Current state of the chopper', EpicsReadable, optional=True),\n 'command': Attach('Command PV of the chopper', EpicsMoveable)\n }\n\n state_map = {\n 'init': (status.ERROR, 'Interlocks not fulfilled'),\n 'stopped': (status.OK, 'Waiting for commands'),\n 'parked': (status.OK, 'Parked'),\n 'parking': (status.BUSY, 'Moving to park position'),\n 'accelerating': (status.BUSY, 'Adjusting speed to target'),\n 'phase_locking': (status.BUSY, 'Acquiring phase lock'),\n 'phase_locked': (status.OK, 'Speed and phase locked'),\n 'stopping': (status.BUSY, 'Decelerating disc'),\n 'idle': (status.OK, 'Disc rotating freely, waiting for command.'),\n 
'bearings': (status.BUSY, 'Initialising bearings'),\n }\n\n parameter_overrides = {\n 'fmtstr': Override(default='%.2f %.2f'),\n 'unit': Override(mandatory=False),\n }\n\n hardware_access = False\n valuetype = tupleof(float, float)\n\n def doRead(self, maxage=0):\n return [self._attached_speed.read(maxage), self._attached_phase.read(maxage)]\n\n def doStart(self, pos):\n if hasattr(self, '_attached_state') and self._attached_state.read() == 'init':\n self.initialize()\n\n self._attached_speed.move(pos[0])\n self._attached_phase.move(pos[1])\n self._attached_command.move('start')\n\n def doStop(self):\n self._attached_command.move('stop')\n\n # def doReadAbslimits(self):\n # return [(0.0, 40.0), (0.0, 360.0)]\n\n def doStatus(self, maxage=0):\n if hasattr(self, '_attached_state'):\n return self.state_map[self._attached_state.read().lower()]\n\n return status.WARN, 'State PV is missing, no reliable state information.'\n\n @usermethod\n def initialize(self):\n self._attached_command.move('init')\n\n @usermethod\n def deinitialize(self):\n self._attached_command.move('deinit')\n\n @usermethod\n def parkAt(self, position):\n self._attached_parkposition.move(position)\n self._attached_command.move('park')\n\n @usermethod\n def unlock(self):\n self._attached_command.move('unlock')\n","sub_path":"lib/chopper.py","file_name":"chopper.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"474466973","text":"if __name__ == '__main__':\n\tn,m,color = input().split()\n\tn,m = int(n), int(m)\n\tdmap = []\n\tfor i in range(n):\n\t\tdmap.append(input())\n\n\tlur, luc = -1, -1\n\tfor r in range(n):\n\t\tfor c in range(m):\n\t\t\tif dmap[r][c] == color:\n\t\t\t\tlur = r\n\t\t\t\tluc = c\n\t\t\t\tbreak\n\t\tif lur != -1 and luc != -1:\n\t\t\tbreak\n\n\trdr, rdc = -1, -1\n\tfor r in range(n-1,-1,-1):\n\t\tfor c in range(m-1,-1,-1):\n\t\t\tif dmap[r][c] == color:\n\t\t\t\trdr = r\n\t\t\t\trdc = c\n\t\t\t\tbreak\n\t\tif rdr != -1 and rdr != -1:\n\t\t\tbreak\n\n\trset = set([])\n\tif lur > 0:\n\t\tfor c in range(luc,rdc+1):\n\t\t\trset.add(dmap[lur-1][c])\n\tif luc > 0:\n\t\tfor r in range(lur,rdr+1):\n\t\t\trset.add(dmap[r][luc-1])\n\tif rdr < n-1:\n\t\tfor c in range(luc,rdc+1):\n\t\t\trset.add(dmap[rdr+1][c])\n\tif rdc < m-1:\n\t\tfor r in range(lur,rdr+1):\n\t\t\trset.add(dmap[r][rdc+1])\n\n\tif '.' in rset:\n\t\trset.remove('.')\n\tprint(len(rset))","sub_path":"14/B_Presidents_Office.py","file_name":"B_Presidents_Office.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"364711589","text":"from PIL import Image\r\nimport tqdm as tqdm\r\nimport os as os\r\nimport imageio as imageio\r\nimport numpy as np\r\n\r\n\r\ndef image_to_matrix(path):\r\n image = imageio.imread(path)\r\n return np.where(np.array(np.all(image == 255, 2), dtype=int) == 1, 0, 1)\r\n\r\n\r\ndef matrix_to_image(path, matrix):\r\n inds0, inds1 = np.where(matrix == 1), np.where(matrix == 0)\r\n tmp_res = np.empty((360, 360, 4))\r\n tmp_res[inds0[0], inds0[1], :] = np.array([255, 255, 255, 255], dtype=np.uint8)\r\n tmp_res[inds1[0], inds1[1], :] = np.array([0, 0, 0, 255], dtype=np.uint8)\r\n tmp_res = np.array(tmp_res, dtype=np.uint8)\r\n imageio.imwrite(path, tmp_res)\r\n\r\n\r\ndef standardize_size(path_source, path_dest, subfolders_source, subfolders_dest, new_width, new_height, tqdm_flag=False):\r\n if len(subfolders_source) != len(subfolders_dest):\r\n raise ValueError('Sizes of arguments \"subfolders_source\" and \"subfolders_dest\" must correspond to each other!')\r\n for sf in range(len(subfolders_source)):\r\n old_imgs_names = [file for file in os.listdir(path_source + subfolders_source[sf]) if file.endswith('.jpg')]\r\n name_it = tqdm.tqdm(old_imgs_names) if tqdm_flag else old_imgs_names\r\n for name in name_it:\r\n old_img = Image.open(path_source + subfolders_source[sf] + name)\r\n new_img = old_img.resize((new_width, new_height))\r\n new_img.save(path_dest + subfolders_dest[sf] + name)\r\n\r\n\r\ndef trim_edges(path_source, path_dest, subfolders_source, subfolders_dest, tqdm_flag=False):\r\n if len(subfolders_source) != len(subfolders_dest):\r\n raise ValueError('Sizes of arguments \"subfolders_source\" and \"subfolders_dest\" must correspond to each other!')\r\n for sf in range(len(subfolders_source)):\r\n old_imgs_names = [file for file in os.listdir(path_source + subfolders_source[sf]) if file.endswith('.jpg')]\r\n name_it = tqdm.tqdm(old_imgs_names) if tqdm_flag else old_imgs_names\r\n for name in name_it:\r\n old_img = 
imageio.imread(path_source + subfolders_source[sf] + name)\r\n old_img_matrix = np.where(np.array(np.all(old_img == 255, 2), dtype=int) == 1, 0, 1)\r\n tmp_matrix = np.argwhere(old_img_matrix)\r\n i_min, j_min = tuple(tmp_matrix[tmp_matrix.argmin(axis=0)])\r\n i_max, j_max = tuple(tmp_matrix[tmp_matrix.argmax(axis=0)])\r\n i_min, j_min = i_min[0] - 1 if i_min[0] > 0 else 0, j_min[1] - 1 if j_min[1] > 0 else 0\r\n i_max, j_max = \\\r\n i_max[0] + 1 if i_max[0] + 1 < old_img.shape[0] else old_img.shape[0] - 1,\\\r\n j_max[1] + 1 if j_max[1] + 1 < old_img.shape[1] else old_img.shape[1] - 1\r\n imageio.imwrite(path_dest + subfolders_source[sf] + name, old_img[i_min:i_max+1, j_min:j_max+1])\r\n\r\n\r\nif __name__ == '__main__':\r\n new_width, new_height = 60, 60\r\n path_dest = 'D:/documents/4 course/' \\\r\n 'NN/Standart/'\r\n path_source = 'D:/documents/4 course/' \\\r\n 'NN/Original/'\r\n subfolders = 'G/', 'A/', 'V/', 'Other/', 'Test/'\r\n trim_edges(path_source, path_dest, subfolders, subfolders, True)\r\n # for sf in range(len(subfolders)):\r\n # for i in tqdm.tqdm(range(sample_vol[sf])):\r\n # old_img = Image.open(path_source + subfolders[sf] + '%i.png' % i)\r\n # new_img = old_img.resize((new_width, new_height))\r\n # new_img.save(path_dest + subfolders[sf] + '%i.png' % i)\r\n\r\n","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"350446854","text":"\"\"\"Prefix matching with secondary score ordering\n\"\"\"\nimport logging\n\nfrom .set_theory import SetTheory\n\n# TODO: Base prefix for keys\n# TODO: Document/expand kwargs in get_matches\n# TODO: Make case sensitivity optional\n# TODO: Secondary score type as namedtuple('score', 'direction')\n# TODO: Document or kill kwargs in get_matches\n\nlog = logging.getLogger(__name__)\n\n\nclass PrefixIndex(object):\n\n \"\"\"Shared state for a prefix index\"\"\"\n\n def __init__(self, redis_conn, index_name, length_score=True, key_sep=':'):\n \"\"\"\n :param redis_conn: StrictRedis connection only\n :param index_name: Namespace for the index. All keys will be prefixed\n with this\n :param length_score: set to true to use edit distance as the most\n significant score in a secondary score list. No effect if\n secondary_scores is None\n :param key_sep: Character to seperate fields in key names\n \"\"\"\n self._redis_conn = redis_conn\n self._index_name = index_name\n self._length_score = length_score\n self._key_sep = key_sep\n\n def build_prefix_index(self, search_string, some_id, min_prefix_len=1,\n operator='add', secondary_scores=None):\n '''Indexes some_id by every prefix of search_string longer than\n min_prefix_len.\n\n :param search_string: The string you want prefixes of to find some_id\n :param some_id: The id to find\n :param min_prefix_length: Shortest prefix to index\n :param secondary_scores: Optionally, a list of pairs of the form\n (score, [asc|desc]) for secondary sorting of the results\n :param length_score: set to true to use edit distance as the most\n significant score in a secondary score list. 
No effect if\n secondary_scores is None\n '''\n if operator not in ('add', 'rem'):\n raise ValueError(\"unknown operator: %s\" % operator)\n if search_string is None or len(search_string) < min_prefix_len:\n return\n search_string = search_string.lower().strip()\n if secondary_scores:\n secondary_scores = list(secondary_scores)\n if self._length_score:\n secondary_scores.insert(0, (len(search_string), 'asc'))\n score = compute_compound_scores(secondary_scores, 100000)\n else:\n score = len(search_string)\n for i in xrange(min_prefix_len, len(search_string) + 1):\n key = \"%s:%s\" % (self._index_name, search_string[0:i])\n if operator == 'add':\n log.debug(\"adding id %s to key %s with score %s\", some_id, key,\n score)\n self._redis_conn.zadd(key, score, some_id)\n elif operator == 'rem':\n self._redis_conn.zrem(key, some_id)\n\n def get_matches(self, search_string, **kwargs):\n \"\"\"Return an ordered list of matches for the given search string\n \"\"\"\n set_theory = SetTheory(self._redis_conn)\n return set_theory.zset_fetch([('%s:%s' % (self._index_name,\n term.lower()),)\n for term in search_string.split()],\n reverse=False,\n operator='intersect', **kwargs)\n\n\ndef compute_compound_scores(score_list, score_band_width=100):\n '''Computes multi-layered sorting scores.\n\n :param score_list: Is a list of the subscores to combine. Each entry\n should be a tuple of the form:\n ((score_1, ['desc'|'asc']), ..., (score_N, ['desc'|'asc']) )\n where each of the score_i's is an unweighted score, in precedence\n order (e.g. score_2 is a higher sort order than score_3) and each\n score_i is paired with a direction, which is either 'desc' for\n decending sort (i.e. higher values first) or 'asc' for ascending sort\n (i.e. lower values first)\n :param score_band_width: how much \"space\" to allocate for each ordering.\n This should be higher than the highest possible component score. 
It\n does not need to be a power of 10, but reading your compund scores\n will be easier if it is.\n\n :rtype: list of tuples of (key, compound_score)\n '''\n score = 0\n for sub_score, direction in score_list:\n score *= score_band_width\n # note on positive/negative values here:\n # by default, redis sorts zsets in ascending order (i.e. lowest score\n # first), so for ascending sort, we just add the value as expected.\n # for a descending sort, we need to invert that, so we subtract the\n # component score.\n if direction == 'desc':\n score -= sub_score\n elif direction == 'asc':\n score += sub_score\n return score\n","sub_path":"redis_gadgets/prefix_indexer.py","file_name":"prefix_indexer.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"529424289","text":"import pandas as pd\r\n\r\nraw_df = pd.read_csv('LINKS.md', sep='\\n', names=['column']) # Lê o arquivo\r\n\r\nmd_titles = list(raw_df[raw_df['column'].str.startswith('##')].index) # Cria lista com os nomes de cada categoria\r\nmd_titles.append(len(raw_df)) # Adiciona o index do fim do dataframe \r\ntuples = [(md_titles[idx], md_titles[idx+1]) for idx in range(len(md_titles)-1)] # Cria tuplas com os intervalos (de cada categoria) a serem ordenados\r\n\r\nregex_df = raw_df[~raw_df['column'].str.startswith('##')] # Links da lista\r\nraw_df['regex'] = regex_df['column'].str.extract(r'(?:\\*{1}[ ]\\[)([\\w .,()\\-=+&%/:*#$@!?|<>]*)(?:\\])') # Regex para extrair somente o que tem dentro de cada []\r\nraw_df['regex'] = raw_df['regex'].str.lower() # Deixa todas as letras minúsculas (para ordenar ignorando as letras maiusculas)\r\n\r\n# Escreve no arquivo\r\nwith open('LINKS.md', 'w') as f:\r\n for t in tuples:\r\n f.write(raw_df.iloc[t[0]]['column'] + '\\n') # Escreve nome da categoria\r\n df = raw_df[t[0]+1 : t[1]] # Seleciona o intervalo\r\n df = df.sort_values('regex') # Ordena a categoria\r\n df['column'].apply(lambda x: f.write(x + '\\n')) # Escreve as linhas com links\r\n f.write('\\n') # Adiciona uma linha vazia entre cada categoria\r\n","sub_path":"ordena_md_categoria.py","file_name":"ordena_md_categoria.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"258370961","text":"global ad\nad = \"deneme2.txt\"\ndef dosyaac():\n sec=input(\"Dosya oluşturmak istediginize emin misiniz E/H >\")\n if(sec == 'E'):\n ad = input('Dosya adını giriniz:')\n ad = ad +'.txt'\n with open(ad,'w') as dosya:\n dosya.close()\n if(sec == 'H'):\n return\ndef listele():\n with open(ad,'r') as dosya:\n bilgiler = dosya.readlines()\n i = 0\n for bilgi in bilgiler:\n print(i,\".\",bilgi)\n i+=1\ndef bilgiekle():\n with open(ad, 'a') as dosya:\n bilgi = []\n bilgi.append(input(\"Isim Giriniz: \"))\n bilgi.append(input(\"1.Notu Giriniz: \"))\n dosya.writelines(bilgi)\ndef bilgisil():\n with open(ad,'r') as dosya:\n bilgiler=dosya.readlines()\n silinecek = int(input(\"Silinecek kayıt numarasi: \"))\n bilgiler.pop(silinecek)\n with open(ad,'w') as dosya:\n dosya.writelines(bilgiler)\n\nwhile 1:\n print(\"\"\"\nDosya olusturmakk icin 1:\nVeri silmek icin 2:\nBilgi eklemek icin 3:\nListelemek icin 4:\nCıkmak icin 5:\n\n \"\"\")\n secenek = int(input(\"Bir secenek giriniz:\"))\n if (secenek == 1):\n dosyaac()\n if (secenek == 2):\n bilgisil()\n if (secenek == 3):\n bilgiekle()\n if (secenek == 4):\n listele()\n if (secenek == 5):\n exit()\n","sub_path":"16701025/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"129410875","text":"from provider.model.scim_user import ScimUser\nfrom provider.model.user_store import user_store\n\nfrom flask import Blueprint, request, make_response, jsonify\nfrom util import init_logging\n\nlogger = init_logging(__name__)\n\n\ndef create_blueprint():\n scim_bp = Blueprint('scim_bp', __name__, template_folder='templates')\n\n @scim_bp.route('/scim/v2/User', methods=[\"POST\"])\n def user():\n try:\n if request.method == 'GET':\n return get_user(request.args)\n elif request.method == 'POST':\n return create_user(request.form)\n\n except Exception as ex:\n logger.exception(\"Exception occurred: \" + str(ex))\n return \"Error occurred: \" + ex.error_description, 400\n\n @scim_bp.route('/scim/v2/Users/', methods=['GET'])\n def get_user(user_id):\n user = user_store.get_by_id(user_id)\n if not user:\n return 404, 'Not found'\n scim_user = ScimUser.create_from(user, request.host_url)\n return make_scim_response(200, dict(scim_user.items()), user.get_etag())\n\n def create_user(parameters):\n user = ScimUser.create_from(request.json, request.host_url)\n user_store.add(user)\n return make_scim_response(201, dict(user.items()), user.get_etag())\n\n def make_scim_response(code, data, etag=None):\n resp = make_response(jsonify(data))\n resp.headers['Content-Type'] = 'application/scim+json'\n if etag:\n resp.headers['ETag'] = etag\n return resp, code\n\n return scim_bp\n","sub_path":"provider/endpoints/scim/scim.py","file_name":"scim.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"61236503","text":"from scipy import linalg\nimport numpy as np\n\nL = np.array([[4, -1, -1, -1, 0, 0, -1, 0],\n\t\t[-1, 3, -1, -1, 0, 0, 0, 0],\n\t\t[-1, -1, 3, -1, 0, 0, 0, 0],\n\t\t[-1, -1, -1, 3, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 2, -1, -1, 0],\n\t\t[0, 0, 0, 0, -1, 2, -1, 0],\n\t\t[-1, 0, 0, 0, -1, -1, 4, -1],\n\t\t[0, 0, 0, 0, 0, 0, -1, 1]])\n\nnode_mapping = {0:'A', 1:'B', 2:'C', 3:'D', 4:'E', 5:'F', 6:'G', 7:'H'}\n\n\nevals, evecs = linalg.eigh(L)\nprint(\"\\nEigen values:\")\n# print(evals)\nl = []\nfor v in evals:\n\tl.append(v)\nprint(l)\n\t\n\nprint(\"\\nEigen vectors: \")\nprint(evecs, \"\\n\")\n\n\nlambda2 = evals[1]\nx = evecs[:,1]\nprint(lambda2)\nprint(x)\n\n# Community 1\nprint(\"Community 1\")\nfor n in np.argwhere(x>0):\n\tprint(node_mapping[n[0]])\n\n# Community 2 \nprint(\"\\n Community 2\")\nfor n in np.argwhere(x<0):\n\tprint(node_mapping[n[0]])","sub_path":"Hw 3 - Modularity, Spectral Clustering, Clique-based communities/code/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"203153896","text":"#!/usr/bin/env python3\n\"\"\"This script collect MiniZinc instances and outputs them in a csv format\n\nThe MiniZinc instances are expected to be organised according to the MiniZinc\nbenchmarks structure:\n - Each problem is contained in its own folder\n - Problems contain one or more MiniZinc Model (.mzn) file\n - Each model is combined with all the data (.dzn / .json) in the same problem folder (and its subfolders)\n - If no data is found then it is assumed the model itself is an instance.\n\nFor convenience the script will print the number of collected instances on stderr.\n\nExample usage:\n python collect_instances.py minizinc-benchmarks > instances.csv\n\"\"\"\nimport csv\nimport os\nimport sys\nfrom pathlib import Path\n\nif len(sys.argv) != 2:\n print(f\"Usage: {sys.argv[0]} \")\n exit(1)\n\nassert len(sys.argv) >= 2\nbenchmarks_location = sys.argv[1]\ninstances = 0\n\n\nwriter = csv.writer(sys.stdout, dialect=\"unix\")\nwriter.writerow((\"problem\", \"model\", \"data_file\"))\nfor root, _, files in os.walk(benchmarks_location):\n for name in files:\n if name.endswith(\".mzn\"):\n problem = root.split(os.sep)[-1]\n datafiles = 0\n for nroot, _, nfiles in os.walk(root):\n for nname in nfiles:\n if nname.endswith(\".dzn\") or nname.endswith(\".json\"):\n datafiles = datafiles + 1\n instances += 1\n writer.writerow(\n (\n problem,\n Path(root + \"/\" + name),\n Path(nroot + \"/\" + nname),\n )\n )\n\n if datafiles == 0:\n instances += 1\n writer.writerow((problem, Path(root + \"/\" + name), \"\"))\n\nprint(f\"Nr. Instances = {instances}\", file=sys.stderr)\n","sub_path":"collect_instances.py","file_name":"collect_instances.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"609358525","text":"'''\nA decimal string is a string consisting of digits between 0 and 9. Internet protocol (IP) addresses\ncan be written as four decimal strings separated by periods, e.g. 192.168.1.201\n\nA careless programmer mangles a string representing an IP address in such a way that all the periods vanish.\nWrite a programme that determines where to add periods to a decimal string so that the resulting string\nis a valid address. There may be more than one valid IP address corresponding to a string, in which case\nyou should print all possibilities\n\nFor example, if the mangled string is \"19216811\" then two corresponding IP addresses are 192.168.1.1 \nand 19.216.81.1 (There are seven other possible IP addresses for this string)\n'''\n\n'''\nSolution:\nThere are three periods in a valid address, so we can enumerate all possible placements of these periods,\nand check whether all four corresponding substrings are between 0 and 255. We can reduce the number of \nplacements considered by spacing the periods 1 to 3 characters apart. We can also prune by stopping as soon\nas a substring is not valid. \n\nFor exmaple, if the string is \"19216811\", we could put the first period after \"1\", \"19\", \"192\". If the first part\nis \"1\", the second part could be \"9\", \"92\" and \"921\". Of these, \"921\" is illegal so we do not continue with it.\n'''\n\nclass Solution:\n\n def get_valid_ip_address(self, s):\n def is_valid_part(s):\n # '00', '000', '01', etc. 
are not valid, but '0' is valid.\n return len(s) == 1 or (s[0] != '0' and int(s) <= 255)\n\n result, parts = [], [None] * 4\n for i in range(1, min(4, len(s))):\n parts[0] = s[:i]\n if is_valid_part(parts[0]):\n for i in range(1, min(len(s) - i, 4)):\n parts[1] = s[i:i + j]\n if is_valid_part(parts[1]):\n for k in range(1, min(len(s) - i - j, 4)):\n parts[2], part[3] = s[i + j:i + j + k], s [i + j + k:]\n if is_valid_part(parts[2]) and is_valid_part(parts[2]):\n result.append('.'.join(parts))\n\n return result\n","sub_path":"epi/strings/get_valid_ip_address.py","file_name":"get_valid_ip_address.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"449176484","text":"#!/usr/bin/env python3\n# https://github.com/undefinedvalue0103/pylibs/blob/master/LICENSE\nimport time\nimport json\nimport pprint\n\nclass ToDo(object):\n def __init__(self, title, description=None, urgency=0, timestamp=None, id=0):\n self.title = title\n self.description = description\n self.urgency = urgency\n self.timestamp = timestamp\n self.id = id\n @property\n def __dict__(self):\n return dict(\n title=self.title,\n description=self.description,\n urgency=self.urgency,\n timestamp=self.timestamp,\n id=self.id)\n\ndef load_db():\n try:\n with open('tasks.json', 'r') as f:\n db = json.load(f)\n except:\n db = dict(\n tasks = [],\n lastmodified = time.time())\n save_db(db)\n return db\n\ndef save_db(db):\n with open('tasks.json', 'w') as f:\n json.dump(db, f, indent=4, ensure_ascii=False)\n\ndef create_task(title, description=None, urgency=0):\n db = load_db()\n pprint.pprint(db)\n if len(db['tasks']) == 0:\n last_id = 0\n else:\n last_id = max(task['id'] for task in db['tasks'])\n db['tasks'].append(\n ToDo(\n title,\n description,\n urgency,\n time.time(),\n last_id + 1).__dict__)\n db['lastmodified'] = time.time()\n save_db(db)\n return last_id + 1\ndef get_tasks():\n db = load_db()\n return [ToDo(**task) for task in db['tasks']]\ndef del_task(id):\n db = load_db()\n deleted = None\n for i, task in enumerate(db['tasks']):\n if task['id'] == id:\n deleted = db['tasks'].pop(i)\n break\n save_db(db)\n return deleted\n","sub_path":"todolib.py","file_name":"todolib.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"505551660","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nfrom random import randint\r\n\r\nFONT = (\"Verdana\", 40, \"bold\")\r\nSCORE_FONT = (\"Verdana\", 10, \"bold\")\r\n\r\n\r\ndef new_game():\r\n board = [[0 for i in range(4)] for i in range(4)]\r\n return board\r\n\r\n\r\ndef add_two(board):\r\n zero_block = []\r\n for i in range(4):\r\n for j in range(4):\r\n if board[i][j] == 0:\r\n zero_block.append((i, j))\r\n if len(zero_block) != 0:\r\n tup = randint(0, len(zero_block) - 1)\r\n tup = zero_block[tup]\r\n board[tup[0]][tup[1]] = 2\r\n return board\r\n\r\n\r\ndef can_move_horizontal(board):\r\n count = 1\r\n for i in range(4):\r\n for j in range(1,4):\r\n if board[i][j] == board[i][j-1]:\r\n return True\r\n count += 1\r\n if count != 4:\r\n return True\r\n return False\r\n\r\n\r\ndef can_move_verticle(board):\r\n count = 0\r\n for i in range(1,4):\r\n for j in range(4):\r\n if board[i][j] == board[i-1][j]:\r\n return True\r\n count += 1\r\n if count != 4:\r\n return True\r\n return False\r\n\r\n\r\ndef current_state(board):\r\n # winning state\r\n for i in range(4):\r\n for j in range(4):\r\n if board[i][j] == \"2048\":\r\n return \"win\"\r\n # check if any entry if 0, then continue\r\n for i in range(4):\r\n for j in range(4):\r\n if board[i][j] == 0:\r\n return \"continue\"\r\n # check row above and on right, if any can combine\r\n for i in range(3):\r\n for j in range(3):\r\n if board[i][j] == board[i+1][j] or board[i][j] == board[i][j+1]:\r\n return \"continue\"\r\n for i in range(1,4):\r\n if board[3][i] == board[3][i-1] or board[i][3] == board[i-1][3]:\r\n return \"continue\"\r\n # else you lost!\r\n return \"lose\"\r\n\r\n\r\ndef up(board, score):\r\n if (not can_move_verticle(board)) and can_move_horizontal(board):\r\n return board, score\r\n\r\n for i in range(4):\r\n for j in range(1,4):\r\n if board[j][i] == board[j-1][i]:\r\n score += board[j][i]\r\n board[j-1][i] *= 2\r\n board[j][i] = 0\r\n\r\n for i in 
range(4):\r\n for j in range(4):\r\n if board[j][i] == 0:\r\n count = 0\r\n while board[j+count][i] == 0 and (j+count) < 3:\r\n count += 1\r\n if board[j+count][i] != 0:\r\n board[j][i] = board[j+count][i]\r\n board[j+count][i] = 0\r\n add_two(board)\r\n return board, score\r\n\r\n\r\ndef down(board, score):\r\n if (not can_move_verticle(board)) and can_move_horizontal(board):\r\n return board, score\r\n\r\n for i in range(3,-1,-1):\r\n for j in range(3,-1,-1):\r\n if board[j][i] == board[j-1][i]:\r\n score += board[j][i]\r\n board[j][i] *= 2\r\n board[j-1][i] = 0\r\n\r\n for i in range(3,-1,-1):\r\n for j in range(3,-1,-1):\r\n if board[j][i] == 0:\r\n count = 0\r\n while board[j-count][i] == 0 and (j-count) >0:\r\n count += 1\r\n if board[j-count][i] != 0:\r\n board[j][i] = board[j-count][i]\r\n board[j-count][i] = 0\r\n add_two(board)\r\n return board, score\r\n\r\n\r\ndef right(board, score):\r\n if (can_move_verticle(board)) and (not can_move_horizontal(board)):\r\n return board, score\r\n\r\n for i in range(3,-1,-1):\r\n for j in range(3,-1,-1):\r\n if board[i][j] == board[i][j-1]:\r\n score += board[i][j]\r\n board[i][j] *= 2\r\n board[i][j-1] = 0\r\n for i in range(3,-1,-1):\r\n for j in range(3,-1,-1):\r\n if board[i][j] == 0:\r\n count = 0\r\n while board[i][j-count] == 0 and (j-count) >0:\r\n count += 1\r\n if board[i][j-count] != 0:\r\n board[i][j] = board[i][j-count]\r\n board[i][j-count] = 0\r\n add_two(board)\r\n return board, score\r\n\r\n\r\ndef left(board, score):\r\n if (can_move_verticle(board)) and (not can_move_horizontal(board)):\r\n return board, score\r\n\r\n for i in range(4):\r\n for j in range(1,4):\r\n if board[i][j] == board[i][j-1]:\r\n score += board[i][j]\r\n board[i][j-1] *= 2\r\n board[i][j] = 0\r\n\r\n for i in range(4):\r\n for j in range(4):\r\n if board[i][j] == 0:\r\n count = 0\r\n while board[i][j+count] == 0 and (j+count) < 3:\r\n count += 1\r\n if board[i][j+count] != 0:\r\n board[i][j] = board[i][j+count]\r\n 
board[i][j+count] = 0\r\n add_two(board)\r\n return board, score\r\n\r\n\r\nclass Grid:\r\n def __init__(self, master):\r\n frame = Frame(master, height=500, width=500, bg=\"grey\")\r\n frame.grid()\r\n self.score = 0\r\n scoreFr = Frame(master, height=25, width=25, bg=\"dark grey\")\r\n self.scoreLb = Label(scoreFr, text=\"Score: \", height = 2, width = 10, font = SCORE_FONT)\r\n self.scoreLb.pack()\r\n scoreFr.grid(row = 0, column = 2, padx = 8, pady = 8)\r\n self.board = new_game()\r\n self.board = add_two(self.board)\r\n self.grid = []\r\n frame.bind(\"\", self.up)\r\n frame.bind(\"\", self.down)\r\n frame.bind(\"\", self.right)\r\n frame.bind(\"\", self.left)\r\n frame.focus_set()\r\n self.game_status = \"continue\"\r\n for i in range(4):\r\n row = []\r\n for j in range(4):\r\n cell = Frame(frame, height=125, width=125, bg=\"dark grey\")\r\n lb = Label(cell, text=\"\", justify=CENTER, height=2, width=4, font=FONT)\r\n row.append(lb)\r\n lb.pack()\r\n cell.grid(row=i, column=j, padx=4, pady=4)\r\n self.grid.append(row)\r\n\r\n self.update_board()\r\n master.mainloop()\r\n\r\n def up(self, event):\r\n self.board, self.score = up(self.board, self.score)\r\n self.update_board()\r\n self.current_status()\r\n\r\n def down(self, event):\r\n self.board, self.score = down(self.board, self.score)\r\n self.update_board()\r\n self.current_status()\r\n\r\n def right(self, event):\r\n self.board, self.score = right(self.board, self.score)\r\n self.update_board()\r\n self.current_status()\r\n\r\n def left(self, event):\r\n self.board, self.score = left(self.board, self.score)\r\n self.update_board()\r\n self.current_status()\r\n\r\n def current_status(self):\r\n self.game_status = current_state(self.board)\r\n print(self.game_status)\r\n if self.game_status == \"win\" or self.game_status == \"lose\":\r\n messagebox.showinfo(\"Game Status\", self.game_status)\r\n self.score = 0\r\n for i in range(4):\r\n for j in range(4):\r\n self.board[i][j] = 0\r\n 
self.update_board()\r\n\r\n def update_board(self):\r\n self.scoreLb.configure(text = \"Score: \"+str(self.score))\r\n for i in range(4):\r\n for j in range(4):\r\n if self.board[i][j] == 0:\r\n self.grid[i][j].configure(text=str(\"\"))\r\n else:\r\n self.grid[i][j].config(text=str(self.board[i][j]))\r\nroot = Tk()\r\nroot.geometry(\"790x580+100+100\")\r\ng = Grid(root)","sub_path":"PuzzleGame.py","file_name":"PuzzleGame.py","file_ext":"py","file_size_in_byte":7476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"107659196","text":"from dataclasses import dataclass\nfrom itertools import product\nimport re\nfrom typing import Iterable\n\n# origin is (0,0), increasing down and to the right\n\n\n@dataclass(frozen=True)\nclass Point:\n x: int\n y: int\n\n\n@dataclass\nclass Claim:\n id: int\n from_left: int\n from_top: int\n width: int\n height: int\n\n def top_left(self) -> Point:\n return Point(self.from_left, self.from_top)\n\n def bottom_left(self) -> Point:\n return Point(self.from_left, self.from_top + self.height)\n\n def top_right(self) -> Point:\n return Point(self.from_left + self.width, self.from_top)\n\n def bottom_right(self) -> Point:\n return Point(self.from_left + self.width,\n self.from_top + self.height)\n\n @classmethod\n def from_string(cls, string: str):\n pattern = r'#(\\d+)\\s@\\s(\\d+),(\\d+):\\s(\\d+)x(\\d+)'\n [id, l, t, w, h] = re.search(pattern, string).groups()\n return cls(int(id), int(l), int(t), int(w), int(h))\n\n\ndef generate_points(claim: Claim) -> Iterable[Point]:\n top_left = claim.top_left()\n min_x, min_y = top_left.x, top_left.y\n bottom_right = claim.bottom_right()\n max_x, max_y = bottom_right.x, bottom_right.y\n\n point_product = product(range(min_x, max_x), range(min_y, max_y))\n return [Point(x, y) for (x, y) in point_product]\n\n\ndef part_one(lines: list[str]):\n claims = [Claim.from_string(x) for x in lines]\n d: dict[Point, int] = {}\n\n for claim in claims:\n points = generate_points(claim)\n upsert_points(d, points)\n\n return len([v for (_, v) in d.items() if v > 1])\n\n\ndef part_two(lines: list[str]):\n claims = [Claim.from_string(x) for x in lines]\n d: dict[Point, int] = {}\n claim_dict: dict[int, list[Point]] = {}\n\n for claim in claims:\n points = list(generate_points(claim))\n claim_dict[claim.id] = points\n upsert_points(d, points)\n\n for claim in claims:\n counts = [d[p] for p in claim_dict[claim.id]]\n if len([x for x in counts if x > 1]) == 0:\n return claim.id\n\n\ndef upsert_points(d: dict[Point, 
int], points: Iterable[Point]):\n for point in points:\n if point in d:\n d[point] += 1\n else:\n d[point] = 1\n\n\nif __name__ == '__main__':\n file_name = 'input.txt'\n try:\n with open(file_name) as f:\n print(part_two(f.readlines()))\n\n except FileNotFoundError:\n print(f'{file_name} not found')\n","sub_path":"2018/3_day/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"98949798","text":"import numpy as np\nimport time\nimport random\nimport json\nimport re\nfrom utils import *\nfrom story_telling import *\nimport pycorpora\n\nassigned_blanks = {}\n\n\nclass Character:\n def __init__(self, name, description):\n self.name = name\n self.description = description\n self.story_row = -1\n\n\nclass Story:\n \"\"\"\n Attributes:\n pages (np.ndarray): pages is a nxm matrix of type Page()\n challenges (dict):\n blanks (dict):\n \"\"\"\n\n def __init__(self, story_size):\n n = story_size[0]\n m = story_size[1]\n # pages is a nxm matrix of type Page()\n pages = np.zeros((n, m), dtype=Page)\n # Each element of the matrix is initialized\n for row in range(n):\n for column in range(m):\n pages[row, column] = Page()\n\n self.story_size = story_size\n\n self.pages = pages\n self.blanks = {}\n self.characters = []\n self.challenges = {}\n\n # --------------- SET UP: ---------------\n\n def setup_story(self):\n\n # Add content of the story\n set_example_story(self)\n\n # --------------- CHARACTERS: ---------------\n\n def add_character(self, name, description):\n character = Character(name, description)\n character.story_row = len(self.characters)\n self.characters.append(character)\n\n def get_character(self, character_name):\n for character in self.characters:\n if character.name == character_name:\n return character\n\n def get_character_story_row(self, character_name):\n \"\"\"\n pages is a matrix.\n Each character has a row in the pages matrix assigned\n\n Parameters:\n character_name (str):\n Returns:\n idx (int): his function returns the row that corresponds to the character\n \"\"\"\n # for character in self.characters:\n # print(character.name, end=' ')\n # print(\"\")\n\n for idx, character in enumerate(self.characters):\n if character.name == character_name:\n return idx\n\n # --------------- CHALLENGES: ---------------\n\n def load_all_challenges(self, challenges_json):\n with open(challenges_json) as json_file:\n challenges 
= json.load(json_file)\n self.challenges.update(challenges)\n\n def add_challenge(self, challenge_id, challenge):\n self.blanks[challenge_id] = challenge\n\n def get_challenge(self, challenge_id):\n return self.challenges[challenge_id]\n\n # --------------- BLANKS: ---------------\n #\n def add_blank_v2(self, blank_id, list_of_words, changes_every_time=False):\n blank = dict(\n changes_every_time=changes_every_time,\n list_of_words=list_of_words,\n )\n self.blanks[blank_id] = blank\n \n def get_the_word_for_the_blank_v2(self, blank_id):\n if blank_id in assigned_blanks.keys():\n ret_word = assigned_blanks[blank_id]\n else:\n \n blank = self.blanks[blank_id]\n random.seed()\n ret_word = random.choice(blank['list_of_words'])\n \n if not blank['changes_every_time']:\n assigned_blanks[blank_id] = ret_word\n return ret_word\n\n def add_blank(self, blank_id, word_type='', changes_every_time=False, list_of_words=None):\n \"\"\"\n Parameters:\n blank_id (str):\n (list_of_words (list of str): possible words to fill in the blank with)\n word_type(str): what category of word is this (flower, spell, etc), has to fit with json file names in pycorpora\n changes_every_time (bool): if the blank should be filled in with a (potentially) different word every time\n list_of_words\n \"\"\"\n blank = dict(\n changes_every_time=changes_every_time,\n list_of_words=list_of_words,\n word_type=word_type\n )\n self.blanks[blank_id] = blank\n\n def get_the_word_for_the_blank(self, blank_id):\n \"\"\"\n Description: It uses the blank to find a word to fill it with\n\n Parameters:\n blank_id:\n\n Returns:\n A word with which the blank will be filled\n \"\"\"\n \"\"\"\n Keyword blanks have to be called: treasure, map, tech, magic, weapon\n \"\"\"\n\n blank = self.blanks[blank_id]\n # Random word from the internet!\n if blank['changes_every_time']:\n \"\"\"\n In this case we replace the word with something from pycorpora\n Check word types\n \"\"\"\n if blank_id == \"spells\":\n return 
random.choice(pycorpora.words.spells['spells'])['incantation']\n elif blank_id == \"adverbs\":\n return random.choice(pycorpora.words.adverbs['adverbs'])\n elif blank_id == \"nouns\":\n return random.choice(pycorpora.words.nouns['nouns'])\n elif blank_id == \"strange_words\":\n return random.choice(pycorpora.words.strange_words['words'])\n elif blank_id == \"lovecraft_words\":\n return random.choice(pycorpora.words.literature.lovecraft_words['words'])\n elif blank_id == \"shakespeare_words\":\n return random.choice(pycorpora.words.literature.shakespeare_words['words'])\n\n else:\n return \"you messed up, buddy\" # LOL\n\n # part_of_speech = blank['part_of_speech']\n # word = get_random_word_from_the_internet(part_of_speech)\n # Random word from the list of words provided\n else:\n # list_of_words = self.blanks[blank_key]['list_of_words']\n # word = random.choice(list_of_words)\n if blank_id not in assigned_keywords:\n if blank_id == \"treasure\":\n treasure = random.choice(adventurer_treasure)\n assigned_keywords[\"treasure\"] = treasure\n return treasure\n elif blank_id == \"map\":\n map = random.choice(adventurer_map)\n assigned_keywords[\"map\"] = map\n return map\n elif blank_id == \"tech\":\n tech = random.choice(sci_fi_thing)\n assigned_keywords[\"tech\"] = tech\n return tech\n elif blank_id == \"magic\":\n magic = random.choice(magic_thing)\n assigned_keywords[\"magic\"] = magic\n return magic\n elif blank_id == \"weapon\":\n weapon = random.choice(detective_weapon)\n assigned_keywords[\"weapon\"] = weapon\n return weapon\n elif blank_id == \"villain\":\n name= get_random_name()\n assigned_keywords[\"villain\"] = name\n return name\n else:\n return assigned_keywords[blank_id]\n\n def fill_in_the_blanks(self, page_variation):\n \"\"\"\n Fills in all the blanks in a page_variation\n \"\"\"\n ret_txt = []\n for txt in page_variation.txt:\n found_keys = re.findall(r'~\\w+~', txt)\n for blank_key in found_keys:\n if blank_key[1:-1] in self.blanks.keys():\n word = 
self.get_the_word_for_the_blank_v2(blank_key[1:-1])\n # word = self.get_the_word_for_the_blank(blank_key[1:-1])\n txt = txt.replace(blank_key, word)\n ret_txt.append(txt)\n return ret_txt\n\n # --------------- OUTCOME SELECTION: ---------------\n\n @staticmethod\n def select_good_or_bad_outcome(page_variations, player):\n \"\"\"\n Description: It uses the blank to find a word to fill it with\n\n Parameters:\n page_variations (array of PageVariation):\n player (Player):\n\n Returns:\n ret_page_variation (PageVariation): One of the page variations from the page_variations array\n \"\"\"\n\n challenge_outcomes = player.challenge_outcomes\n challenge_outcome = challenge_outcomes[-1] # Outcome of last challenge\n prob_array = player.prob_array\n good_outcome = page_variations[0]\n bad_outcome = page_variations[1]\n\n # prob_array is a list of tuples like [(0.82, \"good\"), (0.63, \"bad\")]\n total_val = 0.0\n rand_num = round(random.uniform(-1, 1), 2)\n # all calculations performed for probability to get good event\n challenge_outcome = int(challenge_outcome)\n good_probability = max(min(challenge_outcome + rand_num, 1), 0)\n for el in prob_array:\n if el[1] == \"bad\":\n val = -1 * el[0]\n else:\n val = el[0]\n total_val = total_val + val\n good_probability = max(min(good_probability + total_val, 1), 0)\n if good_probability >= 0.5:\n prob_array.append((good_probability, \"good\"))\n ret_page_variation = good_outcome # Returned page variation\n else:\n # gets probability for bad event\n prob_array.append((1-good_probability, \"bad\"))\n ret_page_variation = bad_outcome # Returned page variation\n\n return ret_page_variation\n\n # --------------- PAGE ADDRESSING: ---------------\n\n def get_page_raw(self, row, column):\n return self.pages[row][column]\n\n def get_page(self, character_name, chapter, page):\n \"\"\"\n Parameters:\n character_name (str):\n chapter (int): A chapter consists of 3 pages\n page (str): Can only be {0, 1, 2}\n Returns:\n The page at specified 
location\n \"\"\"\n row = self.get_character_story_row(character_name)\n page_number = {'intro': 0, 'challenge': 1, 'outro': 2}\n column = chapter * 3 + page_number[page]\n return self.pages[row][column]\n\n\nclass Page:\n def __init__(self):\n self.page_name = ''\n self.page_variations = []\n self.page_type = ''\n self._last_page = False\n\n def add_page_variation(self, txt=None, challenge=None):\n page_var = PageVariation()\n page_var.txt = txt\n page_var.challenge = challenge\n self.page_variations.append(page_var)\n\n def set_page_type(self, page_type):\n correct_page_type = page_type == 'challenge' or page_type == 'outcome'\n assert correct_page_type, 'page_type must be equal to \\'challenge\\' or \\'outcome\\''\n self.page_type = page_type\n\n def set_last_page(self, last_page_bool):\n self._last_page = last_page_bool\n\n def is_it_the_last_page(self):\n return self._last_page\n\n\nclass PageVariation:\n \"\"\"\n Attributes:\n txt (list of strings): Contains the story content\n \"\"\"\n\n def __init__(self):\n self.txt = []\n self.challenge = None\n self.story_location = [0, 0, 0]\n","sub_path":"backend/story_structure_v2.py","file_name":"story_structure_v2.py","file_ext":"py","file_size_in_byte":10914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"102087556","text":"out = open('results/sys_tput_perm.csv', 'w')\nfor i in range(40320):\n sys_tput = -1\n f = open('permutation/'+str(i+1)+'.out', 'r')\n for line in f:\n tokens = line.split()\n if (len(tokens) > 0 and tokens[0] == 'System'):\n sys_tput = tokens[7].split('/')[0]\n f.close()\n out.write(str(i+1)+','+str(sys_tput))\n out.write('\\n')\nout.close()\n","sub_path":"scripts/extract_sys_tput.py","file_name":"extract_sys_tput.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"622267260","text":"\nimport os.path as path\nimport os\nimport argparse\n\ncurdir = path.abspath(os.curdir)\ncmakefile_dir = path.join(curdir,\"CMakeFiles\")\nbuild_dir = path.join(curdir,\"cmake_build\")\ntargetName = \"hello\"\ntargetName1 = \"hello_static\"\nbuildConfig = \"Release\"\n\n\ndef remove_project_file():\n os.system(\"rm -rf \" + build_dir)\n os.system(\"rm -rf \" + cmakefile_dir)\n print(\"clean finish!\")\n\ndef cmake_build_project():\n print(\"Begin Building...\")\n print(\"current Path:\"+ curdir)\n print(\"cmake build dir:\"+ build_dir)\n\n if not path.exists(build_dir):\n os.system(\"mkdir \" + build_dir)\n\n os.chdir(build_dir)\n\n os.system(\"cmake ..\")\n os.system(\"cmake --build . --target \"+targetName+\" --config \"+buildConfig)\n os.system(\"cmake --build . --target \"+targetName1+\" --config \"+buildConfig)\n\nparser = argparse.ArgumentParser(description=\"build help\")\nparser.add_argument('--clean',action = \"store_true\",help=\"clean project\")\nargs = parser.parse_args()\n\nprint(\"args:\"+str(vars(args)))\nif args.clean:\n remove_project_file()\nelse:\n cmake_build_project()\n","sub_path":"static-sample/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"49256942","text":"#====================================================================================\r\n# UNIVERSITY OF CAPE TOWN\r\n#\r\n# Author: Jason Pilbrough and Samantha Ball\r\n# \r\n# Create Date: 21.04.2020 18:42:46\r\n# Project Name: LEIA\r\n#\r\n# Description: \r\n# \r\n# Monitors serial port to receive data sent by FPGA over UART\r\n#\r\n# Revision: 1.0 - Final Version\r\n#====================================================================================\r\n\r\n\r\nimport serial\r\nimport time\r\nfrom serial.tools import list_ports\r\nfrom PIL import Image\r\nimport numpy as np\r\n\r\n\r\nglobal SERIAL_CHANNEL; SERIAL_CHANNEL = \"COM4\"\r\nglobal BAUD_RATE; BAUD_RATE = 8064000 #high baud rates must be a multiple of 115200\r\nglobal TIMEOUT; TIMEOUT = 20\r\nglobal IMAGE_HEIGHT; IMAGE_HEIGHT = 400 #160\r\nglobal IMAGE_WIDTH; IMAGE_WIDTH = 520\r\nglobal NUM_CHANNELS; NUM_CHANNELS = 3 #number of color channels in the image (greyscale=1, color = 3)\r\nglobal BYTES_TO_READ; BYTES_TO_READ = IMAGE_HEIGHT * IMAGE_WIDTH * NUM_CHANNELS\r\nglobal OUPUT_IMAGE_FILENAME; OUPUT_IMAGE_FILENAME = \"output.jpg\"\r\n\r\n\r\ndef receive_serial_data():\r\n\tprint(\"Listening on serial channel\" , SERIAL_CHANNEL, \"... 
( timeout =\", TIMEOUT, \"sec, baud =\", BAUD_RATE,\")\")\r\n\tserial_coms = serial.Serial(SERIAL_CHANNEL, BAUD_RATE, timeout=TIMEOUT)\r\n\tdata_raw = np.frombuffer(serial_coms.read(BYTES_TO_READ), dtype=np.uint8)\r\n\r\n\t\r\n\tif(len(data_raw)==0):\r\n\t\tserial_coms.flushInput()\r\n\t\tserial_coms.flushOutput()\r\n\t\ttime.sleep(1)\r\n\t\tprint(\"Serial timeout.\\n\")\r\n\t\treturn 0;\r\n\r\n\tprint(\"Recieved\",len(data_raw),\"bytes of data.\")\r\n\r\n\tdata_clean = (data_raw[0:BYTES_TO_READ]).reshape(IMAGE_HEIGHT,IMAGE_WIDTH, NUM_CHANNELS)\r\n\r\n\r\n\tprint(\"Writing image to file...\")\r\n\t\r\n\tim = Image.fromarray(data_clean, mode=\"RGB\")\r\n\tim.save(OUPUT_IMAGE_FILENAME)\r\n\t\r\n\tserial_coms.flushInput()\r\n\tserial_coms.flushOutput()\r\n\ttime.sleep(1)\r\n\t\r\n\tprint(\"Done.\\n\")\r\n\treturn 1;\r\n \r\n\r\ndef list_available_ports():\r\n\tprint(\"Available ports:\")\r\n\tports = serial.tools.list_ports.comports()\r\n\r\n\tfor port in ports:\r\n\t\tprint(\" *\", port[0])\r\n\r\n\r\n\r\n#list_available_ports();\r\ncont = 1\r\nwhile(cont):\r\n\tcont = receive_serial_data();\r\n\t\r\n\t\r\n","sub_path":"python/UART_COMS.py","file_name":"UART_COMS.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"117241153","text":"from __future__ import absolute_import\n\nimport json\n\nimport falcon\n\nfrom ._base import set_dimmers, load_dimmers\n\n\nclass RequireJSON(object):\n\n def process_request(self, req, resp):\n if not req.client_accepts_json:\n raise falcon.HTTPNotAcceptable(\n 'This API only supports responses encoded as JSON.',\n href='http://docs.examples.com/api/json')\n\n if req.method in ('POST', 'PUT'):\n if 'application/json' not in req.content_type:\n raise falcon.HTTPUnsupportedMediaType(\n 'This API only supports requests encoded as JSON.',\n href='http://docs.examples.com/api/json')\n\n\nclass JSONTranslator(object):\n\n def process_request(self, req, resp):\n # req.stream corresponds to the WSGI wsgi.input environ variable,\n # and allows you to read bytes from the request body.\n #\n # See also: PEP 3333\n if req.content_length in (None, 0):\n # Nothing to do\n return\n\n body = req.stream.read()\n if not body:\n raise falcon.HTTPBadRequest('Empty request body',\n 'A valid JSON document is required.')\n\n try:\n req.context['doc'] = json.loads(body.decode('utf-8'))\n\n except (ValueError, UnicodeDecodeError):\n raise falcon.HTTPError(falcon.HTTP_753,\n 'Malformed JSON',\n 'Could not decode the request body. 
The '\n 'JSON was incorrect or not encoded as '\n 'UTF-8.')\n\n def process_response(self, req, resp, resource):\n if 'result' not in req.context:\n return\n\n resp.body = json.dumps(req.context['result'])\n\n\nclass ThingsResource:\n def on_post(self, req, resp):\n try:\n doc = req.context['doc']\n except KeyError:\n raise falcon.HTTPBadRequest(\n 'Missing thing',\n 'A thing must be submitted in the request body.')\n\n set_dimmers(doc)\n\n resp.status = falcon.HTTP_201\n req.context['result'] = load_dimmers()\n\n# Configure your WSGI server to load \"things.app\" (app is a WSGI callable)\napp = falcon.API(middleware=[\n RequireJSON(),\n JSONTranslator(),\n])\n\nthings = ThingsResource()\napp.add_route('/', things)\n","sub_path":"testing-architectures/implementations/backend/python/lighting/falcon.py","file_name":"falcon.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"527261242","text":"# -*- coding:utf-8 -*-\n# Author: yangbin\n# Created: 01/23/2018\n\n\"\"\"\nGiven a sorted array, remove the duplicates in-place such that each element appear only once and return the new length.\n\nDo not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.\n\nExample:\n\nGiven nums = [1,1,2],\n\nYour function should return length = 2, with the first two elements of nums being 1 and 2 respectively.\nIt doesn't matter what you leave beyond the new length.\n\"\"\"\n\nclass Solution(object):\n def removeDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) <= 1:\n return len(nums)\n v = nums[0]\n c = 1\n for num in nums[1:]:\n if v == num:\n continue\n nums[c] = num \n c += 1 \n v = num\n return c \n","sub_path":"Array/rm_dup_from_sorted_array.py","file_name":"rm_dup_from_sorted_array.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"41726058","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n# Generated by the OpenERP plugin for Dia !\nfrom osv import fields,osv\n\nclass oper_recaudador(osv.osv):\n \"\"\"Class recauddor\"\"\"\n _name = 'oper.recaudador'\n _columns = {\n 'name': fields.char('Name', size=50, required=True, translate=True),\n #'SIS': fields.float('SIS', size=6, required=True, translate=True),\n 'multi_banco': fields.boolean('Es Multibanco'),\n 'partner_id': fields.many2one('res.partner','partner_id','Partner'),\n }\noper_recaudador()\n\nclass oper_archivos(osv.osv):\n \"\"\" Class archivos\"\"\"\n _name = 'oper.archivos'\n _columns = {\n 'name': fields.char('Descripción', size=30, required=True, traslate=True),\n 'recaudador_id': fields.many2one('oper.archivos',string='Archivo'),\n 'estructura_id': fields.many2one('oper.archivos', string='archivos'),\n 'tipo_archivo': fields.selection([('1','Universo'), ('2','Envio'),('3','Recepción')]),\n }\noper_archivos()\n\nclass oper_estructura(osv.osv):\n \"\"\"estructura\"\"\"\n _name = 'oper.estructura'\n _columns = {\n 'name': fields.char('Name', size=50, required=True, 
translate=True),\n 'encabezado': fields.selection([('si','Contiene'),('no','No Contiene')],'Contiene Encabezado',required=True),\n 'archivos_id': fields.one2many('oper.archivos', 'estructura_id',string='Archivo'),\n 'tipo_archivo': fields.selection([('1','texto rango fijo'),('2','CSV rango variable'), ('3','texto rango variable')], 'Tipo Archivo', required=True),\n 'separador': fields.selection([('1','sin separador'),('2','coma'), ('3','punto y coma')], 'Tipo Separador', required=True),\n 'campos_id': fields.one2many('oper.campos','estructura_id',string='Campos'),\n }\noper_estructura()\n\nclass oper_campos(osv.osv):\n \"\"\" Class campos\"\"\"\n _name = 'oper.campos'\n _columns = {\n 'estructura_id': fields.many2one('oper.estructura',string='Estructura'),\n 'tipo': fields.selection([('1','encabezado'),('2','detalle' )], 'Type=campos'),\n 'posicion': fields.integer('Posición', size=8, required=True, translate=True),\n 'q_caracteres': fields.integer('Numero de Caracteres', size=3, required=True, translate=True),\n 'tipo_dato': fields.selection([('1','Caracter'),('2','Numerico Entero'),('3','Numerico con Decimales')]),\n 'relleno': fields.boolean('Requiere Relleno'),\n 'caracter_relleno': fields.char(size=1),\n }\noper_campos()\n\nclass res_partner(osv.osv):\n \"\"\"Class res_partner\"\"\"\n _name = 'res.partner'\n _inherit = 'res.partner'\n _columns = {\n #'recaudador_ids': fields.many2one('oper.recaudador','partner_id',string='Recaudador'),\n 'recaudador_id': fields.many2one('oper.recaudador',string='Recaudador'),\n }\n\nres_partner()\n\n","sub_path":"recaudacion/recaudacion.py","file_name":"recaudacion.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"142423187","text":"__author__ = 'Ricardo'\n\nimport pandas as pd\nfrom sklearn.cross_validation import train_test_split\n\n\ndef build_df(location):\n \"\"\"\n\n :param csv_location:\n :return:\n \"\"\"\n\n print('Dataframe is being built...')\n df1 = pd.read_csv(location)\n df1.rename(columns=lambda i: i.replace(' ', '_').lower(), inplace=True)\n\n return df1\n\n\ndef _impute_missing_values(df):\n ########\n # Takes in a dataframe and imputes missing values\n ########\n\n print('Values are being imputed in dataframe...')\n\n # Impute numerical variables with mean\n df1 = df.fillna(df.mean())\n # Impute categorical variables with mode\n df1 = df1.apply(lambda i: i.fillna(i.mode()[0]))\n\n return df1\n\n\ndef _process_features(df):\n ########\n # Takes in a dataframe and converts categorical variables into indicator variables\n # and rescales numerical variables between -1 and 1\n ########\n # Split dataset in two, one with all numerical features and one with all categorical features\n print('Processing features...')\n # num_df = df.select_dtypes(include=['float64'])\n # cat_df = df.select_dtypes(include=['object'])\n # # Convert categorical features into indicator variables\n # if len(cat_df.columns) > 0:\n # cat_df = _convert_to_indicators(cat_df)\n # # Rescale numerical features between -1 and 1\n # if len(num_df.columns) > 0:\n # num_df = ((1/(num_df.max()-num_df.min()))*(2*num_df-num_df.max()-num_df.min()))\n ### Since data was preprocessed\n df1 = ((1/(df.max()-df.min()))*(2*df-df.max()-df.min()))\n # # Recombine categorical and numerical feature into one dataframe\n # df = num_df.join(cat_df)\n # Replace NaN's that were caused by division by 0 when rescaling with 0's\n # This occurs when all values are 0 (eg. 
indicator variables)\n return df1.fillna(0)\n\n\ndef _convert_to_indicators(df):\n ########\n # Takes in a dataframe and makes a new dataframe with indicator variables\n # for each column of the provided dataframe\n ########\n\n print('Converting some features to indicator variables...')\n\n # Create a new data frame with indicator variables for the first column\n # Needs to be done with this so that other indicator variables can be joined by iteration\n\n df1 = pd.get_dummies(df.iloc[:, 0])\n df1 = df1.iloc[:, 0:1]\n\n # Iterate through columns creating indicator variables for each column and\n # join the indicator variables to the new dataframe created above\n\n if len(df.columns) > 1:\n for i in range(1, len(df.columns)):\n df2 = pd.get_dummies(df.iloc[:, i])\n df1 = df1.join(df2.iloc[:, 0:len(df2.columns)-1])\n\n return df1\n\n\ndef split(df):\n ########\n # Takes in two dataframes for the features and labels of a dataset and\n # outputs a dictionary with training and keys relating to training testing sets for each\n ########\n\n print('Performing prelimianry datasplit')\n\n labels = df.pop(df.columns[len(df.columns)-1])\n features = df\n x_train, x_test, y_train, y_test = train_test_split(features,\n labels,\n test_size=0.25,\n random_state=33)\n data_dict = {'x_test': x_test, 'x_train': x_train,\n 'y_test': y_test, 'y_train': y_train}\n return data_dict\n\n\ndef process_csv(location, impute_values=True, indicator_variable=False):\n\n df = build_df(location)\n\n # Separate dataframe into labels and features\n if impute_values and not indicator_variable:\n df = _impute_missing_values(df)\n\n df_dict = split(df)\n\n return df_dict","sub_path":"process_csv.py","file_name":"process_csv.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"355809401","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Author: Mine DOGAN \n# Author: Emre Akkaya \n\n\nfrom base.plugin.abstract_plugin import AbstractPlugin\nimport json, os\nimport traceback\nfrom PIL import ImageGrab\n\nclass TakeScreenshot(AbstractPlugin):\n def __init__(self, data, context):\n super(TakeScreenshot, self).__init__()\n self.data = data\n self.context = context\n self.logger = self.get_logger()\n self.message_code = self.get_message_code()\n\n self.temp_file_name = str(self.generate_uuid())\n self.shot_path = '{0}{1}.png'.format(str(self.Ahenk.received_dir_path()), self.temp_file_name)\n # self.take_screenshot = 'xwd -root -display :{1} | convert xwd:- ' + self.shot_path\n # self.take_screenshot = 'xwd -root -display :{1} | convert - jpg:- > ' + self.shot_path\n # export command for ltsp\n # self.take_screenshot = 'su {0} -c \\' export DISPLAY={1} && scrot ' + self.shot_path + ' \\''\n\n def handle_task(self):\n try:\n\n try:\n os.remove(self.shot_path)\n except Exception:\n pass\n\n # user_name = None\n #\n # if self.has_attr_json(self.data, self.Ahenk.dn()) and self.data[self.Ahenk.dn()] is not None:\n # user_name = self.data[self.Ahenk.dn()]\n #\n # if not user_name:\n # self.logger.debug('Taking screenshot with default display.')\n # arr = self.get_username_display()\n # self.logger.debug('Default username: {0} display: {1}'.format(arr[0], arr[1]))\n # if arr is None:\n # self.context.create_response(code=self.message_code.TASK_ERROR.value,\n # message='Ekran görüntüsü alırken hata oluştu: Varsayılan display\\'e erişilemedi.')\n # return\n #\n # ##permission\n # self.logger.debug(\n # 'Asking for screenshot to user {0} on {1} display'.format(arr[0], arr[1]))\n #\n # user_answer = self.show_message(':' + arr[1], arr[0],\n # \"Ekran görüntüsünün alınmasına izin veriyor musunuz?\",\n # \"Ekran Görüntüsü\")\n #\n # if user_answer is None:\n # self.logger.error('User answer could not kept.')\n # 
self.context.create_response(code=self.message_code.TASK_ERROR.value,\n # message='Ekran görüntüsü alırken hata oluştu: Kullanıcı iznine erişilemedi.')\n # return\n # elif user_answer is True:\n # self.logger.debug('User accepted for screenshot')\n # self.logger.debug('Taking screenshot with specified display: {0}'.format(arr[1]))\n # self.logger.debug(\n # 'Executing take screenshot command with user: {0} and display: {1}'.format(arr[0], arr[1]))\n # self.logger.debug(str(self.take_screenshot.format(arr[1])))\n # result_code, p_out, p_err = self.execute(self.take_screenshot.format(arr[1]), as_user=arr[0])\n #\n # if result_code != 0:\n # self.logger.error(\n # 'A problem occurred while running take screenshot command with default display')\n # self.context.create_response(code=self.message_code.TASK_ERROR.value,\n # message='Ekran görüntüsü alırken hata oluştu: Komut başarıyla çalıştırılamadı.')\n # return\n #\n # else:\n # self.logger.warning('User decline to take screenshot.')\n # self.context.create_response(code=self.message_code.TASK_WARNING.value,\n # message='Eklenti başarıyla çalıştı; fakat kullanıcı ekran görüntüsü alınmasına izin vermedi.')\n # return\n #\n # else:\n # user_display = self.Sessions.display(user_name)\n # if not user_display:\n # user_display = '0'\n #\n # ##permission\n # self.logger.debug(\n # 'Asking for screenshot to user {0} on {1} display'.format(user_name, user_display))\n #\n # user_answer = self.ask_permission(user_display, user_name,\n # \"Ekran görüntüsünün alınmasına izin veriyor musunuz?\",\n # \"Ekran Görüntüsü\")\n #\n # if user_answer is None:\n # self.logger.error('User answer could not kept.')\n # self.context.create_response(code=self.message_code.TASK_ERROR.value,\n # message='Ekran görüntüsü alırken hata oluştu: Kullanıcı iznine erişilemedi.')\n # return\n #\n # elif user_answer is True:\n # self.logger.debug('User accepted for screenshot')\n # self.logger.debug('Taking screenshot with specified display: 
{0}'.format(user_display))\n #\n # if self.Sessions.userip(user_name):\n # self.execute(self.take_screenshot.format(user_name, user_display),\n # ip=self.Sessions.userip(user_name))\n # self.scopy_from_remote(self.shot_path, self.shot_path, ip=self.Sessions.userip(user_name))\n # else:\n # self.execute(self.take_screenshot.format(user_name, user_display.replace(':', '')),\n # as_user=user_name)\n #\n # self.logger.debug('Screenshot command executed.')\n # else:\n # self.logger.warning('User decline to take screenshot.')\n # self.context.create_response(code=self.message_code.TASK_WARNING.value,\n # message='Eklenti başarıyla çalıştı; fakat kullanıcı ekran görüntüsü alınmasına izin vermedi.')\n # return\n # ##permission###\n snapshot = ImageGrab.grab()\n # save_path = \"C:\\\\Users\\\\YourUser\\\\Desktop\\\\MySnapshot.jpg\"\n snapshot.save(self.shot_path)\n if self.is_exist(self.shot_path):\n self.logger.debug('Screenshot file found.')\n\n data = {}\n md5sum = self.get_md5_file(str(self.shot_path))\n self.logger.debug('{0} renaming to {1}'.format(self.temp_file_name, md5sum))\n self.rename_file(self.shot_path, self.Ahenk.received_dir_path() + md5sum)\n self.logger.debug('Renamed.')\n data['md5'] = md5sum\n self.context.create_response(code=self.message_code.TASK_PROCESSED.value,\n message='Ekran görüntü9sü başarıyla alındı.',\n data=json.dumps(data),\n content_type=self.get_content_type().IMAGE_JPEG.value)\n self.logger.debug('SCREENSHOT task is handled successfully')\n else:\n raise Exception('Image not found this path: {0}'.format(self.shot_path))\n\n except Exception as e:\n self.logger.error(\n 'A problem occured while handling SCREENSHOT task: {0}'.format(traceback.format_exc()))\n self.context.create_response(code=self.message_code.TASK_ERROR.value,\n message='Ekran görüntüsü alırken hata oluştu: {0}'.format(str(e)))\n\n # def get_username_display(self):\n # result_code, p_out, p_err = self.execute(\"who | awk '{print $1, $5}' | sed 's/(://' | sed 's/)//'\", 
result=True)\n #\n # if result_code != 0:\n # return None\n # lines = str(p_out).split('\\n')\n # for line in lines:\n # arr = line.split(' ')\n # if len(arr) > 1 and str(arr[1]).isnumeric() is True and arr[0] != 'root':\n # return arr\n # return None\n\n\ndef handle_task(task, context):\n screenshot = TakeScreenshot(task, context)\n screenshot.handle_task()","sub_path":"plugins/screenshot/take-screenshot.py","file_name":"take-screenshot.py","file_ext":"py","file_size_in_byte":8689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"13134567","text":"class Solution:\n def countBits(self, num):\n \"\"\"\n :type num: int\n :rtype: List[int]\n \"\"\"\n ans = [0] * (num + 1)\n\n for x in range(1, num+1):\n ans[x] = ans[x>>1] + (x & 1) # f[i] = f[i / 2] + i % 2\n # ans[x] = ans[x&(x-1)] + 1\n\n return ans","sub_path":"solutions/dp/problem338_Counting Bits.py","file_name":"problem338_Counting Bits.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"210933372","text":"#Skript um sich alle möglichen Laueindices für einen Punkt anzeigen zu lassen. Diese müssen dann manuell ausgewählt werden.\nfrom fractions import Fraction\nimport numpy as np\nfrom numpy import genfromtxt\nimport matplotlib.pyplot as plt\n\n#Imortiere Datensatz der mit KoordinatenConver.py erstellt wurde\ndata = genfromtxt('Koord.txt', delimiter=';')\nx = np.array(data[1:, 3])\ny = np.array(data[1:, 5])\nz = np.array(data[1:, 7])\n\n# l=[1,1,1,1,1,1,1,1]\n\n\n\nfor i in range(0, 8):\n j = np.linspace(1,10.1,1000)\n h = x[i]/z[i]*j\n k = y[i]/z[i]*j\n dif=np.abs(np.round(h,0)-h)+np.abs(np.round(k,0)-k)+np.abs(np.round(j,0)-j)\n plt.plot(j,dif, label=i+1)\n n = 1\n print('WWWWWWWWWWWWWWWWWWWWWWW')\n while j[n+1] < 10.:\n if dif[n-1]>dif[n] and dif[n+1]>dif[n]:\n l=j[n]\n a = x[i]/z[i]*l\n b = y[i]/z[i]*l\n if a<=10 and b<=10 and l<=10:\n print('%.2f %.2f %.2f' %(a,b,l))\n l = int(np.round(l))\n a = int(np.round(a))\n b = int(np.round(b))\n if a<=10 and b<=10 and l<=10:\n print(a,b,l)\n print('------------------')\n if (a==0):\n if((b+l)%2!=0):\n a=2*a\n b=2*b\n l=2*l\n elif (b==0):\n if((a+l)%2!=0):\n a=2*a\n b=2*b\n l=2*l\n elif (l==0):\n if((a+b)%2!=0):\n a=2*a\n b=2*b\n l=2*l\n elif(a%2==0 and (b%2!=0 or l!=0) or b%2==0 and (l%2!=0 or a!=0) or l%2==0 and (a%2!=0 or b!=0)):\n a=2*a\n b=2*b\n l=2*l\n if a<=10 and b<=10 and l<=10:\n # plt.text(x=j[n]-0.2, y=dif[n]-0.02, s=\"(%d %d %d)\" % (a,b,l), fontsize=10)\n plt.plot(j[n], dif[n], 'x', label=\"(%d %d %d), (%.2f %.2f %.2f)\" % (a,b,l, x[i]/z[i]*j[n], y[i]/z[i]*j[n], j[n]), ms=10)\n n += 1\n plt.legend(loc = 'upper right')\n plt.show()\n\n# l=j[np.where(dif == np.amin(dif))]\n# a = x[i]/z[i]*l\n# b = y[i]/z[i]*l\n# print(a,b,l)\n#\n# plt.show()\n","sub_path":"Versuch_428/MillerShowAll.py","file_name":"MillerShowAll.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"612749171","text":"#!/usr/bin/env python3\n\nimport pathlib\n\nfrom setuptools import find_packages, setup\n\nPROJECT_ROOT = pathlib.Path(__file__).parent\nREADME = (PROJECT_ROOT / 'README.md').read_text()\n\nsetup(\n name='freenit',\n version='0.0.9',\n description='REST API framework based on Flask-Smorest',\n long_description=README,\n long_description_content_type='text/markdown',\n url='https://github.com/freenit-framework/backend',\n author='Goran Mekić',\n author_email='meka@tilda.center',\n license='BSD',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Internet :: WWW/HTTP',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n keywords=[\n 'REST',\n 'openapi',\n 'swagger',\n 'flask',\n 'marshmallow',\n 'apispec'\n 'webargs',\n ],\n packages=find_packages(exclude=['tests*']),\n python_requires='>=3.5',\n install_requires=[\n 'bcrypt',\n 'flask-collect>=1.3.2',\n 'flask-cors>=3.0.8',\n 'flask-jwt-extended>=3.24.1',\n 'flask-security>=3.0.0',\n 'flask-smorest>=0.18.2',\n 'peewee-migrate>=1.1.6',\n ],\n include_package_data=True,\n package_data={\n '': [\n 'static/swaggerui/*',\n 'templates/*',\n 'project/*',\n 'project/bin/*',\n ]\n },\n scripts=['bin/freenit.sh'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"497375194","text":"import networkx as nx\n\nfrom math import isclose\n\nfrom .exploring import assert_numeric, extract_node\n\n\ndef step_layout(g, ego=None, iterations=1, weight='weight'):\n before = nx.get_node_attributes(g, 'pos')\n\n fixed = None if ego is None else [ego]\n\n return nx.spring_layout(g, pos=before, fixed=fixed, iterations=iterations, weight=weight)\n\n\nLAYOUTS = {\n 'bipartite': nx.bipartite_layout,\n 'circular': nx.circular_layout,\n 'kamada_kawai': nx.kamada_kawai_layout,\n 'planar': nx.planar_layout,\n 'random': nx.random_layout,\n 'shell': nx.shell_layout,\n 'spring': nx.spring_layout,\n 'spectral': nx.spectral_layout,\n 'step': step_layout,\n}\n\n\ndef normalize(g):\n if g.number_of_nodes() == 0:\n return\n\n xs = []\n ys = []\n for n in g.nodes:\n pos = g.nodes[n]['pos']\n xs.append(pos[0])\n ys.append(pos[1])\n\n xmin = min(xs)\n xmax = max(xs) - xmin\n ymin = min(ys)\n ymax = max(ys) - ymin\n\n for n in g.nodes:\n pos = g.nodes[n]['pos']\n x = 0.5 if isclose(xmax, 0) else (pos[0] - xmin) / xmax\n y = 0.5 if isclose(ymax, 0) else (pos[1] - ymin) / ymax\n g.nodes[n]['pos'] = (x, y)\n\n\ndef scatter(g, xmap, ymap):\n for n in g.nodes:\n x = assert_numeric(extract_node(g, n, xmap))\n y = assert_numeric(extract_node(g, n, ymap))\n g.nodes[n]['pos'] = (x, y)\n\n normalize(g)\n\n\ndef move(g, key, *args, **kwargs):\n try:\n layout = LAYOUTS[key]\n except KeyError:\n raise KeyError('layout key must be one of the following: ' + ', '.join('\"{}\"'.format(k) for k in LAYOUTS))\n\n after = layout(g, *args, **kwargs)\n\n for n, pos in after.items():\n g.nodes[n]['pos'] = (pos[0], pos[1])\n\n normalize(g)\n\n\ndef move_copy(g, h, key, *args, **kwargs):\n move(h, key, *args, **kwargs)\n\n for n in g.nodes:\n g.nodes[n]['pos'] = h.nodes[n]['pos']\n\n\ndef move_inverse(g, key, weight, *args, **kwargs):\n h = g.copy()\n for n, m in h.edges:\n if weight in g.edges[n, m]:\n h.edges[n, m][weight] = 1 / g.edges[n, m][weight]\n\n move_copy(g, h, 
key, *args, weight=weight, **kwargs)\n\n\ndef move_negative(g, key, weight, *args, **kwargs):\n h = g.copy()\n for n, m in h.edges:\n if weight in g.edges[n, m]:\n h.edges[n, m][weight] = -g.edges[n, m][weight]\n\n move_copy(g, h, key, *args, weight=weight, **kwargs)\n\n\ndef move_complement(g, key, *args, **kwargs):\n h = nx.complement(g)\n for n in h.nodes:\n h.nodes[n].update(g.nodes[n])\n\n move_copy(g, h, key, *args, **kwargs)\n","sub_path":"freeman/moving.py","file_name":"moving.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"324486522","text":"from pprint import pprint\n\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n\ndef get_gens():\n scope = ['https://spreadsheets.google.com/feeds']\n credentials = ServiceAccountCredentials.from_json_keyfile_name('secrets/credentials.json', scope)\n gc = gspread.authorize(credentials)\n\n wks = gc.open_by_key(\"1U8AcBr2ri3cNwYlUGFGB0aZuwZmmzR0eB5uJjD4_szE\").sheet1\n values = wks.get_all_values()\n \n gens = {}\n section = None\n headers = []\n for row in values:\n first_word = row[0]\n if not first_word.startswith(\"#\"):\n if section is None:\n if first_word != \"\":\n section = first_word.lower()\n gens[section] = {\n \"commands\": [word.strip() for word in section.split(\"/\")],\n \"values\": []\n }\n else:\n non_empty_fields = []\n for col in row:\n if col != \"\":\n non_empty_fields.append(col)\n else:\n break\n if not non_empty_fields:\n section = None\n headers = []\n elif not headers:\n headers = [col.lower() for col in non_empty_fields]\n for col in headers:\n gens[section][\"values\"].append([])\n else:\n for index, header in enumerate(headers):\n gens[section][\"values\"][index].append(row[index])\n return gens\n\n\nif __name__ == \"__main__\":\n pprint(get_gens())\n","sub_path":"gens.py","file_name":"gens.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"284561384","text":"# Piggetty.py \n# Cynthia Carter\n# IWU Fall 2015 CS 125\n# A program that converts the text to pig latin\n\n# Define a function called piggy(string) that returns a string\ndef piggy(word):\n\tn = 0\n\tvowels = \"aeiouAEIOU\"\n\tendword = \"\"\n\tfor letter in word:\n\t\t# Check if letter is a vowel\n\t\tif letter in vowels:\n\t\t\t# True? We are done\n\t\t\tif n == 0:\n\t\t\t\tpig = word + \"yay\"\n\t\t\tbreak\n\t\telse:\n\t\t\tendword += word[n]\n\t\t\tpig = word[n+1:] + endword + \"ay\"\n\t\tn += 1\n\t\n\treturn pig\n\n# Open the file *getty.txt* for reading. \ngettyfile = open (\"getty.txt\",\"r\")\n\n# Open a new file *piggy.txt* for writing. \nPmy_file = open (\"piggy.txt\",\"w\")\n\n# Read the getty.txt file into a string. \ngettystr = gettyfile.read()\n\n# Strip out bad characters (, - .). \ngettystr = gettystr.replace ('.', '')\ngettystr = gettystr.replace (',', '')\ngettystr = gettystr.replace ('-','')\n\n# Split the string into a list of words. \ngettylist = gettystr.split()\n\n# Create a new empty string. \nnewstr = \"\"\n\n# Loop through the list of words, pigifying each one. \nfor word in gettylist:\n\tnewword = piggy(word)\n\tnewstr = newstr + newword + \" \"\n\t\noutfile = open(\"piggy.txt\",\"w\")\nprint (newstr, gettylist, file=outfile)\noutfile.close()\ngettyfile.close()\n\n# Write the new string to piggy.txt. \n\n# close the files.","sub_path":"piggetty.py","file_name":"piggetty.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"382840956","text":"\"\"\"Defining input class.\"\"\"\nimport sys\nimport termios\nimport tty\nimport signal\nfrom config import TIMEOUT\nfrom time import time\n\n\nclass Get:\n \"\"\"Class to get input.\"\"\"\n\n def __call__(self):\n \"\"\"Defining __call__.\"\"\"\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n old_settings[3] = old_settings[3] & ~termios.ECHO\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n\nclass AlarmException(Exception):\n \"\"\"Handling alarm exception.\"\"\"\n pass\n\n\ndef alarmHandler(signum, frame):\n \"\"\"Handling timeouts.\"\"\"\n raise AlarmException\n\n\ndef input_to(getch, timeout=0.1):\n \"\"\"Taking input from user.\"\"\"\n signal.signal(signal.SIGALRM, alarmHandler)\n signal.setitimer(signal.ITIMER_REAL, timeout)\n try:\n text = getch()\n signal.alarm(0)\n return text\n except AlarmException:\n signal.signal(signal.SIGALRM, signal.SIG_IGN)\n return None\n\n\ndef get_input():\n inputs = []\n begin = time()\n time_remaining = TIMEOUT - (time() - begin)\n while time_remaining > 0:\n inp = input_to(Get().__call__, time_remaining)\n if inp is not None:\n inputs.append(inp)\n time_remaining = TIMEOUT - (time() - begin)\n return inputs\n","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"153888248","text":"class Solution:\n def uniqueOccurrences(self, arr: List[int]) -> bool:\n a = Counter(arr)\n ans = list(a.values())\n ans.sort()\n ans = ans[::-1]\n for i in range(len(ans)-1):\n if ans[i] <= ans[i+1]:\n return 0\n return 1","sub_path":"1207-unique-number-of-occurrences/1207-unique-number-of-occurrences.py","file_name":"1207-unique-number-of-occurrences.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"548160884","text":"import json, argparse\nimport os, csv, re\nfrom urllib.parse import urlparse\nfrom hashlib import md5\nimport urllib.request\nfrom lib import baseclass\n\nclass ImageDownloader(baseclass.BaseClass):\n image_list_file = None\n image_list = []\n previously_downloaded_files = []\n skip_download_if_exists = True\n image_default_extension = None\n image_url_to_name = None\n subfolder_max_files = 1000\n _subfolder_index = 0\n _current_subdir = None\n override_download_folder = None\n override_output_file = None\n classes = []\n\n def __init__(self):\n super().__init__()\n\n def set_image_url_to_name(self, image_url_to_name):\n self.image_url_to_name = image_url_to_name\n\n def set_image_list_file(self, image_list_file=None):\n if image_list_file is not None:\n self.image_list_file = image_list_file\n else:\n self.image_list_file = os.path.join(self.project_root, 'lists', 'images.csv')\n\n if not os.path.isfile(self.image_list_file):\n raise FileNotFoundError(\"image list file not found: {}\".format(self.image_list_file))\n\n self.logger.info(\"image list file: {}\".format(self.image_list_file))\n\n def set_subfolder_max_files(self, max_files):\n if isinstance(max_files, int) and 100 < int < 100000:\n self.subfolder_max_files = max_files\n else:\n raise ValueError(\"max subfolder files must be int between 100 and 100000 ({})\".format(max_files))\n\n def set_skip_download_if_exists(self,state):\n if isinstance(state, bool):\n self.skip_download_if_exists = state\n self.logger.info(\"set skip download if exists: {}\".format(self.skip_download_if_exists))\n else:\n raise ValueError(\"skip download if exists must be a boolean ({})\".format(state))\n\n def set_override_download_folder(self, folder):\n if not os.path.exists(folder):\n raise FileNotFoundError(\"folder doesn't exist: {}\".format(folder))\n\n if not os.access(folder, os.W_OK):\n raise FileNotFoundError(\"folder not writeable: {}\".format(folder))\n\n self.override_download_folder = 
folder\n self.logger.info(\"using manual override image folder: {}\".format(self.override_download_folder))\n\n def set_override_output_file(self, override_output_file):\n self.override_output_file = override_output_file\n self.logger.info(\"using override output file: {}\".format(self.override_output_file))\n\n def read_image_list(self):\n with open(self.image_list_file, 'r', encoding='utf-8-sig') as file:\n c = csv.reader(file)\n for row in c:\n self.image_list.append(row)\n\n def _get_previously_downloaded_files(self):\n if not self.override_download_folder is None:\n pass\n # TODO\n # self.previously_downloaded_files = [{\"file\": file, \"path\": os.path.join(self.previously_downloaded_files,file)} \\\n # for file in os.listdir(self.override_download_folder) if os.path.isfile(os.path.join(self.override_download_folder, file))]\n else:\n for subdir, dirs, files in os.walk(self.image_root_path):\n for file in files:\n self.previously_downloaded_files.append( \\\n {\"file\": file, \"path\": os.path.join(subdir.replace(self.image_root_path,\"\"),file).lstrip(\"/\")})\n\n def _count_class(self,this_class):\n for idx, item in enumerate(self.classes):\n if item[\"class\"]==this_class:\n item[\"count\"] = item[\"count\"]+1\n self.classes[idx] = item\n return\n\n self.classes.append({\"class\":this_class,\"count\":1})\n\n def download_images(self):\n if self.skip_download_if_exists:\n self._get_previously_downloaded_files()\n\n downloaded = 0\n failed = 0\n skipped = 0\n\n if not self.override_output_file is None:\n outfile = self.override_output_file\n else:\n outfile = self.downloaded_images_file\n\n with open(outfile, 'w') as csvfile:\n c = csv.writer(csvfile, delimiter=',', quotechar='\"')\n\n for item in self.image_list:\n url = item[1]\n p = urlparse(url)\n this_class = item[0]\n\n if self.override_download_folder is None:\n self._set_download_subdir()\n\n if self.image_url_to_name is not None:\n filename = re.sub(self.image_url_to_name['expression'], 
self.image_url_to_name['replace'], p.path)\n filename += self.image_url_to_name['postfix']\n else:\n filename = os.path.basename(p.path)\n\n # print(item)\n # print(url)\n # print(p)\n # print(filename)\n\n if self.skip_download_if_exists:\n existing_images = [x for x in self.previously_downloaded_files if x[\"file\"] == filename]\n skip_download = len(existing_images) > 0\n else:\n skip_download = False\n\n if skip_download:\n c.writerow([this_class, url, existing_images[0][\"path\"]])\n self.logger.info(\"skipped (file exists): {}\".format(url))\n skipped += 1\n\n self._count_class(this_class)\n\n else:\n\n if not self.override_download_folder is None:\n file_to_save = os.path.join(self.override_download_folder, filename)\n subdir_to_write = self.override_download_folder\n # print(self.override_download_folder)\n # print(filename)\n # print(file_to_save)\n\n else:\n file_to_save = os.path.join(self.image_root_path, self._current_subdir,filename)\n subdir_to_write = self._current_subdir\n\n try:\n urllib.request.urlretrieve(url, file_to_save)\n c.writerow([this_class, url, os.path.join(subdir_to_write, filename)])\n self.logger.info(\"downloaded {} to {} \".format(url, file_to_save))\n downloaded += 1\n self._count_class(this_class)\n\n except Exception as e:\n self.logger.error(\"could not download {}: {}\".format(url, e))\n failed += 1\n\n\n\n self.logger.info(\"downloaded {}, skipped {}, failed {}\".format(downloaded, skipped, failed))\n\n def _set_download_subdir(self):\n self._current_subdir = md5(str(self._subfolder_index).encode('utf-8')).hexdigest()[:10]\n subdir_path = os.path.join(self.image_root_path, self._current_subdir)\n if not os.path.isdir(subdir_path):\n os.mkdir(subdir_path)\n\n n = len([name for name in os.listdir(subdir_path) if os.path.isfile(os.path.join(subdir_path, name))])\n if n >= self.subfolder_max_files:\n while True:\n self._subfolder_index += 1\n self._current_subdir = 
md5(str(self._subfolder_index).encode('utf-8')).hexdigest()[:10]\n if not os.path.isdir(os.path.join(self.image_root_path, self._current_subdir)):\n os.mkdir(os.path.join(self.image_root_path, self._current_subdir))\n break\n\n def write_classes(self):\n # print(self.classes)\n # print(self.class_list_file_csv)\n with open(self.class_list_file_csv, 'w') as csvfile:\n c = csv.writer(csvfile, delimiter=',', quotechar='\"')\n for this_class in self.classes:\n c.writerow([this_class[\"class\"],this_class[\"count\"]])\n self.logger.info(\"wrote {}\".format(self.class_list_file_csv))\n\n\nif __name__ == \"__main__\":\n\n downloader = ImageDownloader()\n downloader.set_project(os.environ)\n\n if 'IMAGE_URL_TO_NAME' in os.environ:\n downloader.set_image_url_to_name(json.loads(os.environ['IMAGE_URL_TO_NAME']))\n\n if 'IMAGE_LIST_FILE' in os.environ:\n downloader.set_image_list_file(os.environ['IMAGE_LIST_FILE'])\n else:\n downloader.set_image_list_file()\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--override_download_folder\",type=str)\n parser.add_argument(\"--override_output_file\",type=str)\n parser.add_argument(\"--override_image_list\",type=str)\n args = parser.parse_args()\n\n if args.override_download_folder:\n downloader.set_override_download_folder(args.override_download_folder)\n\n if args.override_output_file:\n downloader.set_override_output_file(args.override_output_file)\n\n if args.override_image_list:\n downloader.set_image_list_file(args.override_image_list)\n\n downloader.read_image_list()\n downloader.download_images()\n downloader.write_classes()\n","sub_path":"code/image_downloader.py","file_name":"image_downloader.py","file_ext":"py","file_size_in_byte":8951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"110844572","text":"# -*- coding: utf-8 -*-\n# @File: test_demo.py\n# @Author: HanWenLu\n# @E-mail: wenlupay@163.com\n# @Time: 2020/10/26 11:08\n\nimport os\n\nimport pytest\nimport allure\n\nfrom pageobj.baidu import BaiDu\nfrom public.yaml_data import caseda\nfrom public.imgproce import Diff\nfrom config.ptahconf import DATA_FILE\nfrom public.logs import logger\n\nyamlfile = os.path.basename(__file__).replace('py', 'yaml') # 获取当前目运行文件\n\n\nclass TestBaiDu:\n\n @allure.feature(\"百度搜索\") # 测试用例特性��主要功能模块)\n @allure.story(\"所搜验证\") # 模块说明\n @allure.title(\"输入内容并搜索\") # 用例标题\n @allure.description('输入多参数搜索') # 用例描述\n @pytest.mark.testbaidu # 用列标记\n @pytest.mark.parametrize('content', caseda(yamlfile, 'test_baidu_search')) # 测试数据\n def test_baidu_search(self, webDriver,content):\n baidu=BaiDu(webDriver)\n\n with allure.step('输入搜索内容'):\n\n baidu.input_search_content(content)\n\n\n with allure.step('点击搜索'):\n\n baidu.click_search_button()\n\n baidu.sleep(3)\n\n # 对比查询后图片结果\n search_python=os.path.join(DATA_FILE,'python.png')\n search_relust=baidu.screen_shot('search')\n df=Diff.dHash(search_python,search_relust)\n assert df < 10\n\n\n\n","sub_path":"case/baid/test_baidu.py","file_name":"test_baidu.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"284283850","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nsortTargets.py\n\nCreated by José Sánchez-Gallego on 6 Nov 2014.\nLicensed under a 3-clause BSD license.\n\nRevision history:\n 6 Nov 2014 J. Sánchez-Gallego\n Initial version\n\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import Ellipse\n\n\ndef simpleMesh(centre, radius, width=None, **kwargs):\n \"\"\"Creates a simple mesh for the field. Returns the centres of the cells\"\"\"\n\n if width is None:\n # If no width is defined, uses an adaptive value based on the radius.\n width = 0.2 * radius / 1.5\n\n ra0 = centre[0] - radius / np.cos(np.deg2rad(centre[1]))\n ra1 = centre[0] + radius / np.cos(np.deg2rad(centre[1]))\n dec0 = centre[1] - radius\n dec1 = centre[1] + radius\n\n mRA, mDec = np.meshgrid(np.arange(ra0, ra1 + width, width),\n np.arange(dec0, dec1 + width, width))\n\n coords = np.array([mRA.flatten(), mDec.flatten()]).T\n distanceToCentre = calculateSeparation(coords, centre)\n\n return coords[distanceToCentre <= radius]\n\n\ndef plotTargets(targets, centre, radius, plotGrid=None, plotAllTargets=None,\n filename=None, **kwargs):\n \"\"\"Creates a simple plot with the targets.\"\"\"\n\n raCen, decCen = centre\n\n if filename is None:\n filename = 'sortedTargets.pdf'\n\n plt.clf()\n plt.cla()\n\n fig, ax = plt.subplots()\n fig.set_size_inches(8, 8)\n\n plate = Ellipse((raCen, decCen),\n height=2 * radius,\n width=2 * radius / np.cos(decCen * np.pi / 180.),\n linewidth=2,\n edgecolor='k', facecolor='None')\n ax.add_patch(plate)\n\n ax.scatter(centre[0], centre[1], marker='o', s=25, color='k',\n edgecolor='k')\n\n if plotGrid is not None:\n ax.scatter(plotGrid[:, 0], plotGrid[:, 1], marker='x',\n color='0.5', s=10.)\n\n if plotAllTargets is not None:\n ax.scatter(plotAllTargets[:, 0], plotAllTargets[:, 1],\n marker='x', s=20, color='0.5')\n\n 
ax.scatter(targets[:, 0], targets[:, 1], marker='x', s=20, color='r')\n\n ax.set_xlim(raCen + 1.05 * radius / np.cos(decCen * np.pi / 180.),\n raCen - 1.05 * radius / np.cos(decCen * np.pi / 180.))\n ax.set_ylim(decCen - 1.05 * radius, decCen + 1.05 * radius)\n\n ax.set_xlabel(r'$\\alpha_{2000}$')\n ax.set_ylabel(r'$\\delta_{2000}$')\n\n plt.savefig(filename)\n\n plt.close('all')\n\n return\n\n\ndef calculateSeparation(coord1, coord2):\n \"\"\"Calculates the separation in the sky between a list of targets. This\n function replaces `astropy.coordinates.SkyCoord.separation`, which is\n still very slow for a large number of points.\"\"\"\n\n coord1 = np.atleast_2d(coord1)\n coord2 = np.atleast_2d(coord2)\n\n assert coord1.shape[0] == 1 or coord2.shape[0] == 1\n\n if coord1.shape[0] > 1:\n coord2, coord1 = coord1, coord2\n\n return np.sqrt(((coord1[:, 0] - coord2[:, 0]) *\n np.cos(np.deg2rad(coord1[:, 1])))**2 +\n (coord1[:, 1] - coord2[:, 1])**2)\n\n\ndef getTargetIdx(coords, assigned, grid, centre, radius):\n \"\"\"Returns the index of the target to assign, based on what has already\n been assigned.\"\"\"\n\n assignedCoords = coords[assigned]\n eField = np.zeros(len(grid))\n\n for ii in range(len(assignedCoords)):\n distance = calculateSeparation(assignedCoords[ii], grid)\n eField += 1. / distance\n\n eField += .1 / calculateSeparation(centre, grid)\n\n for alpha in range(0, 360, 10):\n ra = centre[0] + (radius * np.cos(np.deg2rad(alpha)) /\n np.cos(np.deg2rad(centre[1])))\n dec = centre[1] + radius * np.sin(np.deg2rad(alpha))\n eField += .5 / calculateSeparation(np.array([(ra, dec)]), grid)\n\n return np.argmin(eField)\n\n\ndef sortTargets(targets, centre=None, radius=1.49,\n limitTo=None, plot=False, **kwargs):\n \"\"\"Sorts a list of targets, evenly distributing them in a plate.\n\n This routine takes a list of targets and distributes them as uniformly as\n in a circular field of radius `radius`. Targets are decollided against\n themselves. 
The routine uses an electrostatic approach, replacing targets\n with positive charges and calculating the electric field in a grid.\n\n Parameters\n ----------\n targets : numpy.ndarray\n A Numpy array of shape NxM where N>=1 and M>2. Each row must be a\n target. The first two columns in the array will be considered the RA\n and Dec for the targets. Any other column will be ignored.\n\n centre : tuple-like object, opional\n A tuple or array of shape 1x2 with the coordinates of the field centre.\n If None, the centre is calculated as the centre of mass of all the\n targets.\n\n radius : float, optional\n The radius of the field, in degrees\n\n limitTo : int or None, optional\n If set to an integer, returns only that number of sorted targets.\n\n plot : bool, optional\n If True, a plot is generated. See `plotTargets` for more information\n on which kwargs parameters can be passed to the function.\n\n Returns\n -------\n result : tubple\n A tuple containing, first, the same input `targetList` but reordered\n according to the sorting algorithm. If `limitTo` is not None,\n the length of the list is the same as `limitTo`. 
The second element is\n index order referred to the original array.\n\n \"\"\"\n\n targets = np.atleast_2d(targets)\n\n if limitTo is None:\n limitTo = targets.shape[0]\n\n if centre is None:\n centre = np.mean(targets, axis=0)\n\n assert limitTo <= targets.shape[0], ('limitTo={0} but only {1} targets'\n .format(limitTo, targets.shape[0]))\n\n grid = simpleMesh(centre, radius, **kwargs)\n\n assigned = []\n\n while len(assigned) < limitTo:\n\n newGridIdx = getTargetIdx(targets, assigned, grid, centre, radius)\n\n distancesToGrid = calculateSeparation(targets, grid[newGridIdx])\n for ii in np.argsort(distancesToGrid):\n if ii not in assigned:\n assigned.append(ii)\n break\n\n sortedTargets = np.array([targets[ii] for ii in assigned])\n\n if plot:\n plotTargets(sortedTargets, centre, radius, plotAllTargets=targets,\n plotGrid=None, **kwargs)\n\n return sortedTargets, assigned\n","sub_path":"python/Gohan/utils/sortTargets.py","file_name":"sortTargets.py","file_ext":"py","file_size_in_byte":6507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"375970185","text":"import torch\nimport torchvision\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\nimport ml_tools as ml\n\n#--------------------------------------------\n# Access and define the FashionMNIST dataset\n#--------------------------------------------\n\ndef load_FashionMNIST_data(verbose=1):\n '''\n Define the dataset we are going to use, included data transformers.\n Download the data if we haven't already.\n '''\n if verbose > 0:\n print('Loading FashionMNIST dataset...')\n train_dataset = torchvision.datasets.FashionMNIST(\n root='.',\n train=True,\n transform=torchvision.transforms.ToTensor(), # normalizes values to [0,1], as well as type/shape conversion\n download=True\n )\n test_dataset = torchvision.datasets.FashionMNIST(\n root='.',\n train=False,\n transform=torchvision.transforms.ToTensor(), # normalizes values to [0,1], as well as type/shape conversion\n download=True\n )\n # Define the mapping of target labels (0-9) onto more descriptive strings for MNIST Fashion\n label_dict = {0: 'T-shirt/top', 1: 'Trouser', 2: 'Pullover', 3: 'Dress', 4: 'Coat',\n 5: 'Sandal', 6: 'Shirt', 7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot'}\n # Give use back some information about the dataset we've just loaded\n if verbose > 1:\n print(' Training data shape: {}'.format(train_dataset.data.shape))\n print(' Test data shape: {}'.format(test_dataset.data.shape))\n print(' Data min,max values: {},{}'.format(train_dataset.data.min(), train_dataset.data.max()))\n label_str = ', '.join(['{}:{}'.format(k,v) for k,v in label_dict.items()])\n print(' Data labels ({} categories): {}'.format(len(label_dict), label_str))\n return train_dataset, test_dataset, label_dict\n\n\n#-------------------------------------------------------\n# Define the data loaders we will use to train a model\n#------------------------------------------------------\n\ndef get_dataloaders(train_dataset, test_dataset, batch_size=256):\n '''\n Create our data loaders. 
Use batches for batch gradient descent so we aren't \n trying to load all training images into memory at the same time!\n '''\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True)\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=False)\n return train_loader, test_loader\n \n\n#--------------------------------------------\n# Make a plot of some example training data\n#--------------------------------------------\n\ndef display_example_data(train_dataset, label_dict, \n filename=None):\n '''\n Plot one example from each of the training data categories.\n Use this to confirm label assignments and to see what we are up against!\n '''\n fig = plt.figure(figsize=(16,8))\n axs = fig.subplots(2,5)\n for i,l in enumerate(list(label_dict.keys())):\n ind = list(train_dataset.targets).index(l) # just get the first example for each label\n im = train_dataset.data[ind]\n # Show an example image\n axs[i//5,i%5].imshow(im.reshape([28,28]),cmap='gray')\n axs[i//5,i%5].set_title('Label={}: {}'.format(l,label_dict[l]))\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n return\n\n\n#-------------------------------------------------\n# Make a plot of examples with their predictions\n#-------------------------------------------------\n\ndef display_pred_examples(example_indices, test_dataset, \n test_targets, test_predictions,\n label_dict, \n filename=None):\n '''\n Given a set of indices for the objects we want to see examples of \n (e.g., \"correct\" predictions or \"wrong\" predictions), plot a \n set of images for inspection.\n '''\n fig = plt.figure(figsize=(15,15))\n axs = fig.subplots(4,4)\n for i in range(16):\n idx = random.sample(list(example_indices), 1)[0]\n im = test_dataset.data[idx]\n axs[i//4,i%4].imshow(im.reshape([28,28]),cmap='gray')\n axs[i//4,i%4].set_title('True={} Pred={}'.format(label_dict[test_targets[idx]],\n 
label_dict[test_predictions[idx]]),fontsize='small')\n if filename:\n plt.savefig(filename)\n else:\n plt.show()\n return\n\n","sub_path":"examples/mnist_fashion/mnist_fashion_data.py","file_name":"mnist_fashion_data.py","file_ext":"py","file_size_in_byte":4618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"169536044","text":"from Apps.PresupuestosCompartidos.models import PresupuestoCompartido, UsuariosPresupuestoCompartido, CompartidoCategorias\nfrom Apps.Promociones.models import Promociones\nfrom Apps.Usuarios.models import User\nfrom Apps.PresupuestosCompartidos.serializers import PresupuestosCompartidoSerializer, UsuariosPresupuestoCompartidoSerializer, CompartidoCategoriasSerializer\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.status import (\n HTTP_400_BAD_REQUEST,\n HTTP_404_NOT_FOUND,\n HTTP_200_OK\n)\nfrom decimal import Decimal\n\n# Create your views here.\nclass PresupuestosCompartidosViewSet(viewsets.ModelViewSet):\n queryset = PresupuestoCompartido.objects.all()\n serializer_class = PresupuestosCompartidoSerializer\n\n @action(methods=['post'], detail=False)\n def sumAportaciones(self, request):\n idPresupuesto = request.data.get(\"idPresupuesto\")\n\n\n\n montoDueño = PresupuestoCompartido.objects.get(id=idPresupuesto).monto\n aportaciones = UsuariosPresupuestoCompartido.objects.filter(presupuestoCompartido__id=idPresupuesto).values('monto')\n\n montoTotal = montoDueño\n\n for aportacion in aportaciones:\n print(aportacion['monto'])\n montoDueño+=aportacion['monto']\n\n\n # print(montoDueño)\n # print(aportaciones)\n\n presupuesto = PresupuestoCompartido.objects.get(id=idPresupuesto)\n presupuesto.monto = montoDueño\n presupuesto.save()\n\n\n\n\n return Response( status=HTTP_200_OK)\n\n\n\n @action(methods=['post'], detail=False)\n def getPresupuesto(self, request):\n codigo = request.data.get(\"codigo\")\n\n print(\"Codigo \" , codigo)\n\n try:\n presupuesto = PresupuestoCompartido.objects.get(codigo=codigo)\n print(presupuesto)\n\n dic = {}\n\n\n dic[str(presupuesto.id)] = {\"id\":str(presupuesto.id), \"monto\":str(presupuesto.monto), \"propietario\":str(presupuesto.usuarioPropietario.username)}\n\n\n return 
Response({\"Datos\":str(dic)}, status=HTTP_200_OK)\n except Exception as e:\n print(e)\n return Response({\"Error\": \"Este presupuesto no existe\"}, status=HTTP_400_BAD_REQUEST)\n\n @action(methods=['post'], detail=False)\n def getPresupuestos(self, request):\n idUser = request.data.get(\"idUser\")\n\n presupuestos = PresupuestoCompartido.objects.filter(usuarioPropietario__id=idUser)\n # presupuestos2 = UsuariosPresupuestoCompartido.objects.filter(usuario__id=idUser)\n\n dic = {}\n for presupuesto in presupuestos:\n dic[str(presupuesto.id)] = {\"id\":str(presupuesto.id),\"codigo\":str(presupuesto.codigo), \"monto\":str(presupuesto.monto), \"propietario\":str(presupuesto.usuarioPropietario.username)}\n\n # for presupuesto in presupuestos2:\n # dic[str(presupuesto.id)] = {\"id\":str(presupuesto.id),\"codigo\":str(presupuesto.codigo), \"monto\":str(presupuesto.monto), \"propietario\":str(presupuesto.usuarioPropietario.username)}\n\n print(dic)\n\n return Response({\"Datos\":str(dic)}, status=HTTP_200_OK)\n\n\n\n\n\n\n\n\nclass UsuariosPresupuestoCompartidoViewSet(viewsets.ModelViewSet):\n queryset = UsuariosPresupuestoCompartido.objects.all()\n serializer_class = UsuariosPresupuestoCompartidoSerializer\n\n @action(methods=['post'], detail=False)\n def getUsuariosCompartido(self, request):\n idPresupuesto = request.data.get(\"idPresupuesto\")\n\n presupuesto = PresupuestoCompartido.objects.get(id=idPresupuesto)\n\n usuarios = UsuariosPresupuestoCompartido.objects.filter(presupuestoCompartido=presupuesto)\n\n # print(usuarios)\n\n dic = {}\n\n for u in usuarios:\n dic[str(u.id)] = {\"id\":u.id, \"usuario\":u.usuario.username, \"monto\":str(u.monto)}\n # print(u.id)\n # print(u.usuario)\n # print(u.monto)\n\n print(dic)\n return Response({\"datos\": str(dic)}, status=HTTP_200_OK)\n\n @action(methods=['post'], detail=False)\n def getCreateUser(self, request):\n idPresupuesto = request.data.get(\"idPresupuesto\")\n idUsuario = request.data.get(\"idUsuario\")\n 
print(idPresupuesto, idUsuario)\n\n try:\n user = UsuariosPresupuestoCompartido.objects.get(presupuestoCompartido__id=idPresupuesto, usuario__id=idUsuario )\n return Response({\"detail\": \"Ya estas en este presupuesto\"}, status=HTTP_400_BAD_REQUEST)\n except Exception as e:\n # raise\n usuario = User.objects.get(id=idUsuario)\n presupuesto = PresupuestoCompartido.objects.get(id=idPresupuesto)\n user = UsuariosPresupuestoCompartido(presupuestoCompartido=presupuesto, usuario=usuario, monto=0 )\n user.save()\n print(user)\n return Response({\"detail\": str(user.id)}, status=HTTP_200_OK)\n\n @action(methods=['post'], detail=False)\n def eliminarUsuario(self, request):\n idUsuario = request.data.get(\"idUsuario\")\n print(idUsuario)\n return Response(status=HTTP_200_OK)\n\nclass CompartidoCategoriasViewSet(viewsets.ModelViewSet):\n queryset = CompartidoCategorias.objects.all()\n serializer_class = CompartidoCategoriasSerializer\n\n @action(methods=['post'], detail=False)\n def getCategorias(self, request):\n idPresupuesto = request.data.get(\"idPresupuesto\")\n categorias = CompartidoCategorias.objects.filter(presupuestoCompartido__id=idPresupuesto).values('categoria')\n\n promociones = Promociones.objects.filter(productoTienda__producto__categoria__in=categorias)\n\n dic = {}\n\n for p in promociones:\n # id = str(p.id)\n # id = '\"\"' + id + '\"\"'\n # print(p.foto.name)\n dic[str(p.id)]={\"id\":str(p.id), \"nombre\":str(p.descripcion),'foto':str(p.foto.name), 'lugar':p.productoTienda.tienda.nombre, 'vigencia':str(p.fechaVencimiento), 'categoria':p.productoTienda.producto.categoria.nombre, 'descripcion':p.descripcion, 'direccion':p.productoTienda.tienda.direccion, 'costo':str(p.costo), 'icono':str(p.productoTienda.tienda.icono)}\n # dic[str(p.id)]={\"id\":str(p.id), \"nombre\":str(p.descripcion), 'lugar':p.productoTienda.tienda.nombre, 'vigencia':str(p.fechaVencimiento), 'categoria':p.productoTienda.producto.categoria.nombre, 'descripcion':p.descripcion, 
'direccion':p.productoTienda.tienda.direccion, 'costo':str(p.costo), 'icono':str(p.productoTienda.tienda.icono)}\n return Response({\"Datos\": str(dic)}, status=HTTP_200_OK)\n","sub_path":"Back/pedaap/Apps/PresupuestosCompartidos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"316758236","text":"\n# Standard Library\nimport json\nimport logging\nimport uuid\nimport maya\n\n# Third-Party\nimport django_rq\nfrom algoliasearch_django.decorators import disable_auto_indexing\nfrom dictdiffer import diff\nfrom openpyxl import Workbook\nfrom openpyxl.writer.excel import save_virtual_workbook\n\n# Django\nfrom django.apps import apps\nfrom django.contrib.auth.models import BaseUserManager\nfrom django.core.exceptions import ValidationError\nfrom django.core.files.base import ContentFile\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core.validators import RegexValidator\nfrom django.core.validators import URLValidator\nfrom django.core.validators import validate_email\nfrom django.db import IntegrityError\nfrom django.db.models import CharField\nfrom django.db.models import F\nfrom django.db.models import Manager\nfrom django.db.models import Value\nfrom django.db.models.functions import Concat\nfrom django.forms.models import model_to_dict\nfrom django.utils.timezone import localdate\nfrom django.utils.timezone import now\n\n# First-Party\nfrom api.tasks import get_accounts\nfrom api.tasks import get_auth0\n\nlog = logging.getLogger(__name__)\n\nvalidate_url = URLValidator()\n\nvalidate_twitter = RegexValidator(\n regex=r'@([A-Za-z0-9_]+)',\n message=\"\"\"\n Must be a single Twitter handle\n in the form `@twitter_handle`.\n \"\"\",\n)\n\n\nclass AwardManager(Manager):\n def sort_tree(self):\n self.all().update(tree_sort=None)\n awards = self.order_by(\n '-status', # Actives first\n 'group__tree_sort', # Basic BHS Hierarchy\n '-kind', # Quartet, Chorus\n F('age').asc(nulls_first=True), # Null, Senior, Youth\n 'gender', #Male, mixed\n 'level', #Championship, qualifier\n 'size', # Plateau v1\n 'scope', # plateau\n 'name', # alpha\n )\n i = 0\n for award in awards:\n i += 1\n award.tree_sort = i\n award.save()\n return\n\nclass ChartManager(Manager):\n def get_report(self):\n wb = Workbook()\n ws = wb.active\n 
fieldnames = [\n 'PK',\n 'Title',\n 'Arrangers',\n 'Composers',\n 'Lyricists',\n 'Holders',\n 'Status',\n ]\n ws.append(fieldnames)\n charts = self.order_by('title', 'arrangers')\n for chart in charts:\n pk = str(chart.pk)\n title = chart.title\n arrangers = chart.arrangers\n composers = chart.composers\n lyricists = chart.lyricists\n holders = chart.holders\n status = chart.get_status_display()\n row = [\n pk,\n title,\n arrangers,\n composers,\n lyricists,\n holders,\n status,\n ]\n ws.append(row)\n file = save_virtual_workbook(wb)\n content = ContentFile(file)\n return content\n\n\nclass GridManager(Manager):\n def fill_grids(self, round, onstage, duration, num, max=10):\n venue = round.session.convention.venue\n timezone = venue.timezone.zone\n if not venue:\n return ValueError(\"Must have venue selected.\")\n maya_object = maya.when(onstage, timezone=timezone)\n while num <= max:\n onstage = maya_object.datetime()\n defaults = {\n 'onstage': onstage,\n 'venue': venue,\n }\n grid, created = round.grids.update_or_create(\n num=num,\n defaults=defaults,\n )\n num += 1\n maya_object = maya_object.add(minutes=duration)\n return\n\n\nclass GroupManager(Manager):\n def update_or_create_from_structure(self, structure):\n # Clean\n mc_pk = str(structure.id)\n raw_name = structure.name\n preferred_name = structure.preferred_name\n chorus_name = structure.chorus_name\n status = structure.status.name\n kind = structure.kind\n start_date = structure.established_date\n email = structure.email\n phone = structure.phone\n website = structure.website\n facebook = structure.facebook\n twitter = structure.twitter\n bhs_id = structure.bhs_id\n try:\n parent = str(structure.parent.id)\n except AttributeError:\n parent = None\n code = structure.chapter_code\n\n # Transform as needed\n name = raw_name.strip() if raw_name else ''\n preferred_name = \"{0} (NAME APPROVAL PENDING)\".format(preferred_name) if preferred_name else ''\n chorus_name = chorus_name.strip() if chorus_name else 
''\n kind = getattr(\n self.model.KIND,\n kind.replace(\n 'chapter', 'chorus'\n ).replace(\n 'group', 'noncomp'\n ).replace(\n 'organization', 'international'\n )\n )\n if email:\n email = email.strip().lower()\n try:\n validate_email(email)\n except ValidationError:\n email = None\n else:\n email = None\n phone = phone.strip()\n website = website.strip()\n facebook = facebook.strip()\n twitter = twitter.strip()\n mem_status = getattr(self.model.MEM_STATUS, status.replace(\"-\", \"_\"))\n status = getattr(self.model.STATUS, status, self.model.STATUS.inactive)\n code = code.strip() if code else ''\n\n # Construct the group name\n if kind == self.model.KIND.quartet:\n # If the name has not been assigned, use preferred. Otherwise, call unknown.\n if not name:\n name = preferred_name if preferred_name else 'UNKNOWN'\n elif kind == self.model.KIND.chorus:\n name = chorus_name if chorus_name else 'UNKNOWN'\n else:\n name = name if name else 'UNKNOWN'\n\n # Clean website\n try:\n validate_url(website)\n except ValidationError:\n website = \"\"\n\n # Clean facebook\n try:\n validate_url(facebook)\n except ValidationError:\n facebook = \"\"\n\n # Clean twitter\n if '@' in twitter:\n if '/' in twitter:\n twitter = twitter.rpartition(\"/\")[2]\n else:\n twitter = twitter\n else:\n if '/' in twitter:\n twitter = twitter.rpartition('/')[2]\n else:\n twitter = \"@{0}\".format(twitter)\n try:\n validate_twitter(twitter)\n except ValidationError:\n twitter = \"\"\n\n # Monkey-patch for the AIC\n if str(bhs_id) in self.model.AIC:\n status = getattr(self.model.STATUS, 'aic')\n name = self.model.AIC[str(bhs_id)]\n defaults = {\n 'mc_pk': mc_pk,\n 'name': name,\n 'status': status,\n 'kind': kind,\n 'start_date': start_date,\n 'email': email,\n 'phone': phone,\n 'website': website,\n 'facebook': facebook,\n 'twitter': twitter,\n 'code': code,\n 'bhs_id': bhs_id,\n 'mem_status': mem_status,\n }\n\n # Get or Create\n group, created = self.get_or_create(\n kind=kind,\n bhs_id=bhs_id,\n 
)\n\n if created:\n description = \"Initial\"\n # Update Values\n for key, value in defaults.items():\n setattr(group, key, value)\n # Set parent on create only\n if kind == self.model.KIND.chorus:\n kind = self.model.KIND.chapter\n name = raw_name.strip() if raw_name else 'UNKNOWN'\n parent, make = self.get_or_create(\n name=name,\n code=code,\n bhs_id=bhs_id,\n kind=kind,\n )\n else:\n if parent:\n parent = self.get(mc_pk=parent)\n group.parent = parent\n # Do not transition groups in distdivs without divs\n divs = [\n 'MAD',\n 'FWD',\n 'EVG',\n 'LOL',\n 'NED',\n 'SWD',\n ]\n if parent.code in divs:\n group.save()\n return group, created\n else:\n # set prior values\n pre = model_to_dict(\n group,\n fields=[\n 'mc_pk',\n 'name',\n 'status',\n 'kind',\n 'start_date',\n 'email',\n 'phone',\n 'website',\n 'facebook',\n 'twitter',\n 'code',\n 'bhs_id',\n 'mem_status',\n ],\n )\n # update the group to new values\n for key, value in defaults.items():\n setattr(group, key, value)\n\n post = model_to_dict(\n group,\n fields=[\n 'mc_pk',\n 'name',\n 'status',\n 'kind',\n 'start_date',\n 'email',\n 'phone',\n 'website',\n 'facebook',\n 'twitter',\n 'code',\n 'bhs_id',\n 'mem_status',\n ],\n )\n result = list(diff(pre, post))\n if result:\n description = str(result)\n else:\n return group, created\n\n # Transition as appropriate\n if status == self.model.STATUS.active:\n group.activate(\n description=description,\n )\n elif status == self.model.STATUS.inactive:\n group.deactivate(\n description=description,\n )\n elif status == self.model.STATUS.aic:\n pass\n else:\n raise ValueError('Unknown status')\n\n # Finally, save the record\n group.save()\n return group, created\n\n def sort_tree(self):\n self.all().update(tree_sort=None)\n root = self.get(kind=self.model.KIND.international)\n i = 1\n root.tree_sort = i\n with disable_auto_indexing(model=self.model):\n root.save()\n for child in root.children.order_by('kind', 'code', 'name'):\n i += 1\n child.tree_sort = i\n with 
disable_auto_indexing(model=self.model):\n child.save()\n for grandchild in child.children.filter(\n kind=self.model.KIND.division,\n ).order_by('kind', 'name'):\n i += 1\n grandchild.tree_sort = i\n with disable_auto_indexing(model=self.model):\n grandchild.save()\n orgs = self.filter(\n kind__in=[\n self.model.KIND.chapter,\n self.model.KIND.chorus,\n self.model.KIND.quartet,\n ]\n ).order_by(\n 'kind',\n 'name',\n )\n for org in orgs:\n i += 1\n org.tree_sort = i\n with disable_auto_indexing(model=self.model):\n org.save()\n return\n\n def denormalize(self, cursor=None):\n groups = self.filter(status=self.model.STATUS.active)\n if cursor:\n groups = groups.filter(\n modified__gte=cursor,\n )\n for group in groups:\n group.denormalize()\n with disable_auto_indexing(model=self.model):\n group.save()\n return\n\n def update_seniors(self):\n quartets = self.filter(\n kind=self.model.KIND.quartet,\n status__gt=0,\n mc_pk__isnull=False,\n )\n\n for quartet in quartets:\n prior = quartet.is_senior\n is_senior = quartet.get_is_senior()\n if prior != is_senior:\n quartet.is_senior = is_senior\n with disable_auto_indexing(model=self.model):\n quartet.save()\n return\n\n def get_quartets(self):\n wb = Workbook()\n ws = wb.active\n fieldnames = [\n 'PK',\n 'Name',\n 'Kind',\n 'Organization',\n 'District',\n 'Division',\n 'Chapter',\n 'Senior?',\n 'BHS ID',\n 'Code',\n 'Status',\n ]\n ws.append(fieldnames)\n groups = self.filter(\n status=self.model.STATUS.active,\n kind=self.model.KIND.quartet,\n ).order_by('name')\n for group in groups:\n pk = str(group.pk)\n name = group.name\n kind = group.get_kind_display()\n organization = group.international\n district = group.district\n division = group.division\n chapter = group.chapter\n is_senior = group.is_senior\n bhs_id = group.bhs_id\n code = group.code\n status = group.get_status_display()\n row = [\n pk,\n name,\n kind,\n organization,\n district,\n division,\n chapter,\n is_senior,\n bhs_id,\n code,\n status,\n ]\n 
ws.append(row)\n file = save_virtual_workbook(wb)\n content = ContentFile(file)\n return content\n\n\nclass MemberManager(Manager):\n def update_or_create_from_join(self, join):\n # Clean\n mc_pk = str(join.id)\n structure = str(join.structure.id)\n human = str(join.subscription.human.id)\n start_date = join.established_date\n end_date = join.inactive_date\n part = join.vocal_part\n # inactive_date = join.inactive_date\n # inactive_reason = join.inactive_reason\n # sub_status = join.subscription.status\n # mem_code = join.membership.code\n # mem_status = join.membership.status.name\n\n # Ignore rows without approval flow\n if not join.paid:\n return\n\n # Set status\n if not end_date:\n status = self.model.STATUS.active\n elif end_date > localdate():\n status = self.model.STATUS.active\n else:\n status = self.model.STATUS.inactive\n\n part = getattr(\n self.model.PART,\n part.strip().lower() if part else '',\n None,\n )\n\n # inactive_reason = getattr(\n # self.model.INACTIVE_REASON,\n # inactive_reason.strip().replace(\"-\", \"_\").replace(\" \", \"\") if inactive_reason else '',\n # None,\n # )\n\n # mem_code = getattr(\n # self.model.MEM_CODE,\n # mem_code if mem_code else '',\n # None,\n # )\n\n # mem_status = getattr(\n # self.model.MEM_STATUS,\n # mem_status.strip().replace(\"-\", \"_\") if mem_status else '',\n # None,\n # )\n\n # Get the related fields\n Group = apps.get_model('api.group')\n group = Group.objects.get(\n mc_pk=structure,\n )\n Person = apps.get_model('api.person')\n try:\n person = Person.objects.get(\n mc_pk=human,\n )\n except Person.DoesNotExist:\n Human = apps.get_model('bhs.human')\n human = Human.objects.get(id=human)\n person, created = Person.objects.update_or_create_from_human(human)\n\n defaults = {\n 'mc_pk': mc_pk,\n 'status': status,\n 'start_date': start_date,\n 'end_date': end_date,\n 'part': part,\n }\n\n # get or create\n member, created = self.get_or_create(\n person=person,\n group=group,\n )\n\n if created:\n description 
= \"Initial\"\n # update the group to new values\n for key, value in defaults.items():\n setattr(member, key, value)\n\n else:\n pre = model_to_dict(\n member,\n fields=[\n 'mc_pk',\n 'status',\n 'start_date',\n 'end_date',\n 'part',\n ],\n )\n # update the group to new values\n for key, value in defaults.items():\n setattr(member, key, value)\n post = model_to_dict(\n member,\n fields=[\n 'mc_pk',\n 'status',\n 'start_date',\n 'end_date',\n 'part',\n ],\n )\n result = list(diff(pre, post))\n if result:\n description = str(result)\n else:\n return member, created\n\n # Transition as appropriate\n if status == self.model.STATUS.active:\n member.activate(\n description=description,\n )\n elif status == self.model.STATUS.inactive:\n member.deactivate(\n description=description,\n )\n else:\n raise ValueError('Unknown status')\n # Finally, save the record.\n member.save()\n return member, created\n\nclass OfficerManager(Manager):\n def update_or_create_from_role(self, role):\n # Clean\n mc_pk = str(role.id)\n office = role.name\n group = str(role.structure.id)\n person = str(role.human.id)\n start_date = role.start_date\n end_date = role.end_date\n\n # Set Variables\n today = now().date()\n if end_date:\n if end_date < today:\n status = self.model.STATUS.inactive\n else:\n status = self.model.STATUS.active\n else:\n status = self.model.STATUS.active\n\n # Get related fields\n Group = apps.get_model('api.group')\n group = Group.objects.get(mc_pk=group)\n Person = apps.get_model('api.person')\n person = Person.objects.get(mc_pk=person)\n Office = apps.get_model('api.office')\n office = Office.objects.get(name=office)\n\n defaults = {\n 'mc_pk': mc_pk,\n 'status': status,\n 'start_date': start_date,\n 'end_date': end_date,\n }\n\n # get or create\n officer, created = self.get_or_create(\n person=person,\n group=group,\n office=office,\n )\n\n if created:\n description = \"Initial\"\n else:\n pre = model_to_dict(\n officer,\n fields=[\n 'mc_pk',\n 'status',\n 
'start_date',\n 'end_date',\n ],\n )\n # update the group to new values\n for key, value in defaults.items():\n setattr(officer, key, value)\n post = model_to_dict(\n officer,\n fields=[\n 'mc_pk',\n 'status',\n 'start_date',\n 'end_date',\n ],\n )\n result = list(diff(pre, post))\n if result:\n description = str(result)\n else:\n return officer, created\n\n # Transition as appropriate\n if status == self.model.STATUS.active:\n officer.activate(\n description=description,\n )\n elif status == self.model.STATUS.inactive:\n officer.deactivate(\n description=description,\n )\n else:\n raise ValueError('Unknown status')\n # Finally, save the record. Break link if an overwrite to MC\n officer.save()\n return officer, created\n\nclass PersonManager(Manager):\n def update_or_create_from_human(self, human):\n # Clean\n mc_pk = str(human.id)\n first_name = human.first_name\n middle_name = human.middle_name\n last_name = human.last_name\n nick_name = human.nick_name\n email = human.email\n birth_date = human.birth_date\n phone = human.phone\n cell_phone = human.cell_phone\n work_phone = human.work_phone\n bhs_id = human.bhs_id\n gender = human.sex\n part = human.primary_voice_part\n is_deceased = human.is_deceased\n\n # Same logic regardless of inbound form\n first_name = first_name.strip()\n try:\n middle_name = middle_name.strip()\n except AttributeError:\n middle_name = \"\"\n last_name = last_name.strip()\n try:\n nick_name = nick_name.replace(\"'\", \"\").replace('\"', '').replace(\"(\", \"\").replace(\")\", \"\").strip()\n except AttributeError:\n nick_name = \"\"\n if nick_name == first_name:\n nick_name = \"\"\n if email:\n email = email.strip().lower()\n try:\n validate_email(email)\n except ValidationError:\n email = None\n else:\n email = None\n if not phone:\n phone = ''\n if not cell_phone:\n cell_phone = ''\n if not work_phone:\n work_phone = ''\n if gender:\n gender = getattr(self.model.GENDER, gender.casefold(), None)\n else:\n gender = None\n if part:\n part 
= getattr(self.model.PART, part.casefold(), None)\n else:\n part = None\n\n is_deceased = bool(is_deceased)\n\n # Status check?\n # if is_deceased:\n # status = self.model.STATUS.active\n # else:\n # status = self.model.STATUS.inactive\n\n defaults = {\n 'mc_pk': mc_pk,\n 'first_name': first_name,\n 'middle_name': middle_name,\n 'last_name': last_name,\n 'nick_name': nick_name,\n 'email': email,\n 'birth_date': birth_date,\n 'phone': phone,\n 'cell_phone': cell_phone,\n 'work_phone': work_phone,\n 'bhs_id': bhs_id,\n 'gender': gender,\n 'part': part,\n 'is_deceased': is_deceased,\n }\n # Get or create\n try:\n person = self.get(\n mc_pk=mc_pk,\n )\n created = False\n except self.model.DoesNotExist:\n try:\n person = self.create(\n **defaults,\n )\n except IntegrityError as e:\n # Need to delete old offending record\n if \"api_person_bhs_id_key\" in str(e.args):\n old = self.get(\n bhs_id=bhs_id,\n )\n old.delete()\n person = self.create(\n **defaults,\n )\n else:\n defaults['mc_pk'] = mc_pk\n defaults.pop('bhs_id', None)\n try:\n person = self.create(\n **defaults,\n )\n except IntegrityError:\n defaults['mc_pk'] = mc_pk\n defaults['bhs_id'] = bhs_id\n defaults.pop('email', None)\n person = self.create(\n **defaults,\n )\n created = True\n\n if created:\n description = \"Initial\"\n # Update Values\n for key, value in defaults.items():\n setattr(person, key, value)\n else:\n # set prior values\n pre = model_to_dict(\n person,\n fields=[\n 'mc_pk',\n 'first_name',\n 'middle_name',\n 'last_name',\n 'nick_name',\n 'email',\n 'birth_date',\n 'phone',\n 'cell_phone',\n 'work_phone',\n 'bhs_id',\n 'gender',\n 'part',\n 'is_deceased',\n ],\n )\n for key, value in defaults.items():\n setattr(person, key, value)\n\n post = model_to_dict(\n person,\n fields=[\n 'mc_pk',\n 'first_name',\n 'middle_name',\n 'last_name',\n 'nick_name',\n 'email',\n 'birth_date',\n 'phone',\n 'cell_phone',\n 'work_phone',\n 'bhs_id',\n 'gender',\n 'part',\n 'is_deceased',\n ],\n )\n result = 
list(diff(pre, post))\n if result:\n description = str(result)\n else:\n return person, created\n\n # Transition as appropriate\n if person.status == person.STATUS.active:\n person.activate(\n description=description,\n )\n elif person.status == person.STATUS.inactive:\n person.deactivate(\n description=description,\n )\n else:\n pass\n # Finally, save the record\n person.save()\n return person, created\n\nclass UserManager(BaseUserManager):\n def update_or_create_from_subscription(self, subscription):\n # Clean\n mc_pk = str(subscription.id)\n human = subscription.human\n current_through = subscription.current_through\n\n status = getattr(\n self.model.STATUS,\n subscription.status,\n self.model.STATUS.inactive,\n )\n\n Person = apps.get_model('api.person')\n person, created = Person.objects.update_or_create_from_human(human)\n name = person.common_name\n email = person.email\n bhs_id = person.bhs_id\n if not email:\n return\n defaults = {\n 'mc_pk': mc_pk,\n 'status': status,\n 'name': name,\n 'email': email,\n 'bhs_id': bhs_id,\n 'current_through': current_through,\n 'person': person,\n }\n\n # Get or create\n try:\n user = self.get(\n person=person,\n )\n created = False\n except self.model.DoesNotExist:\n try:\n user = self.create_user(\n **defaults,\n )\n except IntegrityError as e:\n # Need to delete old offending record\n if \"api_user_mc_pk_key\" in str(e.args):\n old = self.get(\n mc_pk=mc_pk,\n )\n old.delete()\n user = self.create_user(\n **defaults,\n )\n else:\n raise(e)\n created = True\n\n if created:\n description = \"Initial\"\n else:\n pre = model_to_dict(\n user,\n fields=[\n 'mc_pk',\n 'status',\n 'name',\n 'email',\n 'bhs_id',\n 'current_through',\n 'person',\n ],\n )\n # update the person to new values\n for key, value in defaults.items():\n setattr(user, key, value)\n post = model_to_dict(\n user,\n fields=[\n 'mc_pk',\n 'status',\n 'name',\n 'email',\n 'bhs_id',\n 'current_through',\n 'person',\n ],\n )\n result = list(diff(pre, post))\n if 
result:\n description = str(result)\n else:\n return user, created\n\n if status == self.model.STATUS.active:\n user.activate(\n description=description,\n )\n elif status == self.model.STATUS.inactive:\n user.deactivate(\n description=description,\n )\n else:\n raise ValueError('Unknown status')\n user.save()\n return user, created\n\n\n def delete_orphans(self):\n auth0 = get_auth0()\n queue = django_rq.get_queue('low')\n accounts = get_accounts()\n users = list(self.filter(\n username__startswith='auth0|',\n ).values_list('username', flat=True))\n i = 0\n for account in accounts:\n if account[0] not in users:\n i += 1\n queue.enqueue(\n auth0.users.delete,\n account[0],\n )\n return i\n\n def create_user(self, username=None, **kwargs):\n pk = uuid.uuid4()\n if not username:\n username = \"orphan|{0}\".format(str(pk))\n user = self.model(\n id=pk,\n username=username,\n **kwargs\n )\n user.set_unusable_password()\n user.save(using=self._db)\n return user\n\n def create_superuser(self, username, password, **kwargs):\n user = self.model(\n username=username,\n status=self.model.STATUS.active,\n is_staff=True,\n **kwargs\n )\n user.set_password(password)\n user.save(using=self._db)\n return user\n","sub_path":"project/api/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":30199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"545567397","text":"\n\nfrom xai.brain.wordbase.verbs._fornicate import _FORNICATE\n\n#calss header\nclass _FORNICATING(_FORNICATE, ):\n\tdef __init__(self,): \n\t\t_FORNICATE.__init__(self)\n\t\tself.name = \"FORNICATING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"fornicate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_fornicating.py","file_name":"_fornicating.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"149692265","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nimport logging\n_logger = logging.getLogger(__name__)\nimport xlsxwriter\n\nimport base64\nfrom io import BytesIO\n# from io import StringIO\n\nclass report_summary_header(models.Model):\n _name = 'aag.report_summary_header'\n _description = 'payroll header'\n\n name = fields.Char(\"Name\")\n month = fields.Integer(\"Month\")\n year = fields.Integer(\"Year\")\n\n export_file = fields.Binary(\"Export File\")\n export_filename = fields.Char(string=\"Export File Name\", )\n\n detail_ids = fields.One2many(comodel_name=\"aag.report_summary_detail\", inverse_name=\"header_id\")\n\n def action_generate(self):\n cr = self.env.cr\n\n sql = \"delete from aag_report_summary_detail where header_id=%s\"\n cr.execute(sql, (self.id,))\n\n sql = \"\"\"\n INSERT INTO aag_report_summary_detail (\n header_id,\n \"DEPT\",\n \"BASIC\",\n \"NET\"\n ) \n select \n %s,\n dept.id,\n (select sum(amount) from hr_payslip_line line \n join hr_payslip ps on line.slip_id=ps.id \n join hr_employee emp on ps.employee_id = emp.id\n where code='BASIC' and emp.department_id = dept.id and date_part('month', ps.date_to) = %s and date_part('year', ps.date_to) = %s) as BASIC,\n (select sum(amount) from hr_payslip_line line \n join hr_payslip ps on line.slip_id=ps.id \n join hr_employee emp on ps.employee_id = emp.id\n where code='NET' and emp.department_id = dept.id and date_part('month', ps.date_to) = %s and date_part('year', ps.date_to) = %s) as NET\n from hr_department dept\n \"\"\"\n cr.execute(sql, (self.id, self.month, self.year, self.month, self.year))\n\n _logger.info(\"--- done action_generate\")\n\n\n\n def action_export(self):\n file_data = BytesIO()\n workbook = xlsxwriter.Workbook(file_data)\n worksheet = workbook.add_worksheet()\n bold = workbook.add_format({'bold': True})\n numeric = workbook.add_format({'num_format': '#,##0'})\n\n # write header\n worksheet.write(\"A1\", \"IDNO\", bold)\n 
worksheet.write(\"B1\", \"BASIC\", bold)\n worksheet.write(\"C1\", \"I_TRANSPORT\", bold)\n worksheet.write(\"D1\", \"D_PPH21\", bold)\n worksheet.write(\"E1\", \"D_TRANSPORT\", bold)\n worksheet.write(\"F1\", \"NET\", bold)\n\n # write data \n row = 1\n for line in self.detail_ids:\n worksheet.write(row, 0, line.IDNO)\n worksheet.write(row, 1, line.BASIC, numeric)\n worksheet.write(row, 2, line.I_TRANSPORT, numeric)\n worksheet.write(row, 3, line.D_PPH21, numeric)\n worksheet.write(row, 4, line.D_TRANSPORT, numeric)\n worksheet.write(row, 5, line.NET, numeric)\n row += 1\n\n workbook.close()\n\n file_data.seek(0)\n self.export_file = base64.encodestring(file_data.getvalue())\n self.export_filename = 'report_payroll-%s-%s.xlsx' % (self.month, self.year)\n\n\n\n\n _logger.info(\"--- action_export\")\n\n\nclass report_summary_detail(models.Model):\n _name = 'aag.report_summary_detail'\n _description = 'payroll detail'\n\n header_id = fields.Many2one(comodel_name=\"aag.report_summary_header\")\n\n DEPT = fields.Integer(\"DEPT\")\n BASIC = fields.Integer(\"BASIC\")\n I_TRANSPORT = fields.Integer(\"I_TRANSPORT\")\n D_PPH21 = fields.Integer(\"D_PPH21\")\n D_TRANSPORT = fields.Integer(\"D_TRANSPORT\")\n NET = fields.Integer(\"NET\")\n\n","sub_path":"aag_report_payroll/models/report_summary.py","file_name":"report_summary.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"151917847","text":"from machine import Pin, ADC\nfrom time import sleep\nimport config\nimport network\nimport urequests\nimport sys\nimport ubinascii\n\nco2Pin = ADC(0)\nDEVICE_ADDRESS = ubinascii.hexlify(network.WLAN().config('mac'),':').decode()\n\n# voltaje a 400 ppm\nZERO_POINT_VOLTAGE = 0.545 #0.740#0.511#0.606 #0.563 \nV_2000 = 0.354\n\nREAD_SALMPLE_QTY = 10 #cantidad de muestreos por ciclo de medida\nREAD_INTERVAL = 0.05 #timepo entre muestreos por ciclo\nLOG_400 = 2.602\nLOG_2000 = 3.301\n\n# voltaje a 400pp - voltaje a 2000ppm (0.511 - 0.354)\nREACTION_VOLTAGE = ZERO_POINT_VOLTAGE - V_2000\n\ndef get_Read (co2Pin):\n suma = 0\n for x in range (0, READ_SALMPLE_QTY):\n suma += co2Pin.read()\n sleep(READ_INTERVAL)\n return {\n 'volts':(suma/READ_SALMPLE_QTY) * 3.3/1024,\n 'raw' : suma/READ_SALMPLE_QTY,\n }\n\ndef get_percentaje (v):\n return pow(10, (v - ZERO_POINT_VOLTAGE) / ( (REACTION_VOLTAGE) / (LOG_400 - LOG_2000) ) + LOG_400)\n\ndef connect_wifi():\n ap_if = network.WLAN(network.AP_IF)\n ap_if.active(False)\n sta_if = network.WLAN(network.STA_IF)\n if not sta_if.isconnected():\n print('Connecting to WiFi...'+ config.WIFI_SSID)\n sta_if.active(True)\n sta_if.connect(config.WIFI_SSID, config.WIFI_PASSWORD)\n while not sta_if.isconnected(): #and countertrays < 10\n sleep(1)\n print('Network config:', sta_if.ifconfig())\n\n\ndef send_data(percentaje, volts, raw):\n json={\n \"unic_id\" : str(DEVICE_ADDRESS),\n \"values\" : [\n {\n 'value': percentaje,\n 'alias': 'CO2'\n },\n {\n 'value': raw,\n 'alias': 'ADC'\n },\n {\n 'value': volts,\n 'alias': 'volts'\n }\n ]\n }\n print(json)\n req = urequests.post(\n config.API_URL,\n json = json,\n headers = {'Content-Type': 'application/json'}\n )\n print(\"STATUS CODE\")\n print(req.status_code)\n print(req.text)\n if req.status_code < 400:\n print('Webhook invoked')\n else:\n print('Webhook failed')\n raise RuntimeError('Webhook failed')\n\ndef run():\n while True:\n connect_wifi()\n read = 
get_Read(co2Pin)\n volts = round(read['volts'],3)\n print('Voltage: {}'.format(volts))\n raw = read['raw']\n print('ADC {}'.format(raw))\n percentage = round(get_percentaje(volts),1)\n print('CO2: {} ppm'.format(percentage))\n try:\n send_data(percentage, volts, raw)\n except OSError as err:\n print(err)\n sleep(5)\n continue\n sleep(300)\n\nrun()\n","sub_path":"c02/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"146475382","text":"\n\nfrom xai.brain.wordbase.verbs._fracture import _FRACTURE\n\n#calss header\nclass _FRACTURES(_FRACTURE, ):\n\tdef __init__(self,): \n\t\t_FRACTURE.__init__(self)\n\t\tself.name = \"FRACTURES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"fracture\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_fractures.py","file_name":"_fractures.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"394001817","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^stat/murders$', 'DataParser.views.murder_by_county', name='murder_by_county'),\n url(r'^chart/(?P.*)/(?P.*)$', 'DataParser.views.page_crime_by_agency', name='page_crime_by_agency'),\n url(r'^stat/(?P.*)/(?P.*)$', 'DataParser.views.data_crime_by_agency', name='data_crime_by_agency'),\n url(r'^$', 'DataParser.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"crimestats/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"530731835","text":"# render_tempalte adds templates to file # url_for links to routes \n# flash and redirect are for redirection after form submital and flash message\n# abort renders 403 page\nfrom flask import (render_template, url_for, flash, redirect, request, abort, Blueprint)\n#imports the models\nfrom blogsite.models import User\n#import each created form classes from forms.py\nfrom blogsite.blueprints.users.forms import RegistrationForm, UpdateAccountForm\n# imports required variables from __init__\nfrom blogsite import db, bcrypt\n#imports required variables for logged in session\nfrom flask_login import login_user, current_user, logout_user, login_required\n#image uploader\nfrom blogsite.blueprints.users.image_helper import upload_file_to_s3, allowed_profile_images, delete_file_from_s3\n#secure naming\nfrom werkzeug.utils import secure_filename\n\n\nusers = Blueprint('users', __name__)\n############# USER RESOURCES/ROUTES #####################\n\n# USER CREATE/NEW\n@users.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n\t#set form = imported RegistrationForm Class\n\tform = RegistrationForm()\n\t#form validation for form \n\tif form.validate_on_submit():\n\t\t# created crypted password for user on database\n\t\thashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n\t\t#create user in database\n\t\tuser = User(username=form.username.data, email=form.email.data, password=hashed_password)\n\t\tdb.session.add(user)\n\t\tdb.session.commit()\n\t\tflash(f'Account created! 
You are no able to login!', 'success')\n\t\treturn redirect(url_for('main.home'))\n\treturn render_template('users/new.html', title='Register', form=form)#call form as argument\n\n# USER EDIT/UPDATE/ACCOUNT\n@users.route(\"/users//edit\", methods=['GET','POST'])\n@login_required\ndef edit(username):\n\tuser = User.query.filter_by(username=username).first_or_404()\n\tif user != current_user:\n\t\tabort(403)\n\tform = UpdateAccountForm()\n\tif form.validate_on_submit():\n\t\t# if form.picture.data:\n\t\t# \tfile = form.picture.data\n\t\t# if file and allowed_profile_images(file.filename):\n\t\t# \told_filename = user.image_file\n\t\t# \tdelete_file_from_s3(old_filename)\n\t\t# \tfile.filename = secure_filename(user.username + \"-\" + file.filename)\n\t\t# \toutput = upload_file_to_s3(file)\n\t\t# \tcurrent_user.picture_file = file.filename\n\n\t\tcurrent_user.username = form.username.data\n\t\tcurrent_user.email = form.email.data\n\t\tdb.session.commit()\n\t\tflash('Account Updated', 'success')\n\t\treturn redirect(url_for('main.home'))\n\telif request.method == 'GET':\n\t\tform.username.data = current_user.username\n\t\tform.email.data = current_user.email\n\t# image_file = url_for('static', filename='profile_pics/' + current_user.image_file)\n\treturn render_template('users/edit.html', title='Account', user=current_user,form=form)\n\n# USER PROFILE IMAGE ROUTE\n@users.route(\"/users//image\", methods=['POST'])\n@login_required\ndef upload_profile_image(username):\n\tform = UpdateAccountForm()\n\tuser = User.query.filter_by(username=username).first_or_404()\n\n # Prevent unauthorized user from changing data of another user\n\tif not user.id == current_user.id:\n\t\treturn render_template('users/edit.html', validation_errors=['Unauthorized!'], form=form, user=user)\n\t# Check if image in file for upload\n\tif \"image_file\" not in request.files:\n\t\tflash(\"No profile image\")\n\t\treturn render_template('users/edit.html', validation_errors=[], form=form, 
user=user)\n\n\tfile = request.files[\"image_file\"]\n\n\t# if no filename ask for new image\n\tif file.filename == \"\":\n\t\tflash(\"Please select a file\")\n\t\treturn render_template('edit.html', form=form)\n\n\t# check if file extension is acceptable\n\tif file and allowed_profile_images(file.filename):\n\t\t# if there is a previous file delete the file\n\t\told_filename = user.image_file\n\t\tdelete_file_from_s3(old_filename)\n\n\t\t# create a custom name for file\n\t\tfile.filename = secure_filename(user.username + \"-\" + file.filename)\n\t\t# upload the file\n\t\toutput = upload_file_to_s3(file)\n\t\t# set the user image file equal the bucket url\n\t\tuser.image_file = output\n\t\n\t\tdb.session.commit()\n\t\tflash(\"Profile Picture Updated!\")\n\n\t\t#redirect the user create the post action\n\t\treturn redirect(url_for('main.home'))\n\n\telse:\n\t\treturn redirect(\"/\")\n\n\n\n\n\n","sub_path":"blogsite/blueprints/users/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"37914760","text":"#!/usr/bin/env python3\n\nimport re\nimport csv\nfrom html.parser import HTMLParser\nimport codecs\nfrom bs4 import BeautifulSoup\n\n\ndef main(): \n # initialize a csv file to which we write each row of ATOM data\n with open('output.csv', 'w', newline='') as csvfile:\n fieldnames = [\n \"filename\",\n \"qubit_Parent_Slug\", \n \"title\",\n \"rad_General_Material_Designation\",\n \"alternate_Title\",\n \"level_Of_Description\", \n \"repository\",\n \"creators\",\n \"creator_Histories\",\n \"creator_Dates\",\n \"creator_Dates_Notes\",\n \"extent_And_Medium\",\n \"archival_History\",\n \"scope_And_Content\",\n \"arrangement\",\n \"language\",\n \"access_Conditions\",\n \"subject_Access_Points\",\n \"place_Access_Points\",\n \"name_Access_Points\",\n \"institution_Identifier\",\n \"revision_History\"\n ]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n # two text files contain lists of filenames pointing to HTML finding aids\n # the text files are necessary for avoiding many corrupt finding aids\n with open('fondsList.txt', 'r') as f:\n filenames = f.readlines()\n for filename in filenames:\n fondRow = Fonds(filename.strip())\n appendRow(fondRow, writer)\n del(fondRow)\n \n with open('seriesList.txt', 'r') as f:\n filenames = f.readlines()\n for filename in filenames: \n seriesRow = Series(filename.strip())\n appendRow(seriesRow, writer)\n # file data is found in the Series HTML and is thus logically nested\n # iterate over file data as single rows\n for fileRow in seriesRow.fileCollection:\n appendRow(fileRow, writer)\n del(seriesRow)\n\n\ndef appendRow(newRow, writer): \n # writes to the csv file\n writer.writerow( { \n \"filename\": newRow.filename,\n \"qubit_Parent_Slug\": newRow.qubit_Parent_Slug, \n \"title\": newRow.title,\n \"rad_General_Material_Designation\": newRow.rad_General_Material_Designation,\n \"alternate_Title\": newRow.alternate_Title,\n \"level_Of_Description\": 
newRow.level_Of_Description,\n \"repository\": newRow.repository,\n \"creators\": newRow.creators,\n \"creator_Histories\": newRow.creator_Histories,\n \"creator_Dates\": newRow.creator_Dates,\n \"creator_Dates_Notes\": newRow.creator_Dates_Notes,\n \"extent_And_Medium\": newRow.extent_And_Medium,\n \"archival_History\": newRow.archival_History,\n \"scope_And_Content\": newRow.scope_And_Content,\n \"arrangement\": newRow.arrangement,\n \"language\": newRow.language,\n \"access_Conditions\": newRow.access_Conditions,\n \"subject_Access_Points\": newRow.subject_Access_Points,\n \"place_Access_Points\": newRow.place_Access_Points,\n \"name_Access_Points\": newRow.name_Access_Points,\n \"institution_Identifier\": newRow.institution_Identifier,\n \"revision_History\": newRow.revision_History\n }\n )\n\n\nclass TAGStripper(HTMLParser):\n \n \"\"\"HTML tag remover for cleanly parsing HTML\"\"\"\n\n def __init__(self):\n super().__init__()\n self.reset()\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\ndef strip_tags(html):\n s = TAGStripper()\n s.feed(html)\n return s.get_data()\n\n\nclass Record(object):\n\n \"\"\"HTML input, parses into ATOM fields as a complete ATOM record\"\"\"\n \n def __init__(self, filename): \n self.filename = filename\n with codecs.open(self.filename, \"r\",encoding='utf-8', errors='ignore') as fdata:\n self.soup = BeautifulSoup(fdata, 'lxml')\n self.creator_Dates = \"\"\n self.extent_And_Medium = \"\"\n self.scope_And_Content = \"\"\n self.title = \"\"\n self.rad_General_Material_Designation = \"\"\n self.alternate_Title = \"\"\n self.qubit_Parent_Slug = \"\" \n self.creators = \"\"\n self.level_Of_Description = \"\"\n self.repository = \"\"\n self.creator_Histories = \"\"\n self.creator_Dates_Notes = \"\"\n self.archival_History = \"\"\n self.arrangement = \"\"\n self.language = \"\"\n self.access_Conditions = \"\"\n self.subject_Access_Points = \"\"\n self.place_Access_Points 
= \"\"\n self.name_Access_Points = \"\"\n self.institution_Identifier = \"\"\n self.revision_History = \"\" \n self.data = self.soup.find('div',id='did')\n if self.data is None:\n self.data = self.soup.find('td',id='main')\n\n def __del__(self): pass\n\n\nclass Fonds(Record):\n \n \"\"\"Fonds specific ATOM record\"\"\"\n \n # initalize fonds variables and call scraper methods\n def __init__(self, filename):\n super(Fonds, self).__init__(filename)\n self.level_Of_Description = \"fonds\"\n self.repository = \"University Archives\"\n self.language = \"English\"\n self.institution_Identifier = \"AEU\"\n self.revision_History = re.sub('(\\s)+',' ',strip_tags(str(self.soup.find('td', id='institute'))))\n \n # scraping data from HTML and cleanup \n try:\n for content in self.data.contents:\n if re.search(r'records|audio|material|sound|recordings|media|textual|photographs|metre', str(content), re.IGNORECASE) is not None:\n self.rad_General_Material_Designation = self.rad_General_Material_Designation + str(content.strip()) + '\\n'\n self.rad_General_Material_Designation = self.rad_General_Material_Designation.strip()\n except:\n pass\n \n try:\n for content in self.data.contents:\n if re.search(r'acc', str(content), re.IGNORECASE) is not None: \n self.alternate_Title = content.strip()\n break\n except:\n pass\n \n try:\n for content in self.data.contents:\n if re.search(r'Fonds|Collection|Archives', str(content), re.IGNORECASE) is not None:\n self.title = content.strip()\n break\n except:\n pass\n \n try:\n for content in self.data.contents:\n if re.search(r'ACC', str(content), re.IGNORECASE) is not None:\n pass\n elif re.search(r'[12][78901][0-9]{2}', str(content)) is not None:\n self.creator_Dates = content.strip()\n break\n except:\n pass\n \n try:\n for content in self.data.contents:\n if re.search(r'records|audio|material|sound|recordings|media|textual|photographs|metre', str(content), re.IGNORECASE) is not None:\n self.extent_And_Medium = self.extent_And_Medium + 
str(content.strip()) + '\\n'\n self.extent_And_Medium.strip()\n except:\n pass \n \n if re.search(r\"Scope and Content(.+?)?
\", str(self.soup), re.DOTALL) is not None:\n self.scope_And_Content = strip_tags(re.sub('(\\s)+',' ',str(re.search(r\"Scope and Content(.+?)?
\", str(self.soup), re.DOTALL).groups(0)[0]))) \n if re.search(r\".+?(.+?)?
\", str(self.soup), re.DOTALL) is not None:\n self.creator_Histories = strip_tags(re.sub('(\\s)+',' ',str(re.search(r\".+?(.+?)?
\", str(self.soup), re.DOTALL).groups(0)[0])))\n if re.search(r\"Custodial History(.+?)?
\", str(self.soup), re.DOTALL) is not None:\n self.archival_History = strip_tags(re.sub('(\\s)+',' ',str(re.search(r\"Custodial History(.+?)?
\", str(self.soup), re.DOTALL).groups(0)[0]))) \n if re.search(r\".+?(.+?)?
\", str(self.soup), re.DOTALL) is not None:\n self.arrangement = strip_tags(re.sub('(\\s)+',' ',str(re.search(r\".+?(.+?)?
\", str(self.soup), re.DOTALL).groups(0)[0])))\n if re.search(r\"Restrictions on Access
(.+?)?
\", str(self.soup), re.DOTALL) is not None:\n self.access_Conditions = strip_tags(re.sub('(\\s)+',' ',str(re.search(r\"Restrictions on Access
(.+?)?
\", str(self.soup), re.DOTALL).groups(0)[0])))\n self.creators = re.sub(r'(Fonds|Collection|Archives|fonds|collection|archives)', '', self.title, re.IGNORECASE).strip()\n\n\nclass Series(Record):\n\n \"\"\"series specific ATOM record\"\"\"\n \n def __init__(self, filename):\n super(Series, self).__init__(filename)\n self.level_Of_Description = \"series\"\n self.fileCollection = []\n \n # scraping data from HTML and cleanup \n for content in self.data.find(id='series'):\n if re.search(\"\\W(.*?)[.][-]\", str(content)) is not None:\n self.title = re.search(\"\\W(.*?)[.]--\", str(content)).groups(0)[0]\n break \n for content in self.data.find(id='series'):\n if re.search(\"[-].(.+).[-]\", str(content)) is not None:\n self.creator_Dates = re.search(r'[-].(.+).[-]', str(content)).groups(0)[0]\n break\n for content in self.data.find(id='series'):\n if re.search(r'([-])+(.+)([-])(.+)', str(content), re.IGNORECASE) is not None:\n self.extent_And_Medium = re.search(r'([-])+(.+)([-])(.+)', str(content)).groups(0)[3]\n break\n if self.data.p is not None:\n self.scope_And_Content = re.sub('(\\s)+',' ',str(self.data.p.string)).strip()\n # special method, iterates over each file in the series\n # passes data to a new file object\n # stores object in a local (series level) list\n for listings in self.data.find_all(id='filelisting'):\n newFile = File(self.filename.strip(), listings)\n self.fileCollection.append(newFile)\n\n\nclass File(Record):\n \n \"\"\"file specific ATOM record\"\"\"\n \n def __init__(self, filename, listings):\n super(File, self).__init__(filename)\n self.level_Of_Description = \"file\"\n self.listings = listings\n \n # parses and writes row data to the file object\n self.alternate_Title = str(self.listings.contents[1].string) + ' ' + str(self.listings.contents[3].string)\n self.alternate_Title = self.alternate_Title.replace('None','').strip()\n self.title = str(re.sub( '(\\s)+', ' ', str(self.listings.contents[5].string) )).replace('None','')\n 
self.creator_Dates = str(self.listings.contents[7].string) \n\n\nif __name__ == '__main__':\n main()","sub_path":"metadata-wrangling/archives/archives.py","file_name":"archives.py","file_ext":"py","file_size_in_byte":11764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"61938641","text":"import Star\nimport JDN\nfrom decimal import Decimal\n\n__author__ = \"Moskvin Vasiliy (vasiliy.moscvin@yandex.ru)\"\n\n\nclass Variable(Star.Star):\n def __init__(self, name, ra, dec, period, epoch_center, t_duration):\n super().__init__(name, ra, dec)\n epoch_center = Decimal(epoch_center)\n self.period = Decimal(period)\n self.t_duration = Decimal(t_duration)\n self.epoch = (epoch_center - self.t_duration / 2,\n epoch_center,\n epoch_center + self.t_duration / 2)\n\n def __str__(self):\n return 'name = {} ra = {:.7f} dec = {:.7f} period = {:.7f} ' \\\n 'epoch_center = {:.7f} transit duration = {:.7f}'.format(self.name,\n self.ra,\n self.dec,\n self.period,\n self.epoch[1],\n self.t_duration)\n\n def get_ephemerid(self, epoch):\n \"\"\"\n Возвращает следующую эфемериду, по указанной эпохе\n :param epoch: эпоха\n :return: следующая эфемерида\n \"\"\"\n return epoch + self.period\n\n def get_next_ephemerid(self, *key):\n \"\"\"\n Возвращает значения следующей эфемериды по ключу (s, c, e). 
Если ключ не указан, то возвращается\n кортеж значений эфемерид: s - start, c - center, e - end\n :param key: эфемериду чего считать: start, center, end\n :return: эфемериды\n \"\"\"\n if not key:\n return tuple(map(lambda x: x + self.period, self.epoch))\n elif key[0] == \"s\":\n return self.get_ephemerid(self.epoch[0])\n elif key[0] == \"c\":\n return self.get_ephemerid(self.epoch[1])\n elif key[0] == \"e\":\n return self.get_ephemerid(self.epoch[2])\n else:\n raise Star.KeyInputError\n\n def get_list_ephemerids(self, n, *key):\n \"\"\"\n Возвращает список n следующих эфемерид, отсчитывая от: начала (s), центра (c) или конца (e) изменения блеска\n :param n: количество эфемерид\n :param key: ключ, указывающий начало отсчёта s - start, c - center, e - end, отсутствие ключа - возвращает\n эфемериды для всех трёх точек\n :return: список эфемерид\n \"\"\"\n if not key:\n eph = self.epoch\n elif key[0] == \"s\":\n eph = self.epoch[0]\n elif key[0] == \"c\":\n eph = self.epoch[1]\n elif key[0] == \"e\":\n eph = self.epoch[2]\n else:\n raise Star.KeyInputError\n if type(eph) == tuple:\n lst = [[], [], []]\n for i in range(n):\n for index, src in enumerate(eph):\n lst[index].append(self.get_ephemerid(src))\n eph = [x + self.period for x in eph]\n else:\n lst = []\n for i in range(n):\n lst.append(self.get_ephemerid(eph))\n eph += self.period\n return lst\n\n\nCOROT_2b = Variable(\"COROT_2b\", \"19 27 6.494\", \"1 23 1.17\", 1.7429964, 2454706.4016, 0.095)\n\n\n# print(COROT_2b)\n# try:\n# lst = COROT_2b.get_list_ephemerids(10)\n# except Star.KeyInputError:\n# print(\"Ошибка ключа. 
Нет такого ключа!\")\n# else:\n# if type(lst[0]) == list:\n# for i in zip(lst[0], lst[1], lst[2]):\n# print(\"{} {} {}\".format(*i))\n# else:\n# for i, src in enumerate(lst):\n# print(\"{}: {}\".format(i, src))\n\n\ndef sort_phemerides(*stars, n, key):\n \"\"\"\n Возвращает отсортированный список звёзд по их эфемеридам.\n Возможны два случая: сортируется по какому-либо только по одному ключу(s, c, e)\n или сортируются по всем трём ключам (ключ a)\n :param stars: кортеж звёзд\n :param n: количество эфемерид от указанных эпох для каждой звезды\n :param key: ключи (s - start, c - center, e - end, a - all)\n :return: отсортированный список звёзд по их эфемеридам\n \"\"\"\n lst_dict = []\n lst = [[], [], []]\n if key == \"a\":\n for i in stars:\n lst_dict.append({\"name\": i.name,\n \"s\": i.get_list_ephemerids(n, \"s\"),\n \"c\": i.get_list_ephemerids(n, \"c\"),\n \"e\": i.get_list_ephemerids(n, \"e\")})\n\n for i in lst_dict:\n lst[2].extend(i[\"s\"])\n lst[2].extend(i[\"c\"])\n lst[2].extend(i[\"e\"])\n lst[2].sort()\n flag = False\n\n for i in lst[2]:\n for j in lst_dict:\n for keys in j.keys():\n if keys != \"name\":\n if i in j[keys]:\n lst[0].append(j[\"name\"])\n lst[1].append(keys)\n flag = True\n break\n if flag:\n flag = False\n break\n return lst\n\n elif key == \"s\" or key == \"c\" or key == \"e\":\n for i in stars:\n lst_dict.append({\"name\": i.name,\n key: i.get_list_ephemerids(n, key)})\n for i in lst_dict:\n lst[2].extend(i[key])\n lst[2].sort()\n\n for i in lst[2]:\n for j in lst_dict:\n if i in j[key]:\n lst[0].append(j[\"name\"])\n lst[1].append(key)\n break\n return lst\n else:\n raise Star.KeyInputError\n\n\nst = Variable(\"one\", \"19 27 6.494\", \"1 23 1.17\", 1.2198669, 2457725.1784722, 0.095)\nst1 = Variable(\"two\", \"19 27 6.494\", \"1 23 1.17\", 1.4200246, 2457725.1618056, 0.095)\nst2 = Variable(\"tre\", \"19 27 6.494\", \"1 23 1.17\", 3.2130598, 2457728.6958333, 0.095)\n\nlst = sort_phemerides(st, st1, st2, n=20, key=\"a\")\n\nfor i in 
zip(*lst):\n print(\"{}\\t{}\\t{}\".format(i[0], i[1], JDN.get_formated_GD(str(i[2]))))\n","sub_path":"Variable.py","file_name":"Variable.py","file_ext":"py","file_size_in_byte":6681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"102828535","text":"\"\"\"\nTests for creating an empty dataset.\n\"\"\"\nfrom pathlib import Path\nfrom shlex import split\nfrom subprocess import run, PIPE\n\nimport pytest\nfrom click.testing import CliRunner\nfrom cookiecutter.main import cookiecutter\n\nrunner = CliRunner()\n\n# the default name\nNAME = \"testdataset\"\n\nCONFIG_STR = \"\"\"\ndefault_context:\n author_name: \"{author_name}\"\n email: \"{email}\"\n dataset_name: \"{dataset_name}\"\n project_url: \"{project_url}\"\n dataset_description: \"{dataset_description}\"\n version: \"0.1.0\"\nabbreviations:\n gh: https://github.com\n bb: https://bitbucket.org\n\"\"\"\n\n\ndef _path_to_waveform_file(path):\n \"\"\" return the path to a simple waveform file. \"\"\"\n return path / \"waveforms\" / \"somefile.txt\"\n\n\ndef _path_to_event_file(path):\n \"\"\" return path to event file. \"\"\"\n return path / \"events\" / \"2017\" / \"01\" / \"anotherfile.txt\"\n\n\ndef _make_config_files(path, **kwargs):\n \"\"\" Make a config file and save it to path. \"\"\"\n defaults = dict(\n author_name=\"Bob Ham\",\n email=\"example@gmail.com\",\n dataset_name=NAME,\n project_url=f\"https://github.com/bob-h/{NAME}\",\n dataset_description=\"A cool dataset for sure\",\n version=\"0.1.0\",\n )\n defaults.update(kwargs)\n save_str = CONFIG_STR.format(**defaults)\n\n path = Path(path)\n with path.open(\"w\") as fi:\n fi.write(save_str)\n\n\n@pytest.fixture(scope=\"class\")\ndef temp_dirs(tmp_path_factory):\n \"\"\" Create two temporary directories. 
\"\"\"\n p1 = Path(tmp_path_factory.mktemp(\"default\"))\n p2 = Path(tmp_path_factory.mktemp(\"configs\"))\n return p1, p2\n\n\n@pytest.fixture(scope=\"class\")\ndef new_dataset_default(temp_dirs, project_path):\n \"\"\" Create a new dataset and return its path \"\"\"\n config_path = temp_dirs[1] / \"config.txt\"\n out_path = temp_dirs[0]\n _make_config_files(config_path)\n cmd = (\n f\"cookiecutter {project_path} --output-dir {out_path} \"\n f\"--config-file {config_path} --no-input\"\n )\n run(split(cmd), check=True)\n # path to newly created package\n path = out_path / (\"opsdata_\" + NAME)\n # a simple file, and then a really nested file\n source_path = path / path.name / NAME\n simple_data = _path_to_waveform_file(source_path)\n nested_file = _path_to_event_file(source_path)\n # create data directories and files\n simple_data.parent.mkdir(exist_ok=True, parents=True)\n with simple_data.open(\"w\") as fi:\n fi.write(\"data\")\n nested_file.parent.mkdir(exist_ok=True, parents=True)\n with nested_file.open(\"w\") as fi:\n fi.write(\"more data\")\n return path\n\n\n@pytest.fixture(scope=\"class\")\ndef pip_installed_dataset(new_dataset_default):\n \"\"\" install the new dataset with pip \"\"\"\n cmd = f\"pip install {new_dataset_default}\"\n run(split(cmd), check=True)\n yield new_dataset_default\n run(split(f\"pip uninstall -y {new_dataset_default.name}\"), check=True)\n\n\nclass TestDataset:\n \"\"\" tests for the empty dataset. \"\"\"\n\n @pytest.fixture(scope=\"class\")\n def installed_path(self, pip_installed_dataset):\n \"\"\" return the installed path of the new dataset. 
\"\"\"\n cmd1 = f'import obsplus; ds = obsplus.load_dataset(\"{NAME}\"); ' \"print(ds.data_path)\"\n cmd = f\"python -c '{cmd1}'\"\n # if this doesn't raise the dataset is discoverable\n result = run(split(cmd), check=True, stdout=PIPE)\n return Path(result.stdout.decode(\"utf-8\").rstrip())\n\n def test_datafiles_exist(self, installed_path):\n \"\"\" Determine where the file lives and assert it has the datafiles. \"\"\"\n wave_file = _path_to_waveform_file(installed_path)\n event_file = _path_to_event_file(installed_path)\n assert wave_file.exists()\n assert event_file.exists()\n\n def test_exists(self, pip_installed_dataset):\n \"\"\" Ensure the new directory exists. \"\"\"\n assert pip_installed_dataset.exists()\n\n def test_load_dataset(self, pip_installed_dataset):\n \"\"\" Ensure the dataset can be loaded by obsplus \"\"\"\n cmd1 = (\n f'import obsplus; ds = obsplus.load_dataset(\"{NAME}\"); '\n \"assert(isinstance(ds, obsplus.DataSet))\"\n )\n cmd = f\"python -c '{cmd1}'\"\n # if this doesn't raise the dataset is discoverable\n run(split(cmd), check=True)\n\n def test_run_dataset_tests(self, pip_installed_dataset):\n \"\"\" Ensure the generated tests also pass. \"\"\"\n cmd = f\"pytest {pip_installed_dataset}\"\n run(split(cmd), check=True)\n","sub_path":"tests/test_create_dataset.py","file_name":"test_create_dataset.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"583258359","text":"import re\r\nimport collections\r\nimport os\r\n\r\n##\"doc_id\", \"title\", \"author\", \"created\", \"topic\", \"tagging\"\r\n\r\n\r\n##Первое задание\r\n\r\ndef docid():\r\n docid = []\r\n for file in os.listdir():\r\n with open(file, 'r', encoding='utf-8') as f:\r\n for line in f.readlines():\r\n r = re.search('content=\"(.+?)\" name=\"docid\"', line)\r\n if r:\r\n docid.append(r.group(1))\r\n return docid\r\n\r\ndef title():\r\n title = []\r\n for file in os.listdir():\r\n with open(file, 'r', encoding='utf-8') as f:\r\n for line in f.readlines():\r\n r = re.search('(.+?)', line)\r\n if r:\r\n title.append(r.group(1))\r\n return title\r\n \r\ndef author():\r\n author = []\r\n for file in os.listdir():\r\n with open(file, 'r', encoding='utf-8') as f:\r\n for line in f.readlines():\r\n r = re.search('content=\"(.+?)\" name=\"author\"', line)\r\n if r:\r\n author.append(r.group(1))\r\n return author\r\n\r\ndef created():\r\n created = []\r\n for file in os.listdir():\r\n with open(file, 'r', encoding='utf-8') as f:\r\n for line in f.readlines():\r\n r = re.search('content=\"(.+?)\" name=\"topic\"', line)\r\n if r:\r\n created.append(r.group(1))\r\n return created\r\n\r\ndef topic():\r\n topic = []\r\n for file in os.listdir():\r\n with open(file, 'r', encoding='utf-8') as f:\r\n for line in f.readlines():\r\n r = re.search('content=\"(.+?)\" name=\"created\"', line)\r\n if r:\r\n topic.append(r.group(1))\r\n return topic\r\n\r\ndef tag():\r\n tag = []\r\n for file in os.listdir():\r\n with open(file, 'r', encoding='utf-8') as f:\r\n for line in f.readlines():\r\n r = re.search('content=\"(.+?)\" name=\"tagging\"', line)\r\n if r:\r\n tag.append(r.group(1))\r\n return tag\r\n\r\ndef table(docid, title, author, created, topic, tag):\r\n with open('table.csv', 'w', encoding='utf-8') as t:\r\n table = []\r\n table.append('doc.id' + '\\t' + 'title' + '\\t' + 'author' + '\\t' + 'created' + '\\t' + 'topic' + '\\t' + 'tag')\r\n for i in 
range(15):\r\n table.append(docid[i] + '\\t' + title[i] + '\\t' + author[i] + '\\t' + created[i] + '\\t' + topic[i] + '\\t' + tag[i])\r\n table = '\\n'.join(table)\r\n t.write(table)\r\n\r\n##Второе задание\r\ndef abb():\r\n abb = []\r\n for file in os.listdir():\r\n with open(file, 'r', encoding='utf-8') as f:\r\n for line in f.readlines():\r\n r = re.search('lex=\"([А-Я]+)\"', line)\r\n if r:\r\n abb.append(r.group(1))\r\n return abb\r\n\r\ndef counter(abb):\r\n counter = collections.Counter(abb)\r\n return counter\r\n\r\ndef table1(d):\r\n table = []\r\n with open('abbs.csv', 'w', encoding='utf-8') as t:\r\n for word in sorted(d, key=d.get, reverse=True):\r\n table.append(word + '\\t' + str(d[word]))\r\n table = '\\n'.join(table)\r\n t.write(table)\r\n\r\n## из-за первого это не успела написать, но нужно создать словарь с предложениями и искать биграммы как в коде, но по элементам из словаря. Потом из выводить\r\ndef noun():\r\n noun = []\r\n for file in os.listdir():\r\n with open(file, 'r', encoding='utf-8') as f:\r\n text = f.readlines()\r\n## text = {}\r\n for i, line in enumerate(text):\r\n r = re.search('lex=\"(.+)\" .+S', line)\r\n if r:\r\n d = re.search('lex=\"(.+)\" .+gen', text[i+1])\r\n if d:\r\n noun.append(r.group(1) + ' ' + d.group(1))\r\n \r\n return noun\r\n\r\n##def grams():\r\n \r\n \r\ndef main():\r\n return table(docid(), title(), author(), created(), topic(), tag()), \\\r\n table1(counter(abb()))\r\n\r\n##по крайней мере это выводит биграммы\r\nprint(noun())\r\n##print(abb())\r\n##print(docid())\r\n##print(title())\r\n##print(author())\r\n##print(created())\r\n##print(topic())\r\n##print(tag())\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"test3/news/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"452214441","text":"from django.urls import path, include\nfrom . import views\n\n\nurlpatterns = [\n # 127.0.0.80000\n path('', views.grocery_list, name='grocery_list'),\n\n # 127.0.0.80000/groceries/new --> local\n path('groceries/new/', views.groceries_new, name='groceries_new'),\n\n # 127.0.0.80000/groceries/piggy --> local\n path('groceries/piggy/', views.piggy_page, name='piggy_page'),\n\n # 127.0.0.80000/groceries/ --> local\n path('groceries//', views.purchaser_list, name='purchaser_list'),\n\n # 127.0.0.80000/groceries/purchaser --> local\n path('groceries/purchaser/', views.user_purchaser_list, name='user_purchaser_list'),\n\n # 127.0.0.80000/groceries/bought_list -->\n path('groceries//bought_list/', views.bought_date_list, name='bought_date_list'),\n\n # 127.0.0.8000/groceries//edit_item/ --> local\n path('groceries//edit_item/', views.edit_item, name='edit_item'),\n\n # 127.0.0.80000/groceries//delete_grocery/ --> local\n path('groceries//delete', views.delete_item, name='delete_item'),\n\n # 127.0.0.80000/groceries/piggy_top_up/ -->\n path('groceries/piggy_top_up/', views.piggy_top_up, name='piggy_top_up'),\n\n # 127.0.0.80000/groceries/Piggy_top_up_list/ --> local\n path('groceries/piggy_top_up_list/', views.piggy_top_up_list, name='piggy_top_up_list'),\n\n # 127.0.0.80000/groceries//edit_item/ --> local\n path('groceries//edit_reason', views.edit_reason, name='edit_reason'),\n\n # 127.0.0.80000/groceries//delete_reason/ --> local\n path('groceries//delete_reason', views.delete_reason, name='delete_reason'),\n\n # 127.0.0.80000/groceries/item_paid/ --> local\n path('groceries//item_claim/', views.item_claim, name='item_claim'),\n\n # 127.0.0.8000/groceries/groceries_complete_list/ --> local\n path('groceries/groceries_complete_list/', views.groceries_complete_list, name = 
'groceries_complete_list'),\n]","sub_path":"groceries/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"221538256","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\nimport uuid\nfrom azure.cli.core.commands.arm import parse_resource_id\nimport azure.cli.core.azlogging as azlogging\nfrom azure.cli.core._util import CLIError\nfrom .custom import set_vm, _compute_client_factory\nlogger = azlogging.get_az_logger(__name__)\n\n_DATA_VOLUME_TYPE = 'DATA'\n_STATUS_ENCRYPTED = 'Encrypted'\n\nextension_info = {\n 'Linux': {\n 'publisher': 'Microsoft.Azure.Security',\n 'name': 'AzureDiskEncryptionForLinux',\n 'version': '0.1'\n },\n 'Windows': {\n 'publisher': 'Microsoft.Azure.Security',\n 'name': 'AzureDiskEncryption',\n 'version': '1.1'\n }\n}\n\n\ndef enable(resource_group_name, vm_name, # pylint: disable=too-many-arguments,too-many-locals, too-many-statements\n aad_client_id,\n disk_encryption_keyvault,\n aad_client_secret=None, aad_client_cert_thumbprint=None,\n key_encryption_keyvault=None,\n key_encryption_key=None,\n key_encryption_algorithm='RSA-OAEP',\n volume_type=None):\n '''\n Enable disk encryption on OS disk, Data disks, or both\n :param str aad_client_id: Client ID of AAD app with permissions to write secrets to KeyVault\n :param str aad_client_secret: Client Secret of AAD app with permissions to\n write secrets to KeyVault\n :param str aad_client_cert_thumbprint: Thumbprint of AAD app certificate with permissions\n to write secrets to KeyVault\n :param str disk_encryption_keyvault:the KeyVault where generated encryption key will be placed\n :param str key_encryption_key: KeyVault key name or URL used to encrypt the disk encryption key\n :param str key_encryption_keyvault: the KeyVault containing the key encryption key\n used to encrypt the disk 
encryption key. If missing, CLI will use --disk-encryption-keyvault\n '''\n # pylint: disable=no-member\n compute_client = _compute_client_factory()\n vm = compute_client.virtual_machines.get(resource_group_name, vm_name)\n os_type = vm.storage_profile.os_disk.os_type.value\n is_linux = _is_linux_vm(os_type)\n extension = extension_info[os_type]\n\n # 1. First validate arguments\n\n if not aad_client_cert_thumbprint and not aad_client_secret:\n raise CLIError('Please provide either --aad-client-id or --aad-client-cert-thumbprint')\n\n if volume_type is None:\n if vm.storage_profile.data_disks:\n raise CLIError('VM has data disks, please supply --volume-type')\n else:\n volume_type = 'OS'\n\n # encryption is not supported on all linux distros, but service never tells you\n # so let us verify at the client side\n if is_linux:\n image_reference = getattr(vm.storage_profile, 'image_reference', None)\n if image_reference:\n result, message = _check_encrypt_is_supported(image_reference, volume_type)\n if not result:\n logger.warning(message)\n\n # sequence_version should be unique\n sequence_version = uuid.uuid4()\n\n # retrieve keyvault details\n disk_encryption_keyvault_url = _get_key_vault_base_url(\n (parse_resource_id(disk_encryption_keyvault))['name'])\n\n # disk encryption key itself can be further protected, so let us verify\n if key_encryption_key:\n key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault\n if '://' not in key_encryption_key: # appears a key name\n key_encryption_key = _get_keyvault_key_url(\n (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)\n\n # 2. 
we are ready to provision/update the disk encryption extensions\n # The following logic was mostly ported from xplat-cli\n public_config = {\n 'AADClientID': aad_client_id,\n 'AADClientCertThumbprint': aad_client_cert_thumbprint,\n 'KeyVaultURL': disk_encryption_keyvault_url,\n 'VolumeType': volume_type,\n 'EncryptionOperation': 'EnableEncryption',\n 'KeyEncryptionKeyURL': key_encryption_key,\n 'KeyEncryptionAlgorithm': key_encryption_algorithm,\n 'SequenceVersion': sequence_version,\n }\n private_config = {\n 'AADClientSecret': aad_client_secret if is_linux else (aad_client_secret or '')\n }\n\n from azure.mgmt.compute.models import (VirtualMachineExtension, DiskEncryptionSettings,\n KeyVaultSecretReference, KeyVaultKeyReference,\n SubResource)\n\n ext = VirtualMachineExtension(vm.location, # pylint: disable=no-member\n publisher=extension['publisher'],\n virtual_machine_extension_type=extension['name'],\n protected_settings=private_config,\n type_handler_version=extension['version'],\n settings=public_config,\n auto_upgrade_minor_version=True)\n\n poller = compute_client.virtual_machine_extensions.create_or_update(\n resource_group_name, vm_name, extension['name'], ext)\n poller.result()\n\n # verify the extension was ok\n extension_result = compute_client.virtual_machine_extensions.get(\n resource_group_name, vm_name, extension['name'], 'instanceView')\n if extension_result.provisioning_state != 'Succeeded':\n raise CLIError('Extension needed for disk encryption was not provisioned correctly')\n if not (extension_result.instance_view.statuses and\n extension_result.instance_view.statuses[0].message):\n raise CLIError('Could not found url pointing to the secret for disk encryption')\n\n # 3. 
update VM's storage profile with the secrets\n status_url = extension_result.instance_view.statuses[0].message\n\n vm = compute_client.virtual_machines.get(resource_group_name, vm_name)\n secret_ref = KeyVaultSecretReference(secret_url=status_url,\n source_vault=SubResource(disk_encryption_keyvault))\n\n key_encryption_key_obj = None\n if key_encryption_key:\n key_encryption_key_obj = KeyVaultKeyReference(key_encryption_key,\n SubResource(key_encryption_keyvault))\n\n disk_encryption_settings = DiskEncryptionSettings(disk_encryption_key=secret_ref,\n key_encryption_key=key_encryption_key_obj,\n enabled=True)\n\n vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings\n set_vm(vm)\n if is_linux and volume_type != _DATA_VOLUME_TYPE:\n # TODO: expose a 'wait' command to do the monitor and handle the reboot\n logger.warning(\"The encryption request was accepted. Please use 'show' command to monitor \"\n \"the progress. If you see 'VMRestartPending', please restart the VM, and \"\n \"the encryption will finish shortly\")\n\n\ndef disable(resource_group_name, vm_name, volume_type=None, force=False):\n '''\n Disable disk encryption on OS disk, Data disks, or both\n '''\n compute_client = _compute_client_factory()\n vm = compute_client.virtual_machines.get(resource_group_name, vm_name)\n # pylint: disable=no-member\n os_type = vm.storage_profile.os_disk.os_type.value\n\n # 1. be nice, figure out the default volume type and also verify VM will not be busted\n is_linux = _is_linux_vm(os_type)\n if is_linux:\n if volume_type:\n if not force:\n if volume_type == _DATA_VOLUME_TYPE:\n status = show(resource_group_name, vm_name)\n if status['osDisk'] == _STATUS_ENCRYPTED:\n raise CLIError(\"VM's OS disk is encrypted. Disabling encryption on data \"\n \"disk can render the VM unbootable. 
Use '--force' \"\n \"to continue\")\n else:\n raise CLIError(\"Only data disk is supported to disable on Linux VM\")\n else:\n volume_type = _DATA_VOLUME_TYPE\n elif volume_type is None:\n if vm.storage_profile.data_disks:\n raise CLIError(\"VM has data disks, please specify --volume-type\")\n\n # sequence_version should be incremented since encryptions occurred before\n extension = extension_info[os_type]\n sequence_version = uuid.uuid4()\n\n # 2. update the disk encryption extension\n # The following logic was mostly ported from xplat-cli\n public_config = {\n 'VolumeType': volume_type,\n 'EncryptionOperation': 'DisableEncryption',\n 'SequenceVersion': sequence_version,\n }\n\n from azure.mgmt.compute.models import (VirtualMachineExtension, DiskEncryptionSettings,\n KeyVaultSecretReference, KeyVaultKeyReference,\n SubResource)\n\n ext = VirtualMachineExtension(vm.location, # pylint: disable=no-member\n publisher=extension['publisher'],\n virtual_machine_extension_type=extension['name'],\n type_handler_version=extension['version'],\n settings=public_config,\n auto_upgrade_minor_version=True)\n\n poller = compute_client.virtual_machine_extensions.create_or_update(resource_group_name,\n vm_name,\n extension['name'], ext)\n poller.result()\n\n # 3. 
Remove the secret from VM's storage profile\n extension_result = compute_client.virtual_machine_extensions.get(resource_group_name, vm_name,\n extension['name'],\n 'instanceView')\n if extension_result.provisioning_state != 'Succeeded':\n raise CLIError(\"Extension updating didn't succeed\")\n\n vm = compute_client.virtual_machines.get(resource_group_name, vm_name)\n disk_encryption_settings = DiskEncryptionSettings(enabled=False)\n vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings\n set_vm(vm)\n\n\ndef show(resource_group_name, vm_name):\n '''show the encryption status'''\n encryption_status = {\n 'osDisk': 'NotEncrypted',\n 'osDiskEncryptionSettings': None,\n 'dataDisk': 'NotEncrypted',\n 'osType': None\n }\n compute_client = _compute_client_factory()\n vm = compute_client.virtual_machines.get(resource_group_name, vm_name)\n # pylint: disable=no-member\n # The following logic was mostly ported from xplat-cli\n os_type = vm.storage_profile.os_disk.os_type.value\n is_linux = _is_linux_vm(os_type)\n encryption_status['osType'] = os_type\n extension = extension_info[os_type]\n extension_result = compute_client.virtual_machine_extensions.get(resource_group_name,\n vm_name,\n extension['name'],\n 'instanceView')\n logger.debug(extension_result)\n if extension_result.instance_view.statuses:\n encryption_status['progressMessage'] = extension_result.instance_view.statuses[0].message\n\n substatus_message = None\n if getattr(extension_result.instance_view, 'substatuses', None):\n substatus_message = extension_result.instance_view.substatuses[0].message\n\n encryption_status['osDiskEncryptionSettings'] = vm.storage_profile.os_disk.encryption_settings\n\n import json\n if is_linux:\n try:\n message_object = json.loads(substatus_message)\n except Exception: # pylint: disable=broad-except\n message_object = None # might be from outdated extension\n\n if message_object and ('os' in message_object):\n encryption_status['osDisk'] = message_object['os']\n 
else:\n encryption_status['osDisk'] = 'Unknown'\n\n if message_object and 'data' in message_object:\n encryption_status['dataDisk'] = message_object['data']\n else:\n encryption_status['dataDisk'] = 'Unknown'\n else:\n # Windows - get os and data volume encryption state from the vm model\n if (encryption_status['osDiskEncryptionSettings'].enabled and\n encryption_status['osDiskEncryptionSettings'].disk_encryption_key.secret_url):\n encryption_status['osDisk'] = _STATUS_ENCRYPTED\n\n if extension_result.provisioning_state == 'Succeeded':\n volume_type = extension_result.settings.get('VolumeType', None)\n about_data_disk = not volume_type or volume_type.lower() != 'os'\n if about_data_disk and extension_result.settings.get('EncryptionOperation', None) == 'EnableEncryption': # pylint: disable=line-too-long\n encryption_status['dataDisk'] = _STATUS_ENCRYPTED\n\n return encryption_status\n\n\ndef _is_linux_vm(os_type):\n return os_type.lower() == 'linux'\n\n\ndef _get_keyvault_key_url(keyvault_name, key_name):\n from azure.cli.core._profile import Profile\n\n def get_token(server, resource, scope): # pylint: disable=unused-argument\n return Profile().get_login_credentials(resource)[0]._token_retriever() # pylint: disable=protected-access\n\n from azure.keyvault import KeyVaultClient, KeyVaultAuthentication\n client = KeyVaultClient(KeyVaultAuthentication(get_token))\n result = client.keyvault.get_key(_get_key_vault_base_url(keyvault_name), key_name, '')\n return result.key.kid # pylint: disable=no-member\n\n\ndef _get_key_vault_base_url(vault_name):\n from azure.cli.core._profile import CLOUD\n suffix = CLOUD.suffixes.keyvault_dns\n return 'https://{}{}'.format(vault_name, suffix)\n\n\ndef _check_encrypt_is_supported(image_reference, volume_type):\n offer = getattr(image_reference, 'offer', None)\n publisher = getattr(image_reference, 'publisher', None)\n sku = getattr(image_reference, 'sku', None)\n\n # custom image?\n if not offer or not publisher or not sku:\n return 
(True, None)\n\n supported = [\n {\n 'offer': 'RHEL',\n 'publisher': 'RedHat',\n 'sku': '7.2'\n },\n {\n 'offer': 'RHEL',\n 'publisher': 'RedHat',\n 'sku': '7.3'\n },\n {\n 'offer': 'CentOS',\n 'publisher': 'OpenLogic',\n 'sku': '7.2n'\n },\n {\n 'offer': 'Ubuntu',\n 'publisher': 'Canonical',\n 'sku': '14.04'\n },\n {\n 'offer': 'Ubuntu',\n 'publisher': 'Canonical',\n 'sku': '16.04'\n }]\n\n if volume_type.upper() == _DATA_VOLUME_TYPE:\n supported.append({\n 'offer': 'CentOS',\n 'publisher': 'OpenLogic',\n 'sku': '7.2'\n },)\n\n for image in supported:\n if (image['publisher'].lower() == publisher.lower() and\n sku.lower().startswith(image['sku'].lower()) and\n offer.lower().startswith(image['offer'].lower())):\n return (True, None)\n\n sku_list = ['{} {}'.format(a['offer'], a['sku']) for a in supported]\n # pylint: disable=line-too-long\n message = \"Encryption might fail as current VM uses a distro not in the known list, which are '{}'\".format(sku_list)\n return (False, message)\n","sub_path":"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/disk_encryption.py","file_name":"disk_encryption.py","file_ext":"py","file_size_in_byte":15925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"530185605","text":"import json\nimport urllib.request\nfrom http.server import SimpleHTTPRequestHandler\nimport config\nfrom logger import logger\n\n\n# requests processing\nclass HttpProcessor(SimpleHTTPRequestHandler):\n def do_GET(self):\n if self.path == '/':\n return SimpleHTTPRequestHandler.do_GET(self) # <--- Response with redirection to index.html\n else:\n self.send_response(404)\n logger.info(\"Invalid endpoint\")\n\n def do_POST(self):\n if self.path == '/':\n logger.info(\"getting user input\")\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length).decode('utf-8').replace(\"amount=\", \"\") # <--- Gets user input\n logger.info(\"Receiving exchange rates\")\n req = urllib.request.Request(config.CURRENCY_URL)\n data = urllib.request.urlopen(req).read()\n data_json = json.loads(data.decode(\"utf-8\"))[\"Valute\"][\"USD\"][\"Value\"]\n if post_data.isdigit() and float(post_data) >= 0:\n logger.info(\"result formation\")\n value = float(post_data) * float(data_json)\n output_data = {\"currency\": \"USD\", \"exchange_rate\": data_json, \"user_input\": post_data, \"result\": value}\n logger.info(output_data)\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n logger.info(json.dumps(output_data).encode())\n self.wfile.write(json.dumps(output_data).encode())\n logger.info(\"result sent\")\n else:\n logger.error(\"Invalid value\")\n logger.info(post_data)\n return SimpleHTTPRequestHandler.do_GET(self)\n else:\n self.send_response(404)\n logger.info(\"Invalid endpoint\")\n","sub_path":"http_processor.py","file_name":"http_processor.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"607271594","text":"\"\"\"\nWe have a collection of stones, each stone has a positive integer weight.\n\nEach turn, we choose the two heaviest stones and smash them together. Suppose the stones have weights x and y with x <= y. The result of this smash is:\n\nIf x == y, both stones are totally destroyed;\nIf x != y, the stone of weight x is totally destroyed, and the stone of weight y has new weight y-x.\nAt the end, there is at most 1 stone left. Return the weight of this stone (or 0 if there are no stones left.)\n\n\n\nExample 1:\n\nInput: [2,7,4,1,8,1]\nOutput: 1\nExplanation:\nWe combine 7 and 8 to get 1 so the array converts to [2,4,1,1,1] then,\nwe combine 2 and 4 to get 2 so the array converts to [2,1,1,1] then,\nwe combine 2 and 1 to get 1 so the array converts to [1,1,1] then,\nwe combine 1 and 1 to get 0 so the array converts to [1] then that's the value of last stone.\n\n\nNote:\n\n1 <= stones.length <= 30\n1 <= stones[i] <= 1000\n\nSimulate the process. We can do it with a heap, or by sorting some list of stones every time we take a turn.\n\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def lastStoneWeight(self, stones: List[int]) -> int:\n # Solution 1 12 ms\n stones.sort(reverse=True)\n while len(stones) > 1:\n arr = stones[2:]\n y = stones[0]\n x = stones[1]\n if x != y:\n arr.append(y - x)\n stones = sorted(arr, reverse=True)\n if stones:\n return stones[0]\n return 0\n\n \"\"\"\n # Solution 2 #48 ms\n while len(stones) > 1:\n stone1, stone2 = stones[-1], stones[-2]\n if stone1 == stone2:\n stones.pop(-1)\n stones.pop(-1)\n else:\n stone1 = abs(stone1 - stone2)\n stones.pop(-1)\n stones[-1] = stone1\n if len(stones):\n return stones[-1]\n return 0\n \"\"\"\n\n\n# Main Call\nsolution = Solution()\nresult = solution.lastStoneWeight([2, 7, 4, 1, 8, 
1])\nprint(result)\n","sub_path":"src/arrays/lastStoneWeight.py","file_name":"lastStoneWeight.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"81550596","text":"# -*- coding: utf-8 -*-\r\nimport matplotlib.pyplot as plt \r\nimport matplotlib.animation as animation\r\nimport numpy as np\r\nx = np.asarray([1,2,3,4,5,6])\r\ny = np.asarray([2,4,6,8,10,12])\r\ndef animate(i,factor):\r\n line.set_xdata(x[:i])\r\n line.set_ydata(y[:i])\r\n line2.set_xdata(x[:i])\r\n line2.set_ydata(factor*y[:i])\r\n return line,line2\r\nK = 0 # any factor \r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\nline, = ax.plot([],[], '-')\r\nline2, = ax.plot([],[],'--')\r\nfor i in range(5):\r\n ax.set_xlim(np.min(x), np.max(x))\r\n ax.set_ylim(np.min(y), np.max(y))\r\n ani = animation.FuncAnimation(fig, animate, frames=len(x), fargs=(K,),\r\n interval=100, blit=True)\r\n plt.pause(0.5)\r\n","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"236866828","text":"from crudlfap import shortcuts as crudlfap\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.urls import reverse\nfrom ryzom.components import (\n A, Div, Icon, Li, Text, Ul\n)\n\nfrom ryzom.components import mwc\n\n\nfrom ryzom.components import Component\n\n\nclass Icon(Component):\n def __init__(self, name, **attrs):\n super().__init__(name, tag='mwc-icon', **attrs)\n\n\nclass NavMenu(Component):\n def __init__(self, request):\n active = request.path_info == crudlfap.site.views['home'].url\n content = [MenuItem(crudlfap.site.views['home'], request)]\n\n content = []\n for app, routers in crudlfap.site.get_app_menus(\n 'main', request).items():\n for router in routers:\n views = router.get_menu('model', request)\n if len(views) == 1:\n view = views[0]\n\n content.append(\n MenuItem(view, request, single_item=True)\n )\n else:\n content.append(MenuRouter(router, request))\n for view in router.get_menu('model', request):\n content.append(MenuItem(view, request, submenu=1))\n\n if not request.user.is_authenticated:\n content.append(\n Component(\n A(_('Log in'), href=reverse('crudlfap:login')),\n **{'tag': 'mwc-list-item', 'class': ''},\n )\n )\n else:\n content.append(Component(\n A(\n _('Log out'),\n **{\n 'data-noprefetch': 'true',\n 'href': reverse('crudlfap:logout'),\n }\n ),\n **{'tag': 'mwc-list-item', 'class': ''}\n ))\n\n if request.session.get('become_user', None):\n content.append(Li(\n A(\n ' '.join([\n str(_('Back to your account')),\n request.session['become_user_realname'],\n ]),\n **{\n 'data-noprefetch': 'true',\n 'href': reverse('crudlfap:su'),\n }\n ),\n **{'class': ''}\n ))\n\n super().__init__(\n *content,\n **{\n 'tag': 'mwc-list',\n 'class': 'crudlfap.components.menu.NavMenu',\n }\n )\n\n\nclass ListItem(Component):\n def __init__(self, view, request, single_item=False, graphic=None):\n if single_item:\n span = (Text(\n str(view.router.model._meta.verbose_name_plural.capitalize()))\n )\n elif 
getattr(view, 'router', None) is None:\n span = (Text(str(getattr(view, 'title', str(view)))))\n elif getattr(view.router, 'model', None) is None:\n span = (Text(str(getattr(view, 'title', str(view)))))\n else:\n span = (\n Text(str(view.title_menu.capitalize()))\n )\n\n content = [Component(span, tag='span')]\n attrs = dict(tag='mwc-list-item')\n\n show_icon = view if not single_item else view.router\n if getattr(show_icon, 'material_icon', ''):\n content.append(\n Icon(show_icon.material_icon, slot='graphic'),\n )\n attrs['graphic'] = graphic or 'icon'\n\n super().__init__(*content, **attrs)\n\n\nclass MenuItem(Component):\n def __init__(self, view, request, single_item=False, submenu=None):\n attrs = {\n 'href': view.url,\n 'title': str(view.title_link),\n 'class': 'MenuItem active' if request.path_info == view.url else '',\n 'tag': 'a',\n }\n\n if submenu:\n attrs['hidden'] = 'true'\n attrs['submenu'] = 'true'\n\n for key, value in getattr(view, 'link_attributes', {}).items():\n attrs[key] = value.replace('\"', '\\\\\"')\n\n if not getattr(view, 'turbolinks', True):\n attrs['data-turbolinks'] = 'false'\n\n return super().__init__(\n ListItem(view, request, single_item=single_item, graphic='medium' if submenu else 'icon'),\n **attrs\n )\n\n\nclass MenuRouter(Component):\n def __init__(self, router, request):\n self.active = ''\n for view in router.get_menu('model', request):\n if view.url == request.path_info:\n self.active = 'active'\n\n content = []\n content.append(\n Component(str(router.model._meta.verbose_name_plural.capitalize()), tag='span'),\n )\n if getattr(router, 'material_icon', ''):\n content.append(Icon(router.material_icon, slot='graphic'))\n\n super().__init__(\n *content,\n **{\n 'onclick': 'toggleSubmenu(this)',\n 'class': '',\n 'tag': 'mwc-list-item',\n 'graphic': 'icon',\n }\n 
)\n","sub_path":"src/crudlfap/components/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"436520877","text":"# -*- coding: utf-8 -*-\nfrom zvt.domain import FinanceCapitalStructure\n\nfrom zvt.recorders.emquantapi.finance.base_china_stock_finance_recorder import EmBaseChinaStockFinanceRecorder\nfrom zvt.utils.utils import add_func_to_value, first_item_to_float\n\ncapital_structure_map = {\n\n 'debt_asset_ratio': 'LIBILITYTOASSET', # 资产负债率\n 'em': 'ASSETSTOEQUITY', # 权益乘数\n 'ca_to_asset': 'CATOASSET', # 流动资产/总资产\n 'nc_to_asset': 'NCATOASSET', # 非流动资产/总资产\n 'tangible_assets_to_asset': 'TANGIBLEASSETSTOASSET', # 有形资产/总资产\n 'equity_to_total_capital': 'EQUITYTOTOTALCAPITAL', # 归属母公司股东的权益/全部投入资本\n 'interest_liblity_to_total_capital': 'INTERESTLIBILITYTOTOTALCAPITAL', # 带息负债/全部投入资本\n 'cl_to_libility': 'CLTOLIBILITY', # 流动负债/负债合计\n 'cnl_to_libility': 'NCLTOLIBILITY', # 非流动负债/负债合计\n 'interest_liblity_to_libility': 'INTERESLIBILITYTOLIBILITY', # 有息负债率\n\n}\nadd_func_to_value(capital_structure_map, first_item_to_float)\n\n\nclass ChinaStockFinanceCapitalStructure(EmBaseChinaStockFinanceRecorder):\n \"\"\"\n 财务指标-资本结构\n \"\"\"\n data_schema = FinanceCapitalStructure\n\n finance_report_type = 'FinanceCapitalStructure'\n\n data_type = 16\n\n def get_data_map(self):\n return capital_structure_map\n\n\n__all__ = ['ChinaStockFinanceCapitalStructure']\n\nif __name__ == '__main__':\n # init_log('income_statement.log')\n recorder = ChinaStockFinanceCapitalStructure(codes=['002572'])\n recorder.run()\n","sub_path":"zvt/recorders/emquantapi/finance/china_stock_finace_capital_structure.py","file_name":"china_stock_finace_capital_structure.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"381475373","text":"#!/usr/bin/env python3\n\"\"\"语法执行器\"\"\"\n\nimport pywind.lib.template.syntax_parser as syntax_parser\n\n\nclass ExecuteErr(Exception): pass\n\n\nclass execute(object):\n __exe_objects = None\n\n __kwargs = None\n\n __ext_block_functions = None\n __ext_functions = None\n\n # 运行步骤1\n __run_step1 = None\n\n # 结果缓冲区\n __buff = None\n\n def __init__(self):\n self.__exe_objects = {}\n self.__kwargs = {}\n\n self.__ext_block_functions = {}\n self.__ext_functions = {}\n self.__run_step1 = []\n self.__buff = []\n\n def register_ext_function(self, funcname, funcobj, is_block_func=False):\n \"\"\"注册扩展函数\n :param funcname:字符串函数名 \n :param funcobj: 函数对象\n :param is_block_func:是否是块函数\n :return: \n \"\"\"\n if is_block_func:\n self.__ext_block_functions[funcname] = funcobj\n else:\n self.__ext_functions[funcname] = funcobj\n\n return\n\n def unregister_ext_function(self, funcname, is_block_func=False):\n \"\"\"删除扩展函数\n :param funcname:函数名 \n :param is_block_func:是否是块函数 \n :return: \n \"\"\"\n if is_block_func:\n pydict = self.__ext_block_functions\n else:\n pydict = self.__ext_functions\n\n if funcname not in pydict: return\n del pydict[funcname]\n\n def set_exe_object(self, name, value):\n \"\"\"设置执行对象\n :param name: \n :param value: \n :return: \n \"\"\"\n self.__exe_objects[name] = value\n\n def put_to_buff(self, content):\n self.__buff.append(content)\n\n def exe(self, name):\n if name not in self.__exe_objects: raise ExecuteErr(\"cannot found execute object '%s'\" % name)\n\n sts = self.__exe_objects[name]\n cls = syntax_parser.parser()\n\n rs = cls.parse(sts)\n\n if not rs[0]:\n return []\n\n results = rs[1]\n for flag, content in results:\n if flag == syntax_parser.SYNTAX_FLAG_NONE:\n self.__run_step1.append((self.put_to_buff, content,))\n continue\n\n return results\n\n def __getattr__(self, item):\n pass\n\n\nfd = open(\"./syntax.txt\", \"r\")\n\nexe = execute()\nexe.set_exe_object(\"test\", 
fd.read())\nfd.close()\n\nprint(exe.exe(\"test\"))\n","sub_path":"pywind/lib/template/syntax_execute.py","file_name":"syntax_execute.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"316824394","text":"cnt = 0\ntot = 0.0\nwhile True:\n\tsval = input('Enter a number: ')\n\tif sval == 'done':\n\t\tbreak\n\ttry:\n\t\tfval = float(sval)\n\texcept:\n\t\tprint('Invalid Input')\n\t\tcontinue\n\tcnt += 1\n\ttot += fval\n#print('ALL DONE')\nprint(tot,cnt,tot/cnt)","sub_path":"exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"546494858","text":"IN = 1\nOUT = 2\n\ndef parse_time(s):\n h, m = map(int, s.split(':'))\n return h*60 + m\n\n\nn = int(input())\ntimes = []\nfor _ in range(n):\n time_in, time_out = map(parse_time, input().split(' '))\n times.append((time_in, IN))\n times.append((time_out, OUT))\n\ntimes.sort()\nvisitors_in = 0\nmax_visitors = 0\nfor (time, in_or_out) in times:\n if in_or_out == IN:\n visitors_in += 1\n max_visitors = max(max_visitors, visitors_in)\n else:\n visitors_in -= 1\nprint(max_visitors)\n","sub_path":"acmp/76.py","file_name":"76.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"384473141","text":"# coding: utf-8\n\nfrom http import HTTPStatus\nfrom requests import get, put, post\nfrom uuid import uuid1\n\nfrom micro_contract.transaction import Transaction\nfrom the_3rd.cielo.tables import BIN_ANALYSIS\n\n#: DEVELOPER\n# MERCHANT_ID = '5f8b7ad9-8ecd-41b0-b9fa-76c78f1faefe'\n# MERCHANT_KEY = 'UZTOVFUZIHITGVSCWQWFHFDCCIJDXDSUOOPCCBYW'\n# HOST_REQUEST = 'https://apisandbox.cieloecommerce.cielo.com.br'\n# HOST_QUERY = 'https://apiquerysandbox.cieloecommerce.cielo.com.br'\n\n#: DEPLOY\nMERCHANT_ID = '10cfbea6-7ba9-46ad-8500-e03560fd015c'\nMERCHANT_KEY = 'Yt8g9zfhBIIR46fChldmREIxapaadASF2gVK0mn9'\nHOST_REQUEST = 'https://api.cieloecommerce.cielo.com.br/'\nHOST_QUERY = 'https://apiquery.cieloecommerce.cielo.com.br/'\n\n\ndef query_bin(number: str) -> dict:\n \"\"\"\n Consulta se um número de cartão é de crédito e apto a ser utilizado\n\n :param number:\n Número do cartão de crédito a ser analisado\n :return:\n `dict` com a resposta final\n \"\"\"\n buffer = {\n 'is_valid': False,\n 'provider': None,\n 'reason': None,\n 'acquirer_data': None\n }\n\n headers = {\n 'merchantId': MERCHANT_ID,\n 'merchantKey': MERCHANT_KEY,\n 'Content-Type': 'application/json'\n }\n\n response = get(f'{HOST_QUERY}/1/cardBin/{number[:6]}', headers=headers)\n\n if response.status_code == HTTPStatus.OK.value:\n acquirer_data = response.json()\n if acquirer_data['Status'] == '00' and acquirer_data['CardType'] in ('Crédito', 'Multiplo'):\n buffer['is_valid'] = True\n buffer['provider'] = acquirer_data['Provider']\n else:\n buffer['reason'] = '{} - {} - {}'.format(\n acquirer_data['Status'],\n BIN_ANALYSIS.get(acquirer_data['Status'], 'Status não definido'),\n acquirer_data['CardType']\n )\n\n buffer['acquirer_data'] = acquirer_data\n else:\n buffer['is_valid'] = True\n buffer['provider'] = 'Cielo'\n buffer['reason'] = 'Problemas de comunicação com a adquirente'\n buffer['acquirer_data'] = f'{response.status_code} - {response.reason}'\n\n return buffer\n\n\ndef 
authorization(transaction: str, number: str, cvv: str, brand: str, expiration: str, holder: str, value: float, installments: int, soft_description: str) -> dict:\n \"\"\"\n\n :param transaction:\n :param number:\n :param cvv:\n :param brand:\n :param expiration:\n :param holder:\n :param value:\n :param installments:\n :param soft_description:\n :return:\n \"\"\"\n transaction_obj = Transaction(transaction)\n\n headers = {\n 'merchantId': MERCHANT_ID,\n 'merchantKey': MERCHANT_KEY,\n 'Content-Type': 'application/json',\n 'RequestId': str(uuid1())\n }\n\n data_input = {\n 'MerchantOrderId': transaction,\n 'Customer': {\n 'Name': transaction_obj.consumer.full_name,\n 'Email': transaction_obj.consumer.email,\n 'Birthdate': transaction_obj.consumer.birthday.strftime('%Y-%m-%d'),\n 'Address': {\n 'Street': '{} {}'.format(transaction_obj.address.type, transaction_obj.address.full_name),\n 'Number': transaction_obj.address.number,\n 'Complement': transaction_obj.address.complement,\n 'ZipCode': transaction_obj.address.zipcode,\n 'City': transaction_obj.address.city,\n 'State': transaction_obj.address.state,\n 'Country': 'BRA'\n },\n 'DeliveryAddress': {\n 'Street': '{} {}'.format(transaction_obj.address.type, transaction_obj.address.full_name),\n 'Number': transaction_obj.address.number,\n 'Complement': transaction_obj.address.complement,\n 'ZipCode': transaction_obj.address.zipcode,\n 'City': transaction_obj.address.city,\n 'State': transaction_obj.address.state,\n 'Country': 'BRA'\n }\n },\n 'Payment': {\n 'Type': 'CreditCard',\n 'Amount': ''.join('{:.2f}'.format(value).split('.')),\n 'Currency': 'BRL',\n 'Country': 'BRA',\n 'ServiceTaxAmount': 0,\n 'Installments': installments,\n 'Interest': 'ByMerchant',\n 'Capture': False,\n 'Authenticate': False,\n 'SoftDescriptor': soft_description[:13] or 'Sofie',\n 'CreditCard': {\n 'CardNumber': number,\n 'Holder': holder,\n 'ExpirationDate': expiration,\n 'SecurityCode': cvv,\n 'SaveCard': 'false',\n 'Brand': brand\n }\n }\n 
}\n\n url = '{host}/{resource}'.format(host=HOST_REQUEST, resource='1/sales/')\n\n response = post(url, json=data_input, headers=headers)\n\n return response.json()\n\n\ndef capture(payment_id: str) -> dict:\n \"\"\"\n\n :param payment_id:\n :return:\n \"\"\"\n headers = {\n 'merchantId': MERCHANT_ID,\n 'merchantKey': MERCHANT_KEY,\n 'Content-Type': 'application/json',\n 'RequestId': str(uuid1())\n }\n\n url = '{host}/{resource}'.format(host=HOST_REQUEST, resource='1/sales/{}/capture'.format(payment_id))\n\n response = put(url, headers=headers)\n\n return response.json()\n\n\ndef cancel(payment_id: str) -> dict:\n \"\"\"\n\n :param payment_id:\n :return:\n \"\"\"\n headers = {\n 'merchantId': MERCHANT_ID,\n 'merchantKey': MERCHANT_KEY,\n 'Content-Type': 'application/json',\n 'RequestId': str(uuid1())\n }\n\n url = '{host}/{resource}'.format(host=HOST_REQUEST, resource='1/sales/{}/void'.format(payment_id))\n\n response = put(url, headers=headers)\n\n return response.json()\n","sub_path":"backend_apps/the_3rd/cielo/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":5746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"123787819","text":"from picamera import PiCamera\nimport time\n\ndef captureImage():\n\tcamera = PiCamera() \n \n\tcamera.start_preview()\n\tcamera.rotation = 90\n\ttime.sleep(1)\n\tcamera.capture('static/images/test.jpg')\n\tcamera.stop_preview()\n\nif __name__ == '__main__':\n captureImage()\n","sub_path":"registration/get_image.py","file_name":"get_image.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"354272884","text":"from flask import Blueprint, jsonify\nfrom ..models import Items\nfrom .middleware import admin_only\n\nblueprint = Blueprint('items', __name__, url_prefix=\"/api/models/items\")\n\n\n@blueprint.route('/', methods=['GET'])\n@admin_only\ndef get_all():\n items = Items.query.all()\n return jsonify([item.serialize() for item in items]), 200\n\n\n@blueprint.route('/', methods=['GET'])\n@admin_only\ndef get_by_id(item_id):\n item = Items.query.filter_by(id=item_id).first()\n return jsonify(item.serialize()), 200\n","sub_path":"DungeonAPI/blueprints/items_blueprint.py","file_name":"items_blueprint.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"561192527","text":"\"\"\"Module with functions for analyzing protein glycosylation.\n\nWritten by Jesse Bloom, 2008.\"\"\"\n\n\ndef FindNGlycanSites(headers_seqs):\n \"\"\"A function for finding potential N glycosylation sites in protein primary sequences.\n\n 'headers_seqs' is a list of sequences in the form of 2-tuples '(head, seq)' where\n 'head' is the sequence header and 'seq' is the sequence as one letter amino\n acid codes.\n The function finds all potential N-linked glycosylation sites, which are motifs\n of the form Asn-X-Ser or Asn-X-Thr where X can be any amino acid\n except proline. Any gaps in the protein sequences are ignored when looking for\n the sites, so that N-DS would be a site just like NDS.\n The returned variable is a list of the same length as 'headers_seqs'. Each entry in\n this list is another list of numbers, giving the indices (as integers ranging from\n 0 to sequence length - 1) of the Asn in the N-linked glycosylation site.\n\n >>> FindNGlycanSites([('seq1', 'MATNWSALNQT'), ('seq2', 'matnet')])\n [[3, 8], [3]]\n \n >>> FindNGlycanSites([('seq1', 'MATNPSAN-A')])\n [[]]\n \n >>> FindNGlycanSites([('seq1', 'MA--TN-DSA')])\n [[5]]\n \"\"\"\n nglycansites = []\n for (head, seq) in headers_seqs:\n seqsites = []\n seq = seq.upper()\n seqlength = len(seq)\n for ires in range(seqlength):\n if seq[ires] == 'N':\n jres = ires + 1\n while jres < seqlength and seq[jres] == '-':\n jres += 1\n if jres == seqlength:\n continue\n next = seq[jres]\n jres += 1\n while jres < seqlength and seq[jres] == '-':\n jres += 1\n if jres == seqlength:\n continue\n next2 = seq[jres]\n if next != 'P' and next2 in ['S', 'T']:\n seqsites.append(ires)\n nglycansites.append(seqsites)\n return nglycansites\n\n\n# Test with doctest\nif __name__ == '__main__':\n import doctest\n 
doctest.testmod()\n","sub_path":"src/glycosylation.py","file_name":"glycosylation.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"361355886","text":"import re\nfrom collections import defaultdict\nnodes = {}\n\ndef getNumbers():\n # return [int(num) for num in re.findall(r'\\d+', \"2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2\")]\n data = \"\"\n with open(\"Sources/08_01.txt\") as file:\n data = file.readline()\n return [int(num) for num in re.findall(r'\\d+', data)]\n\ndef getData(index, numbers):\n nodeIndex = index\n nodes[nodeIndex] = []\n childs = numbers[index]\n nodes[nodeIndex].append(childs)\n index += 1\n metaCount = numbers[index]\n index += 1\n lastIndex = index\n if childs > 0:\n for i in range(0, childs):\n nodes[nodeIndex].append(lastIndex)\n lastIndex= getData(lastIndex, numbers)\n if metaCount > 0:\n for i in range(lastIndex, lastIndex + metaCount):\n nodes[nodeIndex].append(numbers[i])\n index = i +1\n return index\n\ndef getValue(index):\n childs = nodes[index][0]\n value = 0\n if childs == 0:\n return sum(nodes[index][1:])\n else:\n childIndexes = nodes[index][nodes[index][0]+1:]\n for childIndex in childIndexes:\n if childIndex <= childs:\n value += getValue(nodes[index][childIndex])\n return value\n\ndef part2():\n numbers = getNumbers()\n getData(0, numbers)\n print(getValue(0))\n\npart2()","sub_path":"2018/08_02.py","file_name":"08_02.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"59485855","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sqlite3\nimport json\n\nwith open(\"mycfg.json\", 'r') as f:\n conn = sqlite3.connect(json.load(f)['LOCAL_DB_NM'])\n\nc = conn.cursor()\n\n# Create table\nc.execute('''CREATE TABLE rentings (post_id TEXT PRIMARY KEY, post_text TEXT,\n url TEXT, post_date INTEGER, type INTEGER, contacted INTEGER, remarks\n TEXT)''')\n\n# Save (commit) the changes\nconn.commit()\n\n# We can also close the connection if we are done with it.\n# Just be sure any changes have been committed or they will be lost.\nconn.close()\n","sub_path":"init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"174538871","text":"#!/usr/bin/env python\n#import everything we need\nimport rospy\n\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\ntwist=Twist()\nglobal align\nalign=False\n\ndef callback(msg):\n\t#why is the number always the same? the difference is always the same\n\t\n\tlissixty = [msg.ranges[60],msg.ranges[59],msg.ranges[61]]\n\t#rint msg.ranges[60]\n\tnewsixty = []\n\n\tfor elements in lissixty:\n\t\tif elements != 0:\n\t\t\tnewsixty.append(elements)\n\t\n\n\tavsixty = sum(newsixty)/float(len(newsixty))\n\n\tlistwelve = [msg.ranges[120],msg.ranges[121],msg.ranges[119]]\n\n\tnewtwelve = []\n\n\tfor k in listwelve:\n\t\tif k != 0:\n\t\t\tnewtwelve.append(k)\n\n\t#return newtwelve\n\n\tavtwelve = sum(newtwelve)/float(len(newtwelve))\n\t\n\n\tdiff = avtwelve - avsixty\n\n\t# if msg.ranges[60]==0 or msg.ranges[120]==0:\n\t# \tpass\n\tif abs(diff)>.001:\n\t\t\ttwist.linear.x=0.1\n\t\t\ttwist.angular.z=diff*(2) #RADIAN per second 6.2 is one revolution 2 pie \n\t\t\tpub.publish(twist)\n\telse:\n\t\talign=True\n\n\ndef keepStraight():\n\tif align==True:\n\n\t\ttwist.linear.x=0.1\n\t\ttwist.angular.z=0\n\t\tpub.publish(twist)\n\t\t\n\n\n\nif __name__ == \"__main__\":\n\trospy.init_node('wallFlower1')\n\t#create a function that moves the robot forward\n\t\n\tpub=rospy.Publisher(\"cmd_vel\",Twist,queue_size=5)\n\tlaserScan = rospy.Subscriber(\"/scan\",LaserScan, callback)\n\n\twhile not rospy.is_shutdown(): \n\t\tkeepStraight()\n\n\n\n\n\n\n","sub_path":"wallFlower2.py","file_name":"wallFlower2.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"82935921","text":"'''\n55. Jump Game\n\nGiven an array of non-negative integers, you are initially positioned at the first index of the array.\n\nEach element in the array represents your maximum jump length at that position.\n\nDetermine if you are able to reach the last index.\n\nFor example:\nA = [2,3,1,1,4], return true.\n\nA = [3,2,1,0,4], return false.\n'''\nclass Solution(object):\n def canJump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n if nums==None or nums==[] or nums[0]==0 and len(nums)>1:\n return False\n if len(nums) == 1:\n return True\n curr = nums[0]\n for jump in range(len(nums)):\n # ask: how to know that every step in nums can be reached?\n if curr >= len(nums) - 1:\n return True\n if jump <= curr:\n curr = max(curr, jump + nums[jump])\n return False\n\n# second attempt\nclass Solution(object):\n def canJump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n # idea: 1D array record first i elements can reach target index i O(n) => TL, go from start or backwards O(n)\n\n# sol1: from start record max reachable index\n max_reachable = 0\n for i, v in enumerate(nums):\n if i > max_reachable: return False # cannot reach index i\n max_reachable = max(max_reachable, i + v)\n return max_reachable >= len(nums) - 1\n\n\n# sol2: from back record max reachable index\n backward_traverse = len(nums) - 1\n for i in range(len(nums) - 1, -1, -1):\n if i + nums[i] >= backward_traverse:\n backward_traverse = i # backward_traverse can reach index i\n return not backward_traverse # meet index 0 from the back or not\n\n\n# sol3: 1D array slow O(n)\n dp = [True] + [False for _ in range(len(nums) - 1)] # initially positioned at the first index\n for i in range(len(nums)): # record reachable by first i elements\n if dp[i] == False:\n continue\n for j in range(1, 1 + nums[i]): # only update dp array when nums[i] is True\n if i + j + 1 >= len(nums): # can reach last index from nums[i]\n return True\n dp[i + j] = 
True\n return dp[-1]\n\nif __name__ == \"__main__\":\n nums = [1, 2]\n nums = [3,2,1,0,4]\n res = Solution().canJump(nums)\n print(res)\n\n","sub_path":"55_jumpGame_learn.py","file_name":"55_jumpGame_learn.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"248627698","text":"from scipy.interpolate import NearestNDInterpolator\nfrom constants import dpy,mid_months,bnd_months\nfrom Regions import Regions\nfrom netCDF4 import Dataset,num2date,date2num\nfrom datetime import datetime\nfrom cfunits import Units\nfrom copy import deepcopy\nfrom mpi4py import MPI\nimport numpy as np\nimport logging\n\nlogger = logging.getLogger(\"%i\" % MPI.COMM_WORLD.rank)\n\nclass VarNotInFile(Exception):\n def __str__(self): return \"VarNotInFile\"\n \nclass VarNotMonthly(Exception):\n def __str__(self): return \"VarNotMonthly\"\n\nclass VarNotInModel(Exception):\n def __str__(self): return \"VarNotInModel\"\n\nclass VarsNotComparable(Exception):\n def __str__(self): return \"VarNotComparable\"\n\nclass VarNotOnTimeScale(Exception):\n def __str__(self): return \"VarNotOnTimeScale\"\n\nclass UnknownUnit(Exception):\n def __str__(self): return \"UnknownUnit\"\n\nclass AreasNotInModel(Exception):\n def __str__(self): return \"AreasNotInModel\"\n\nclass MisplacedData(Exception):\n def __str__(self): return \"MisplacedData\"\n\nclass NotTemporalVariable(Exception):\n def __str__(self): return \"NotTemporalVariable\"\n\nclass NotSpatialVariable(Exception):\n def __str__(self): return \"NotSpatialVariable\"\n\nclass UnitConversionError(Exception):\n def __str__(self): return \"UnitConversionError\"\n\nclass AnalysisError(Exception):\n def __str__(self): return \"AnalysisError\"\n\nclass NotLayeredVariable(Exception):\n def __str__(self): return \"NotLayeredVariable\"\n \nclass NotDatasiteVariable(Exception):\n def __str__(self): return \"NotDatasiteVariable\"\n\n \ndef GenerateDistinctColors(N,saturation=0.67,value=0.67):\n r\"\"\"Generates a series of distinct colors.\n\n Computes N distinct colors using HSV color space, holding the\n saturation and value levels constant and linearly vary the\n hue. 
Colors are returned as a RGB tuple.\n\n Parameters\n ----------\n N : int\n number of distinct colors to generate\n saturation : float, optional\n argument of HSV color space\n value : float, optional\n argument of HSV color space\n\n Returns\n -------\n RGB_tuples : list\n list of N distinct RGB tuples\n \"\"\"\n from colorsys import hsv_to_rgb\n HSV_tuples = [(x/float(N), saturation, value) for x in range(N)]\n RGB_tuples = map(lambda x: hsv_to_rgb(*x), HSV_tuples)\n return RGB_tuples\n\ndef ConvertCalendar(t,tbnd=None):\n r\"\"\"Converts calendar representations to a single standard.\n\n This routine converts the representation of time to the ILAMB\n default: days since 1850-1-1 00:00:00 on a 365-day calendar. This\n is so we can make comparisons with data from other models and\n benchmarks. We use cfunits time conversion capability.\n\n Parameters\n ----------\n t : netCDF4 variable\n the netCDF4 variable which represents time\n tbnd : netCDF4 variable, optional\n the netCDF4 variable which represents the bounds of time\n\n Returns\n -------\n ta : numpy.ndarray\n a numpy array of the converted times\n tabnd : numpy.ndarray, optional\n a numpy array of the converted boundary times\n\n \"\"\"\n # If not calendar is given, we will assume it is 365_day\n unit = t.units\n if \"calendar\" in t.ncattrs():\n calendar = t.calendar.lower()\n else:\n calendar = \"365_day\"\n \n # If bounds are given, we will use those instead and later compute\n # the time as the midpoint of the bounds.\n if tbnd is None:\n ta = t\n else:\n ta = tbnd\n \n # The datum might be different, use netCDF functions to shift it\n ta = num2date(ta[...],unit ,calendar=calendar)\n ta = date2num(ta ,\"days since 1850-1-1\",calendar=calendar)\n\n # Differences in calendars need to be handled differently\n # depending on the intended temporal resolution. 
Here we introduce\n # special code for different cases.\n if tbnd is None:\n if t[...].size == 1:\n dt = 0\n else:\n dt = (ta[1:]-ta[:-1]).mean()\n else:\n dt = (ta[:,1]-ta[:,0]).mean()\n if np.allclose(dt,30,atol=3): # monthly\n\n tmid = np.copy(ta)\n if tmid.ndim > 1: tmid = ta.mean(axis=1)\n \n # Determine the month index by finding to which mid_month day\n # the middle time point is closest.\n def _dpyShift(tmid,ta,dpy):\n yrs = np.floor((tmid / float(dpy)))*365.\n ind = np.abs((tmid % float(dpy))[:,np.newaxis]-mid_months).argmin(axis=1)\n if ta.ndim == 1:\n ta = yrs + mid_months[ind]\n if ta.ndim == 2:\n ta[:,0] = yrs + bnd_months[ind]\n ta[:,1] = yrs + bnd_months[ind+1]\n return ta\n if calendar == \"360_day\":\n ta = _dpyShift(tmid,ta,360)\n elif calendar == \"366_day\":\n ta = _dpyShift(tmid,ta,366)\n elif calendar in [\"365_day\",\"noleap\"]:\n ta = _dpyShift(tmid,ta,365) \n elif calendar in [\"proleptic_gregorian\",\"gregorian\",\"standard\",\"julian\"]:\n # we can use datetime to get the Julian day and then find\n # how these line up with mid_months\n tmid = num2date(tmid,\"days since 1850-1-1\",calendar=calendar)\n yrs = [float(t.year-1850)*365. 
for t in tmid]\n tmid = [float(t.timetuple().tm_yday) for t in tmid]\n tmid = np.asarray(tmid)\n ind = np.abs(tmid[:,np.newaxis]-mid_months).argmin(axis=1)\n if ta.ndim == 1:\n ta = yrs + mid_months[ind]\n if ta.ndim == 2:\n ta[:,0] = yrs + bnd_months[ind]\n ta[:,1] = yrs + bnd_months[ind+1]\n else:\n raise ValueError(\"Unsupported calendar: %s\" % calendar)\n\n if tbnd is None: return ta\n t = ta.mean(axis=1)\n return t,ta\n\ndef CellAreas(lat,lon,lat_bnds=None,lon_bnds=None):\n \"\"\"Given arrays of latitude and longitude, return cell areas in square meters.\n\n Parameters\n ----------\n lat : numpy.ndarray\n a 1D array of latitudes which represent cell centroids\n lon : numpy.ndarray\n a 1D array of longitudes which represent cell centroids\n\n Returns\n -------\n areas : numpy.ndarray\n a 2D array of cell areas in [m2]\n \"\"\"\n from constants import earth_rad\n\n if (lat_bnds is not None and lon_bnds is not None):\n return earth_rad**2*np.outer((np.sin(lat_bnds[:,1]*np.pi/180.)-\n np.sin(lat_bnds[:,0]*np.pi/180.)),\n (lon_bnds[:,1]-lon_bnds[:,0])*np.pi/180.)\n\n x = np.zeros(lon.size+1)\n x[1:-1] = 0.5*(lon[1:]+lon[:-1])\n x[ 0] = lon[ 0]-0.5*(lon[ 1]-lon[ 0])\n x[-1] = lon[-1]+0.5*(lon[-1]-lon[-2])\n if(x.max() > 181): x -= 180\n x = x.clip(-180,180)\n x *= np.pi/180.\n\n y = np.zeros(lat.size+1)\n y[1:-1] = 0.5*(lat[1:]+lat[:-1])\n y[ 0] = lat[ 0]-0.5*(lat[ 1]-lat[ 0])\n y[-1] = lat[-1]+0.5*(lat[-1]-lat[-2])\n y = y.clip(-90,90)\n y *= np.pi/180.\n\n dx = earth_rad*(x[1:]-x[:-1])\n dy = earth_rad*(np.sin(y[1:])-np.sin(y[:-1]))\n areas = np.outer(dx,dy).T\n\n return areas\n\ndef GlobalLatLonGrid(res,**keywords):\n r\"\"\"Generates a latitude/longitude grid at a desired resolution\n \n Computes 1D arrays of latitude and longitude values which\n correspond to cell interfaces and centroids at a given resolution.\n\n Parameters\n ----------\n res : float\n the desired resolution of the grid in degrees\n from_zero : boolean\n sets longitude convention { 
True:(0,360), False:(-180,180) }\n\n Returns\n -------\n lat_bnd : numpy.ndarray\n a 1D array of latitudes which represent cell interfaces\n lon_bnd : numpy.ndarray\n a 1D array of longitudes which represent cell interfaces\n lat : numpy.ndarray\n a 1D array of latitudes which represent cell centroids\n lon : numpy.ndarray\n a 1D array of longitudes which represent cell centroids\n \"\"\"\n from_zero = keywords.get(\"from_zero\",False)\n res_lat = keywords.get(\"res_lat\",res)\n res_lon = keywords.get(\"res_lon\",res)\n nlon = int(360./res_lon)+1\n nlat = int(180./res_lat)+1\n lon_bnd = np.linspace(-180,180,nlon)\n if from_zero: lon_bnd += 180\n lat_bnd = np.linspace(-90,90,nlat)\n lat = 0.5*(lat_bnd[1:]+lat_bnd[:-1])\n lon = 0.5*(lon_bnd[1:]+lon_bnd[:-1])\n return lat_bnd,lon_bnd,lat,lon\n\ndef NearestNeighborInterpolation(lat1,lon1,data1,lat2,lon2):\n r\"\"\"Interpolates globally grided data at another resolution\n\n Parameters\n ----------\n lat1 : numpy.ndarray\n a 1D array of latitudes of cell centroids corresponding to the \n source data\n lon1 : numpy.ndarray\n a 1D array of longitudes of cell centroids corresponding to the \n source data\n data1 : numpy.ndarray\n an array of data to be interpolated of shape = (lat1.size,lon1.size,...)\n lat2 : numpy.ndarray\n a 1D array of latitudes of cell centroids corresponding to the \n target resolution\n lon2 : numpy.ndarray\n a 1D array of longitudes of cell centroids corresponding to the \n target resolution\n\n Returns\n -------\n data2 : numpy.ndarray\n an array of interpolated data of shape = (lat2.size,lon2.size,...)\n \"\"\"\n rows = np.apply_along_axis(np.argmin,1,np.abs(lat2[:,np.newaxis]-lat1))\n cols = np.apply_along_axis(np.argmin,1,np.abs(lon2[:,np.newaxis]-lon1))\n data2 = data1[np.ix_(rows,cols)]\n return data2\n \ndef TrueError(lat1_bnd,lon1_bnd,lat1,lon1,data1,lat2_bnd,lon2_bnd,lat2,lon2,data2):\n r\"\"\"Computes the pointwise difference between two sets of gridded data\n\n To obtain the pointwise 
error we populate a list of common cell\n interfaces and then interpolate both input arrays to the composite\n grid resolution using nearest-neighbor interpolation.\n\n Parameters\n ----------\n lat1_bnd, lon1_bnd, lat1, lon1 : numpy.ndarray\n 1D arrays corresponding to the latitude/longitudes of the cell \n interfaces/centroids\n data1 : numpy.ndarray\n an array of data to be interpolated of shape = (lat1.size,lon1.size,...)\n lat2_bnd, lon2_bnd, lat2, lon2 : numpy.ndarray\n 1D arrays corresponding to the latitude/longitudes of the cell \n interfaces/centroids\n data2 : numpy.ndarray\n an array of data to be interpolated of shape = (lat2.size,lon2.size,...)\n\n Returns\n -------\n lat_bnd, lon_bnd, lat, lon : numpy.ndarray\n 1D arrays corresponding to the latitude/longitudes of the cell \n interfaces/centroids of the resulting error\n error : numpy array\n an array of the pointwise error of shape = (lat.size,lon.size,...)\n \"\"\"\n # combine limits, sort and remove duplicates\n lat_bnd = np.hstack((lat1_bnd,lat2_bnd)); lat_bnd.sort(); lat_bnd = np.unique(lat_bnd)\n lon_bnd = np.hstack((lon1_bnd,lon2_bnd)); lon_bnd.sort(); lon_bnd = np.unique(lon_bnd)\n\n # need centroids of new grid for nearest-neighbor interpolation\n lat = 0.5*(lat_bnd[1:]+lat_bnd[:-1])\n lon = 0.5*(lon_bnd[1:]+lon_bnd[:-1])\n\n # interpolate datasets at new grid\n d1 = NearestNeighborInterpolation(lat1,lon1,data1,lat,lon)\n d2 = NearestNeighborInterpolation(lat2,lon2,data2,lat,lon)\n \n # relative to the first grid/data\n error = d2-d1\n return lat_bnd,lon_bnd,lat,lon,error\n\ndef SympifyWithArgsUnits(expression,args,units):\n \"\"\"Uses symbolic algebra to determine the final unit of an expression.\n \n Parameters\n ----------\n expression : str\n the expression whose units you wish to simplify\n args : dict\n a dictionary of numpy arrays whose keys are the\n variables written in the input expression\n units : dict\n a dictionary of strings representing units whose keys are the\n variables 
written in the input expression\n\n \"\"\"\n from sympy import sympify,postorder_traversal\n\n # The traversal needs that we make units commensurate when\n # possible\n keys = args.keys()\n for i in range(len(keys)):\n ikey = keys[i]\n for j in range(i+1,len(keys)):\n jkey = keys[j]\n if Units(units[jkey]).equivalent(Units(units[ikey])):\n args [jkey] = Units.conform(args[jkey],\n Units(units[jkey]),\n Units(units[ikey]),\n inplace=True)\n units[jkey] = units[ikey]\n \n # We need to do what sympify does but also with unit\n # conversions. So we traverse the expression tree in post order\n # and take actions based on the kind of operation being performed.\n expression = sympify(expression)\n for expr in postorder_traversal(expression):\n \n if expr.is_Atom: continue \n ekey = str(expr) # expression key\n \n if expr.is_Add:\n\n # Addition will require that all args should be the same\n # unit. As a convention, we will try to conform all units\n # to the first variable's units. \n key0 = None\n for arg in expr.args:\n key = str(arg)\n if not args.has_key(key): continue\n if key0 is None:\n key0 = key\n else:\n # Conform these units to the units of the first arg\n Units.conform(args[key],\n Units(units[key]),\n Units(units[key0]),\n inplace=True)\n units[key] = units[key0]\n\n args [ekey] = sympify(str(expr),locals=args)\n units[ekey] = units[key0]\n\n elif expr.is_Pow:\n\n assert len(expr.args) == 2 # check on an assumption\n power = float(expr.args[1])\n args [ekey] = args[str(expr.args[0])]**power\n units[ekey] = Units(units[str(expr.args[0])])\n units[ekey] = units[ekey]**power\n \n elif expr.is_Mul:\n\n unit = Units(\"1\")\n for arg in expr.args:\n key = str(arg)\n if units.has_key(key): unit *= Units(units[key])\n \n args [ekey] = sympify(str(expr),locals=args)\n units[ekey] = Units(unit).formatted()\n\n return args[ekey],units[ekey]\n\ndef ComputeIndexingArrays(lat2d,lon2d,lat,lon):\n \"\"\"Blah.\n\n Parameters\n ----------\n lat : numpy.ndarray\n A 1D array of 
latitudes of cell centroids\n lon : numpy.ndarray\n A 1D array of longitudes of cell centroids\n\n \"\"\"\n # Prepare the interpolator\n points = np.asarray([lat2d.flatten(),lon2d.flatten()]).T\n values = np.asarray([(np.arange(lat2d.shape[0])[:,np.newaxis]*np.ones (lat2d.shape[1])).flatten(),\n (np.ones (lat2d.shape[0])[:,np.newaxis]*np.arange(lat2d.shape[1])).flatten()]).T\n fcn = NearestNDInterpolator(points,values)\n LAT,LON = np.meshgrid(lat,lon,indexing='ij')\n gmap = fcn(LAT.flatten(),LON.flatten()).astype(int)\n return gmap[:,0].reshape(LAT.shape),gmap[:,1].reshape(LAT.shape)\n\ndef FromNetCDF4(filename,variable_name,alternate_vars=[],t0=None,tf=None,group=None):\n \"\"\"Extracts data from a netCDF4 datafile for use in a Variable object.\n \n Intended to be used inside of the Variable constructor. Some of\n the return arguments will be None depending on the contents of the\n netCDF4 file.\n\n Parameters\n ----------\n filename : str\n Name of the netCDF4 file from which to extract a variable\n variable_name : str\n Name of the variable to extract from the netCDF4 file\n alternate_vars : list of str, optional\n A list of possible alternate variable names to find\n t0 : float, optional\n If temporal, specifying the initial time can reduce memory\n usage and speed up access time.\n tf : float, optional\n If temporal, specifying the final time can reduce memory\n usage and speed up access time.\n\n Returns\n -------\n data : numpy.ma.masked_array\n The array which contains the data which constitutes the variable\n unit : str\n The unit of the input data\n name : str\n The name of the variable, will be how it is saved in an output netCDF4 file\n time : numpy.ndarray\n A 1D array of times in days since 1850-01-01 00:00:00\n time_bnds : numpy.ndarray\n A 1D array of time bounds in days since 1850-01-01 00:00:00\n lat : numpy.ndarray\n A 1D array of latitudes of cell centroids\n lon : numpy.ndarray\n A 1D array of longitudes of cell centroids\n area : 
numpy.ndarray\n A 2D array of the cell areas\n ndata : int\n Number of data sites this data represents\n depth_bnds : numpy.ndarray\n A 1D array of the depth boundaries of each cell\n \"\"\"\n try:\n dset = Dataset(filename,mode=\"r\")\n if group is None:\n grp = dset\n else:\n grp = dset.groups[group]\n except RuntimeError:\n raise RuntimeError(\"Unable to open the file: %s\" % filename)\n\n found = False\n if variable_name in grp.variables.keys():\n found = True\n var = grp.variables[variable_name]\n else:\n while alternate_vars.count(None) > 0: alternate_vars.pop(alternate_vars.index(None))\n for var_name in alternate_vars:\n if var_name in grp.variables.keys():\n found = True\n var = grp.variables[var_name]\n if found == False:\n alternate_vars.insert(0,variable_name)\n raise RuntimeError(\"Unable to find [%s] in the file: %s\" % (\",\".join(alternate_vars),filename))\n\n # Check on dimensions\n time_name = [name for name in var.dimensions if \"time\" in name.lower()]\n lat_name = [name for name in var.dimensions if \"lat\" in name.lower()]\n lon_name = [name for name in var.dimensions if \"lon\" in name.lower()]\n data_name = [name for name in var.dimensions if \"data\" in name.lower()]\n missed = [name for name in var.dimensions if name not in (time_name +\n lat_name +\n lon_name +\n data_name)]\n\n # Lat/lon might be indexing arrays, find their shape\n shp = None\n if (len(lat_name) == 0 and len(lon_name) == 0 and len(missed) >= 2 and len(data_name) == 0):\n # remove these dimensions from the missed variables\n i,j = var.dimensions[-2],var.dimensions[-1]\n if i in missed: missed.pop(missed.index(i))\n if j in missed: missed.pop(missed.index(j))\n i = grp.variables[i]\n j = grp.variables[j]\n if (np.issubdtype(i.dtype,np.integer) and\n np.issubdtype(j.dtype,np.integer)): shp = [len(i),len(j)]\n\n # Lat/lon might just be sizes\n if (len(lat_name) == 1 and len(lon_name) == 1):\n if not (lat_name[0] in grp.variables and lon_name[0] in grp.variables):\n shp = 
[len(grp.dimensions[lat_name[0]]),len(grp.dimensions[lon_name[0]])]\n\n # If these were sizes, then we need to find the correct 2D lat/lon arrays\n if shp is not None:\n\n # We want to remove any false positives we might find. I don't\n # want to consider variables which are 'bounds' or dimensions\n # of others, nor those that don't have the correct shape.\n bnds = [grp.variables[v].bounds for v in grp.variables if \"bounds\" in grp.variables[v].ncattrs()]\n dims = [v for v in grp.variables if (v in grp.dimensions)]\n poss = [v for v in grp.variables if (v not in dims and\n v not in bnds and\n np.allclose(shp,grp.variables[v].shape) if len(shp) == len(grp.variables[v].shape) else False)]\n lat_name = [name for name in poss if \"lat\" in name.lower()]\n lon_name = [name for name in poss if \"lon\" in name.lower()]\n \n # If still ambiguous, look inside the variable attributes for\n # the presence of the variable name to give further\n # preference.\n attrs = [str(var.getncattr(attr)) for attr in var.ncattrs()]\n if len(lat_name) == 0: raise ValueError(\"Unable to find values for the latitude dimension in %s\" % (filename))\n if len(lat_name) > 1:\n tmp_name = [name for name in lat_name if np.any([name in attr for attr in attrs])]\n if len(tmp_name) > 0: lat_name = tmp_name\n if len(lon_name) == 0: raise ValueError(\"Unable to find values for the longitude dimension in %s\" % (filename))\n if len(lon_name) > 1:\n tmp_name = [name for name in lon_name if np.any([name in attr for attr in attrs])]\n if len(tmp_name) > 0: lon_name = tmp_name\n\n # Time dimension\n if len(time_name) == 1:\n time_name = time_name[0]\n time_bnd_name = grp.variables[time_name].bounds if (time_name in grp.variables and\n \"bounds\" in grp.variables[time_name].ncattrs()) else None\n if time_bnd_name not in grp.variables: time_bnd_name = None\n elif len(time_name) >= 1:\n raise ValueError(\"Ambiguous choice of values for the time dimension [%s] in %s\" % (\",\".join(time_name),filename))\n 
else:\n time_name = None\n time_bnd_name = None\n\n # Lat dimension\n if len(lat_name) == 1:\n lat_name = lat_name[0]\n lat_bnd_name = grp.variables[lat_name].bounds if (lat_name in grp.variables and\n \"bounds\" in grp.variables[lat_name].ncattrs()) else None\n if lat_bnd_name not in grp.variables: lat_bnd_name = None\n elif len(lat_name) >= 1:\n raise ValueError(\"Ambiguous choice of values for the latitude dimension [%s] in %s\" % (\",\".join(lat_name),filename))\n else:\n lat_name = None\n lat_bnd_name = None\n\n # Lon dimension\n if len(lon_name) == 1:\n lon_name = lon_name[0]\n lon_bnd_name = grp.variables[lon_name].bounds if (lon_name in grp.variables and\n \"bounds\" in grp.variables[lon_name].ncattrs()) else None\n if lon_bnd_name not in grp.variables: lon_bnd_name = None\n elif len(lon_name) >= 1:\n raise ValueError(\"Ambiguous choice of values for the longitude dimension [%s] in %s\" % (\",\".join(lon_name),filename))\n else:\n lon_name = None\n lon_bnd_name = None\n\n # Data dimension\n if len(data_name) == 1:\n data_name = data_name[0]\n elif len(data_name) >= 1:\n raise ValueError(\"Ambiguous choice of values for the data dimension [%s] in %s\" % (\",\".join(data_name),filename))\n else:\n data_name = None\n\n # The layered dimension is whatever is leftover since its name\n # could be many things\n if len(missed) == 1:\n depth_name = missed[0]\n depth_bnd_name = grp.variables[depth_name].bounds if (depth_name in grp.variables and\n \"bounds\" in grp.variables[depth_name].ncattrs()) else None\n if depth_bnd_name not in grp.variables: depth_bnd_name = None\n elif len(missed) >= 1:\n raise ValueError(\"Ambiguous choice of values for the layered dimension [%s] in %s\" % (\",\".join(missed),filename))\n else:\n depth_name = None\n depth_bnd_name = None\n \n # Based on present values, get dimensions and bounds\n t = None; t_bnd = None\n lat = None; lat_bnd = None\n lon = None; lon_bnd = None\n depth = None; depth_bnd = None\n data = None;\n cbounds = None\n 
if time_name is not None:\n if time_bnd_name is None:\n t = ConvertCalendar(grp.variables[time_name])\n else:\n t,t_bnd = ConvertCalendar(grp.variables[time_name],grp.variables[time_bnd_name])\n if \"climatology\" in grp.variables[time_name].ncattrs():\n cbounds = grp.variables[grp.variables[time_name].climatology]\n if not np.allclose(cbounds.shape,[12,2]):\n raise RuntimeError(\"ILAMB only supports annual cycle style climatologies\")\n cbounds = np.round(cbounds[0,:]/365.+1850.)\n if lat_name is not None: lat = grp.variables[lat_name] [...]\n if lat_bnd_name is not None: lat_bnd = grp.variables[lat_bnd_name] [...]\n if lon_name is not None: lon = grp.variables[lon_name] [...]\n if lon_bnd_name is not None: lon_bnd = grp.variables[lon_bnd_name] [...]\n if depth_name is not None:\n dunit = None\n if \"units\" in grp.variables[depth_name].ncattrs(): dunit = grp.variables[depth_name].units\n depth = grp.variables[depth_name][...]\n if depth_bnd_name is not None:\n depth_bnd = grp.variables[depth_bnd_name][...]\n if dunit is not None:\n if not Units(dunit).equivalent(Units(\"m\")):\n raise ValueError(\"Non-linear units [%s] of the layered dimension [%s] in %s\" % (dunit,depth_name,filename))\n depth = Units.conform(depth,Units(dunit),Units(\"m\"),inplace=True)\n if depth_bnd is not None:\n depth_bnd = Units.conform(depth_bnd,Units(dunit),Units(\"m\"),inplace=True)\n \n if data_name is not None:\n data = len(grp.dimensions[data_name])\n # if we have data sites, there may be lat/lon data to come\n # along with them although not a dimension of the variable\n for key in grp.variables.keys():\n if \"lat\" in key: lat_name = key\n if \"lon\" in key: lon_name = key\n if lat_name is not None: lat = grp.variables[lat_name][...]\n if lon_name is not None: lon = grp.variables[lon_name][...]\n if lat.size != data: lat = None\n if lon.size != data: lon = None\n\n # read in data array, roughly subset in time if bounds are\n # provided for added effciency\n if (t is not None) and 
(t0 is not None or tf is not None):\n begin = 0; end = t.size\n if t0 is not None: begin = max(t.searchsorted(t0)-1,begin)\n if tf is not None: end = min(t.searchsorted(tf)+1,end)\n v = var[begin:end,...]\n t = t [begin:end]\n if t_bnd is not None:\n t_bnd = t_bnd[begin:end,:]\n else:\n v = var[...]\n\n # If lat and lon are 2D, then we will need to interpolate things\n if lat is not None and lon is not None:\n if lat.ndim == 2 and lon.ndim == 2:\n assert lat.shape == lon.shape\n \n # Create the grid\n res = 1.0\n lat_bnds = np.arange(round(lat.min(),0),\n round(lat.max(),0)+res/2.,res)\n lon_bnds = np.arange(round(lon.min(),0),\n round(lon.max(),0)+res/2.,res)\n lats = 0.5*(lat_bnds[:-1]+lat_bnds[1:])\n lons = 0.5*(lon_bnds[:-1]+lon_bnds[1:])\n ilat,ilon = ComputeIndexingArrays(lat,lon,lats,lons)\n r = np.sqrt( (lat[ilat,ilon]-lats[:,np.newaxis])**2 +\n (lon[ilat,ilon]-lons[np.newaxis,:])**2 )\n v = v[...,ilat,ilon]\n v = np.ma.masked_array(v,mask=v.mask+(r>2*res))\n lat = lats\n lon = lons\n lat_bnd = np.zeros((lat.size,2))\n lat_bnd[:,0] = lat_bnds[:-1]\n lat_bnd[:,1] = lat_bnds[+1:]\n lon_bnd = lon_bnds\n lon_bnd = np.zeros((lon.size,2))\n lon_bnd[:,0] = lon_bnds[:-1]\n lon_bnd[:,1] = lon_bnds[+1:]\n \n # handle incorrect or absent masking of arrays\n if type(v) != type(np.ma.empty(1)):\n mask = np.zeros(v.shape,dtype=int)\n if \"_FillValue\" in var.ncattrs(): mask += (np.abs(v-var._FillValue )<1e-12)\n if \"missing_value\" in var.ncattrs(): mask += (np.abs(v-var.missing_value)<1e-12)\n v = np.ma.masked_array(v,mask=mask,copy=False)\n\n # handle units problems that cfunits doesn't\n if \"units\" in var.ncattrs():\n units = var.units.replace(\"unitless\",\"1\")\n else:\n units = \"1\"\n dset.close()\n \n return v,units,variable_name,t,t_bnd,lat,lat_bnd,lon,lon_bnd,depth,depth_bnd,cbounds,data\n \ndef Score(var,normalizer,FC=0.999999):\n \"\"\"Remaps a normalized variable to the interval [0,1].\n\n Parameters\n ----------\n var : ILAMB.Variable.Variable\n The 
variable to normalize, usually represents an error of some sort\n normalizer : ILAMB.Variable.Variable\n The variable by which we normalize \n \"\"\"\n from Variable import Variable\n name = var.name.replace(\"bias\",\"bias_score\")\n name = name.replace(\"diff\",\"diff_score\")\n name = name.replace(\"rmse\",\"rmse_score\")\n name = name.replace(\"iav\" ,\"iav_score\")\n np.seterr(over='ignore',under='ignore')\n\n data = None\n if \"bias\" in var.name or \"diff\" in var.name:\n deno = np.ma.copy(normalizer.data)\n if (deno.size - deno.mask.sum()) > 1: deno -= deno.min()*FC \n data = np.exp(-np.abs(var.data/deno))\n elif \"rmse\" in var.name:\n data = np.exp(-var.data/normalizer.data)\n elif \"iav\" in var.name:\n data = np.exp(-np.abs(var.data/normalizer.data))\n data[data<1e-16] = 0.\n np.seterr(over='raise',under='raise')\n return Variable(name = name,\n data = data,\n unit = \"1\",\n ndata = var.ndata,\n lat = var.lat, lat_bnds = var.lat_bnds,\n lon = var.lon, lon_bnds = var.lon_bnds,\n area = var.area)\n\ndef ComposeSpatialGrids(var1,var2):\n \"\"\"Creates a grid which conforms the boundaries of both variables.\n \n This routine takes the union of the latitude and longitude\n cell boundaries of both grids and returns a new set of\n latitudes and longitudes which represent cell centers of the\n new grid.\n \n Parameters\n ----------\n var1,var2 : ILAMB.Variable.Variable\n The two variables for which we wish to find a common grid\n \n Returns\n -------\n lat : numpy.ndarray\n a 1D array of latitudes of cell centroids\n lon : numpy.ndarray\n a 1D array of longitudes of cell centroids\n \"\"\"\n if not var1.spatial: il.NotSpatialVariable()\n if not var2.spatial: il.NotSpatialVariable()\n def _make_bnds(x):\n bnds = np.zeros(x.size+1)\n bnds[1:-1] = 0.5*(x[1:]+x[:-1])\n bnds[ 0] = max(x[ 0]-0.5*(x[ 1]-x[ 0]),-180)\n bnds[-1] = min(x[-1]+0.5*(x[-1]-x[-2]),+180)\n return bnds\n lat1_bnd = _make_bnds(var1.lat)\n lon1_bnd = _make_bnds(var1.lon)\n lat2_bnd = 
_make_bnds(var2.lat)\n lon2_bnd = _make_bnds(var2.lon)\n lat_bnd = np.hstack((lat1_bnd,lat2_bnd)); lat_bnd.sort(); lat_bnd = np.unique(lat_bnd)\n lon_bnd = np.hstack((lon1_bnd,lon2_bnd)); lon_bnd.sort(); lon_bnd = np.unique(lon_bnd)\n lat = 0.5*(lat_bnd[1:]+lat_bnd[:-1])\n lon = 0.5*(lon_bnd[1:]+lon_bnd[:-1])\n return lat,lon\n\ndef ScoreSeasonalCycle(phase_shift):\n \"\"\"Computes the seasonal cycle score from the phase shift.\n\n Possible remove this function as we do not compute other score\n components via a ilamblib function.\n \"\"\"\n from Variable import Variable\n return Variable(data = (1+np.cos(np.abs(phase_shift.data)/365*2*np.pi))*0.5,\n unit = \"1\",\n name = phase_shift.name.replace(\"phase_shift\",\"phase_shift_score\"),\n ndata = phase_shift.ndata,\n lat = phase_shift.lat, lat_bnds = phase_shift.lat_bnds,\n lon = phase_shift.lon, lon_bnds = phase_shift.lon_bnds,\n area = phase_shift.area)\n\ndef _composeGrids(v1,v2):\n lat_bnds = np.unique(np.hstack([v1.lat_bnds.flatten(),v2.lat_bnds.flatten()]))\n lon_bnds = np.unique(np.hstack([v1.lon_bnds.flatten(),v2.lon_bnds.flatten()]))\n lat_bnds = lat_bnds[(lat_bnds>=- 90)*(lat_bnds<=+ 90)]\n lon_bnds = lon_bnds[(lon_bnds>=-180)*(lon_bnds<=+180)]\n lat_bnds = np.vstack([lat_bnds[:-1],lat_bnds[+1:]]).T\n lon_bnds = np.vstack([lon_bnds[:-1],lon_bnds[+1:]]).T\n lat = lat_bnds.mean(axis=1)\n lon = lon_bnds.mean(axis=1)\n return lat,lon,lat_bnds,lon_bnds\n\ndef AnalysisMeanState(ref,com,**keywords):\n \"\"\"Perform a mean state analysis.\n\n This mean state analysis examines the model mean state in space\n and time. We compute the mean variable value over the time period\n at each spatial cell or data site as appropriate, as well as the\n bias and RMSE relative to the observational variable. We will\n output maps of the period mean values and bias. 
For each spatial\n cell or data site we also estimate the phase of the variable by\n finding the mean time of year when the maximum occurs and the\n phase shift by computing the difference in phase with respect to\n the observational variable. In the spatial dimension, we compute a\n spatial mean for each of the desired regions and an average annual\n cycle. \n\n Parameters\n ----------\n obs : ILAMB.Variable.Variable\n the observational (reference) variable\n mod : ILAMB.Variable.Variable\n the model (comparison) variable\n regions : list of str, optional\n the regions overwhich to apply the analysis\n dataset : netCDF4.Dataset, optional\n a open dataset in write mode for caching the results of the\n analysis which pertain to the model\n benchmark_dataset : netCDF4.Dataset, optional\n a open dataset in write mode for caching the results of the\n analysis which pertain to the observations\n space_mean : bool, optional\n disable to compute sums of the variable over space instead of\n mean values\n table_unit : str, optional\n the unit to use when displaying output in tables on the HTML page\n plots_unit : str, optional\n the unit to use when displaying output on plots on the HTML page\n\n \"\"\"\n from Variable import Variable\n regions = keywords.get(\"regions\" ,[\"global\"])\n dataset = keywords.get(\"dataset\" ,None)\n benchmark_dataset = keywords.get(\"benchmark_dataset\",None)\n space_mean = keywords.get(\"space_mean\" ,True)\n table_unit = keywords.get(\"table_unit\" ,None)\n plot_unit = keywords.get(\"plot_unit\" ,None)\n mass_weighting = keywords.get(\"mass_weighting\" ,False)\n skip_rmse = keywords.get(\"skip_rmse\" ,False)\n skip_iav = keywords.get(\"skip_iav\" ,False)\n skip_cycle = keywords.get(\"skip_cycle\" ,False)\n ILAMBregions = Regions()\n spatial = ref.spatial\n normalizer = None\n \n # Only study the annual cycle if it makes sense\n if not ref.monthly: skip_cycle = True\n if ref.time.size < 12: skip_cycle = True\n\n # We find \n if spatial:\n 
lat,lon,lat_bnds,lon_bnds = _composeGrids(ref,com)\n REF = ref.interpolate(lat=lat,lon=lon,lat_bnds=lat_bnds,lon_bnds=lon_bnds)\n COM = com.interpolate(lat=lat,lon=lon,lat_bnds=lat_bnds,lon_bnds=lon_bnds)\n \n # We find the mean values over the time period on the original\n # grid/datasites of each dataset\n ref_timeint = ref.integrateInTime(mean=True)\n com_timeint = com.integrateInTime(mean=True)\n if spatial:\n \n REF_timeint = REF.integrateInTime(mean=True)\n COM_timeint = COM.integrateInTime(mean=True)\n\n # Masks \n ref_mask = REF_timeint.data.mask\n com_mask = COM_timeint.data.mask\n ref_and_com = (ref_mask == False) * (com_mask == False)\n ref_not_com = (ref_mask == False) * (com_mask == True )\n com_not_ref = (ref_mask == True ) * (com_mask == False) \n ref_and_COM = Variable(name = \"ref_and_COM\", unit = ref.unit,\n data = np.ma.masked_array(COM_timeint.data,mask=(ref_and_com==False)),\n lat = lat, lat_bnds = lat_bnds,\n lon = lon, lon_bnds = lon_bnds,\n area = COM_timeint.area)\n COM_not_ref = Variable(name = \"COM_not_ref\", unit = ref.unit,\n data = np.ma.masked_array(COM_timeint.data,mask=(com_not_ref==False)),\n lat = lat, lat_bnds = lat_bnds,\n lon = lon, lon_bnds = lon_bnds,\n area = COM_timeint.area)\n REF_and_com = Variable(name = \"REF_and_com\", unit = REF.unit,\n data = np.ma.masked_array(REF_timeint.data,mask=(ref_and_com==False)),\n lat = lat, lat_bnds = lat_bnds,\n lon = lon, lon_bnds = lon_bnds,\n area = REF_timeint.area)\n REF_not_com = Variable(name = \"REF_not_com\", unit = REF.unit,\n data = np.ma.masked_array(REF_timeint.data,mask=(ref_not_com==False)),\n lat = lat, lat_bnds = lat_bnds,\n lon = lon, lon_bnds = lon_bnds,\n area = REF_timeint.area)\n \n # Apply intersection mask\n REF.data.mask += np.ones(REF.time.size,dtype=bool)[:,np.newaxis,np.newaxis] * (ref_and_com==False)\n COM.data.mask += np.ones(COM.time.size,dtype=bool)[:,np.newaxis,np.newaxis] * (ref_and_com==False)\n REF_timeint.data.mask = (ref_and_com==False)\n 
COM_timeint.data.mask = (ref_and_com==False)\n \n else:\n \n REF = ref\n COM = com\n REF_timeint = ref_timeint\n COM_timeint = com_timeint\n if mass_weighting: normalizer = REF_timeint.data\n \n # Compute the bias, RMSE, and RMS maps using the interpolated\n # quantities\n bias = REF_timeint.bias(COM_timeint)\n bias_score_map = Score(bias,REF_timeint)\n if spatial:\n bias_score_map.data.mask = (ref_and_com==False) # for some reason I need to explicitly force the mask\n if not skip_rmse:\n rmse = REF.rmse(COM)\n rms = REF.rms ()\n rmse_score_map = Score(rmse,rms)\n\n # The phase shift comes from the interpolated quantities\n if not skip_cycle:\n ref_cycle = REF.annualCycle()\n com_cycle = COM.annualCycle()\n ref_maxt_map = ref_cycle.timeOfExtrema(etype=\"max\")\n com_maxt_map = com_cycle.timeOfExtrema(etype=\"max\")\n shift_map = ref_maxt_map.phaseShift(com_maxt_map)\n shift_score_map = ScoreSeasonalCycle(shift_map)\n shift_map.data /= 30.; shift_map.unit = \"months\"\n \n # Scalars\n ref_period_mean = {}; ref_spaceint = {}; ref_mean_cycle = {}; ref_dtcycle = {}\n com_period_mean = {}; com_spaceint = {}; com_mean_cycle = {}; com_dtcycle = {}\n bias_val = {}; bias_score = {}; rmse_val = {}; rmse_score = {}\n space_std = {}; space_cor = {}; sd_score = {}; shift = {}; shift_score = {}\n ref_union_mean = {}; ref_comp_mean = {}\n com_union_mean = {}; com_comp_mean = {}\n for region in regions:\n if spatial:\n ref_period_mean[region] = ref_timeint .integrateInSpace(region=region,mean=space_mean)\n ref_union_mean [region] = REF_and_com .integrateInSpace(region=region,mean=space_mean)\n com_union_mean [region] = ref_and_COM .integrateInSpace(region=region,mean=space_mean)\n ref_comp_mean [region] = REF_not_com .integrateInSpace(region=region,mean=space_mean)\n com_comp_mean [region] = COM_not_ref .integrateInSpace(region=region,mean=space_mean)\n ref_spaceint [region] = REF .integrateInSpace(region=region,mean=True)\n com_period_mean[region] = com_timeint 
.integrateInSpace(region=region,mean=space_mean)\n com_spaceint [region] = COM .integrateInSpace(region=region,mean=True)\n bias_val [region] = bias .integrateInSpace(region=region,mean=True)\n bias_score [region] = bias_score_map .integrateInSpace(region=region,mean=True,weight=normalizer)\n if not skip_cycle:\n ref_mean_cycle[region] = ref_cycle .integrateInSpace(region=region,mean=True)\n ref_dtcycle [region] = deepcopy(ref_mean_cycle[region])\n ref_dtcycle [region].data -= ref_mean_cycle[region].data.mean()\n com_mean_cycle[region] = com_cycle .integrateInSpace(region=region,mean=True)\n com_dtcycle [region] = deepcopy(com_mean_cycle[region])\n com_dtcycle [region].data -= com_mean_cycle[region].data.mean() \n shift [region] = shift_map .integrateInSpace(region=region,mean=True,intabs=True)\n shift_score [region] = shift_score_map.integrateInSpace(region=region,mean=True,weight=normalizer) \n if not skip_rmse:\n rmse_val [region] = rmse .integrateInSpace(region=region,mean=True)\n rmse_score [region] = rmse_score_map .integrateInSpace(region=region,mean=True,weight=normalizer)\n space_std[region],space_cor[region],sd_score[region] = REF_timeint.spatialDistribution(COM_timeint,region=region)\n else:\n ref_period_mean[region] = ref_timeint .siteStats(region=region)\n ref_spaceint [region] = ref .siteStats(region=region) \n com_period_mean[region] = com_timeint .siteStats(region=region)\n com_spaceint [region] = com .siteStats(region=region)\n bias_val [region] = bias .siteStats(region=region)\n bias_score [region] = bias_score_map .siteStats(region=region,weight=normalizer)\n if not skip_cycle:\n ref_mean_cycle [region] = ref_cycle .siteStats(region=region)\n ref_dtcycle [region] = deepcopy(ref_mean_cycle[region])\n ref_dtcycle [region].data -= ref_mean_cycle[region].data.mean()\n com_mean_cycle [region] = com_cycle .siteStats(region=region)\n com_dtcycle [region] = deepcopy(com_mean_cycle[region])\n com_dtcycle [region].data -= 
com_mean_cycle[region].data.mean()\n shift [region] = shift_map .siteStats(region=region,intabs=True)\n shift_score [region] = shift_score_map.siteStats(region=region,weight=normalizer)\n if not skip_rmse:\n rmse_val [region] = rmse .siteStats(region=region)\n rmse_score [region] = rmse_score_map .siteStats(region=region,weight=normalizer)\n \n ref_period_mean[region].name = \"Period Mean (original grids) %s\" % (region)\n ref_spaceint [region].name = \"spaceint_of_%s_over_%s\" % (ref.name,region)\n com_period_mean[region].name = \"Period Mean (original grids) %s\" % (region)\n com_spaceint [region].name = \"spaceint_of_%s_over_%s\" % (ref.name,region)\n bias_val [region].name = \"Bias %s\" % (region)\n bias_score [region].name = \"Bias Score %s\" % (region)\n if not skip_rmse:\n rmse_val [region].name = \"RMSE %s\" % (region)\n rmse_score [region].name = \"RMSE Score %s\" % (region)\n if not skip_cycle:\n ref_mean_cycle[region].name = \"cycle_of_%s_over_%s\" % (ref.name,region)\n ref_dtcycle [region].name = \"dtcycle_of_%s_over_%s\" % (ref.name,region)\n com_mean_cycle[region].name = \"cycle_of_%s_over_%s\" % (ref.name,region)\n com_dtcycle [region].name = \"dtcycle_of_%s_over_%s\" % (ref.name,region)\n shift [region].name = \"Phase Shift %s\" % (region)\n shift_score [region].name = \"Seasonal Cycle Score %s\" % (region)\n if spatial:\n ref_union_mean[region].name = \"Benchmark Period Mean (intersection) %s\" % (region)\n com_union_mean[region].name = \"Model Period Mean (intersection) %s\" % (region) \n ref_comp_mean [region].name = \"Benchmark Period Mean (complement) %s\" % (region)\n com_comp_mean [region].name = \"Model Period Mean (complement) %s\" % (region) \n sd_score [region].name = \"Spatial Distribution Score %s\" % (region)\n \n # Unit conversions\n def _convert(var,unit):\n if type(var) == type({}):\n for key in var.keys(): var[key].convert(unit)\n else:\n var.convert(unit)\n\n if table_unit is not None:\n for var in 
[ref_period_mean,com_period_mean,ref_union_mean,com_union_mean,ref_comp_mean,com_comp_mean]:\n _convert(var,table_unit)\n if plot_unit is not None:\n plot_vars = [com_timeint,ref_timeint,bias,com_spaceint,ref_spaceint,bias_val]\n if not skip_rmse: plot_vars += [rmse,rmse_val]\n if not skip_cycle: plot_vars += [com_mean_cycle,ref_mean_cycle,com_dtcycle,ref_dtcycle]\n for var in plot_vars: _convert(var,plot_unit)\n \n # Rename and optionally dump out information to netCDF4 files\n com_timeint .name = \"timeint_of_%s\" % ref.name\n bias .name = \"bias_map_of_%s\" % ref.name\n bias_score_map .name = \"biasscore_map_of_%s\" % ref.name\n \n out_vars = [com_period_mean,\n ref_union_mean,\n com_union_mean,\n ref_comp_mean,\n com_comp_mean,\n com_timeint,\n com_mean_cycle,\n com_dtcycle,\n bias,\n bias_score_map,\n bias_val,\n bias_score,\n shift,\n shift_score]\n if com_spaceint[com_spaceint.keys()[0]].data.size > 1: out_vars.append(com_spaceint)\n if not skip_cycle:\n com_maxt_map .name = \"phase_map_of_%s\" % ref.name\n shift_map .name = \"shift_map_of_%s\" % ref.name\n shift_score_map.name = \"shiftscore_map_of_%s\" % ref.name\n out_vars.append(com_maxt_map)\n out_vars.append(shift_map)\n out_vars.append(shift_score_map)\n if not skip_rmse:\n rmse .name = \"rmse_map_of_%s\" % ref.name\n rms .name = \"rms_map_of_%s\" % ref.name\n rmse_score_map.name = \"rmsescore_map_of_%s\" % ref.name\n out_vars.append(rmse)\n out_vars.append(rms )\n out_vars.append(rmse_score_map)\n out_vars.append(rmse_val)\n out_vars.append(rmse_score)\n if dataset is not None:\n for var in out_vars:\n if type(var) == type({}):\n for key in var.keys(): var[key].toNetCDF4(dataset,group=\"MeanState\")\n else:\n var.toNetCDF4(dataset,group=\"MeanState\")\n for key in sd_score.keys():\n sd_score[key].toNetCDF4(dataset,group=\"MeanState\",\n attributes={\"std\":space_std[key].data,\n \"R\" :space_cor[key].data})\n \n # Rename and optionally dump out information to netCDF4 files\n out_vars = 
[ref_period_mean,ref_timeint]\n if ref_spaceint[ref_spaceint.keys()[0]].data.size > 1: out_vars.append(ref_spaceint)\n ref_timeint .name = \"timeint_of_%s\" % ref.name\n if not skip_cycle:\n ref_maxt_map.name = \"phase_map_of_%s\" % ref.name\n out_vars += [ref_maxt_map,ref_mean_cycle,ref_dtcycle]\n if benchmark_dataset is not None:\n for var in out_vars:\n if type(var) == type({}):\n for key in var.keys(): var[key].toNetCDF4(benchmark_dataset,group=\"MeanState\")\n else:\n var.toNetCDF4(benchmark_dataset,group=\"MeanState\")\n \n return \n\n \ndef AnalysisRelationship(dep_var,ind_var,dataset,rname,**keywords):\n \"\"\"Perform a relationship analysis.\n \n Expand to provide details of what exactly is done.\n\n Parameters\n ----------\n dep_var : ILAMB.Variable.Variable\n the dependent variable\n ind_var : ILAMB.Variable.Variable\n the independent variable\n dataset : netCDF4.Dataset\n a open dataset in write mode for caching the results of the\n analysis which pertain to the model\n rname : str\n the name of the relationship under study\n regions : list of str, optional\n a list of units over which to apply the analysis\n dep_plot_unit,ind_plot_unit : str, optional\n the name of the unit to use in the plots found on the HTML output\n \n \"\"\" \n def _extractMaxTemporalOverlap(v1,v2): # should move?\n t0 = max(v1.time.min(),v2.time.min())\n tf = min(v1.time.max(),v2.time.max())\n for v in [v1,v2]:\n begin = np.argmin(np.abs(v.time-t0))\n end = np.argmin(np.abs(v.time-tf))+1\n v.time = v.time[begin:end]\n v.data = v.data[begin:end,...]\n mask = v1.data.mask + v2.data.mask\n v1 = v1.data[mask==0].flatten()\n v2 = v2.data[mask==0].flatten()\n return v1,v2\n\n # grab regions\n regions = keywords.get(\"regions\",[\"global\"])\n \n # convert to plot units\n dep_plot_unit = keywords.get(\"dep_plot_unit\",dep_var.unit)\n ind_plot_unit = keywords.get(\"ind_plot_unit\",ind_var.unit) \n if dep_plot_unit is not None: dep_var.convert(dep_plot_unit)\n if ind_plot_unit is not 
None: ind_var.convert(ind_plot_unit)\n\n # if the variables are temporal, we need to get period means\n if dep_var.temporal: dep_var = dep_var.integrateInTime(mean=True)\n if ind_var.temporal: ind_var = ind_var.integrateInTime(mean=True)\n mask = dep_var.data.mask + ind_var.data.mask\n\n # analysis over regions\n for region in regions:\n\n lats,lons = ILAMBregions[region]\n rmask = (np.outer((dep_var.lat>lats[0])*(dep_var.latlons[0])*(dep_var.lon t0:\n begin -= 1\n if begin <= 0:\n begin = 0\n break\n while v.time_bnds[end, 1] < tf:\n end += 1\n if end >= v.time.size-1:\n end = v.time.size-1\n break\n v.time = v.time [begin:(end+1) ]\n v.time_bnds = v.time_bnds[begin:(end+1),...]\n v.data = v.data [begin:(end+1),...]\n return v\n \ndef MakeComparable(ref,com,**keywords):\n r\"\"\"Make two variables comparable.\n\n Given a reference variable and a comparison variable, make the two\n variables comparable or raise an exception explaining why they are\n not.\n\n Parameters\n ----------\n ref : ILAMB.Variable.Variable\n the reference variable object\n com : ILAMB.Variable.Variable\n the comparison variable object\n clip_ref : bool, optional\n enable in order to clip the reference variable time using the\n limits of the comparison variable (defult is False)\n mask_ref : bool, optional\n enable in order to mask the reference variable using an\n interpolation of the comparison variable (defult is False)\n eps : float, optional\n used to determine how close you can be to a specific time\n (expressed in days since 1-1-1850) and still be considered the\n same time (default is 30 minutes)\n window : float, optional\n specify to extend the averaging intervals (in days since\n 1-1-1850) when a variable must be coarsened (default is 0)\n\n Returns\n -------\n ref : ILAMB.Variable.Variable\n the modified reference variable object\n com : ILAMB.Variable.Variable\n the modified comparison variable object\n\n \"\"\" \n # Process keywords\n clip_ref = keywords.get(\"clip_ref\" 
,False)\n mask_ref = keywords.get(\"mask_ref\" ,False)\n eps = keywords.get(\"eps\" ,30./60./24.)\n window = keywords.get(\"window\" ,0.)\n extents = keywords.get(\"extents\" ,np.asarray([[-90.,+90.],[-180.,+180.]]))\n logstring = keywords.get(\"logstring\",\"\")\n \n # If one variable is temporal, then they both must be\n if ref.temporal != com.temporal:\n msg = \"%s Datasets are not uniformly temporal: \" % logstring\n msg += \"reference = %s, comparison = %s\" % (ref.temporal,com.temporal)\n logger.debug(msg)\n raise VarsNotComparable()\n\n # If the reference is spatial, the comparison must be\n if ref.spatial and not com.spatial:\n msg = \"%s Datasets are not uniformly spatial: \" % logstring\n msg += \"reference = %s, comparison = %s\" % (ref.spatial,com.spatial)\n logger.debug(msg)\n raise VarsNotComparable()\n\n # If the reference is layered, the comparison must be\n if ref.layered and not com.layered:\n if ref.depth.size == 1:\n com.layered = True\n com.depth = ref.depth\n com.depth_bnds = ref.depth_bnds\n shp = list(com.data.shape)\n insert = 0\n if com.temporal: insert = 1\n shp.insert(insert,1)\n com.data = com.data.reshape(shp)\n else:\n msg = \"%s Datasets are not uniformly layered: \" % logstring\n msg += \"reference = %s, comparison = %s\" % (ref.layered,com.layered)\n logger.debug(msg)\n raise NotLayeredVariable()\n \n # If the reference represents observation sites, extract them from\n # the comparison\n if ref.ndata is not None and com.spatial: com = com.extractDatasites(ref.lat,ref.lon)\n\n # If both variables represent observations sites, make sure you\n # have the same number of sites and that they represent the same\n # location. 
Note this is after the above extraction so at this\n # point the ndata field of both variables should be equal.\n if ref.ndata != com.ndata:\n msg = \"%s One or both datasets are understood as site data but differ in number of sites: \" % logstring\n msg += \"reference = %d, comparison = %d\" % (ref.ndata,com.ndata)\n logger.debug(msg)\n raise VarsNotComparable()\n if ref.ndata is not None:\n if not (np.allclose(ref.lat,com.lat) or np.allclose(ref.lon,com.lon)):\n msg = \"%s Datasets represent sites, but the locations are different: \" % logstring\n msg += \"maximum difference lat = %.f, lon = %.f\" % (np.abs((ref.lat-com.lat)).max(),\n np.abs((ref.lon-com.lon)).max())\n logger.debug(msg)\n raise VarsNotComparable()\n\n # If the datasets are both spatial, ensure that both represent the\n # same spatial area and trim the datasets if not.\n if ref.spatial and com.spatial:\n\n lat_bnds = (max(ref.lat_bnds[ 0,0],com.lat_bnds[ 0,0],extents[0,0]),\n min(ref.lat_bnds[-1,1],com.lat_bnds[-1,1],extents[0,1]))\n lon_bnds = (max(ref.lon_bnds[ 0,0],com.lon_bnds[ 0,0],extents[1,0]),\n min(ref.lon_bnds[-1,1],com.lon_bnds[-1,1],extents[1,1]))\n\n # Clip reference\n diff = np.abs([ref.lat_bnds[[0,-1],[0,1]]-lat_bnds,\n ref.lon_bnds[[0,-1],[0,1]]-lon_bnds])\n if diff.sum() >= 5.:\n shp0 = np.asarray(np.copy(ref.data.shape),dtype=int)\n ref.trim(lat=lat_bnds,lon=lon_bnds)\n shp = np.asarray(np.copy(ref.data.shape),dtype=int)\n msg = \"%s Spatial data was clipped from the reference: \" % logstring\n msg += \" before: %s\" % (shp0)\n msg += \" after: %s\" % (shp )\n logger.info(msg)\n\n # Clip comparison\n diff = np.abs([com.lat_bnds[[0,-1],[0,1]]-lat_bnds,\n com.lon_bnds[[0,-1],[0,1]]-lon_bnds])\n if diff.sum() >= 5.:\n shp0 = np.asarray(np.copy(com.data.shape),dtype=int)\n com.trim(lat=lat_bnds,lon=lon_bnds)\n shp = np.asarray(np.copy(com.data.shape),dtype=int)\n msg = \"%s Spatial data was clipped from the comparison: \" % logstring\n msg += \" before: %s\" % (shp0)\n msg += \" 
after: %s\" % (shp )\n logger.info(msg)\n \n \n if ref.temporal:\n\n # If the reference time scale is significantly larger than the\n # comparison, coarsen the comparison\n if np.log10(ref.dt/com.dt) > 0.5:\n com = com.coarsenInTime(ref.time_bnds,window=window)\n \n # Time bounds of the reference dataset\n t0 = ref.time_bnds[ 0,0]\n tf = ref.time_bnds[-1,1]\n\n # Find the comparison time range which fully encompasses the reference\n com = ClipTime(com,t0,tf)\n \n if clip_ref:\n\n # We will clip the reference dataset too\n t0 = max(t0,com.time_bnds[ 0,0])\n tf = min(tf,com.time_bnds[-1,1])\n ref = ClipTime(ref,t0,tf)\n\n else:\n \n # The comparison dataset needs to fully cover the reference in time\n if (com.time_bnds[ 0,0] > (t0+eps) or\n com.time_bnds[-1,1] < (tf-eps)):\n msg = \"%s Comparison dataset does not cover the time frame of the reference: \" % logstring\n msg += \" t0: %.16e <= %.16e (%s)\" % (com.time_bnds[0, 0],t0+eps,com.time_bnds[0, 0] <= (t0+eps))\n msg += \" tf: %.16e >= %.16e (%s)\" % (com.time_bnds[1,-1],tf-eps,com.time_bnds[1,-1] >= (tf-eps))\n logger.debug(msg)\n raise VarsNotComparable()\n\n # Check that we now are on the same time intervals\n if ref.time.size != com.time.size:\n msg = \"%s Datasets have differing numbers of time intervals: \" % logstring\n msg += \"reference = %d, comparison = %d\" % (ref.time.size,com.time.size)\n logger.debug(msg)\n raise VarsNotComparable() \n if not np.allclose(ref.time_bnds,com.time_bnds,atol=0.75*ref.dt):\n msg = \"%s Datasets are defined at different times\" % logstring\n logger.debug(msg)\n raise VarsNotComparable()\n\n if ref.layered:\n\n # Try to resolve if the layers from the two quantities are\n # different\n if ref.depth.size == com.depth.size == 1:\n ref = ref.integrateInDepth(mean = True) \n com = com.integrateInDepth(mean = True) \n elif ref.depth.size != com.depth.size:\n # Compute the mean values from the comparison over the\n # layer breaks of the reference.\n if ref.depth.size == 1 and 
com.depth.size > 1:\n com = com.integrateInDepth(z0=ref.depth_bnds[ 0,0],\n zf=ref.depth_bnds[-1,1],\n mean = True)\n ref = ref.integrateInDepth(mean = True) # just removing the depth dimension \n else:\n if not np.allclose(ref.depth,com.depth):\n msg = \"%s Datasets have a different layering scheme\" % logstring\n logger.debug(msg)\n raise VarsNotComparable()\n\n # Convert the comparison to the units of the reference\n com = com.convert(ref.unit)\n \n return ref,com\n\n\ndef CombineVariables(V):\n \"\"\"Combines a list of variables into a single variable.\n\n This routine is intended to be used to merge variables when\n separate moments in time are scattered over several files.\n\n Parameters\n ----------\n V : list of ILAMB.Variable.Variable\n a list of variables to merge into a single variable\n \n Returns\n -------\n v : ILAMB.Variable.Variable\n the merged variable\n \"\"\"\n from Variable import Variable\n \n # checks on data\n assert type(V) == type([])\n for v in V: assert v.temporal\n if len(V) == 1: return V[0]\n \n # Put list in order by initial time\n V.sort(key=lambda v: v.time[0])\n\n # Check the beginning and ends times for monotonicity\n nV = len(V)\n t0 = np.zeros(nV)\n tf = np.zeros(nV)\n nt = np.zeros(nV,dtype=int)\n ind = [0]\n for i,v in enumerate(V):\n t0[i] = v.time[ 0]\n tf[i] = v.time[-1]\n nt[i] = v.time.size\n ind.append(nt[:(i+1)].sum())\n \n # Checks on monotonicity\n assert (t0[1:]-t0[:-1]).min() >= 0\n assert (tf[1:]-tf[:-1]).min() >= 0\n assert (t0[1:]-tf[:-1]).min() >= 0\n\n # Assemble the data\n shp = (nt.sum(),)+V[0].data.shape[1:]\n time = np.zeros(shp[0])\n time_bnds = np.zeros((shp[0],2))\n data = np.zeros(shp)\n mask = np.zeros(shp,dtype=bool)\n for i,v in enumerate(V):\n time [ind[i]:ind[i+1]] = v.time\n time_bnds[ind[i]:ind[i+1],...] = v.time_bnds\n data [ind[i]:ind[i+1],...] = v.data\n mask [ind[i]:ind[i+1],...] 
= v.data.mask\n\n # If assembled from single slice files and no time bounds were\n # provided, they will not be reflective of true bounds here. If\n # any dt's are 0, make time_bounds none and recompute in the\n # constructor.\n if np.any((time_bnds[:,1]-time_bnds[:,0])<1e-12): time_bnds = None\n \n v = V[0]\n return Variable(data = np.ma.masked_array(data,mask=mask),\n unit = v.unit,\n name = v.name,\n time = time,\n time_bnds = time_bnds,\n depth = v.depth,\n depth_bnds = v.depth_bnds,\n lat = v.lat,\n lon = v.lon,\n area = v.area,\n ndata = v.ndata)\n\ndef ConvertBoundsTypes(x):\n y = None\n if x.ndim == 2:\n y = np.zeros(x.shape[0]+1)\n y[:-1] = x[ :, 0]\n y[ -1] = x[-1,-1]\n if x.ndim == 1:\n y = np.zeros((x.shape[0]-1,2))\n y[:,0] = x[:-1]\n y[:,1] = x[+1:]\n return y\n \ndef LandLinInterMissingValues(mdata):\n land = np.any(mdata.mask,axis=0)==False\n data = np.ma.masked_array(mdata)\n data.data[data.mask] = 0.\n data.fill_value = 0.\n data = data.data\n land = land.astype(int)\n smooth = data*land[np.newaxis,...]\n suml = np.copy(land)\n smooth[:,1:-1,1:-1] += data[:, :-2, :-2]*land[np.newaxis, :-2, :-2]\n suml [ 1:-1,1:-1] += land[ :-2, :-2]\n smooth[:,1:-1,1:-1] += data[:, :-2,1:-1]*land[np.newaxis, :-2,1:-1]\n suml [ 1:-1,1:-1] += land[ :-2,1:-1]\n smooth[:,1:-1,1:-1] += data[:, :-2, +2:]*land[np.newaxis, :-2, +2:]\n suml [ 1:-1,1:-1] += land[ :-2, +2:]\n smooth[:,1:-1,1:-1] += data[:,1:-1, :-2]*land[np.newaxis,1:-1, :-2]\n suml [ 1:-1,1:-1] += land[ 1:-1, :-2]\n smooth[:,1:-1,1:-1] += data[:,1:-1, +2:]*land[np.newaxis,1:-1, +2:]\n suml [ 1:-1,1:-1] += land[ 1:-1, +2:]\n smooth[:,1:-1,1:-1] += data[:, +2:, :-2]*land[np.newaxis, +2:, :-2]\n suml [ 1:-1,1:-1] += land[ +2:, :-2]\n smooth[:,1:-1,1:-1] += data[:, +2:,1:-1]*land[np.newaxis, +2:,1:-1]\n suml [ 1:-1,1:-1] += land[ +2:,1:-1]\n smooth[:,1:-1,1:-1] += data[:, +2:, +2:]*land[np.newaxis, +2:, +2:]\n suml [ 1:-1,1:-1] += land[ +2:, +2:]\n smooth /= suml.clip(1)\n smooth = (mdata.mask==True)*smooth + 
(mdata.mask==False)*mdata.data\n return smooth\n","sub_path":"ilamb/ilamb/src/ILAMB/ilamblib.py","file_name":"ilamblib.py","file_ext":"py","file_size_in_byte":66595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"418173256","text":"import ipyvuetify as v\nimport sepal_ui.sepalwidgets as sw\nfrom traitlets import CBool, Int, link\n\n__all__ = [\"BoolQuestion\", \"Tabs\", \"TaskMsg\"]\n\n\nclass BoolQuestion(v.Flex, sw.SepalWidget):\n v_model = CBool(False).tag(sync=True)\n\n def __init__(self, question, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n bool_radios = v.RadioGroup(\n row=True,\n v_model=self.v_model,\n children=[\n v.Radio(label=\"No\", value=False),\n v.Radio(label=\"Yes\", value=True),\n ],\n )\n\n link((bool_radios, \"v_model\"), (self, \"v_model\"))\n\n self.children = [v.Html(tag=\"h3\", children=[question]), bool_radios]\n\n\nclass Tabs(v.Card):\n current = Int(0).tag(sync=True)\n\n def __init__(self, titles, content, **kwargs):\n self.background_color = \"primary\"\n self.dark = True\n\n self.tabs = [\n v.Tabs(\n v_model=self.current,\n children=[\n v.Tab(children=[title], key=key) for key, title in enumerate(titles)\n ],\n )\n ]\n\n self.content = [\n v.TabsItems(\n v_model=self.current,\n children=[\n v.TabItem(children=[content], key=key)\n for key, content in enumerate(content)\n ],\n )\n ]\n\n self.children = self.tabs + self.content\n\n link((self.tabs[0], \"v_model\"), (self.content[0], \"v_model\"))\n\n super().__init__(**kwargs)\n\n\nclass TaskMsg(sw.Flex):\n colors = [\"info\", \"success\", \"error\", \"warning\"]\n\n def __init__(self, msg=\"\"):\n super().__init__()\n\n self.class_ = \"d-flex\"\n self.icon = sw.Icon(children=[\"mdi-circle\"], color=\"info\")\n\n self.children = [msg, v.Spacer(), self.icon]\n\n def set_msg(self, msg):\n \"\"\"mutate and set new message by replacing\"\"\"\n\n self.children = [msg] + self.children[1:]\n\n def set_state(self, state_color):\n \"\"\"sets a state (color) to the icon\"\"\"\n\n if state_color not in self.colors:\n raise Exception(f\"Invalid color, use {self.colors}\")\n\n if state_color == \"success\":\n self.icon.children = [\"mdi-checkbox-marked-circle\"]\n elif state_color == 
\"warning\":\n self.icon.children = [\"mdi-information\"]\n\n self.icon.color = state_color\n","sub_path":"component/widget/custom_widgets.py","file_name":"custom_widgets.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"359324407","text":"from datetime import date, time\n\nfrom django.core.exceptions import ValidationError\nfrom django.test import TestCase\n\nfrom .models import Semester, ClassDay, Course, CourseSchedule, DAYS_OF_WEEK\n\n\nclass SemesterBaseTestCase(TestCase):\n def setUp(self):\n time_frame = (date(2016, 3, 5), date(2016, 6, 19))\n Semester.objects.create(time_frame=time_frame)\n self.semester = Semester.objects.first()\n\n\nclass ClassDayTestCase(SemesterBaseTestCase):\n def test_date_out_of_range(self):\n with self.assertRaises(ValidationError):\n class_day = ClassDay(date=date(2016, 1, 1), semester=self.semester)\n class_day.full_clean()\n\n def test_duplicates(self):\n class_day_1 = ClassDay(date=date(2016, 5, 1), semester=self.semester)\n class_day_1.full_clean()\n class_day_1.save()\n\n with self.assertRaises(ValidationError):\n class_day_2 = ClassDay(date=date(2016, 5, 1), semester=self.semester)\n class_day_2.full_clean()\n\n\nclass CourseTestCase(SemesterBaseTestCase):\n def test_no_lecture_and_lab(self):\n course = Course(\n name='Test',\n semester=self.semester,\n lecture_available=False,\n lab_available=False\n )\n\n with self.assertRaises(ValidationError):\n course.full_clean()\n\n course.save()\n\n def test_no_lecture(self):\n course = Course.objects.create(\n name='Test',\n semester=self.semester,\n lecture_available=False,\n lab_available=True\n )\n\n self.assertIsNotNone(course.pk)\n\n def test_no_lab(self):\n course = Course.objects.create(\n name='Test',\n semester=self.semester,\n lecture_available=True,\n lab_available=False\n )\n\n self.assertIsNotNone(course.pk)\n\n def test_lecture_and_lab(self):\n course = Course.objects.create(\n name='Test',\n semester=self.semester,\n lecture_available=True,\n lab_available=True\n )\n\n self.assertIsNotNone(course.pk)\n\n\nclass CourseScheduleTestCase(SemesterBaseTestCase):\n def test_lecture_available(self):\n course = self.get_course(lecture_available=True)\n course_schedule = 
self.get_course_schedule(\n course=course, course_type=Course.TYPES.lecture)\n\n course_schedule.full_clean()\n course_schedule.save()\n\n self.assertIsNotNone(course_schedule.pk)\n\n def test_lecture_not_available(self):\n course = self.get_course(lecture_available=False)\n course_schedule = self.get_course_schedule(\n course=course, course_type=Course.TYPES.lecture)\n\n with self.assertRaises(ValidationError):\n course_schedule.full_clean()\n\n def test_lab_available(self):\n course = self.get_course(lab_available=True)\n course_schedule = self.get_course_schedule(\n course=course, course_type=Course.TYPES.lab)\n\n course_schedule.full_clean()\n course_schedule.save()\n\n self.assertIsNotNone(course_schedule.pk)\n\n def test_lab_not_available(self):\n course = self.get_course(lab_available=False)\n course_schedule = self.get_course_schedule(\n course=course, course_type=Course.TYPES.lab)\n\n with self.assertRaises(ValidationError):\n course_schedule.full_clean()\n\n def test_incorrect_week_interval(self):\n course = self.get_course(lecture_available=True, lab_available=True)\n\n course_schedule = self.get_course_schedule(\n course=course, course_type=Course.TYPES.lab, week_interval=0)\n\n with self.assertRaises(ValidationError):\n course_schedule.full_clean()\n\n def test_end_greater_than_start(self):\n course = self.get_course(lecture_available=True, lab_available=True)\n\n course_schedule = self.get_course_schedule(\n course=course, course_type=Course.TYPES.lab, start=time(10), end=time(8))\n\n with self.assertRaises(ValidationError):\n course_schedule.full_clean()\n\n def get_course(self, lecture_available=False, lab_available=False):\n return Course.objects.create(\n name='Test',\n semester=self.semester,\n lecture_available=lecture_available,\n lab_available=lab_available\n )\n\n def get_course_schedule(self, **kwargs):\n if 'start' not in kwargs:\n kwargs['start'] = time(8)\n if 'end' not in kwargs:\n kwargs['end'] = time(10)\n\n return CourseSchedule(\n 
day_of_week=DAYS_OF_WEEK.mon,\n **kwargs\n )\n","sub_path":"core/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"391762663","text":"import pygame\nimport random\nimport math\nimport socket\nimport time\nimport select\n\nclass paddle:\n def __init__(self, x, y, client):\n self.posx = x # 100 for left 540 for right\n self.posy = y # 240, the centre\n self.size = 50\n self.sx = 640\n self.sy = 480\n self.client = client\n self.score = 0\n\n def changey (self, dy):\n if gameover:\n return\n if dy < self.size: # Bumper at the top of the screen\n self.posy = self.size\n elif dy>(self.sy-self.size): # Bumper at the bottom\n self.posy = self.sy-self.size\n else:\n self.posy = dy # New y position, centre of the paddle\n\n# This class represents the ball in the pong game. It has a position and speed,\n# a color, and a size. It also needs to know about the two paddles and the display.\nclass ballobj:\n def __init__ (self, disp, l, r):\n global coms\n self.posx = 200 # Current position is (posx, posy)\n self.posy = 300\n self.speed = 5 # Current overall speed\n self.dx = 4 # Current chane in x and y each frame\n self.dy = 4\n self.disp = disp # The display\n self.size = 5 # Size of the ball\n self.color = (255,255,255) # Color of the ball\n self.countdown = -1 # Reamining delay (frames) after score\n self.resetDelay = 50 # Total delay after score\n if disp != None:\n self.sx = disp.get_width() # size of the display.\n self.sy = disp.get_height()\n else:\n self.sx = 640\n self.sy = 480\n self.left = l # The left paddle (class instance)\n self.right = r # Right paddle (class instance)\n# coms.initBall (self.posx, self.posy, self.dx, self.dy) # Transmit initialization to clients\n\n def move (self): # Move the ball one step. 
Check collisions\n global coms\n if gameover:\n return\n if self.countdown > 0: # delay after a score\n self.countdown -= 1\n return\n if self.countdown == 0:\n self.posx = 240\n self.posy = random.random ()*100 + 200\n self.countdown = -1\n\n self.posx = self.posx + self.dx\n self.posy = self.posy + self.dy\n if self.posx > 560: # bounce off of the wall\n self.left.score += 1\n self.countdown = self.resetDelay\n if self.posx < 50:\n self.right.score += 1\n self.countdown = self.resetDelay\n self.posy = self.sy\n self.dy = -self.dy\n if self.posy < self.size/2:\n self.posy = self.size/2+1\n self.dy = -self.dy\n if self.posy > 480-self.size/2:\n self.posy = 480-self.size/2\n self.dy = -self.dy\n if self.collision(): # Paddle collision?\n self.dx = self.dx + (random.random ()-0.5)*0.2 # A slight change after bouncing\n self.dy = self.dy + (random.random ()-0.5)*0.2\n d = math.sqrt (self.dx*self.dx + self.dy*self.dy)\n self.dx = (self.dx/d)*self.speed\n self.dy = (self.dy/d)*self.speed\n\n def collision (self): # Does the ball collide with a paddle?\n if self.posx <= self.left.posx and self.posyself.left.posy-self.left.size:\n if self.posx < self.left.posx-2:\n return False\n self.dx = -self.dx\n return True\n\n if self.posx >= self.right.posx and self.posyself.right.posy-self.right.size:\n if self.posx > self.right.posx+2:\n return False\n self.dx = -self.dx\n return True\n return False\n\ndef makeMessage(code, p1, p2):\n return '{:02d}{:03d}{:03d}'.format(code, p1, p2)\n\ndef getMessage(s):\n try:\n code = s[0:2]\n p1 = s[2:5]\n p2 = s[5:8]\n return (int(code), int(p1), int(p2))\n except:\n abort()\n return None\n\ndef readMessage (client):\n m = client.recv(8)\n if m == b'':\n abort()\n else:\n return getMessage (m)\n\ndef sendBPos(client, x, y): # Send ball position\n client.send(bytes(makeMessage(BPOS, x, y), 'utf-8'))\n\ndef sendScore (client, lscore, rscore):\n client.send (bytes(makeMessage(GOAL, lscore, rscore), 'utf-8'))\n\ndef sendPPos(client, x):\n global 
PPOS, ws\n client.send(bytes(makeMessage(PPOS, x, 0), 'utf-8'))\n if client==ls:\n k = 0\n else:\n k = 1\n\ndef sendGameOver(client):\n client.send(bytes(makeMessage(OVER, 0, 0), 'utf-8'))\n\ndef sendDesi (client, k):\n client.send(bytes(makeMessage(DESI, k, 0), 'utf-8'))\n\ndef abort ():\n print (\"Abort server - client has shut down.\")\n exit ()\n\ndef netInit():\n global serversocket\n print(\"Communication initialize\")\n serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create a socket object\n host = socket.gethostname() # get local machine name\n port = 9999\n serversocket.bind((host, port)) # bind to the port\n serversocket.listen(2) # queue up to 2 requests\n print(\"Server listening...\")\n\ndef getPlayers(): # Try to connect to two players (clients)\n global ls, rs, serversocket\n\n print (\"Waiting for left player.\")\n while True: # Open the left client\n try:\n ls,laddr = serversocket.accept()\n except: # Failed\n time.sleep (1)\n continue\n print (\"Left player is in.\")\n sendDesi(ls, 0) # Tell the client it is the left player\n break\n\n print (\"Waiting for right player\")\n while True: # Open the right client\n try:\n rs,raddr = serversocket.accept()\n except:\n time.sleep (1)\n continue\n print (\"Right player is in.\")\n sendDesi (rs, 1) # Tell the client it is the right player\n break\n return\n\nPPOS = 1 # Message codes\nBPOS = 2\nDESI = 0\nGOAL = 4\nOVER = 8\nrs = None # Right and left clients\nls = None\nHOST = \"192.168.1.168\"\nPORT = 9999\ngameover = False\nbackground = pygame.image.load(\"background.png\")\n\nserversocket = None\npygame.init()\nclock = pygame.time.Clock()\ndisplay = None\n\nnetInit()\ngetPlayers() # Wait for players to connect\npleft = paddle (100, 240, ls) # Create left player paddle\npright = paddle (540, 240, rs) # Create right player paddle\nball = ballobj(display, pleft, pright) # Create the ball.\nwstr = [\"Left\", \"Right\"]\n\nwhile True:\n clock.tick(50)\n for event in pygame.event.get(): # 
Only event should be QUIT.\n if event.type == pygame.QUIT:\n exit()\n\n# ------------------ Get Paddle positions ------------------------------------\n m1 = readMessage(rs) # Read paddle message sent by RIGHT client.\n pright.changey(m1[1]) # Also change the position of the paddle\n m1 = readMessage(ls) # Read paddle message sent by Left client.\n pleft.changey(m1[1]) # Also change the position of the paddle\n\n# --------------------------- Move ball, send position ---------------------------------\n ball.move() # Move the ball\n sendBPos(ls, int(ball.posx), int(ball.posy))\n sendBPos(rs, int(ball.posx), int(ball.posy))\n\n# ----------------------------- Send paddle pos to clients -----------------------------\n ls.send(bytes(makeMessage(PPOS, pleft.posy, pright.posy), 'utf-8'))\n rs.send(bytes(makeMessage(PPOS, pleft.posy, pright.posy), 'utf-8'))\n\n# ----------------------------- Send score to both clients -----------------------------\n sendScore(ls, pleft.score, pright.score)\n sendScore (rs, pleft.score, pright.score)\n","sub_path":"CompanionFiles.GameDev/Code/Chapter10/pong/pongServerb.py","file_name":"pongServerb.py","file_ext":"py","file_size_in_byte":7920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"318951761","text":"\"\"\"\n1.堆和栈\n内存区域中分堆区和栈区:栈区间的内存的开辟和释放都是系统自动的,堆区间的内存是手动开辟的,手动释放的\n内存的管理管理的是堆区间的内存\n\n2.数据的存储\na.python中所有的数据都是对象,都是保存在堆中的\nb.python中的所有变量存储的都是存在堆中的数据地址。存了对象的地址的变量又叫对象的引用\nc.默认情况下,创建对象就会在堆区间开辟空间,存储数据,并且将地址返回;如果对象是数字或者字符串\n不会直接开辟空间,会做缓存,每次使用的时候会先去缓存区中看之前有没有存储过,如果有就直接返回之前数据的地址,没有才开辟新的空间,存储数据\n\n3.数据的销毁\npython中通过\"垃圾回收机制\"来管理内存的释放\n原理:看一个对象是否销毁,就看这个对象的引用计数,是否为零,不为零就不销毁\n引用计数:对象的引用个数\n注意:系统不会时时刻刻检查对象的引用计数是否是零,而是隔一段时间,检测一次,如果检测到垃圾就回收\n\"\"\"\nfrom sys import getrefcount\n\"\"\"\ngetrefcount(对象)- 获取指定对象的引用计数\n\"\"\"\n#1.增加引用计数:使用变量存对象的地址\nlist1 =[1] #对象[1]的引用计数1\nlist2 =list1 #对象[1]的引用计数2\nlist3 =[list1,100]#对象[1]的引用计数3\nprint(getrefcount(list1))\n#2.减少引用计数\n\"\"\"\na.删除引用\nb.让当前对象的引用成为别的对象的引用\n\n\"\"\"\nprint(getrefcount(50))","sub_path":"Python1808/第一阶段/day15-面向对象和pygame/04-内存管理机制.py","file_name":"04-内存管理机制.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"134860602","text":"######################################################################\n#작성일: 11/1\n#마지막 변경일: 11/1\n#작성자: 20132885 손태선\n#기능: 차를 운전함. 실행파일\n#입력: 좌회전 또는 우회전 키보드 입력\n#출력: car module로 방향, 속도 출력\n######################################################################\n'''운전자가 휠을 돌리고 엑셀을 밟는 것 처럼 차에 신호를 보냅니다.\n이렇게 car와 driver를 나누면 이후 추가될 다른 요구에도 driver의\n행동만 바꾸면 되니 car 모듈을 수정할 필요가 없습니다.\n\n예시 코드와의 차이점\n1.자동차에서 해결해야 할 일부 기능을 여기서 해결하던 점 수정 (GPIO.setwarnings(false))\n2.주행환경이 바뀔�� 마다 출력의 세부수치를 바꿔줘야 하기 때문에\n자주 수정할 수 있는 여기서 차량의 전후진, 회전을 결정\n\n'''\n\nimport car\nimport trackingModule\nimport time\n\ndef decision():#여기서 회전안해도되 판정 나올때 까지 회전 하고 풀어줌\n\tpass\n\n\ndef turn(count):\n\t#time.sleep(0.5)\n\n\t########\n\tdirection=False\n\tif count==1:#우회전\n\t\tdirection=True\n\ttime.sleep(0.5)\n\twhile not (trackingModule.navigator()&4):\n\t\tcar.engine(direction, not direction, 36, 39)\n\t\ttime.sleep(0.001)\n\tcar.engine(True, True, 0, 0)\n\ttime.sleep(0.5)\n\n\twhile not (trackingModule.navigator()&4):\n\t\tcar.engine(not direction, direction, 36, 39)\n\t\ttime.sleep(0.0001)\n\tcar.engine(True,True,0,0)\n\ttime.sleep(0.5)\n\n\n\t########\n\n\t'''while count!=0:\n\t\t#car.engine(True, True, 0, 0)\n\t\t#time.sleep(0.1)\n\n\t\tcar.engine(True, False, 36, 36)#2연속 회전 시\n\t\ttime.sleep(0.5)\n\n\t\twhile not (trackingModule.navigator()&4):\n\t\t\tcar.engine(True, False, 36, 39)\n\t\t\ttime.sleep(0.001)\n\t\tcar.engine(True, True, 0, 0)\n\t\ttime.sleep(0.5)\n\t\t\t\n\t\twhile not (trackingModule.navigator()&4):\n\t\t\tcar.engine(False, True, 36, 39)\n\t\t\ttime.sleep(0.0001)\n\t\tcar.engine(True, True, 0, 0)\n\t\ttime.sleep(0.5)\n\n\t\tcount-=1'''\n\ndef lineTracking():\n\tprint(\"자동주행을 시작합니다.\")\n\t#전진후 좌우 교정과 판단을 동시에\n\n\twhile True:\n\t\tcount=0\n\t\tturn_finish=False\n\t\tcar.engine(True, True, 42, 42)\n\t\ttime.sleep(0.001)\n\n\t\tbit=2\t\n\n\t\tcount=0\n\t\twhile True:\n\t\t\tbit=trackingModule.bitCount()\n\t\t\t#if bit>2:\n\t\t\t#\tprint(bit)\n\t\t\tif 
trackingModule.where_to_go()!=0:\n\t\t\t\tbreak\n\n\t\t\t'''if bit==5:\n\t\t\t\tprint(\"CC\")\n\t\t\t\tbreak\n\t\t\telif bit==0:\t\n\t\t\t\tprint(\"jj\")\n\t\t\t\tbreak\n\t\t\telif bit>2:\n\t\t\t\tprint(\"more go\", bit)\n\t\t\t\twhile bit!=0:\n\t\t\t\t\tbit=trackingModule.bitCount()\n\t\t\t\tbreak'''\n\t\t\tli = trackingModule.li[len(trackingModule.li)-1]\n\n\t\t\tif li[2]==False:\n\t\t\t\tcar.engine(True,True,0,0)\n\t\t\t\ttime.sleep(0.05)\n\t\t\t\tif li[1]==True:\n\t\t\t\t\twhile trackingModule.bit4()!=True:\n\t\t\t\t\t\tcar.engine(False, True, 35, 35)\n\t\t\t\t\t\tif trackingModule.bitCount()==0:\n\t\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\twhile trackingModule.bit4()!=True:\n\t\t\t\t\t\tcar.engine(True, False, 35, 35)\n\t\t\t\t\t\tif trackingModule.bitCount()==0:\n\t\t\t\t\t\t\tbreak\n\t\t\t\tcar.engine(True, True, 0,0)\n\t\t\t\ttime.sleep(0.05)\n\t\t\t\tcar.engine(True, True, 42, 42)\n\n\t\t#decision\n\t\tcount=trackingModule.where_to_go()\n\t\tprint(count)\n\n\t\tfor a in reversed(trackingModule.li):\n\t\t\tprint(a)\n\t\tprint()\n\n\t\tturn(count)\n\t\t\n\t\ttrackingModule.li_clear()\n\t\t#once=False\n\n\t\t\n\n\n\n\n\n\tprint(\"자동주행을 마칩니다.\")\n\n\nif __name__ == \"__main__\":\n\ttry:\n\t\t#차 시동을 건다.\n\t\tcar.startUp()\n\n\t\tlineTracking()\n\t\t\t\t\n\t\t#차 시동을 끈다.\n\t\tcar.turnOff()\n\t#ctrl + c 키로 종료한 경우\n\texcept KeyboardInterrupt:\n\t\tprint(\"강제종료 하셨습니다.\")\n\t#오류로 인한 종료 시 시동끄기 함수 호출. 없다면 오류로 프로그램이 종료된 이후에도 바퀴가 돌 수 있다.\n\tfinally:\n\t\tcar.turnOff()\n\n\n","sub_path":"module/save/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"109160526","text":"import glob\nfrom os import symlink, chdir, mkdir\nfrom pathlib import Path\nimport os\n\nspws = {3: list(range(4)),\n 6: list(range(7)),}\n\ndefault_lines = {'n2hp': '93.173700GHz',\n 'sio': '217.104984GHz',\n 'h2co303': '218.222195GHz',\n '12co': '230.538GHz',\n 'h30a': '231.900928GHz',\n 'h41a': '92.034434GHz',\n \"c18o\": \"219.560358GHz\",\n \"ch3cn\": \"92.26144GHz\",\n \"ch3cch\": \"102.547983GHz\",\n }\n\n\ndirnames = {'fullcubes_12m': 'spw[0-9]_12M_spw[0-9]',\n 'linecubes_12m': 'spw[0-9]_12M_[!s]',\n 'fullcubes_7m12m': 'spw[0-9]_7M12M_spw[0-9]',\n 'linecubes_7m12m': 'spw[0-9]_7M12M_[!s]',\n 'bsens': 'bsens_12M_*.tt0',\n 'cleanest': 'merged_12M*.tt0',\n '7m12m': 'merged_7M12M*.tt0',\n '7m12m_bsens': 'bsens_7M12M*.tt0',\n '7m': 'merged_7M_*.tt0',\n '7m_bsens': 'bsens_7M_*.tt0',\n }\n\n\nbasepath = Path('/orange/adamginsburg/ALMA_IMF/2017.1.01355.L/imaging_results/')\n\nreleasepath = Path('/orange/adamginsburg/ALMA_IMF/2017.1.01355.L/July2020Release/')\n\nwith open(basepath / '../scigoals/file_list.txt', 'w') as fh1:\n with open(basepath / '../scigoals/file_tree.txt', 'w') as fh2:\n\n for field in \"G008.67 G337.92 W43-MM3 G328.25 G351.77 G012.80 G327.29 W43-MM1 G010.62 W51-IRS2 W43-MM2 G333.60 G338.93 W51-E G353.41\".split():\n if not os.path.exists(releasepath / field):\n mkdir(releasepath / field)\n for band in (3,6):\n bandpath = Path(f\"B{band}\")\n if not os.path.exists(releasepath / field / bandpath):\n mkdir(releasepath / field / bandpath)\n for dirname, globstr in dirnames.items():\n if not os.path.exists(releasepath / field / bandpath / dirname):\n mkdir(releasepath / field / bandpath / dirname)\n cwd = os.getcwd()\n chdir(releasepath / field / bandpath / dirname)\n globbo = str(basepath / f\"{field}_B{band}*{globstr}*\")\n filelist = glob.glob(globbo)\n fitsglobbo = str(basepath / f\"{field}_B{band}*{globstr}*fits\")\n filelist += glob.glob(fitsglobbo)\n #print(field, band, dirname, config, filelist)\n for fn in 
filelist:\n #print(f\"Linking {dotdot / fn} to {os.getcwd()}\")\n basename = os.path.basename(fn)\n if not os.path.exists(basename):\n symlink(fn, basename)\n elif not os.path.exists(os.readlink(basename)):\n os.unlink(basename)\n symlink(fn, basename)\n fh1.write(os.path.realpath(basename) + \"\\n\")\n fh2.write(os.path.join(os.getcwd(), basename) + \"\\n\")\n chdir(cwd)\n","sub_path":"analysis/link_files_July2020release.py","file_name":"link_files_July2020release.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"377279841","text":"#!/usr/bin/env python\nimport sys\nimport os\nimport pandas as pd\nimport config\nfrom flask import Flask, render_template, send_file, flash, redirect, url_for, safe_join, request, abort\nfrom forms import SearchForm, DownloadForm, EmptyForm\n\nif sys.version_info.major >= 3:\n from io import StringIO\nelse:\n from StringIO import StringIO\n\nc = config.Config()\n\ntry:\n from opengrid.library import houseprint\nexcept ImportError:\n sys.path.append(c.get('backend', 'opengrid'))\n from opengrid.library import houseprint\n\napp = Flask(__name__)\nSECRET_KEY = \"secret_key\" # TODO add a real key in the config file\napp.config.from_object(__name__)\n\ntry:\n hp = houseprint.Houseprint()\nexcept:\n print(\"Connection failed, loading houseprint from cache\")\n hp = houseprint.load_houseprint_from_file(\"cache_hp.hp\")\nelse:\n hp.save(\"cache_hp.hp\")\n\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n return render_template('index.html')\n\n\n@app.route(\"/data\")\ndef data():\n devices = hp.get_devices()\n devices.sort(key=lambda x: x.key)\n return render_template('data.html', fluksos=devices)\n\n\n@app.route(\"/development\")\ndef development():\n return render_template('development.html')\n\n\n@app.route(\"/subscribe\")\ndef subscribe():\n return render_template('subscribe.html')\n\n\n@app.route(\"/flukso/\")\ndef flukso(fluksoid):\n f = hp.find_device(fluksoid)\n\n if f is None:\n abort(404)\n\n sensors = f.get_sensors()\n sensors.sort(key=lambda x: x.type)\n\n return render_template(\n 'flukso.html',\n flukso=f,\n sensors=sensors\n )\n\n\n@app.route(\"/sensor/\")\ndef sensor(sensorid):\n s = hp.find_sensor(sensorid)\n\n if s is None:\n abort(404)\n\n analyses = ['timeseries']\n if s.type == 'electricity' and not s.system == 'solar':\n analyses.append('standby_horizontal')\n analyses.append('standby_vertical')\n\n return render_template(\n 'sensor.html',\n sensor=s,\n analyses=analyses\n 
)\n\n\n@app.route(\"/standby_horizontal/\")\ndef standby_horizontal(sensorid):\n s = hp.find_sensor(sensorid)\n\n filename = 'standby_horizontal_' + sensorid + '.png'\n\n if not figure_exists(filename):\n flash('No standby_horizontal graph found for this sensor')\n return redirect(url_for('sensor', sensorid=sensorid))\n\n return render_template(\n 'analysis_image.html',\n analysisname='Standby Horizontal',\n filename=filename,\n sensor=s\n )\n\n\n@app.route(\"/standby_vertical/\")\ndef standby_vertical(sensorid):\n s = hp.find_sensor(sensorid)\n\n filename = 'standby_vertical_{}.png'.format(s.key)\n\n if not figure_exists(filename):\n flash('No standby_vertical graph found for this sensor')\n return redirect(url_for('sensor', sensorid=sensorid))\n\n return render_template(\n 'analysis_image.html',\n analysisname='Standby Vertical',\n filename=filename,\n sensor=s)\n\n\n@app.route(\"/timeseries/\")\ndef timeseries(sensorid):\n s = hp.find_sensor(sensorid)\n\n path = c.get('backend', 'figures')\n filename = 'TimeSeries_{}.html'.format(s.key)\n file_path = safe_join(path, filename)\n\n if not os.path.exists(file_path):\n flash('No timeseries graph found for this sensor')\n return redirect(url_for('sensor', sensorid=sensorid))\n\n with open(file_path, \"r\") as html_graph:\n content = html_graph.read()\n\n return render_template(\n 'analysis_html.html',\n analysisname='Time Series',\n sensor=s,\n content=content\n )\n\n\n@app.route(\"/figures/\")\ndef figure(filename):\n path = c.get('backend', 'figures')\n file_path = safe_join(path, filename)\n\n return send_file(file_path)\n\n\ndef figure_exists(filename):\n path = c.get('backend', 'figures')\n file_path = safe_join(path, filename)\n\n return os.path.exists(file_path)\n\n\n@app.route(\"/search\", methods=['GET', 'POST'])\ndef search():\n form = SearchForm()\n if request.method == 'POST' and form.validate():\n f = hp.find_device(form.search_string.data)\n if f is not None: # flukso was found\n return 
redirect(url_for('flukso', fluksoid=f.key))\n else:\n flash(\"Sorry, we couldn't find that Fluksometer\")\n\n return render_template(\n \"search.html\",\n form=form)\n\n\n@app.route(\"/download\", methods=['GET', 'POST'])\n@app.route(\"/download/\")\ndef download(guid=None):\n form = DownloadForm()\n\n if request.method == 'POST' and form.validate():\n s = hp.find_device(form.guid.data)\n if s is None:\n s = hp.find_sensor(form.guid.data)\n\n if s is None:\n flash(\"ID not found\")\n else:\n try:\n # We need to connect and disconnect with tmpo\n # to make sure the website doesn't lock access to the sqlite\n hp.init_tmpo()\n tmpos = hp.get_tmpos()\n output = StringIO()\n df = s.get_data(\n head=pd.Timestamp(form.start.data),\n tail=pd.Timestamp(form.end.data),\n resample=form.resample.data\n )\n tmpos.dbcon.close()\n except:\n # This will happen if another process is currently using the tmpo\n flash(\"Error connecting to the data storage, please try again later\")\n else:\n df.to_csv(output, encoding='utf-8')\n output.seek(0)\n return send_file(\n output,\n mimetype=\"text/csv\",\n as_attachment=True,\n attachment_filename='{}.csv'.format(s.key)\n )\n if guid is not None:\n form.guid.data = guid\n\n return render_template(\n 'download.html',\n form=form\n )\n\n\n@app.route(\"/issue30\", methods=['GET', 'POST'])\ndef issue30():\n form = EmptyForm() # Empty form, only validates the secret token to protect against cross-site scripting\n\n if request.method == 'POST' and form.validate():\n try:\n hp.init_tmpo()\n tmpos = hp.get_tmpos()\n hp.sync_tmpos()\n tmpos.dbcon.close()\n except:\n flash(\"Error syncing TMPO, please try again later\")\n else:\n flash(\"TMPO Sync Successful\")\n\n return render_template(\n 'issue30.html',\n form=form\n )\n\n\n@app.errorhandler(404)\ndef internal_error(error):\n flash('ERROR 404 - Page not found')\n return redirect(url_for('index'))\n\n\nif __name__ == \"__main__\":\n try:\n env = c.get('env', 'type')\n except:\n env = 'prod'\n\n if 
env == 'dev':\n app.run(debug=True)\n else:\n app.run(debug=False, host='0.0.0.0', port=5000)\n","sub_path":"website.py","file_name":"website.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"132557507","text":"__author__ = 'arkilic'\n\nfrom distutils.core import setup\n\nsetup(\n name='sampleManager',\n version='0.0.x',\n author='Arman Arkilic',\n packages=[\"sampleManager\",\n \"sampleManager.collectionapi\",\n \"sampleManager.config\",\n \"sampleManager.dataapi\",\n \"sampleManager.database\",\n \"sampleManager.session\",\n \"sampleManager.userapi\",\n ],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"114589809","text":"import sys\nsys.path.append(\"..\")\nfrom dataloader_2_stream import GolfDB_2_Stream\nfrom model_2_stream import EventDetector\nfrom util import *\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nimport os\nfrom torch.utils.tensorboard import SummaryWriter\nfrom myeval import myeval\nfrom data.config import cfg\nimport torch.nn as nn\nif __name__ == '__main__':\n # training configuration\n split = cfg.SPLIT\n iterations = cfg.ITERATIONS\n it_save = cfg.IT_SAVE # save model every 100 iterations\n n_cpu = cfg.CPU_NUM\n seq_length = cfg.SEQUENCE_LENGTH\n bs = cfg.BATCH_SIZE # batch size\n k = 10 # frozen layers\n\n model = EventDetector(pretrain=True,\n width_mult=1.,\n lstm_layers=1,\n lstm_hidden=256,\n bidirectional=True,\n dropout=False)\n freeze_layers(k, model)\n \n model.train()\n\n # model = nn.DataParallel(model)\n model.cuda()\n dataset = GolfDB_2_Stream(data_file='../data/train_split_{}.pkl'.format(split),\n vid_dir=cfg.OPT_RESIZE_FILE_PATH,\n seq_length=seq_length,\n train=True)\n\n data_loader = DataLoader(dataset,\n batch_size=bs,\n shuffle=True,\n num_workers=n_cpu,\n drop_last=True)\n\n # the 8 golf swing events are classes 0 through 7, no-event is class 8\n # the ratio of events to no-events is approximately 1:35 so weight classes accordingly:\n weights = torch.FloatTensor(\n [1/8, 1/8, 1/8, 1/8, 1/8, 1/8, 1/8, 1/8, 1/35]).cuda()\n criterion = torch.nn.CrossEntropyLoss(weight=weights)\n optimizer = torch.optim.Adam(\n filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)\n\n losses = AverageMeter()\n\n # writer = SummaryWriter()\n\n if not os.path.exists('models'):\n os.mkdir('models')\n\n i = 0\n while i < iterations:\n # for p in optimizer.param_groups:\n # print(p['lr'])\n for sample in data_loader:\n images, keypoints, labels = sample['images'].cuda(), sample['keypoints'].cuda(), sample['labels'].cuda()\n logits = model(images,keypoints)\n labels = 
labels.view(bs*seq_length)\n loss = criterion(logits, labels)\n optimizer.zero_grad()\n loss.backward()\n losses.update(loss.item(), images.size(0))\n optimizer.step()\n print('Iteration: {}\\tLoss: {loss.val:.4f} ({loss.avg:.4f})'.format(\n i, loss=losses))\n i += 1\n if i % it_save == 0:\n torch.save({'optimizer_state_dict': optimizer.state_dict(),\n 'model_state_dict': model.state_dict()}, 'models/swingnet_{}.pth.tar'.format(i))\n if i == 1000:\n for p in optimizer.param_groups:\n p['lr'] *= 0.1\n if i == 2000:\n for p in optimizer.param_groups:\n p['lr'] *= 0.1\n if i == 10000:\n for p in optimizer.param_groups:\n p['lr'] *= 0.1\n if i == iterations:\n break\n","sub_path":"skeleton_opt_streams/train_2_stream.py","file_name":"train_2_stream.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"45578769","text":"\n#MIT License\n\n#Copyright (c) 2016 Eric Beanland\n\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n\n#The above copyright notice and this permission notice shall be included in all\n#copies or substantial portions of the Software.\n\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\n\nimport array\nimport atexit\nimport shutil\nimport os\nimport sys\n\nimport sane\nimport cairocffi as cairo\nfrom PIL import Image, ImageFilter, ImageChops\n\n#Workaround for missing cairo.ImageSurface.create_for_data implementation\n#save to temp png then load into Gtk image widget\nTEMPFILENAME = '.docscanner.png'\n\ndef init_sane():\n \"\"\"Initialize the sane module and return (sane_version, devices_list).\"\"\"\n sane_version = sane.init()\n devices = sane.get_devices()\n atexit.register(sane.exit)\n\n return sane_version, devices\n\n\ndef scan(devid):\n dev = sane.open(devid)\n\n # start device and get an image\n dev.start()\n im = dev.snap()\n dev.close()\n\n # find edges\n im2 = im.filter(ImageFilter.FIND_EDGES)\n # use difference to create bounding box\n diff = ImageChops.difference(im, im2)\n bbox = diff.getbbox()\n if bbox:\n # 
crop to bounding box\n im3 = im.crop(bbox)\n im = im3\n\n return image_to_surface(im)\n\n\ndef image_to_surface(pil_image):\n \"\"\"Convert PIL image to Cairo surface.\"\"\"\n if pil_image.mode == 'L':\n pil_image = pil_image.convert('RGBA')\n\n if sys.byteorder == 'little':\n # swap red and blue channels for little-endians\n r,g,b,a = pil_image.split()\n pil_image = Image.merge(\"RGBA\", (b,g,r,a))\n else:\n # swap channels for big-endians\n r,g,b,a = pil_image.split()\n pil_image = Image.merge(\"RGBA\", (a,r,g,b))\n\n # covert image to writable buffer / array\n img_bytes = pil_image.tobytes()\n arr = array.array('B', img_bytes)\n\n width, height = pil_image.size\n # get stride for width\n stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_RGB24,\n width)\n\n # create cairo surface from array; will fail with NotImplementedError\n # if using the pycairo module\n surface = cairo.ImageSurface.create_for_data(arr, cairo.FORMAT_RGB24,\n width, height, stride)\n\n return surface\n\n\ndef save_file(filename):\n print(filename)\n shutil.move(TEMPFILENAME, filename)\n\n","sub_path":"src/DocScanner/imaging.py","file_name":"imaging.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"454077728","text":"import numpy as np\n\n\ndef semantic_down_sample_voxel(full_voxel, scaled_vox_size, label_length=13):\n vox_size = np.array(full_voxel.shape)\n resize_voxel = np.expand_dims(full_voxel, axis=0)\n for axis in range(3):\n resize_voxel = np.split(resize_voxel, scaled_vox_size[axis], axis=3 - axis)\n resize_voxel = np.concatenate(tuple(resize_voxel), axis=0)\n resize_voxel = np.reshape(resize_voxel, [-1, np.prod(vox_size / scaled_vox_size).astype(np.int32)])\n stat_voxel = []\n for elem in resize_voxel:\n stat_voxel.append(np.bincount(elem, minlength=label_length)[1:])\n down_samples = np.stack(stat_voxel, axis=0)\n down_samples_indices = np.argmax(down_samples, axis=-1)\n final_voxel = np.reshape(np.where(down_samples.max(axis=-1) >= 3, down_samples_indices + 1,\n np.zeros(down_samples_indices.shape, down_samples_indices.dtype)),\n scaled_vox_size)\n return final_voxel\n","sub_path":"tools/utilize.py","file_name":"utilize.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"352196945","text":"def binary_addition(A, B):\n # assumes A and B are of the same length\n C = [None] * (len(A) + 1)\n carry = 0\n # start from the rightmost digit\n for i in range(len(A) - 1, -1, -1):\n C[i + 1] = (A[i] + B[i] + carry) % 2\n carry = (A[i] + B[i] + carry) // 2\n C[0] = carry\n return C\n\n\nA = [1, 0, 1, 1]\nB = [1, 0, 1, 0]\nC = binary_addition(A, B)\n\nprint('Arrays:')\nprint(A)\nprint(B)\nprint('Result:')\nprint(C)\n","sub_path":"chapter_2/binary_addition.py","file_name":"binary_addition.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"598249944","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# --- IMPLEMENTATION GOES HERE -----------------------------------------------\n# Student helpers (functions, constants, etc.) can be defined here, if needed\n\n\n\n# ----------------------------------------------------------------------------\n\n\n\n\n\ndef uoc_foursquare_genkey(keyword1, keyword2):\n \"\"\"\n EXERCISE 1: Four-Square Key Generation\n :keyword1: string with the first key word\n :keyword2: string with the second key word\n :return: tuple with the 4 matrices that form a Four-Square key\n \"\"\"\n\n square = ([],[],[],[])\n\n # --- IMPLEMENTATION GOES HERE ---\n listOfCharacters=[\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\n \"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\",\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]\n\n #En primer lugar nos aseguraremos de que las palabras clave están compuestas\n #por caracteres alfanuméricos:\n while keyword1.isalnum()==False:\n print(\"Sólo se adminten caracteres alfanuméricos\")\n keyword1=input(\"Dame una clave 1 (keyword1) válida \")\n\n while keyword2.isalnum()==False:\n print(\"Sólo se adminten caracteres alfanuméricos\")\n keyword2=input(\"Dame una clave 2 (keyword2) válida \")\n\n #Revisaremos las palabras clave. 
En caso de haber caracteres escritos en\n #minúscula, los pasaremos a mayúsculas.\n keyword1=keyword1.upper()\n keyword2=keyword2.upper()\n\n keyword1Listed=[]\n keyword2Listed=[]\n\n for i in keyword1:\n keyword1Listed.append(i)\n\n for i in keyword2:\n keyword2Listed.append(i)\n\n #Creamos una lista que contenga los caracteres de cada una de las palabras clave \n #y a continuación todos los caracteres de la A a la Z (en mayúsculas) y del 0 al 9\n keyword1Listed=keyword1Listed+listOfCharacters\n keyword2Listed=keyword2Listed+listOfCharacters\n\n listOfKeyword1=[]\n listOfKeyword2=[]\n\n #Eliminamos los caracteres repetidos en ambas listas de modo que obtengamos listas\n #de 36 campos que contengan los caracteres de la A a la Z (en mayúsculas) y del 0 al 9\n #sin repetición y comenzando con las palabras clave \"keyword1\" y \"keyword2\".\n for i in keyword1Listed:\n if i not in listOfKeyword1:\n \n listOfKeyword1.append(i)\n\n for i in keyword2Listed:\n if i not in listOfKeyword2:\n \n listOfKeyword2.append(i)\n\n upperLeftMatrix=[]\n upperRightMatrix=[]\n lowerLeftMatrix=[]\n lowerRightMatrix=[]\n\n #Preparamos las matrices que compondrán el cuadrado con listas compuestas por\n #6 listas de 6 elemntos cada una\n\n for k in range(0,4):\n \n if k==0:\n #preparamos la matriz superior izquierda:\n charger = listOfCharacters\n actualizer = upperLeftMatrix\n elif k==1:\n #preparamos la matriz inferior izquierda:\n charger = listOfKeyword2\n actualizer = lowerLeftMatrix\n elif k==2:\n #preparamos la matriz superior derecha:\n charger = listOfKeyword1\n actualizer = upperRightMatrix\n elif k==3:\n #preparamos la matriz inferior derecha:\n charger = listOfCharacters\n actualizer = lowerRightMatrix\n\n\n for i in range(0, 6):\n row=[]\n\n for j in range(0, 6):\n row.append(charger[0])\n charger.append(charger[0])\n charger.remove(charger[0])\n\n actualizer.append(row)\n\n #cargamos en la tupla square las matrices\n 
square=(upperLeftMatrix,lowerLeftMatrix,upperRightMatrix,lowerRightMatrix)\n\n # --------------------------------\n\n return square\n\n\ndef uoc_foursquare_cipher(message, key):\n \"\"\"\n EXERCISE 2: Four-Square cipher\n :message: message to cipher (plaintext)\n :key: key to use when ciphering the message (as it is returned by uoc_foursquare_genkey() )\n :return: ciphered text\n \"\"\"\n\n ciphertext = \"\"\n\n #### IMPLEMENTATION GOES HERE ####\n\n #El listado de caracteres con el orden sin alterar se encuentra en los elemntos 0 y 3\n #(que corresponden con los cuadrantes superior izquierdo e inferior derecho del cuadro)\n #de la lista y son idénticos, obtendremos este conjunto de caracteres alfanuméricos\n #sin desordenar y los almacenaremos en una variable (plaintext) en mi caso.\n plaintext=key[0]\n #Haremos lo mismo con los cuadrantes 1 y 2 (inferior izquierdo y superior derecho)\n cypher1=key[2]\n cypher2=key[1]\n\n #Nos aseguramos de que el mensaje sólo contenga caracteres alfanuméricos\n while message.isalnum()==False:\n print(\"Sólo se adminten caracteres alfanuméricos\")\n message=input(\"Escribe una palabra para cifrar válida \")\n\n #Obtenemos la longitud del mensaje\n longitudMensaje=len(message)\n\n #En caso de ser un número impar de caracteres, deberemos añadir una \"X\" al final\n evenCharacters = longitudMensaje%2\n\n if evenCharacters==1:\n message=message+\"X\"\n\n #Nos aseguramos de que todos los caracteres estén en mayúscula\n message=message.upper()\n\n\n for i in range (0,longitudMensaje,2):\n #Agrupamos los caracteres del mensaje a cifrar en digrafos\n letter1=message[i]\n letter2=message[i+1]\n\n rowIndex1=0\n rowIndex2=0\n columnIndex1=0\n columnIndex2=0\n \n #Obtenemos las \"coordenadas\" en los cuadrados de las letras que conforman cada par \n for j in range (0,6):\n row=plaintext[j]\n if letter1 in row:\n rowIndex1=j\n if letter2 in row:\n rowIndex2=j\n \n for k in range (0,6):\n letter=row[k]\n if letter1==letter:\n columnIndex1=k\n 
\n if letter2==letter:\n columnIndex2=k\n \n\n #Localizamos la letra correspondiente (primer caracter en cuadrante superior derecho\n #y segundo caracter en cuadrante inferior izquierdo) según las coordenadas obtenidas\n cypherRowIndex1=rowIndex1\n cypherRowIndex2=rowIndex2\n cypherColumnIndex1=columnIndex2\n cypherColumnIndex2=columnIndex1\n\n cypherRow1=cypher1[cypherRowIndex1]\n cypherRow2=cypher2[cypherRowIndex2]\n\n cypherLetter1=cypherRow1[cypherColumnIndex1]\n cypherLetter2=cypherRow2[cypherColumnIndex2]\n\n #Concatenamos en orden al str ciphertext los caracteres que vamos obteniendo.\n ciphertext= ciphertext + cypherLetter1 + cypherLetter2\n\n\n\n ##################################\n\n return ciphertext\n\n\ndef uoc_foursquare_decipher(message, key):\n \"\"\"\n EXERCISE 3: Four-Square decipher\n :message: message to decipher (ciphertext)\n :key: key to use when deciphering the message (as it is returned by uoc_foursquare_genkey() )\n :return: plaintext corresponding to the ciphertext\n \"\"\"\n\n plaintext = \"\"\n\n #### IMPLEMENTATION GOES HERE ####\n\n #El listado de caracteres con el orden sin alterar se encuentra en los elemntos 0 y 3\n #(que corresponden con los cuadrantes superior izquierdo e inferior derecho del cuadro)\n #de la lista y son idénticos, obtendremos este conjunto de caracteres alfanuméricos\n #sin desordenar y los almacenaremos en una variable (plaintextMatrix) en mi caso.\n plaintextMatrix=key[0]\n #Haremos lo mismo con los cuadrantes 1 y 2 (inferior izquierdo y superior derecho)\n cypher1=key[2]\n cypher2=key[1]\n\n #Nos aseguramos de que el mensaje cifrado sólo contenga caracteres alfanuméricos\n while message.isalnum()==False:\n print(\"Sólo se adminten caracteres alfanuméricos\")\n message=input(\"Escribe una palabra para descifrar válida \")\n\n #Obtenemos la longitud del mensaje\n longitudMensaje=len(message)\n\n #Nos aseguramos de que todos los caracteres estén en mayúscula\n ciphertext=message.upper()\n\n for i in range 
(0,longitudMensaje,2):\n #Agrupamos los caracteres del mensaje a cifrar en digrafos\n letter1=ciphertext[i]\n letter2=ciphertext[i+1]\n\n rowIndex1=0\n rowIndex2=0\n columnIndex1=0\n columnIndex2=0\n \n #Obtenemos las \"coordenadas\" en los cuadrados de las letras que conforman cada par \n #Primer caracter:\n for j in range (0,6):\n row=cypher1[j]\n if letter1 in row:\n rowIndex1=j\n \n for k in range (0,6):\n letter=row[k]\n if letter1==letter:\n columnIndex1=k\n \n #Segundo caracter:\n for j in range (0,6):\n row=cypher2[j]\n if letter2 in row:\n rowIndex2=j\n \n for k in range (0,6):\n letter=row[k]\n if letter2==letter:\n columnIndex2=k\n\n #Localizamos la letra correspondiente (primer caracter en cuadrante superior derecho\n #y segundo caracter en cuadrante inferior izquierdo) según las coordenadas obtenidas\n clearRowIndex1=rowIndex1\n clearRowIndex2=rowIndex2\n clearColumnIndex1=columnIndex2\n clearColumnIndex2=columnIndex1\n\n clearRow1=plaintextMatrix[clearRowIndex1]\n clearRow2=plaintextMatrix[clearRowIndex2]\n\n clearLetter1=clearRow1[clearColumnIndex1]\n clearLetter2=clearRow2[clearColumnIndex2]\n\n #Concatenamos en orden al str ciphertext los caracteres que vamos obteniendo.\n plaintext= plaintext + clearLetter1 + clearLetter2\n \n #Según mensaje del tablón, si el último caracter es una \"X\", hay que eliminarla\n\n ultimo=plaintext[longitudMensaje-1]\n newplaintext=\"\"\n if ultimo==\"X\":\n newplaintext = plaintext\n plaintext=\"\"\n longTextoClaro = len(newplaintext)\n for i in range (0, longTextoClaro-1):\n plaintext = plaintext+newplaintext[i]\n\n\n ##################################\n\n return plaintext\n\n\n","sub_path":"Criptografia/PECS/2019-2020_Sem1/02 Práctica/PR1/T2019_Practica1_Skeleton.py","file_name":"T2019_Practica1_Skeleton.py","file_ext":"py","file_size_in_byte":10004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"49773741","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\nParse image records from a Miro export and push them into a DynamoDB table.\n\nUsage:\n miro_adapter.py --table= --collection= --bucket= --key=\n miro_adapter.py -h | --help\n\nOptions:\n -h --help Show this screen.\n --table= DynamoDB table to write the Miro data to.\n --collection= Name of the associated Miro images collection.\n --bucket= S3 bucket containing the Miro XML dumps.\n --key= Key of the Miro XML dump in the S3 bucket.\n\n\"\"\"\n\nimport json\nimport time\n\nimport boto3\nimport docopt\n\nfrom utils import generate_images\n\n\ndef push_to_dynamodb(table_name, collection_name, image_data):\n \"\"\"\n Given the name of a Dynamo table and some image data, push it\n into DynamoDB.\n \"\"\"\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(table_name)\n\n with table.batch_writer() as batch:\n for i, image in enumerate(image_data, start=1):\n print('Pushing image %d with ID %s' % (i, image['image_no_calc']))\n batch.put_item(\n Item={\n 'MiroID': image['image_no_calc'],\n 'MiroCollection': collection_name,\n 'ReindexShard': 'default',\n 'ReindexVersion': 1,\n 'data': json.dumps(image, separators=(',', ':'))\n }\n )\n if i % 50 == 0:\n time.sleep(5)\n\n\nif __name__ == '__main__':\n args = docopt.docopt(__doc__)\n image_data = generate_images(bucket=args['--bucket'], key=args['--key'])\n push_to_dynamodb(\n table_name=args['--table'],\n collection_name=args['--collection'],\n image_data=image_data\n )\n","sub_path":"miro_adapter/miro_adapter.py","file_name":"miro_adapter.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"133851893","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom django.utils.timezone import make_aware\nimport datetime\n\nfrom core.models import Event, EventComment\n\nfrom event.serializers import ListCreateEventCommentSerializer\n\n\ndef sample_user(**params):\n \"\"\"Create and return a sample user\"\"\"\n return get_user_model().objects.create_user(**params)\n\n\ndef sample_event(user):\n \"\"\"Create and return a sample event\"\"\"\n default = {\n 'title': 'test title',\n 'description': 'test description',\n 'image': None,\n 'event_time': make_aware(datetime.datetime.now())\n .strftime('%Y-%m-%d %H:%M:%S'),\n 'address': 'test address',\n 'fee': 500,\n }\n\n return Event.objects.create(organizer=user, **default)\n\n\ndef sample_event_comment(event, user, comment='test comment', **params):\n \"\"\"Create and return a sample comment\"\"\"\n default = {\n 'event': event,\n 'user': user,\n 'comment': comment,\n }\n default.update(params)\n\n return EventComment.objects.create(**default)\n\n\nclass EventCommentSerializerApiTests(TestCase):\n \"\"\"Test event comment serializer API\"\"\"\n\n def setUp(self):\n self.organaizer = sample_user(\n email='organaizer@matsuda.com',\n password='testpass'\n )\n self.event = sample_event(self.organaizer)\n self.event_comment = sample_event_comment(\n self.event,\n self.organaizer\n )\n\n def test_create_event_comment_successful(self):\n \"\"\"Test create a new event comment\"\"\"\n payload = {\n 'event': self.event.id,\n 'user': self.organaizer.id,\n 'comment': 'test comment'\n }\n serializer = ListCreateEventCommentSerializer(data=payload)\n self.assertTrue(serializer.is_valid())\n\n def test_event_comment_too_long_comment(self):\n \"\"\"Test fail creating a new event comment because of long comment\"\"\"\n payload = {\n 'event': self.event.id,\n 'user': self.organaizer.id,\n 'comment': 'test comment' * 100\n }\n serializer = 
ListCreateEventCommentSerializer(data=payload)\n self.assertFalse(serializer.is_valid())\n self.assertCountEqual(serializer.errors.keys(), ['comment'])\n\n def test_create_event_comment_blank(self):\n \"\"\"Test fail creating a new event comment because of comment blank\"\"\"\n payload = {\n 'event': self.event.id,\n 'user': self.organaizer.id,\n 'comment': ''\n }\n serializer = ListCreateEventCommentSerializer(data=payload)\n self.assertFalse(serializer.is_valid())\n self.assertCountEqual(serializer.errors.keys(), ['comment'])\n","sub_path":"src/api/event/tests/test_event_comment_serializer.py","file_name":"test_event_comment_serializer.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"266617215","text":"from django.urls import path, re_path\r\nfrom api.views import course, shoppingcar\r\n\r\nurlpatterns = [\r\n path('coursecategory/', course.CourseCategoryView.as_view({'get': 'list'})),\r\n\r\n re_path(\"course/$\", course.CourseView.as_view({\"get\": \"list\"})),\r\n re_path(\"course/(?P\\d+)/$\", course.CourseView.as_view({\"get\": \"retrieve\"})),\r\n\r\n path(\"shoppingcar/\", shoppingcar.ShoppingCarViewSet.as_view())\r\n\r\n]\r\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"67481085","text":"import math\n\nquery_count = int(input())\n\ndef get_sum(last_idx):\n if last_idx == 0:\n return 0\n else:\n positive = last_idx // 2\n added = 0\n if last_idx % 2 == 1:\n added = last_idx * math.pow(-1, last_idx)\n return positive + added\n\nans_arr = [0] * query_count\nfor j in range(query_count):\n start_end = list(map(int, input().split()))\n start = start_end[0]\n end = start_end[1]\n ans_arr[j] = get_sum(end) - get_sum(start-1)\n\n\nfor ans in ans_arr:\n print(int(ans))\n\n","sub_path":"CodeForce/Round#524_Div2/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"222736800","text":"class Solution:\n def spiralOrder(self, matrix: List[List[int]]) -> List[int]:\n rows = len(matrix)\n cols = len(matrix[0])\n directions = [(0,1),(1,0),(0,-1),(-1,0)]\n changed_directions = 0\n active_direction = 0\n VISITED = 101\n result = []\n row = col = 0\n result.append(matrix[row][col])\n matrix[row][col] = VISITED\n \n while changed_directions < 2:\n while True:\n next_row = row + directions[active_direction][0]\n next_col = col + directions[active_direction][1]\n \n print(f\"{next_row}--{next_col}\")\n if not(0 <= next_row < rows and 0 <= next_col < cols):\n break\n \n if matrix[next_row][next_col] == VISITED:\n break\n \n row = next_row\n col = next_col\n \n result.append(matrix[row][col])\n matrix[row][col] = VISITED\n \n changed_directions = 0\n \n active_direction = (active_direction+1) % 4\n changed_directions +=1\n \n return result","sub_path":"54-spiral-matrix/54-spiral-matrix.py","file_name":"54-spiral-matrix.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"167794352","text":"import requests\nimport json\nprint(\"Enter url of user\")\nurl=input()\n#print(url)\nx=url.split(\"https://github.com/\")[1]\nprint(x)\ninfo=\"https://api.github.com/users/%s/repos?client_id=3e1b331062fb84d354a1&client_secret=3fc60c537530938bcae819e91fd97c59f246cbd7\"%x\n#print(info)\nr=requests.get(info).json()\n#print((r))\nwith open(\"data.json\",\"w\") as output:\n json.dump(r,output)\nwith open(\"data.json\",\"r\") as output2:\n data=json.load(output2)\nfinal_forks=[]\nfinal_name=[]\nfor i in range(0,len(data)):\n forks=data[i][\"forks\"]\n name=data[i][\"name\"]\n final_forks.append(forks)\n final_name.append(name)\n zipped=list(zip(final_name,final_forks))\n#print(zipped)\n \nfor j in range(0,10):\n if j<=len(zipped)-1:\n print(zipped[j])\n else:\n break\n\n#print(final_forks)\n#print(final_name)\n#print(list(zipped))\n#print(\"forks_count: \",data[\"forks_count\"])\n\n","sub_path":"opengenus2.py","file_name":"opengenus2.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"318909864","text":"import json\n\nimport time\nfrom twisted.web import resource\n\n\nclass DebugEndpoint(resource.Resource):\n\n def __init__(self):\n resource.Resource.__init__(self)\n self.putChild(\"open_files\", DebugOpenFilesEndpoint())\n self.putChild(\"open_sockets\", DebugOpenSocketsEndpoint())\n self.putChild(\"threads\", DebugThreadsEndpoint())\n self.putChild(\"cpu\", DebugCPUEndpoint())\n self.putChild(\"memory\", DebugMemoryEndpoint())\n self.putChild(\"log\", DebugLogEndpoint())\n\n\nclass DebugOpenFilesEndpoint(resource.Resource):\n\n def render_GET(self, request):\n return json.dumps({\"open_files\": [{\"path\": \"a/b/c.log\", \"fd\": 3}, {\"path\": \"d/e/f.txt\", \"fd\": 4}]})\n\n\nclass DebugOpenSocketsEndpoint(resource.Resource):\n\n def render_GET(self, request):\n return json.dumps({\"open_sockets\": [\n {\"family\": 2, \"status\": \"ESTABLISHED\", \"laddr\": \"0.0.0.0:0\", \"raddr\": \"0.0.0.0:0\", \"type\": 30},\n {\"family\": 2, \"status\": \"OPEN\", \"laddr\": \"127.0.0.1:1234\", \"raddr\": \"134.233.89.7:3849\", \"type\": 30}\n ]})\n\n\nclass DebugThreadsEndpoint(resource.Resource):\n\n def render_GET(self, request):\n return json.dumps({\"threads\": [\n {\"thread_id\": 12345, \"thread_name\": \"fancy_thread\", \"frames\": ['line 1', 'line 2']},\n {\"thread_id\": 5653, \"thread_name\": \"another_thread\", \"frames\": ['line 1']},\n {\"thread_id\": 8784, \"thread_name\": \"twisted\", \"frames\": ['line 1', 'line 2']}]})\n\n\nclass DebugCPUEndpoint(resource.Resource):\n\n def __init__(self):\n resource.Resource.__init__(self)\n self.putChild(\"history\", DebugCPUHistoryEndpoint())\n\n\nclass DebugCPUHistoryEndpoint(resource.Resource):\n\n def render_GET(self, request):\n now = time.time()\n return json.dumps({\"cpu_history\": [\n {\"time\": now, \"cpu\": 5.3},\n {\"time\": now + 5, \"cpu\": 10.5},\n {\"time\": now + 10, \"cpu\": 50},\n {\"time\": now + 15, \"cpu\": 57},\n {\"time\": now + 20, \"cpu\": 40},\n {\"time\": now + 25, 
\"cpu\": 30},\n {\"time\": now + 30, \"cpu\": 34}]})\n\n\nclass DebugMemoryEndpoint(resource.Resource):\n\n def __init__(self):\n resource.Resource.__init__(self)\n self.putChild(\"history\", DebugMemoryHistoryEndpoint())\n\n\nclass DebugMemoryHistoryEndpoint(resource.Resource):\n\n def render_GET(self, request):\n now = time.time()\n return json.dumps({\"memory_history\": [\n {\"time\": now, \"mem\": 5000},\n {\"time\": now + 5, \"mem\": 5100},\n {\"time\": now + 10, \"mem\": 5150},\n {\"time\": now + 15, \"mem\": 5125},\n {\"time\": now + 20, \"mem\": 5175},\n {\"time\": now + 25, \"mem\": 5100},\n {\"time\": now + 30, \"mem\": 5150}]})\n\n\nclass DebugLogEndpoint(resource.Resource):\n\n def render_GET(self, request):\n sample_logs = ''.join([\"Sample log [%d]\\n\" % i for i in xrange(10)])\n return json.dumps({\"content\": sample_logs, \"max_lines\": 10})\n","sub_path":"FakeTriblerAPI/endpoints/debug_endpoint.py","file_name":"debug_endpoint.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"450275867","text":"import requests\nimport json\n\nr = requests.get(\"https://jsonplaceholder.typicode.com/todos\")\n\ndownloadList = r.json()\n\ndef how_many_true(list_, key, keyWithBoolean):\n dictionary ={}\n for record in list_:\n if record[keyWithBoolean] == True:\n try:\n dictionary[record[key]] += 1\n except:\n dictionary[record[key]] = 1\n return dictionary\n\nuserIdWithPoints = how_many_true(downloadList, \"userId\", \"completed\")\n\ndef keys_with_top_values(dictionary):\n list_ = []\n maxValue = max(dictionary.values())\n for key, value in dictionary.items():\n if(value == maxValue):\n list_.append(key)\n return list_\n\nuserIdWithPoints = how_many_true(downloadList, \"userId\", \"completed\")\n\ntheBestUsers = keys_with_top_values(userIdWithPoints)\n\nprint(theBestUsers)\n\ndef tail_of_address_ampersand(myList, key = \"id\"):\n i = 1\n wynik = \"\"\n for x in myList:\n if(len(myList) == i):\n wynik += key + \"=\" + str(x)\n else:\n wynik += key + \"=\" + str(x) + \"&\"\n i += 1\n \n return wynik\n\n\n#sposób 2\nfor theBestUser in theBestUsers:\n r = requests.get(\"https://jsonplaceholder.typicode.com/users/\" + str(theBestUser))\n user = r.json()\n print(\"Ciasteczko dostaje użytkownik o imieniu\", user[\"name\"])\n\n#sposób 3\nr = requests.get(\"https://jsonplaceholder.typicode.com/users?\" + tail_of_address_ampersand(theBestUsers))\nusers = r.json()\nfor user in users:\n print (\"Ciasteczko dostaje użytkownik o imieniu\", user[\"name\"])\n","sub_path":"V2prizeTheBestUsers.py","file_name":"V2prizeTheBestUsers.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"449443480","text":"import torch\nimport numpy as np\nfrom scipy.signal import get_window\nimport librosa.util as librosa_util\nfrom utils import load_audio_to_torch\n\ndef get_spectrum(stft, hparams, path, device='cpu',\n drop_lf_bands=3, #ignore noisy low frequency bands when detecting silence\n peak_range=3, # range below overall spectral peak for a frame to be considered speech\n trim=(1, 3), # include frames before, after first/last detected speech\n noise_quant=(0.03, 0.1), # mean frame intensity quantile to use as noise\n noise_reduce=0.7, # fraction of noise to replace with noise_floor\n noise_floor=5e-5,\n remove_noise=False):\n audio = load_audio_to_torch(\n path, hparams.sampling_rate, wav_scale=False)[0]\n spect = spect_raw = stft.mel_spectrogram(\n audio.to(device).unsqueeze(0)).squeeze(0).cpu().numpy()\n\n if spect.shape[-1] < 30:\n warnings.warn(f'unexpectedly short audio: {path}')\n\n # trim leading/trailing silence\n if trim is not None and trim!=False:\n spectral_peaks = np.max(spect[drop_lf_bands:], axis=0)\n loud = np.argwhere(\n (spectral_peaks > np.max(spectral_peaks)-peak_range)\n ).squeeze()\n lo, hi = max(0, loud[0]-trim[0]), min(spect.shape[1], loud[-1]+trim[1])\n else:\n lo, hi = 0, spect.shape[1]\n\n # reduce background noise\n noise = 0\n if remove_noise:\n spectral_mean = np.mean(spect[drop_lf_bands:], axis=0)\n quiet = np.argwhere((\n (spectral_mean < np.quantile(spectral_mean, noise_quant[1]))\n & (spectral_mean > np.quantile(spectral_mean, noise_quant[0]))\n )).squeeze()\n if quiet.ndim > 0 and len(quiet) > 0:\n noise = spect[:, quiet].mean(1, keepdims=True)\n\n spect = spect[:, lo:hi]\n\n if remove_noise:\n spect = np.log(np.maximum(\n np.exp(spect) - noise_reduce*np.exp(noise),\n noise_floor))\n\n return {\n 'audio': audio[lo*hparams.hop_length:hi*hparams.hop_length],\n 'spect': spect,\n 'spect_raw': spect_raw\n }\n\ndef window_sumsquare(window, n_frames, hop_length=200, win_length=800,\n n_fft=800, dtype=np.float32, 
norm=None):\n \"\"\"\n # from librosa 0.6\n Compute the sum-square envelope of a window function at a given hop length.\n\n This is used to estimate modulation effects induced by windowing\n observations in short-time fourier transforms.\n\n Parameters\n ----------\n window : string, tuple, number, callable, or list-like\n Window specification, as in `get_window`\n\n n_frames : int > 0\n The number of analysis frames\n\n hop_length : int > 0\n The number of samples to advance between frames\n\n win_length : [optional]\n The length of the window function. By default, this matches `n_fft`.\n\n n_fft : int > 0\n The length of each analysis frame.\n\n dtype : np.dtype\n The data type of the output\n\n Returns\n -------\n wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`\n The sum-squared envelope of the window function\n \"\"\"\n if win_length is None:\n win_length = n_fft\n\n n = n_fft + hop_length * (n_frames - 1)\n x = np.zeros(n, dtype=dtype)\n\n # Compute the squared window at the desired length\n win_sq = get_window(window, win_length, fftbins=True)\n win_sq = librosa_util.normalize(win_sq, norm=norm)**2\n win_sq = librosa_util.pad_center(win_sq, n_fft)\n\n # Fill the envelope\n for i in range(n_frames):\n sample = i * hop_length\n x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]\n return x\n\n\ndef griffin_lim(magnitudes, stft_fn, n_iters=30, verbose=False):\n \"\"\"\n PARAMS\n ------\n magnitudes: spectrogram magnitudes\n stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods\n \"\"\"\n\n angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))\n angles = angles.astype(np.float32)\n angles = torch.autograd.Variable(torch.from_numpy(angles))\n signal = stft_fn.inverse(magnitudes, angles).squeeze(1)\n\n iters = range(n_iters)\n if verbose:\n from tqdm import tqdm\n iters = tqdm(list(iters), desc='GL step')\n\n for i in iters:\n _, angles = stft_fn.transform(signal)\n signal = 
stft_fn.inverse(magnitudes, angles).squeeze(1)\n return signal\n\n\ndef dynamic_range_compression(x, C=1, clip_val=1e-5):\n \"\"\"\n PARAMS\n ------\n C: compression factor\n \"\"\"\n return torch.log(torch.clamp(x, min=clip_val) * C)\n\n\ndef dynamic_range_decompression(x, C=1):\n \"\"\"\n PARAMS\n ------\n C: compression factor used to compress\n \"\"\"\n return torch.exp(x) / C\n","sub_path":"audio_processing.py","file_name":"audio_processing.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"240162897","text":"import os\n\npath = os.getcwd()\nfiles = (\"1.txt\", \"2.txt\", \"3.txt\")\n\nwith open('final_file', 'w', encoding='utf-8') as final_file:\n dict_of_files = {}\n dict_of_lens = {}\n\n for file_name in files:\n file = open(f\"{path}\\{file_name}\", 'r', encoding='utf-8')\n dict_of_files[file_name] = file.readlines()\n len_of_lines = len(dict_of_files[file_name])\n if len_of_lines in dict_of_lens.keys():\n dict_of_lens[len_of_lines].append(file_name)\n else:\n dict_of_lens[len_of_lines] = [file_name]\n file.close()\n\n for len_of_lines in sorted(dict_of_lens.keys(), reverse=False):\n for file_name in dict_of_lens[len_of_lines]:\n final_file.write('\\n' + file_name + '\\n')\n final_file.write(str(len_of_lines)+'\\n')\n for line in dict_of_files[file_name]:\n final_file.write(line)\n","sub_path":"Task 3 - files/Task 3.py","file_name":"Task 3.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}