diff --git "a/1908.jsonl" "b/1908.jsonl" new file mode 100644--- /dev/null +++ "b/1908.jsonl" @@ -0,0 +1,718 @@ +{"seq_id":"567048387","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nt, rx, ry, rz, vx, vy, vz, mag, e = np.loadtxt(\"../../data/1000000.dat\", unpack=True)\n\nF_rz = np.fft.fft(rz)\nfreq = 1.0/t\n\nplt.plot(freq, F_rz, linewidth=\"0.5\")\nplt.show()\n","sub_path":"analyis/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"18938303","text":"\"\"\"\n@purpose: Feature extraction from Geotiff images\n@author: Kemeng Liu\n@contact: kemeng.liu@stud-mail.uni-wuerzburg.de\n\"\"\"\n\n\nimport os\nfrom glob import glob\nimport numpy as np\nfrom scipy import ndimage, stats\nimport rasterio\n\n\ndef thdFilter(inpath, outpath):\n \"\"\"From each input image tile, extract a mask from pixels \n of the highest 2.5% of the histogram of pixel values, and \n output the binary masks as Geotiff files.\n \n \n Parameters\n ----------\n inpath : string\n The path to the directory containing all input Geotiff files.\n outpath : string\n The path to the directory where the output files should be located.\n \n \n Returns\n -------\n A printed message indicating the location of the output files.\n \"\"\"\n\n\n # Function to get z-score\n def normalize(img):\n z_score = np.zeros(img.shape)\n for i in range(0, len(img)):\n mu = float(img[i].mean())\n sigma = float(img[i].std())\n z_score[i] = (img[i]-mu)/sigma\n return z_score\n \n # Function to get statistical Mode of numpy array\n def getMode(x):\n return float(stats.mode(x)[0])\n \n \n # List Files\n tiles = glob(os.path.join(inpath, '*'))\n \n for j in range(len(tiles)):\n # Read Raster Files\n with rasterio.open(tiles[j]) as src:\n img = src.read()\n kwargs = src.meta.copy()\n \n # 1. Standardize Image\n img_nor = normalize(img)\n \n \n # 2. Reclassify with threshold 97.5% (z-score >1.96)\n binary = np.where(img_nor > 1.96, 1, 0)\n # Select only One Band\n binary = binary[0]\n \n \n # 3. Spatial Filter (3x3, mode)\n # Apply 3x3 spatial filter (moving window)\n binary_flt = ndimage.generic_filter(binary, function=getMode, size=3)\n \n \n # 4. 
Export Filtered Raster\n binary_flt = binary_flt.astype('uint8')\n # Update Metadata\n kwargs.update({\n \"driver\": \"GTiff\",\n \"count\": 1,\n \"height\": binary_flt.shape[0],\n \"width\": binary_flt.shape[1],\n \"transform\": kwargs['transform'],\n \"crs\": kwargs['crs']\n })\n # Export Masked DEM\n outpath_msk = os.path.join(outpath, tiles[j].split('.')[0].split('\\\\')[-1]+\"_msk.tif\")\n with rasterio.open(outpath_msk, \"w\", **kwargs) as dest:\n dest.write(binary_flt, indexes=1)\n \n return print(\"The output files are located at \", outpath)\n\n \n \npath_input = \"C:/EAGLE/trace_gfz/tile_analysis/sample/input/vessel\" \npath_output = \"C:/EAGLE/trace_gfz/tile_analysis/sample/output/thdFilter/vessel\" \n \nthdFilter(inpath=path_input, outpath=path_output)\n","sub_path":"script/thdFilter.py","file_name":"thdFilter.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"463124947","text":"import httplib2\nimport time\nfrom oauth2client.client import SignedJwtAssertionCredentials\nfrom apiclient import discovery, errors\n\n\nclass BigQueryStreamingInsert(object):\n def __init__(self, project, dataset, table, fields,\n credentials=('', ''), expiration=90, table_suffix='_%b'):\n \"\"\"\n Create a BigQuery streaming insert client.\n - project - project ID in BigQuery\n - dataset - dataset ID in BigQuery\n - table - table ID in BigQuery\n - fields - list of fields that describe table \n see https://developers.google.com/bigquery/docs/reference/v2/tables#schema\n for fields syntax.\n Example:\n [\n {'name': 'uuid', 'type': 'STRING'},\n {'name': 'user', 'type': 'STRING'}\n ]\n - credentials - tuple of (service_account, private_key) for Signed JWT\n https://developers.google.com/accounts/docs/OAuth2ServiceAccount\n - expiration - expiration of table, in days. If set to false (0 or\n None) then there is no expiration.\n - table_suffix - table name's suffix. It uses strftime directives.\n If you use expiration, then it makes sense to generate unique name\n for table for a period of rotation. Say, if you want to have monthly\n logs kept for a 90 days then you use suffix '_%b' which is month's\n name. The you will have table of one month's data that is kept\n for 90 days. 
\n \"\"\"\n self.bq_project = project\n self.bq_dataset = dataset\n self.bq_table = unicode(table) + unicode(time.strftime(table_suffix))\n self.bq_fields = fields\n # BigQuery uses milliseconds\n self.bq_expiration = 1000 * 60 * 60 *24 * expiration\n self.service = self._get_service(*credentials)\n\n def _get_service(self, service_account, private_key, http_timeout=30):\n jwt_credentials = SignedJwtAssertionCredentials(\n service_account,\n private_key,\n scope='https://www.googleapis.com/auth/bigquery')\n http = jwt_credentials.authorize(httplib2.Http(timeout=http_timeout))\n return discovery.build('bigquery', 'v2', http=http)\n\n def _create_table(self):\n return self.service.tables().insert(\n projectId=self.bq_project,\n datasetId=self.bq_dataset,\n body={\n \"kind\": \"bigquery#table\",\n \"tableReference\": {\n \"projectId\": self.bq_project,\n \"datasetId\": self.bq_dataset,\n \"tableId\": self.bq_table\n },\n \"schema\": { \"fields\": self.bq_fields },\n \"expirationTime\": int(1000 * time.time() + self.bq_expiration)\n },\n ).execute()\n\n def _insert_row(self, row_id, row_data):\n assert len(self.bq_fields) == len(row_data), \\\n \"Mismatch in row size and number of fields in table\"\n return self.service.tabledata().insertAll(\n projectId=self.bq_project,\n datasetId=self.bq_dataset,\n tableId=self.bq_table,\n body={\n 'kind': 'bigquery#tableDataInsertAllRequest',\n 'rows': [{\n 'insertId': row_id,\n 'json': row_data\n }]\n }\n ).execute()\n\n def insert_one(self, row_id, row_data):\n \"\"\"\n Insert a row into a table. \n - row_id - unique identified of row (uuid is a good choice)\n - row_data - row data\n \"\"\"\n try:\n result = self._insert_row(row_id, row_data)\n except errors.HttpError as e:\n if e.resp['status'] == '404':\n # The table doesn't exist, we should create it\n self._create_table()\n result = self._insert_row(row_id, row_data)\n else:\n # Re-raise exception\n raise\n # Sometimes BigQuery returns 200, but there are errors in body\n # https://developers.google.com/bigquery/streaming-data-into-bigquery#troubleshooting\n if result.get('insertErrors'):\n raise IOError(\"Insert into BigQuery failed, insertErrors=%s\" %\n result['insertErrors'])\n","sub_path":"bq.py","file_name":"bq.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"123862761","text":"import torch\nfrom collections import OrderedDict\n\nclass LatencyObserver:\n \"\"\"\n Deprecated. Please use torchprof.Profile instead.\n \"\"\"\n _module_inputs = {}\n _prof_measures = []\n use_cuda = False\n\n def __init__(self, module: torch.nn.Module, use_cuda: bool = False):\n \"\"\"Deprecated. Please use torchprof.Profile instead.\n Wrap all modules with a autograd profiler hook.\n\n Arguments:\n module (torch.nn.Module): The pytorch model to profile\n use_cuda (bool): Enable profiling cuda\n \"\"\"\n print(\"Deprecated. 
Please use torchprof.Profile instead.\")\n self.module = module\n self._register_module_hooks(self.module)\n self.use_cuda = use_cuda\n\n def __str__(self):\n tree = self._measures_to_tree()\n format_lines = self._structure_pretty_format(tree)\n\n # get the maximum character lengths for each column\n heading = [\"Module\", \"CPU Time\", \"CUDA Time\"]\n max_lens = [max(map(len, col)) for col in zip(*([heading] + format_lines))]\n\n # create the heading\n disp = \"{:<{}s}\".format(heading[0], max_lens[0]) + \" | \"\n disp += \"{:>{}s}\".format(heading[1], max_lens[1]) + \" | \"\n disp += \"{:>{}s}\".format(heading[2], max_lens[2]) + \"\\n\"\n disp += \"-|-\".join([\"-\" * mlen for mlen in max_lens]) + \"\\n\"\n for line in format_lines:\n label, cpu_time, cuda_time = line\n disp += \"{:<{}s}\".format(label, max_lens[0]) + \" | \"\n disp += \"{:>{}s}\".format(cpu_time, max_lens[1]) + \" | \"\n disp += \"{:>{}s}\".format(cuda_time, max_lens[2]) + \"\\n\"\n return disp\n\n def __repr__(self):\n return repr(self._measures_to_tree())\n\n @staticmethod\n def _structure_pretty_format(tree):\n pretty_lines = LatencyObserver._pretty_format(tree)\n format_lines = []\n for idx, pretty_line in enumerate(pretty_lines):\n depth, name, measures = pretty_line\n cpu_time, gpu_time = [\n torch.autograd.profiler.format_time(x) for x in measures\n ]\n pre = \"\"\n prev_depths = [pl[0] for pl in pretty_lines[:idx]]\n next_depths = [pl[0] for pl in pretty_lines[idx + 1 :]]\n current = True\n while depth:\n if current:\n if depth in next_depths and next_depths[0] >= depth:\n pre = \"\\u251c\\u2500\\u2500 \"\n else:\n pre = \"\\u2514\\u2500\\u2500 \"\n else:\n if depth in next_depths:\n pre = \"\\u2502 \" + pre\n else:\n pre = \" \" + pre\n depth -= 1\n current = False\n format_lines.append([pre + name, cpu_time, gpu_time])\n return format_lines\n\n @staticmethod\n def _pretty_format(tree, depth=0):\n pretty_lines = []\n for name, subtree in tree.items():\n measures = subtree.pop(None)\n pretty_lines.append([depth, name, measures])\n pretty_lines.extend(LatencyObserver._pretty_format(subtree, depth + 1))\n return pretty_lines\n\n @staticmethod\n def _prof_to_latency(prof_measures):\n prof_latency = []\n for trace, prof in prof_measures:\n cpu_time = sum([e.cpu_time_total for e in prof.function_events])\n cuda_time = sum([e.cuda_time_total for e in prof.function_events])\n prof_latency.append((trace, (cpu_time, cuda_time)))\n return prof_latency\n\n @staticmethod\n def _trace_to_key(trace):\n return \".\".join(trace)\n\n def _input_hook(self, trace):\n def _save_input(_self, module_input):\n self._module_inputs[self._trace_to_key(trace)] = module_input\n\n return _save_input\n\n def _register_module_hooks(self, module, name=None, ancestors=[]):\n if name is None:\n name = module._get_name()\n trace = ancestors + [name]\n module.register_forward_pre_hook(self._input_hook(trace))\n\n for child_name, child in module.named_children():\n self._register_module_hooks(child, name=child_name, ancestors=trace)\n\n def _measure_recursive_latency(self, child, name, ancestors):\n trace = ancestors + [name]\n child_input = self._module_inputs[self._trace_to_key(trace)]\n\n with torch.autograd.profiler.profile(use_cuda=self.use_cuda) as prof:\n child(*child_input)\n self._prof_measures.append((trace, prof))\n\n # recurse into children to get layer specific profile metrics\n for gchild_name, gchild in child.named_children():\n self._measure_recursive_latency(gchild, name=gchild_name, ancestors=trace)\n\n def 
_measures_to_tree(self):\n tree = OrderedDict()\n for trace, measurements in self._prof_to_latency(self._prof_measures):\n current_tree = tree\n for depth, module in enumerate(trace, 1):\n if module not in current_tree:\n current_tree[module] = OrderedDict()\n if depth == len(trace):\n current_tree[module][None] = measurements\n current_tree = current_tree[module]\n return tree\n\n def measure_latency(\n self,\n module_input: torch.Tensor,\n module_name: str = \"\",\n raw_profile: bool = False,\n ):\n \"\"\"Calculate layer by layer latency of a forward pass of the module.\n Each module is run seperately, so measured runtime of nested modules may not sum to equal the parent.\n\n Arguments:\n module_input (torch.Tensor): value for model forward pass\n module_name (str): Name of model, defaults to calling `_get_name()`\n raw_profile (bool): Return latency as raw autograd profile, default `False`\n \"\"\"\n self._module_inputs = {}\n self._prof_measures = []\n\n if module_name is \"\":\n module_name = self.module._get_name()\n trace = [module_name]\n\n # get overall module performance, seed module input values\n with torch.autograd.profiler.profile(use_cuda=self.use_cuda) as prof:\n self.module(module_input)\n self._prof_measures.append((trace, prof))\n\n # recurse into children to get layer specific profile metrics\n for child_name, child in self.module.named_children():\n self._measure_recursive_latency(child, name=child_name, ancestors=trace)\n\n if raw_profile:\n return self._prof_measures\n return self._prof_to_latency(self._prof_measures)\n","sub_path":"torchprof/latency_observer.py","file_name":"latency_observer.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"278545181","text":"import os\nimport zipfile\nimport argparse\nimport xml.etree.ElementTree as ET\nimport pycld2\nfrom langid.langid import LanguageIdentifier, model\nidentifier = LanguageIdentifier.from_modelstring(model, norm_probs=True)\n\nclass OpusLangid:\n\n def __init__(self, arguments):\n parser = argparse.ArgumentParser(prog='opus_langid',\n description='Add language ids to sentences in plain xml '\n 'files or xml files in zip archives using pycld2 and '\n 'langid.py')\n parser.add_argument('-f', help='File path', required=True)\n parser.add_argument('-t',\n help='Target file path. By default, the original file is edited')\n parser.add_argument('-v', help='Verbosity. 
-v: print current xml file',\n action='count', default=0)\n parser.add_argument('-s',\n help='Suppress error messages in language detection',\n action='store_true')\n\n if len(arguments) == 0:\n self.args = parser.parse_args()\n else:\n self.args = parser.parse_args(arguments)\n\n self.suppress = self.args.s\n\n def detectLanguage(self, sentence, sid):\n try:\n clddetails = pycld2.detect(sentence)\n except Exception as e:\n if not self.suppress:\n print('Sentence id <{0}>: {1}'.format(sid, e))\n clddetails = (0, 0, ((0, 'un', 0.0), 0))\n try:\n lidetails = identifier.classify(sentence)\n except Exception as e:\n if not self.suppress:\n print('Sentence id <{0}>: {1}'.format(sid, e))\n lidetails = ('un', 0.0)\n\n cldlan = clddetails[2][0][1]\n cldconf = str(round(clddetails[2][0][2]/100, 2))\n lilan, liconf = [str(round(x,2)) if type(x) == float\n else x for x in lidetails]\n\n return cldlan, cldconf, lilan, liconf\n\n def addIds(self, filename):\n tree = ET.parse(filename)\n root = tree.getroot()\n for stag in root.iter('s'):\n if stag.find('w') != None:\n sentence = []\n for wtag in stag.iter('w'):\n sentence.append(wtag.text)\n sentence = ' '.join(sentence)\n else:\n sentence = stag.text\n cldlan, cldconf, lilan, liconf = self.detectLanguage(\n sentence, stag.attrib['id'])\n stag.attrib['cld2'] = cldlan\n stag.attrib['cld2conf'] = cldconf\n stag.attrib['langid'] = lilan\n stag.attrib['langidconf'] = liconf\n\n return tree\n\n def editOrRemove(self, tempname):\n if self.args.t:\n os.rename(tempname, self.args.t)\n else:\n os.remove(self.args.f)\n os.rename(tempname, self.args.f)\n\n def writeIdsToFile(self, filename, fileobj):\n if self.args.v > 0:\n print(filename)\n filename = filename.replace('/','_')+'_opus_langid_temp.temp.xml'\n tree = self.addIds(fileobj)\n tree.write(filename, encoding='utf-8', xml_declaration=True)\n return filename\n\n def processFiles(self):\n try:\n tempname = self.args.f.replace('/','_')+'_opus_langid_temp.temp.zip'\n with zipfile.ZipFile(self.args.f, 'r') as zip_arc:\n with zipfile.ZipFile(tempname, 'w') as new_arc:\n for filename in zip_arc.filelist:\n with zip_arc.open(filename.filename) as text_file:\n if filename.filename[-4:] == '.xml':\n temp_xml = self.writeIdsToFile(\n filename.filename, text_file)\n with open(temp_xml, 'rb') as temp_bytes:\n new_bytes = b''.join(temp_bytes.readlines())\n os.remove(temp_xml)\n else:\n new_bytes = b''.join(text_file.readlines())\n new_arc.writestr(filename, new_bytes)\n except zipfile.BadZipFile:\n tempname = self.writeIdsToFile(self.args.f, self.args.f)\n self.editOrRemove(tempname)\n\n","sub_path":"opustools_pkg/opustools_pkg/old_opus_langid.py","file_name":"old_opus_langid.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"165674684","text":"# -*- coding: utf-8 -*-\n__author__ = 'Yuan'\n\nimport threading\nfrom concurrent.futures import ThreadPoolExecutor\nimport datastore.topic_store\nfrom urlutil.util import *\n\n\ndown_path = u'''d:\\ceshi\\pic'''\n\n\ndef do_down_image(img):\n global mutex\n print(img[1])\n b = read_url_retry(img[1], timeout=30, retry=4)\n if b is not None:\n print(\"{0} OK\".format(img[1]))\n mutex.acquire()\n datastore.topic_store.image_store(down_path, img[0], img[1], b, img[2])\n mutex.release()\n\n\ndef go_on():\n global mutex\n try:\n mutex.acquire()\n return datastore.topic_store.image_not_downloaded_sum(down_path) != 0\n finally:\n mutex.release()\n\nif __name__ == '__main__':\n 
each_loop_count = 128\n pool = ThreadPoolExecutor(128)\n global mutex\n mutex = threading.Lock()\n while go_on():\n img = []\n try:\n mutex.acquire()\n img = datastore.topic_store.get_images_not_downloaded(down_path, each_loop_count)\n finally:\n mutex.release()\n for i in img:\n pool.submit(do_down_image, i)\n\n","sub_path":"down_images.py","file_name":"down_images.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"614825603","text":"from odd.artifact import Issue, Location, PythonModule\nfrom odd.plugin import Plugin\n\nfrom odd_bunch.ast_utils import iter_imports\n\n\nclass LegacyImport(Plugin):\n _handles = {\"python_module\"}\n _emits = {\"issue\"}\n\n def on_python_module(self, python_module: PythonModule):\n version = python_module.addon.odoo_version\n\n def issue(module_import, import_name):\n yield Issue(\n \"legacy_import\",\n f\"Legacy import `{import_name}`\",\n python_module.addon,\n locations=[\n Location(python_module.path, module_import.position.start_pos_col_1)\n ],\n categories=[\"deprecated\"],\n )\n\n for imp in iter_imports(python_module.node):\n if imp.from_names:\n # from openerp import X\n if version >= 10 and imp.from_names[:1] == (\"openerp\",):\n yield from issue(imp, \"openerp\")\n\n # from (openerp|odoo).osv import X\n if imp.from_names[:2] in ((\"openerp\", \"osv\"), (\"odoo\", \"osv\")):\n yield from issue(imp, \"osv\")\n # from (openerp|odoo) import osv\n elif (\n imp.from_names[:1] in ((\"openerp\",), (\"odoo\",))\n and \"osv\" in imp.names\n ):\n yield from issue(imp, \"osv\")\n\n else:\n # import openerp\n # import openerp.X\n if version >= 10 and imp.names[:1] == (\"openerp\",):\n yield from issue(imp, \"openerp\")\n\n # import (openerp|odoo).osv\n if imp.names[:2] in ((\"openerp\", \"osv\"), (\"odoo\", \"osv\")):\n yield from issue(imp, \"osv\")\n","sub_path":"odd_bunch/plugin/legacy_import.py","file_name":"legacy_import.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"292388250","text":"# coding: utf-8\r\nimport paho.mqtt.client as mqtt\r\nimport sys, pygame, time, datetime\r\nfrom pygame.locals import * # pygame中で使用できる定数群をimport\r\n\r\n\r\ndef JoystickDetection(args):\r\n host = args # サーバの固定IPを設定\r\n port = 1883 # 通信に使用するポート番号を指定\r\n topic = ['PicoRover/1st','PicoRover/2nd','PicoRover/3rd','PicoRover/stop']\r\n range = 1024 # server側で使用するpwmのrange\r\n sleeptime = 0.05 # socketで送信した後のスリープ時間\r\n target = 0\r\n\r\n\r\n try:\r\n client = mqtt.Client(protocol=mqtt.MQTTv311)\r\n client.connect(host, port=port, keepalive=60)\r\n\r\n except KeyboardInterrupt:\r\n print ('')\r\n print ('KeyboardInterrupt')\r\n sys.exit()\r\n\r\n except:\r\n print(u'接続できませんでした')\r\n sys.exit()\r\n\r\n\r\n try:\r\n pygame.init() # pygameの初期化\r\n j = pygame.joystick.Joystick(0) # ジョイスティックオブジェクトの作成\r\n j.init() # オブジェクトの初期化\r\n print (u'Joystickの名称: ' + j.get_name())\r\n print (u'WebカメラのIP:' + ' ' + host)\r\n print (u'Webカメラのポート:' + u' ' + u'8080')\r\n print (u'-------START-------')\r\n print ('')\r\n print (u'前進 : 右スティックUP')\r\n print (u'後退 : 右スティックDOWN')\r\n print (u'ステアリング左 : 左スティックLEFT')\r\n print (u'ステアリング右 : 左スティックRIGHT')\r\n print (u'終了 : PSボタン')\r\n\r\n except pygame.error: # tryがうまくいかない場合pygame.errorからエラーがraiseされる\r\n print (u'Joystickが見つかりませんでした。')\r\n sys.exit() # コントローラがつながれていないのでプログラムを終了\r\n\r\n # ボタンダウンとハットモーション入力をブロック\r\n pygame.event.set_blocked([JOYBUTTONUP, 
JOYHATMOTION])\r\n # value1,value2を初期化\r\n value1 = 0\r\n value2 = 1500 # 1500はサーボモータがセンター位置になるパルス幅\r\n timer_old = datetime.datetime.now()\r\n\r\n try:\r\n while 1:\r\n for e in pygame.event.get(): # イベントを取得\r\n # 取得したイベントをチェックし、サーバ側にボタンに対応するstringを送信\r\n if e.type == pygame.locals.JOYAXISMOTION:\r\n value1_new = round(j.get_axis(3) * range) # 右スティックの値からデューティ比を計算\r\n value2_new = round((j.get_axis(0) * 500) + 1500) # 左スティックの値からパルス幅を計算\r\n\r\n value1_new = int(value1_new) # value1,value2の値をfloatからintに変換\r\n value2_new = int(value2_new)\r\n\r\n if (abs(value1_new - value1) <= 20) and (abs(value2_new - value2) <= 20): # value1,value2の値の微小な変化は無視する\r\n pass\r\n\r\n elif abs(value1_new - value1) > 20: # value1の値の微小な変化は送信しない\r\n value1 = value1_new # value1の値を更新する\r\n\r\n value1_str = 'A' + str(value1) + ':' # value2をserver側で識別するため頭にAをつける。:は終端文字。文字列が連結した時にサーバ側でsplitするため。\r\n print (value1_str) # デバッグ用に送信する値を表示\r\n client.publish(topic[target],value1_str)\r\n #client_sock.send(value1_str.encode('utf-8'))\r\n time.sleep(sleeptime) # 値を送ったら時間を空ける。この時間がない、もしくはサーバ側でGPIOを操作した後の休止時間よりある程度短い場合文字列が連結してサーバ側でエラーが出る\r\n\r\n elif abs(value2_new - value2) > 20: # value2の値の微小な変化は送信しない\r\n value2 = value2_new # value2の値を更新する\r\n\r\n value2_str = 'B' + str(value2) + ':' # value2をserver側で識別するため頭にBをつける\r\n print (value2_str) # デバッグ用に送信する値を表示\r\n client.publish(topic[target],value2_str)\r\n #client_sock.send(value2_str.encode('utf-8'))\r\n time.sleep(sleeptime) # 値を送ったら時間を空ける。この時間がない、もしくはサーバ側でGPIOを操作した後の休止時間よりある程度短い場合文字列が連結してサーバ側でエラーが出る\r\n elif e.type == pygame.locals.JOYBUTTONDOWN:\r\n print(e.button)\r\n if e.button == 2:\r\n target = 0;\r\n print(topic[target]);\r\n client.publish(\"PicoRover/change\",\"1st\");#マーカー切り替えのために専用のTopicにメッセージを送信\r\n #切り替えのタイミングで全機体を一時停止(スティックを倒したまま切り替えたときに、動き続けるのを防止するため)\r\n client.publish(topic[0],\"A0:\");\r\n client.publish(topic[1],\"A0:\");\r\n client.publish(topic[2],\"A0:\");\r\n elif e.button == 3:\r\n target = 1;\r\n print(topic[target]);\r\n client.publish(\"PicoRover/change\",\"2nd\");\r\n client.publish(topic[0],\"A0:\");\r\n client.publish(topic[1],\"A0:\");\r\n client.publish(topic[2],\"A0:\");\r\n elif e.button == 1:\r\n target = 2;\r\n print(topic[target]);\r\n client.publish(\"PicoRover/change\",\"3rd\");\r\n client.publish(topic[0],\"A0:\");\r\n client.publish(topic[1],\"A0:\");\r\n client.publish(topic[2],\"A0:\");\r\n elif e.button == 0:\r\n target = 3;\r\n print(topic[target]);\r\n client.publish(\"PicoRover/change\",\"stop\");\r\n client.publish(topic[0],\"A0:\");\r\n client.publish(topic[1],\"A0:\");\r\n client.publish(topic[2],\"A0:\");\r\n if e.button == 7:\r\n print ('E:')\r\n client.publish(topic[0],'E:')\r\n client.publish(topic[1],'E:')\r\n client.publish(topic[2],'E:')\r\n pygame.quit()\r\n sys.exit()\r\n\r\n else:\r\n pass\r\n #定期的に生存信号「S」をRaspberry Piに送信\r\n timer_latest = datetime.datetime.now()\r\n delta = timer_latest - timer_old\r\n timer = delta.total_seconds()\r\n if timer >= 0.2:\r\n timer_old = datetime.datetime.now()\r\n client.publish(topic[0],'S:')\r\n client.publish(topic[1],'S:')\r\n client.publish(topic[2],'S:') \r\n print ('S:')\r\n\r\n except KeyboardInterrupt:\r\n print ('')\r\n print (u'Keyboardinterrupt')\r\n\r\n finally:\r\n client.publish(topic[0],'E:')\r\n client.publish(topic[1],'E:')\r\n client.publish(topic[2],'E:')\r\n print (u'-------EXIT-------')\r\n pygame.quit()\r\n sys.exit()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n args = sys.argv # コマンドラインから引数を格納\r\n JoystickDetection(args[1])\r\n\r\n# 
end\r\n","sub_path":"MQTTPicoRover_s.py","file_name":"MQTTPicoRover_s.py","file_ext":"py","file_size_in_byte":7820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"571201589","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 18 17:00:09 2021\n\n@author: vittorio\n\"\"\"\nimport torch\nimport argparse\nimport os\nimport numpy as np\nimport multiprocessing as mp\nimport multiprocessing.pool\n\nimport World\n\nfrom utils import Encode_Data\nfrom BatchBW_HIL_torch import BatchBW\n\nfrom evaluation import HierarchicalStochasticSampleTrajMDP\nfrom evaluation import eval_policy\n\nimport TRPO\nimport GAIL\nimport PPO\nimport UATRPO\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass NoDaemonProcess(multiprocessing.Process):\n # make 'daemon' attribute always return False\n def _get_daemon(self):\n return False\n def _set_daemon(self, value):\n pass\n daemon = property(_get_daemon, _set_daemon)\n\n# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool\n# because the latter is only a wrapper function, not a proper class.\nclass MyPool(multiprocessing.pool.Pool):\n Process = NoDaemonProcess\n \ndef IL(env, args, seed):\n \n Trajectories = np.load(\"./Expert_data/Trajectories.npy\", allow_pickle=True).tolist()\n Rotation = np.load(\"./Expert_data/Rotation.npy\", allow_pickle=True).tolist()\n \n TrainingSet = Trajectories[args.coins]\n Labels = Rotation[args.coins]\n \n state_samples, action_samples, encoding_info = Encode_Data(TrainingSet, Labels)\n \n state_dim = state_samples.shape[1]\n action_dim = env.action_size\n option_dim = args.number_options\n termination_dim = 2\n \n kwargs = {\n \t\"state_dim\": state_dim,\n \"action_dim\": action_dim,\n \"option_dim\": option_dim,\n \"termination_dim\": termination_dim,\n \"state_samples\": state_samples,\n \"action_samples\": action_samples,\n \"M_step_epoch\": args.maximization_epochs_IL,\n \"batch_size\": args.batch_size_IL,\n \"l_rate\": args.l_rate_IL,\n \"encoding_info\": encoding_info\n }\n \n Agent_BatchHIL_torch = BatchBW(**kwargs)\n\n Loss = 100000\n evaluation_HIL = []\n for i in range(args.N_iterations):\n print(f\"Iteration {i+1}/{args.N_iterations}\")\n loss = Agent_BatchHIL_torch.Baum_Welch()\n if loss > Loss:\n Agent_BatchHIL_torch.reset_learning_rate(args.l_rate_IL/10)\n Loss = loss\n [trajBatch_torch, controlBatch_torch, OptionsBatch_torch, \n TerminationBatch_torch, RewardBatch_torch] = HierarchicalStochasticSampleTrajMDP(Agent_BatchHIL_torch, env, args.evaluation_max_n_steps, args.evaluation_episodes, 'standard', TrainingSet[0,:])\n avg_reward = np.sum(RewardBatch_torch)/args.evaluation_episodes\n evaluation_HIL.append(avg_reward)\n \n print(\"---------------------------------------\")\n print(f\"Seed {seed}, Evaluation over {args.evaluation_episodes} episodes: {avg_reward:.3f}\")\n print(\"---------------------------------------\")\n \n # Save\n np.save(f\"./results/FlatRL/IL_{args.env}_{seed}\", evaluation_HIL)\n Agent_BatchHIL_torch.save(f\"./models/FlatRL/IL/IL_{args.env}_{seed}\")\n \n \ndef RL(env, args, seed):\n \n Trajectories = np.load(\"./Expert_data/Trajectories.npy\", allow_pickle=True).tolist()\n Rotation = np.load(\"./Expert_data/Rotation.npy\", allow_pickle=True).tolist()\n TrainingSet = Trajectories[args.coins]\n Labels = Rotation[args.coins]\n state_samples, action_samples, encoding_info = Encode_Data(TrainingSet, Labels)\n state_dim = state_samples.shape[1]\n action_dim = 
env.action_size\n \n # Initialize policy \n if args.policy == \"TRPO\":\n kwargs = {\n \"state_dim\": state_dim,\n \"action_dim\": action_dim,\n \"encoding_info\": encoding_info,\n \"num_steps_per_rollout\": args.number_steps_per_iter\n }\n # Target policy smoothing is scaled wrt the action scale\n policy = TRPO.TRPO(**kwargs)\n if args.load_model and args.IL:\n \tpolicy.load_actor(f\"./models/FlatRL/IL/IL_{args.env}_{seed}\", HIL=args.IL) \n \n # Initialize policy \n if args.policy == \"UATRPO\":\n kwargs = {\n \"state_dim\": state_dim,\n \"action_dim\": action_dim,\n \"encoding_info\": encoding_info,\n \"num_steps_per_rollout\": args.number_steps_per_iter\n }\n # Target policy smoothing is scaled wrt the action scale\n policy = UATRPO.UATRPO(**kwargs)\n if args.load_model and args.IL:\n \tpolicy.load_actor(f\"./models/FlatRL/IL/IL_{args.env}_{seed}\", HIL=args.IL) \n \n if args.policy == \"PPO\":\n kwargs = {\n \"state_dim\": state_dim,\n \"action_dim\": action_dim,\n \"encoding_info\": encoding_info,\n \"num_steps_per_rollout\": args.number_steps_per_iter\n }\n # Target policy smoothing is scaled wrt the action scale\n policy = PPO.PPO(**kwargs)\n if args.load_model and args.IL:\n \tpolicy.load_actor(f\"./models/FlatRL/IL/IL_{args.env}_{seed}\", HIL=args.IL) \n \n if args.GAIL:\n kwargs = {\n \"state_dim\": state_dim,\n \"action_dim\": action_dim,\n \"expert_states\": state_samples,\n \"expert_actions\": action_samples,\n }\n IRL = GAIL.Gail(**kwargs)\n \t\n # Evaluate untrained policy\n evaluations = [eval_policy(policy, env, seed, 0)]\n \n for i in range(int(args.max_iter)):\n \n if args.GAIL and args.Mixed_GAIL:\n rollout_states, rollout_actions = policy.GAE(env, args.GAIL, IRL.discriminator, 'standard', TrainingSet[0,:], args.Mixed_GAIL)\n mean_expert_score, mean_learner_score = IRL.update(rollout_states, rollout_actions)\n print(f\"Expert Score: {mean_expert_score}, Learner Score: {mean_learner_score}\")\n policy.train(Entropy = True)\n \n elif args.GAIL:\n rollout_states, rollout_actions = policy.GAE(env, args.GAIL, IRL.discriminator, 'standard', TrainingSet[0,:])\n mean_expert_score, mean_learner_score = IRL.update(rollout_states, rollout_actions)\n print(f\"Expert Score: {mean_expert_score}, Learner Score: {mean_learner_score}\")\n policy.train(Entropy = True)\n \n else:\n rollout_states, rollout_actions = policy.GAE(env)\n policy.train(Entropy = True)\n \n # Evaluate episode\n if (i + 1) % args.eval_freq == 0:\n evaluations.append(eval_policy(policy, env, seed, i+1, args.evaluation_episodes, 'standard', TrainingSet[0,:])) \n \n return evaluations, policy\n \n \ndef train(env, args, seed): \n \n # Set seeds\n env.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n \n if args.IL:\n IL(env, args, seed)\n \n evaluations, policy = RL(env, args, seed)\n \n return evaluations, policy\n\n\nif __name__ == \"__main__\":\n \n Trajectories = np.load(\"./Expert_data/Trajectories.npy\", allow_pickle=True).tolist()\n Rotation = np.load(\"./Expert_data/Rotation.npy\", allow_pickle=True).tolist()\n Coins_location = np.load(\"./Expert_data/Coins_location.npy\")\n len_trajs = []\n for i in range(len(Trajectories)):\n len_trajs.append(len(Trajectories[i]))\n \n mean_len_trajs = int(np.mean(len_trajs))\n \n parser = argparse.ArgumentParser()\n #General\n parser.add_argument(\"--number_options\", default=1, type=int) # number of options\n parser.add_argument(\"--policy\", default=\"UATRPO\") # Policy name (TD3, DDPG or OurDDPG)\n parser.add_argument(\"--seed\", default=21, type=int) # Sets 
Gym, PyTorch and Numpy seeds\n parser.add_argument(\"--env\", default=\"Foraging\") # Sets Gym, PyTorch and Numpy seeds\n parser.add_argument(\"--number_steps_per_iter\", default=30000, type=int) # Time steps initial random policy is used 25e3\n parser.add_argument(\"--eval_freq\", default=1, type=int) # How often (time steps) we evaluate\n parser.add_argument(\"--max_iter\", default=200, type=int) # Max time steps to run environment\n parser.add_argument(\"--coins\", default=2, type=int)\n parser.add_argument(\"--multiprocessing\", action=\"store_true\")\n parser.add_argument(\"--Nprocessors\", default=int(0.5*multiprocessing.cpu_count()), type=int)\n #IL\n parser.add_argument(\"--IL\", default=True, type=bool) # Batch size for HIL\n parser.add_argument(\"--size_data_set\", default=3000, type=int) # Batch size for HIL\n parser.add_argument(\"--batch_size_IL\", default=32, type=int) # Batch size for HIL\n parser.add_argument(\"--maximization_epochs_IL\", default=10, type=int) # Optimization epochs HIL\n parser.add_argument(\"--l_rate_IL\", default=0.001, type=float) # Optimization epochs HIL\n parser.add_argument(\"--N_iterations\", default=11, type=int) # Number of EM iterations\n # IRL\n parser.add_argument(\"--GAIL\", default=False) # Frequency of delayed critic updates\n parser.add_argument(\"--Mixed_GAIL\", default=False) \n # HRL\n parser.add_argument(\"--start_timesteps\", default=25e3, type=int) # Time steps before training default=25e3\n parser.add_argument(\"--save_model\", action=\"store_false\") # Save model and optimizer parameters\n parser.add_argument(\"--load_model\", default=True, type=bool) # Model load file name, \"\" doesn't load, \"default\" uses file_name\n parser.add_argument(\"--load_model_path\", default=\"\") \n # Evaluation\n parser.add_argument(\"--evaluation_episodes\", default=10, type=int)\n parser.add_argument(\"--evaluation_max_n_steps\", default = mean_len_trajs, type=int)\n args = parser.parse_args()\n \n if args.multiprocessing: \n file_name = f\"{args.policy}_IL_{args.IL}_GAIL_{args.GAIL}_Mixed_{args.Mixed_GAIL}_{args.env}_{args.Nprocessors}\"\n print(\"---------------------------------------\")\n print(f\"Policy: {args.policy}, IL: {args.IL}, GAIL: {args.GAIL}, Mixed: {args.Mixed_GAIL}, Env: {args.env}, NSeeds: {args.Nprocessors}\")\n print(\"---------------------------------------\")\n \n else:\n file_name = f\"{args.policy}_IL_{args.IL}_GAIL_{args.GAIL}_Mixed_{args.Mixed_GAIL}_{args.env}_{args.seed}\"\n print(\"---------------------------------------\")\n print(f\"Policy: {args.policy}, IL: {args.IL}, GAIL: {args.GAIL}, Mixed: {args.Mixed_GAIL}, Env: {args.env}, Seed: {args.seed}\")\n print(\"---------------------------------------\")\n \n \n if not os.path.exists(\"./results/FlatRL\"):\n os.makedirs(\"./results/FlatRL\")\n \n if not os.path.exists(f\"./models/FlatRL/{file_name}\"):\n os.makedirs(f\"./models/FlatRL/{file_name}\")\n \n if not os.path.exists(\"./models/FlatRL/IL\"):\n os.makedirs(\"./models/FlatRL/IL\")\n \n coins_location = Coins_location[args.coins,:,:] \n \n env = World.Foraging.env(coins_location)\n ctx = mp.get_context('spawn')\n \n if args.multiprocessing: \n arguments = [(env, args, seed) for seed in range(args.Nprocessors)] \n with ctx.Pool(args.Nprocessors) as pool:\n results = pool.starmap(train, arguments)\n pool.close()\n pool.join()\n \n evaluations = []\n for i in range(args.Nseed):\n evaluations.append(results[i][0])\n \n np.save(f\"./results/FlatRL/mean_{file_name}\", np.mean(evaluations,0))\n 
np.save(f\"./results/FlatRL/std_{file_name}\", np.std(evaluations,0))\n np.save(f\"./results/FlatRL/steps_{file_name}\", np.linspace(0, args.max_iter*args.number_steps_per_iter, len(np.mean(evaluations,0))))\n \n if args.save_model: \n index = np.argmax(np.max(evaluations,1))\n policy = results[index][1]\n policy.save(f\"./models/FlatRL/{file_name}\")\n else:\n evaluations, policy = train(env, args, args.seed)\n if args.save_model: \n np.save(f\"./results/FlatRL/evaluation_{file_name}\", evaluations)\n policy.save_actor(f\"./models/FlatRL/{file_name}\")\n ","sub_path":"main_flat_on_policy.py","file_name":"main_flat_on_policy.py","file_ext":"py","file_size_in_byte":12051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"527707923","text":"import actions\nimport random\n\nclass Player:\n def __init__(self, money_payout_rates, my_spawn_point, their_spawn_point):\n self.money_payout_rates = money_payout_rates,\n self.my_spawn_point = my_spawn_point\n self.their_spawn_point = their_spawn_point\n\n def take_turn(self, guys, my_food, their_food, my_money, their_money):\n self.width = len(guys)\n self.height = len(guys[0])\n\n allTargetCells = []\n movedCells = []\n toMoveCells = []\n\n orders = {}\n for x in range(self.width):\n for y in range(self.height):\n if not guys[x][y]: continue\n num_guys, is_mine = guys[x][y]\n if not is_mine: continue\n if num_guys == 1: continue\n \n toMoveCells.append( [x,y] )\n \n while len(toMoveCells) > 0:\n [x,y] = toMoveCells.pop(0)\n num_guys, is_mine = guys[x][y]\n\n if not guys[x][y] or guys[x][y] < 1:\n asdf()\n continue\n \n movedCells.append([x,y])\n nextAction = self.getNextAction(guys,x,y)\n\n if [x,y] in allTargetCells:\n orders[(x,y), nextAction] = num_guys\n else:\n orders[(x,y), nextAction] = num_guys - 1\n \n targetCell = [x + actions.OFFSETS[nextAction][0], y + actions.OFFSETS[nextAction][1]]\n allTargetCells.append(targetCell)\n \n if( self.nextMoveValid(guys, movedCells, targetCell[0], targetCell[1]) ):\n toMoveCells.append( targetCell )\n \n return orders\n \n def nextMoveValid( self, guys, movedCells, newX, newY):\n if [newX,newY] in movedCells:\n return False\n if newX < 0 or newY < 0:\n return False\n if newX > self.width or newY > self.height:\n return False\n if not guys[newX][newY]:\n return False\n num_guys, is_mine = guys[newX][newY]\n return is_mine\n \n def getNextAction( self, guys, x, y ):\n minDist2Free = 100\n pos2Free = [0,0]\n minDist2Enemy = 100\n pos2Enemy = [0,0]\n \n for destX in range(self.width):\n for destY in range(self.height):\n curDist = abs(x-destX) + abs(y-destY)\n\n if not guys[destX][destY]:\n if curDist < minDist2Free:\n pos2Free = [destX, destY]\n minDist2Free = curDist\n else:\n num_guys, is_mine = guys[destX][destY]\n if not is_mine:\n if curDist < minDist2Enemy:\n pos2Enemy = [destX,destY]\n minDist2Enemy = curDist\n \n if (minDist2Free) < (3*minDist2Enemy):\n dx = x-pos2Free[0]\n dy = y-pos2Free[1]\n #print 'a'\n else:\n dx = x-pos2Enemy[0]\n dy = y-pos2Enemy[1]\n \n #print minDist2Free,minDist2Enemy\n #print dx,dy\n #asdf()\n if abs(dx)>abs(dy):\n if dx < 0:\n return actions.RIGHT\n else:\n return actions.LEFT\n else:\n if dy < 0:\n return actions.UP\n else:\n return actions.DOWN\n\n\n","sub_path":"entry_missing.py","file_name":"entry_missing.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"122634108","text":"import glob\nimport os\n\ntxt_files = 
glob.glob(\"*.txt\")\n\nnumber=0;\n\n\nnumbers=[]\nunnamed=[]\n\nfor filename in txt_files:\n\tnumber = filename[4:-4]\n\ttry:\n\t\tx=int(number)\n\t\tnumbers.append(x)\n\texcept:\n\t\tunnamed.append(filename)\n\nm=max(numbers)\nmax=int(m)\n\nfor file in unnamed:\n\tmax=max+1\n\tos.rename(file,\"eco_000\"+str(max)+'.txt')\n","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"356792605","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 13 08:18:07 2019\r\n\r\n@author: mohammad\r\n\"\"\"\r\n# to calculate Euler's \r\n# Totient Function \r\n\r\ndef gcd(a, b): \r\n \r\n if (a == 0): \r\n return b \r\n return gcd(b % a, a) \r\n\r\ndef phi(n): \r\n \r\n result = 1\r\n for i in range(2, n): \r\n if (gcd(i, n) == 1): \r\n result+=1\r\n return result \r\n\r\n\r\nvalue = int (input (\"enter the number\")) \r\nprint (\"the phi is \"+str(phi(value))) # count of the numbers that considered as coprime with value\r\n\r\nlist1= []\r\nfor i in range (1,value-1):\r\n if (gcd(i,value)==1):\r\n list1.append(i)\r\nprint (\"the numbers are \") \r\nprint (list1) # the list contain the numbers that considered as coprime with value \r\n","sub_path":"phi.py","file_name":"phi.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"81692850","text":" #############################\n # Amazon Interview Question #\n #############################\n \nar = [1,1,0,1,1,0,1,1,1,1]\nl = []\nseqonetot = 0\nfor i in range(len(ar)):\n if ar[i] == 1:\n seqonetot += 1\n\n else:\n if seqonetot > 0:\n l.append(seqonetot)\n l.append(ar[i])\n seqonetot = 0\n\n#print(l)\nmaxseq = 0\nfor i in range(len(l)):\n x = l[i]\n if (i + 1) < len(l):\n x += 1\n if (i + 2) < len(l):\n x += l[i+2]\n if x > maxseq:\n maxseq = x\n\nprint(maxseq)\n","sub_path":"Longest_Subsequence.py","file_name":"Longest_Subsequence.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"124962787","text":"#!/bin/env python\nfrom __future__ import print_function\nfile_a = 'find_ip_a.txt'\nfile_b = 'find_ip_b.txt'\na = [i.strip() for i in open(file_a)]\nfor i in open(file_b):\n\tif i.split()[0] in a:\n\t\tprint(i, end='')\nprint()\n## end\n","sub_path":"find_ip.py","file_name":"find_ip.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"72149509","text":"from math import*\nx = eval(input(\"Valor de x:\" ))\nk = int(input(\"Valor de k: \"))\n\ncont = 1\nsinal = -1\nresultado = x\nexp = 3\n\nwhile( cont < k ):\n\tresultado = resultado + (sinal *(x ** exp / factorial (exp)))\n\tsinal = sinal * -1\n\texp = exp + 2\n\tcont += 1\nprint(round(resultado, 10))\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t","sub_path":"5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4176/codes/1716_2505.py","file_name":"1716_2505.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"58247814","text":"import os \nimport random\n#data_dir =\"data/custom\"\nimport pandas as pd\nimport csv\nimport nltk\nfrom nltk import word_tokenize\nfrom nltk.corpus import wordnet as wn 
\ndf=pd.read_csv(\"ch1.txt.csv\")\ndata_src=[]\ndata_dest=[]\ndata_test_ques=[]\ndata_test_ans=[]\ndata_train_ques=[]\ndata_train_ans=[]\ndata_train_ans1=[]\ndata_train_ques1=[]\ndata_test_ques1=[]\ndata_test_ans1=[]\ndata_test_ques2=[]\ndata_test_ans2=[]\ndata_train_ques2=[]\ndata_train_ans2=[]\na=len(df['source'])\n\ndef load_data(string=\"\",robot=\"\",start=\"\", end=\"\"):\n for i in range(a):\n if df.loc[i].at['source']=='human':\n string=string+df.loc[i].at['text']\n if robot!=\"\":\n data_dest.append(start + robot + end)\n robot=\"\"\n #print(type(string))\n else:\n if string!=\"\":\n data_src.append(string)\n string=\"\" \n if df.loc[i].at['source']=='robot':\n robot=robot+df.loc[i].at['text']\n if robot!=\"\":\n data_dest.append(start + robot + end)\n robot=\"\"\n if string!=\"\":\n data_src.append(string)\n string=\"\" \n \n \n print(len(data_src))\n #print(\"human:\",data_src)\n #print(\"robot:\",data_dest)\n print(len(data_dest))\n\ndef input1(input1=True):\n return data_src\ndef output1(output1=True):\n return data_dest \n\n\ndef word_syns(data_dest1,data_src3):\n \n for idx in range(len(data_dest1)):\n answer=data_dest1[idx]\n n_answer1=answer\n #sentences=[]\n words = word_tokenize(answer)\n taged_tokens=nltk.pos_tag(words)\n \n #print(\"Pos tag of word answer:\",taged_tokens1)\n sentence=data_src3[idx]\n for word,tag in taged_tokens:\n #print(word)\n synonymList ={}\n syno=[]\n if word!='ssss' and word!='r' and word!='i'and word!='o'and word!='t' and word!='y'and word!='af' and word!='s' and word!='d'and word!='c'and word!='ti' and word!='u' and word!='da' and word!='te'and word!='si'and word!='la' and word!='le'and word!='el' and word!='al' and word!='se'and word!='e'and word!='n' and word!='se' and word!='es'and word!='d':\n if tag=='NN'or tag== 'VBN':\n wordNetSynset = wn.synsets(word) \n if len(wordNetSynset) != 0:\n #print(\"word:\",word)\n #print(\"Pos tag of word:\",tag)\n for synSet in wordNetSynset:\n for synWords in synSet.lemma_names():\n if synWords not in syno:\n syno.append(synWords)\n synonymList[word]=syno\n #print(\"list of syno:\",syno)\n #print(\"list of syno:\",synonymList)\n ns='/'.join(syno)\n #print(ns)\n n_answer1=n_answer1.replace(word,ns)\n #print(\"sentence:\",sentence)\n #print(\"augmented_sentence:\",n_sentence)\n for key in synonymList:\n for i in range(len(synonymList[key])):\n n_answer=answer\n n_answer = n_answer.replace(word,synonymList[word][i])\n #sentences.append(n_sentence)\n if n_answer not in data_train_ans1:\n \n data_train_ans1.append(n_answer)\n\n data_train_ques1.append(sentence)\n \n else:\n if answer not in data_train_ans1:\n \n data_train_ans1.append(answer)\n \n data_train_ques1.append(sentence)\n \n \n #print(sentence)\n #print(\"lis of sentence:\",data_train_ques2)\n #print(\"lis of sentence:\",data_train_ans2)\n #print(n_sentence)\n #data_train_ques1.append(n_sentence)\n #print(\"new list:\",data_train_ans1)\n print(len(data_train_ques1))\n print(len(data_train_ans1))\n return data_train_ques1,data_train_ans1\n\ndef word_syn1(data_train_ques1,data_train_ans1):\n \n for idx in range(len(data_train_ques1)):\n #print(idx)\n answer1=data_train_ans1[idx]\n question=data_train_ques1[idx]\n words1 = word_tokenize(question)\n taged_tokens=nltk.pos_tag(words1)\n for word,tag in taged_tokens:\n #print(word)\n synonymList1 ={}\n syno1=[]\n if word!='ssss'and word!='r' and word!='i'and word!='o'and word!='t' and word!='y'and word!='af' and word!='s' and word!='d'and word!='c'and word!='ti' and word!='u' and word!='da' and 
word!='te'and word!='si'and word!='la' and word!='le'and word!='el' and word!='al' and word!='se'and word!='e'and word!='n' and word!='se' and word!='es'and word!='d':\n if tag=='NN'or tag== 'VBN':\n wordNetSynset = wn.synsets(word) \n if len(wordNetSynset) != 0:\n #print(\"word:\",word)\n #print(\"Pos tag of word:\",tag)\n for synSet in wordNetSynset:\n for synWords in synSet.lemma_names():\n if synWords not in syno1:\n syno1.append(synWords)\n synonymList1[word]=syno1 \n\n #print(\"sentence:\",syno1)\n #print(\"augmented_sentence:\",n_sentence)\n for key in synonymList1:\n for i in range(len(synonymList1[key])):\n n_sentence=question\n n_sentence=n_sentence.replace(word,synonymList1[word][i])\n #if question in data_train_ques3:\n data_train_ques2.append(n_sentence)\n data_train_ans2.append(answer1)\n\n #if question not in data_train_ques3:\n\n else:\n if question not in data_train_ques2:\n data_train_ques2.append(question)\n data_train_ans2.append(answer1)\n\n\n #print(data_train_ques3)\n print(len(data_train_ques2))\n print(len(data_train_ans2)) \n return data_train_ques2,data_train_ans2\n\ndef prepare_seq2seq_files(data_train_ques2,data_train_ans2,TESTSET_SIZE =50000):\n \n\n # choose 30,000 (TESTSET_SIZE) items to put into testset\n test_ids = random.sample([i for i in range(len(data_train_ques2))],TESTSET_SIZE)\n \n for i in range(len(data_train_ques2)):\n if i in test_ids:\n data_test_ques.append(data_train_ques2[i]+'\\n')\n data_test_ans.append(data_train_ans2[i]+ '\\n' )\n else:\n data_train_ques.append(data_train_ques2[i]+'\\n')\n data_train_ans.append(data_train_ans2[i]+ '\\n' )\n #if i%100== 0:\n # print (\"written lines\",i)\n\ndef train_encoder(input1=True):\n return data_train_ques\n\ndef train_decoder(output1=True):\n return data_train_ans \n\ndef test_encoder(input1=True):\n return data_test_ques\n\ndef test_decoder(output1=True):\n return data_test_ans \n \n\n\n\ndef word_syns2(data_dest3,data_src5):\n \n for idx in range(len(data_dest3)):\n answer2=data_dest3[idx]\n n_answer2=answer2\n #sentences=[]\n words3 = word_tokenize(answer2)\n taged_tokens3=nltk.pos_tag(words3)\n \n #print(\"Pos tag of word answer:\",taged_tokens1)\n sentence2=data_src5[idx]\n for word,tag in taged_tokens3:\n #print(word)\n synonymList3 ={}\n syno3=[]\n if word!='ssss' and word!='r' and word!='i'and word!='o'and word!='t' and word!='y'and word!='af' and word!='s' and word!='d'and word!='c'and word!='ti' and word!='u' and word!='da' and word!='te'and word!='si'and word!='la' and word!='le'and word!='el' and word!='al' and word!='se'and word!='e'and word!='n' and word!='se' and word!='es'and word!='d':\n if tag=='NN'or tag== 'VBN':\n wordNetSynset = wn.synsets(word) \n if len(wordNetSynset) != 0:\n #print(\"word:\",word)\n #print(\"Pos tag of word:\",tag)\n for synSet in wordNetSynset:\n for synWords in synSet.lemma_names():\n if synWords not in syno3:\n syno3.append(synWords)\n synonymList3[word]=syno3 \n #print(\"list of syno:\",syno)\n #print(\"list of syno:\",synonymList)\n ns3='/'.join(syno3)\n #print(ns3)\n n_answer2=n_answer2.replace(word,ns3)\n #print(\"sentence:\",sentence)\n #print(\"augmented_sentence:\",n_sentence)\n for key in synonymList3:\n for i in range(len(synonymList3[key])):\n n_answer3=answer2\n n_answer3 = n_answer3.replace(word,synonymList3[word][i])\n #sentences.append(n_sentence)\n if n_answer3 not in data_test_ans1:\n \n data_test_ans1.append(n_answer3)\n\n data_test_ques1.append(sentence2)\n \n else:\n if answer2 not in data_test_ans1:\n \n 
data_test_ans1.append(answer2)\n \n data_test_ques1.append(sentence2)\n \n \n #print(sentence)\n #print(\"lis of sentence:\",data_train_ques2)\n #print(\"lis of sentence:\",data_train_ans2)\n #print(n_sentence)\n #data_train_ques1.append(n_sentence)\n #print(\"new list:\",data_train_ans1)\n print(len(data_test_ques1))\n print(len(data_test_ans1))\n return data_test_ques1,data_test_ans1\n\ndef word_syn3(data_test_ques1,data_test_ans1):\n\n for idx in range(len(data_test_ques1)):\n #print(idx)\n answer2=data_test_ans1[idx]\n question1=data_test_ques1[idx]\n words1 = word_tokenize(question1)\n taged_tokens1=nltk.pos_tag(words1)\n for word,tag in taged_tokens1:\n #print(word)\n synonymList5 ={}\n syno5=[]\n if word!='ssss'and word!='r' and word!='i'and word!='o'and word!='t' and word!='y'and word!='af' and word!='s' and word!='d'and word!='c'and word!='ti' and word!='u' and word!='da' and word!='te'and word!='si'and word!='la' and word!='le'and word!='el' and word!='al' and word!='se'and word!='e'and word!='n' and word!='se' and word!='es'and word!='d':\n if tag=='NN'or tag== 'VBN':\n wordNetSynset = wn.synsets(word) \n if len(wordNetSynset) != 0:\n #print(\"word:\",word)\n #print(\"Pos tag of word:\",tag)\n for synSet in wordNetSynset:\n for synWords in synSet.lemma_names():\n if synWords not in syno5:\n syno5.append(synWords)\n synonymList5[word]=syno5 \n\n #print(\"sentence:\",syno5)\n #print(\"augmented_sentence:\",n_sentence)\n for key in synonymList5:\n for i in range(len(synonymList5[key])):\n n_sentence1=question1\n n_sentence1=n_sentence1.replace(word,synonymList5[word][i])\n #if question in data_train_ques3:\n data_test_ques2.append(n_sentence1)\n data_test_ans2.append(answer2)\n\n\n\n #if question not in data_train_ques3:\n\n else:\n if question1 not in data_test_ques2:\n data_test_ques2.append(question1)\n data_test_ans2.append(answer2)\n\n\n #print(data_train_ques3)\n print(len(data_test_ques2))\n print(len(data_test_ans2)) \n return data_test_ques2,data_test_ans2\n","sub_path":"chatbot(Bahdanau attention) small dataset/data1.py","file_name":"data1.py","file_ext":"py","file_size_in_byte":13037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"516124828","text":"import time\n\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nnames_1.sort()\nnames_2.sort()\n\nduplicates = []\ni = 0\nj = 0\nwhile i < len(names_1) and j < len(names_2):\n if names_1[i] == names_2[j]:\n duplicates.append(names_1[i])\n i += 1\n j += 1\n elif names_1[i] < names_2[j]:\n i += 1\n else:\n j += 1 \n\n# duplicates = []\n# for name_1 in names_1:\n# for name_2 in names_2:\n# if name_1 == name_2:\n# duplicates.append(name_1)\n\nend_time = time.time()\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"129553245","text":"#! 
/usr/bin/env python3\n\nimport bs4, requests\n\n\ndef getTopic(url) :\n res = requests.get(url)\n res.raise_for_status()\n\n soup = bs4.BeautifulSoup(res.text, 'html.parser')\n elems = soup.select('#app > div > main > div > div > header > header > div > span._3jtfvo8J > h1')\n return elems[0].text.strip()\n \n\ntopic = getTopic('https://elements.envato.com/about')\nprint(topic)\n\n\n\n\n\nhelloFile = open('/Users/teerasakyukantapornpong/Desktop/MyPythonScripts/output/output.txt', 'a')\nhelloFile.write(topic)\nhelloFile.close()\n","sub_path":"getTopic.py","file_name":"getTopic.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"23389586","text":"import numpy as np\r\nfrom termcolor import colored\r\n\r\nvir_iris_data = np.genfromtxt('iris_for_ML.csv', delimiter=',')\r\n\r\nrandom_iris_data = np.random.permutation(vir_iris_data)\r\n\r\n\r\ntest_data = random_iris_data[0:10,:4]\r\ntrain_data = random_iris_data[10:,:4]\r\n\r\ntest_lbl = random_iris_data[0:10,4:]\r\ntrain_lbl = random_iris_data[10:,4:]\r\n\r\ntmp=[]\r\nfor i in range(len(test_lbl)):\r\n if test_lbl[i] == 1:\r\n t = np.array([1,0,0])\r\n elif test_lbl[i] == 2:\r\n t = np.array([0,1,0])\r\n else:\r\n t = np.array([0,0,1]) \r\n tmp.append(t) \r\nnew_test_lbl=np.array(tmp)\r\n\r\ntmp=[]\r\nfor i in range(len(train_lbl)):\r\n if train_lbl[i] == 1:\r\n t = np.array([1,0,0])\r\n elif train_lbl[i] == 2:\r\n t = np.array([0,1,0])\r\n else:\r\n t = np.array([0,0,1]) \r\n tmp.append(t) \r\nnew_train_lbl=np.array(tmp)\r\n\r\n\r\n\r\n\r\nprint(\"\\nrandom_iris_data: \\n\",colored(random_iris_data, 'red'),\"\\n\")\r\nprint(\"\\ntest_data: \\n\",colored(test_data, 'green'),\"\\n\")\r\nprint(\"\\ntrain_data: \\n\",colored(train_data, 'blue'),\"\\n\")\r\nprint(\"\\ntest_lbl: \\n\",colored(test_lbl, 'green'),\"\\n\")\r\nprint(\"\\nnew_test_lbl: \\n\",colored(new_test_lbl, 'green'),\"\\n\")\r\nprint(\"\\ntrain_lbl: \\n\",colored(train_lbl, 'blue'),\"\\n\")\r\nprint(\"\\nnew_train_lbl: \\n\",colored(new_train_lbl, 'blue'),\"\\n\")\r\n\r\n\r\n# A = np.random.randint(100, size=(10,3))\r\n# print(\"\\nA: \",A,\"\\n\")\r\n# idx = np.random.permutation(A)\r\n# print(\"\\nidx: \",idx,\"\\n\")","sub_path":"lab12/KNN-iris-1.py","file_name":"KNN-iris-1.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"481800076","text":"# -*- coding: utf-8 -*-\nimport sys\nsys.path.append('../')\n\nfrom structured_data import index_excels_normalTable\n\nexcel = \"excels_factores_económicos/Contratos registrados por sexo, edad y sector economico 2016 a 2018.xlsx\"\nsheet = 0\nname_index = \"index_contratos_sexo_edad_sector\"\ntype_index = \"structured\"\n\n\ntable_start_and_end = {\n \"start_row\": 0,\n \"start_col\": 0,\n \"end_row\": 57298,\n \"end_col\": 7,\n \"start_value_row\": 1,\n \"start_value_col\": 0\n}\n\ntype_items = {\n \"place\" : str,\n \"gender\" : str,\n \"sector\" : str,\n \"age\" : str,\n \"type\" : str,\n \"year\" : str,\n \"month\" : str,\n \"value\" : int\n}\n\nattributes_to_fixed={\n \"item_creation\": \"Original\",\n}\n\nname_items = [\"place\", \"gender\", \"sector\", \"age\", \"type\", \"year\", \"month\", \"value\"]\n\nchange_months = 'month'\n\nlowercase_letters = [\"gender\",\"place\", \"sector\"]\n\nfields_to_change = {\n 'Gran canaria' : ['place','GRAN CANARIA'],\n 'El hierro' : ['place','EL HIERRO'],\n 'La gomera' : ['place','LA GOMERA'],\n 'La 
palma' : ['place','LA PALMA'],\n 'Tenerife' : ['place','TENERIFE'],\n 'Lanzarote' : ['place','LANZAROTE'],\n 'Fuerteventura' : ['place','FUERTEVENTURA'],\n 'El rosario' : ['place','El Rosario'],\n 'El sauzal' : ['place','El Sauzal'],\n 'El tanque' : ['place','El Tanque'],\n 'Guia de isora' : ['place','Guía de Isora'],\n 'Icod de los vinos' : ['place','Icod de los Vinos'],\n 'Santa cruz de tenerife' : ['place','Santa Cruz de Tenerife'],\n 'Puerto de la cruz' : ['place','Puerto de la Cruz'],\n 'La laguna' : ['place','La Laguna'],\n 'La guancha' : ['place','La Guancha'],\n 'Los realejos' : ['place','Los Realejos'],\n 'La matanza' : ['place','La Matanza'],\n 'La orotava' : ['place','La Orotava'],\n 'La victoria' : ['place','La Victoria'],\n 'Santa ursula' : ['place','Santa Úrsula'],\n 'Los silos' : ['place','Los Silos'],\n 'San miguel de abona' : ['place','San Miguel de Abona'],\n 'Santiago del teide' : ['place','Santiago del Teide'],\n 'San juan de la rambla' : ['place','San Juan de la Rambla'],\n}\n\n\n\nattributes_to_fixed={\n \"item_creation\": \"Original\",\n}\n\n\nindex_excels_normalTable.main(excel, sheet, name_index, type_index, table_start_and_end, type_items, name_items, change_months=change_months, lowercase_letters = lowercase_letters, attributes_to_fixed = attributes_to_fixed, fields_to_change = fields_to_change)\n\n\n\n","sub_path":"airflow/data_analysis/classify_elastic/structured_data/index_factores_económicos/index_contratos_por_sexo_edad_y_sector_economico.py","file_name":"index_contratos_por_sexo_edad_y_sector_economico.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"53355462","text":"import unittest\nfrom hypothesis import given, strategies, settings, example, assume\nfrom enigma import Enigma\nimport logging\n\nimport string\n\nlogger = logging.getLogger('ENIGMATEST')\nlogging.basicConfig()\nlogger.setLevel('DEBUG')\n\n\nclass TestEnigma(unittest.TestCase):\n\n @given(\n phrase=strategies.text(\n alphabet=string.ascii_uppercase,\n min_size=1,\n max_size=10),\n rotor_list=strategies.lists(\n strategies.integers(min_value=1, max_value=8),\n min_size=3,\n max_size=4),\n reflector=strategies.sampled_from(['B', 'C']),\n key=strategies.text(\n alphabet=string.ascii_uppercase,\n min_size=3,\n max_size=4)\n )\n @settings(max_examples=100, min_satisfying_examples=10, timeout=10)\n @example(phrase=\"FORK\", rotor_list=[1, 2, 3], reflector='B', key='ABC')\n def testEnigma(self, phrase, rotor_list, reflector, key):\n assume(len(key) == len(rotor_list))\n\n def make_machine():\n if len(rotor_list) == 3:\n enigma_type = 'M3'\n elif len(rotor_list) == 4:\n enigma_type = 'M4'\n\n machine = Enigma(\n rotor_list=rotor_list,\n user_reflector=reflector,\n enigma_type=enigma_type,\n debug='DEBUG'\n )\n machine.set_key(key)\n return machine\n\n machine = make_machine()\n logger.debug(\"Encrypting %s\", phrase)\n result = machine.type_phrase(phrase)\n\n logger.debug(\"Finding Original\")\n machine = make_machine()\n orig = machine.type_phrase(result)\n logger.debug(\n \"Key '%s'\\n -> Running Enigma: Phrase Conversion %s -----> %s ------> %s\", key, phrase, result, orig)\n logger.debug(\"Machine type: %s\" % machine.type)\n assert phrase == orig, \"ERROR: Reverse Encryption Does Not Match Original Phrase\"\n\n @given(\n key=strategies.text(alphabet=string.ascii_uppercase, max_size=10),\n rotor_list=strategies.lists(\n strategies.integers(min_value=1, max_value=8),\n min_size=3,\n max_size=4),\n 
)\n def testEngimaKeyCheck(self, key, rotor_list):\n assume(len(key) != len(rotor_list))\n\n if len(rotor_list) == 3:\n enigma_type = 'M3'\n elif len(rotor_list) == 4:\n enigma_type = 'M4'\n\n machine = Enigma(\n rotor_list=rotor_list,\n enigma_type=enigma_type,\n debug='DEBUG'\n )\n with self.assertRaises(ValueError):\n machine.set_key(key)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_enigma3.py","file_name":"test_enigma3.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"502469469","text":"import os\r\nimport uuid\r\n\r\nfrom flask import (\r\n render_template,\r\n request,\r\n redirect,\r\n session,\r\n url_for,\r\n Blueprint,\r\n abort,\r\n send_from_directory)\r\n\r\nfrom models.user import User\r\nfrom models.user_role import UserRole\r\nfrom routes import current_user\r\n\r\nmain = Blueprint('index', __name__)\r\n\r\n\"\"\"\r\n用户在这里可以\r\n 访问首页\r\n 注册\r\n 登录\r\n\r\n用户登录后, 会写入 session, 并且定向到 /profile\r\n\"\"\"\r\n\r\n\r\n@main.route(\"/\")\r\ndef index():\r\n u = current_user()\r\n return render_template(\"index.html\", u=u)\r\n\r\n\r\n@main.route(\"/login/view\")\r\ndef login_view():\r\n u = current_user()\r\n return render_template(\"login.html\", u=u)\r\n\r\n\r\n@main.route(\"/register\", methods=['POST'])\r\ndef register():\r\n # form = request.args\r\n form = request.form\r\n # 用类函数来判断\r\n u = User.register(form)\r\n return redirect(url_for('.index'))\r\n\r\n\r\n@main.route(\"/login\", methods=['POST'])\r\ndef login():\r\n print('enter_login')\r\n form = request.form\r\n u = User.validate_login(form)\r\n if u is None:\r\n # 转到 topic.index 页面\r\n print('login_None')\r\n return redirect(url_for('coolwater_topic.index'))\r\n else:\r\n # session 中写入 user_id\r\n session['user_id'] = u.id\r\n print('login_session', session['user_id'])\r\n # 设置 cookie 有效期��� 永久\r\n session.permanent = True\r\n return redirect(url_for('coolwater_topic.index'))\r\n\r\n\r\n@main.route('/profile')\r\ndef profile():\r\n u = current_user()\r\n if u.user_role is UserRole.guest:\r\n return redirect(url_for('index.login_view'))\r\n else:\r\n return render_template('profile.html', u=u)\r\n\r\n\r\n@main.route('/user/')\r\ndef user_detail(id):\r\n u = User.find(id)\r\n if u is None:\r\n abort(404)\r\n else:\r\n return render_template('profile.html', u=u)\r\n\r\n\r\n@main.route('/image/add', methods=['POST'])\r\ndef avatar_add():\r\n file = request.files['avatar']\r\n\r\n # ../../root/.ssh/authorized_keys\r\n # filename = secure_filename(file.filename)\r\n suffix = file.filename.split('.')[-1]\r\n filename = '{}.{}'.format(str(uuid.uuid4()), suffix)\r\n path = os.path.join('images', filename)\r\n file.save(path)\r\n\r\n u = current_user()\r\n User.update(u.id, image='/images/{}'.format(filename))\r\n\r\n return redirect(url_for('.profile'))\r\n\r\n\r\n@main.route('/images/')\r\ndef image(filename):\r\n # 不要直接拼接路由,不安全,比如\r\n # open(os.path.join('images', filename), 'rb').read()\r\n return send_from_directory('images', filename)\r\n\r\n\r\n@main.route(\"/logout\")\r\ndef logout():\r\n session.clear()\r\n return redirect(url_for('index.login_view'))\r\n\r\n\r\n@main.app_context_processor\r\ndef base_inject_user():\r\n return dict(base_inject_user=current_user())\r\n","sub_path":"routes/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"608308378","text":"#!/usr/bin/env python\nimport os\nimport subprocess\nimport sys\nfrom setuptools import setup, find_packages\n\nif os.path.exists('relic'):\n sys.path.insert(1, 'relic')\n import relic.release\nelse:\n try:\n import relic.release\n except ImportError:\n try:\n subprocess.check_call(\n ['git', 'clone', 'https://github.com/jhunkeler/relic.git'])\n sys.path.insert(1, 'relic')\n import relic.release\n except subprocess.CalledProcessError as e:\n print(e)\n exit(1)\n\nversion = relic.release.get_info()\nrelic.release.write_template(version, 'lib/stsci/tools')\n\nsetup(\n name = 'stsci.tools',\n version = version.pep386,\n author = 'STScI',\n author_email = 'help@stsci.edu',\n description = 'Collection of STScI utility functions',\n url = 'https://github.com/spacetelescope/stsci.tools',\n classifiers = [\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires = [\n 'astropy',\n 'numpy',\n ],\n setup_requires = [\n 'pytest-runner'\n ],\n tests_require = [\n 'pytest',\n 'pytest-doctestplus'\n ],\n package_dir = {\n '': 'lib',\n },\n packages = find_packages('lib'),\n package_data = {\n '': ['LICENSE.txt'],\n 'stsci/tools/tests': ['data/*.*']\n },\n entry_points = {\n 'console_scripts': [\n 'convertwaiveredfits=stsci.tools.convertwaiveredfits:main',\n 'convertlog=stsci.tools.convertlog:main'\n ],\n },\n)\n","sub_path":"pypi_install_script/pyraf-2.1.15.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"538393153","text":"#import multiprocessing\n#import TerminalGameUsbDetector\n#import TerminalGameLauncher\n\n#if __name__ == \"__main__\":\n# TransferValue = multiprocessing.Value('i',0)\n\n# p1 = multiprocessing.process(target=TerminalGameLauncher.TitleScreen, args=(TransferValue))\n# p2 = multiprocessing.process(target=TerminalGameUsbDetector.UsbMonitor, args=(TransferValue))\n\n# p1.start()\n# p2.start()\n\n# p1.join()\n# p2.join()\n\n# print(TransferValue)\n\nimport threading,time,random,queue\nimport TerminalGameUsbDetector, TerminalGameLauncher\n\ndef test1():\n while True:\n print(\"a\")\n\ndef test2():\n #for i in range(5):\n # print(\"b\")\n while True:\n print(\"b\")\n\ndef handle_script_a(q):\n TerminalGameLauncher.TitleScreen(q)\n\ndef handle_script_b(q):\n TerminalGameUsbDetector.UsbMonitor(q)\n\nif __name__ == \"__main__\":\n q = queue.Queue()\n q.put(1)\n Tgame = threading.Thread(target=handle_script_a,args=(q,))\n Tdetector = threading.Thread(target=handle_script_b,args=(q,))\n Tdetector.daemon = True\n Tdetector.start()\n Tgame.start()\n Tgame.join()\n Tdetector.join()\n\n\n\n\n\n","sub_path":"Old Scripts (not used anymore)/MPtest.py","file_name":"MPtest.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"350753956","text":"import spacy\n\n# Load English tokenizer, tagger, parser, NER and word vectors\nnlp = spacy.load(\"en_core_web_sm\")\n\n\ndef find_entities(text):\n document = {}\n doc = nlp(text)\n for entity in doc.ents:\n document[entity.label_] = entity.text\n return 
document\n","sub_path":"app/models/named_entity_recognition/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"233584409","text":"str=input()\nlength=len(str)\nSA=[]\n\nfor i in range(0,length):\n SA.append(0)\n\ni=length-1\n\nwhile i>=0:\n rank=1\n temp=str[i:]\n for j in range(0,length):\n cmp=str[j:];\n Min=min(len(temp),len(cmp))\n for k in range(0,Min):\n if temp[k]==cmp[k]:\n if k==Min-1 and Min==len(cmp):\n rank=rank+1\n elif temp[k]>cmp[k]:\n rank=rank+1\n break\n else:\n break\n SA[rank-2]=i+1\n i=i-1\n\nfor x in SA:\n print(x,end='')\n if x!=SA[length-1]:\n print(\" \",end='')\nprint()","sub_path":"Code/CodeRecords/2341/60832/258992.py","file_name":"258992.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"488180146","text":"#!/usr/bin/env python\n#\n# http://qiita.com/yohei1126@github/items/de355f540a83743213fb\n#\n\nimport configparser,os,datetime\nimport paho.mqtt.client as mqtt\nfrom PIL import Image as img\n\nconf = configparser.SafeConfigParser()\nconf.read(\"./conf.ini\")\n\nurl = conf.get(\"server\",\"url\")\nmqtt_photo=conf.get(\"mqtt\",\"photo\")\nprint(url,mqtt_photo)\n\n# The callback for when the client receives a CONNACK response from the server.\n\n\ndef on_connect(client, userdata, rc):\n print(\"Connected with result code \" + str(rc))\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n# client.subscribe(\"test\")\n# client.subscribe(\"my/device/stillcam\")\n client.subscribe(mqtt_photo)\n\n# The callback for when a PUBLISH message is received from the server.\n\n\ndef on_message(client, userdata, msg):\n print(\"Saving Photofile\")\n filename = \"./image/\" + datetime.datetime.today().strftime(\"%H%M%S%f\") + \".jpg\"\n with open(filename, 'wb') as fd:\n fd.write(msg.payload)\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(url, 1883, 60)\n\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\nclient.loop_forever()\n","sub_path":"myapp/mqtt/sub/sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"597970420","text":"#!/usr/bin/env python3\n\n\"\"\"\nCS3B, Assignment #8, Local Dictionary\nNicholas Noochla-or\n\"\"\"\n\nimport json\nfrom os import scandir\nfrom types import DynamicClassAttribute\nfrom datalist import Node, LinkedList, DataNode, DataList\nfrom enum import Enum\n\nclass DictionaryEntry():\n def __init__(self, word, part_of_speech, definition, example=None):\n self._word = word\n self._part_of_speech = part_of_speech\n self._definition = definition\n self._example = example\n \n @property\n def word(self):\n return self._word\n \n @word.setter\n def word(self, word):\n self._word = word\n \n @property\n def part_of_speech(self):\n return self._part_of_speech\n\n @part_of_speech.setter\n def part_of_speech(self, part_of_speech):\n self._part_to_speech = part_of_speech\n\n @property\n def definition(self):\n return self._definition\n\n @definition.setter\n def definition(self, d):\n self._definition = d\n\n @property\n def example(self):\n 
return self._example\n\n @example.setter\n def example(self, ex):\n self._example = ex\n \n def __str__(self):\n s = (f\"Word : {self.word}\\n\"\n f\"Part of Speech: {self.part_of_speech}\\n\"\n f\"Definition : {self.definition}\\n\"\n f\"Example : {self.example}\")\n return s\n\n\nclass LocalDictionary():\n local_dict = {}\n def __init__(self, dictionary_json_name=\"dictionary.json\"):\n with open(dictionary_json_name) as f:\n deserialized = json.load(f, object_hook=self.custom_decode)\n\n def custom_decode(self, o):\n try:\n if o['word'] and o['part_of_speech'] and o['definition']:\n new_entry_word = o['word']\n if 'example' in o:\n self.local_dict[new_entry_word] = DictionaryEntry(o['word'], o['part_of_speech'], o['definition'], o['example'])\n return DictionaryEntry(o['word'], o['part_of_speech'], o['definition'], o['example'])\n else:\n self.local_dict[new_entry_word] = DictionaryEntry(o['word'], o['part_of_speech'], o['definition'])\n return DictionaryEntry(o['word'], o['part_of_speech'], o['definition'])\n except:\n return o\n \n def search(self, word):\n try:\n return self.local_dict[word]\n except:\n raise KeyError(\"could not find word \" + word)\n\nclass DictionaryEntryCache(DataList):\n def __init__(self, capacity = 10):\n DataList.__init__(self)\n if capacity > 0:\n self.capacity = capacity\n else:\n raise ValueError(\"capacity lower than 1\")\n \n def add(self, entry):\n \n occupied = self.head\n occupied_count = 0\n while occupied.next:\n occupied_count += 1\n occupied = occupied.next\n # print(occupied_count)\n if occupied_count == self.capacity:\n temp = self.head\n for x in range(occupied_count-1):\n temp = temp.next\n temp.next = None\n\n if type(entry) is DictionaryEntry:\n self.add_to_head(entry)\n\n\n def search(self, word):\n temp = self.head\n try:\n if temp['word'] == word:\n # print(\"Head: \", temp)\n # print(\"temp['word']: \", temp['word'])\n return temp\n except:\n pass\n\n while temp.next:\n if temp.next['word'] == word:\n self.remove(temp.next)\n self.add_to_head(temp.next)\n return temp.next\n temp = temp.next\n raise KeyError(\"DictionaryEntryCache\")\n \n\nclass DictionarySource(Enum):\n LOCAL = 1\n CACHE = 2\n\n\nclass Dictionary():\n def __init__(self):\n self._localDictionary = LocalDictionary()\n self._dictionaryEntryCache = DictionaryEntryCache()\n\n def search(self, word):\n \n try:\n found_word = self._dictionaryEntryCache.search(word)\n return (self._dictionaryEntryCache.search(word), DictionarySource.CACHE)\n except:\n pass\n \n try:\n found_word = self._localDictionary.search(word)\n self._dictionaryEntryCache.add(found_word)\n return (found_word, DictionarySource.LOCAL)\n except:\n raise KeyError(\"DictionarySearch\")\n\n\n\n\n\ndef main():\n\n main_dictionary = Dictionary()\n user_input = '1'\n while user_input != 'qq':\n user_input = input(\"Enter a word to lookup: \")\n (word_search, location) = main_dictionary.search(user_input)\n try:\n (word_search, location) = main_dictionary.search(user_input)\n \n print(\"main word_search: \", word_search)\n print(\"location: \", location)\n except:\n print(\"Error when searching: \", user_input)\n return 0\n\n\n\n # print(LD.local_dict)\n\n # print(LD.search('fly'))\n # LD = LocalDictionary()\n\n # testCache = DictionaryEntryCache(3)\n # testCache.add(LD.search('fly'))\n # testCache.add(LD.search('foothill'))\n # testCache.add(LD.search('ace'))\n # testCache.add(LD.search('python')) \n # entry1 = DictionaryEntry(\"test1\", \"noun\", \"test1\")\n # entry2 = DictionaryEntry(\"test2\", \"noun\", 
\"test2\")\n # entry3 = DictionaryEntry(\"test3\", \"noun\", \"test3\")\n # testCache.add(entry1)\n # testCache.add(entry2)\n # testCache.add(entry3)\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"cs3B/008-LocalDictionary/assignment08.py","file_name":"assignment08.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"587423764","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c), 2016-2017, SISSA (International School for Advanced Studies).\n# All rights reserved.\n# This file is distributed under the terms of the MIT License.\n# See the file 'LICENSE' in the root directory of the present\n# distribution, or http://opensource.org/licenses/MIT.\n#\n# @author Davide Brunato \n#\n\"\"\"\nThis module runs tests concerning the building of XSD schemas with the 'xmlschema' package.\n\"\"\"\nfrom _test_common import *\nimport fileinput\nimport glob\n\nimport xmlschema\nfrom xmlschema.exceptions import XMLSchemaParseError, XMLSchemaURLError, XMLSchemaKeyError\n\n\ndef create_schema_tests(pathname):\n def make_test_schema_function(xsd_file, expected_errors):\n def test_schema(self):\n # print(\"Run %s\" % self.id())\n try:\n xs = xmlschema.XMLSchema(xsd_file, validation='lax')\n except (XMLSchemaParseError, XMLSchemaURLError, XMLSchemaKeyError) as err:\n num_errors = 1\n errors = [str(err)]\n else:\n num_errors = len(xs.all_errors)\n errors = xs.all_errors\n\n if num_errors != expected_errors:\n print(\"\\nTest n.%r: %r errors, %r expected.\" % (self.id()[-3:], num_errors, expected_errors))\n if num_errors == 0:\n raise ValueError(\"found no errors when %d expected.\" % expected_errors)\n else:\n raise ValueError(\n \"n.%d errors expected, found %d: %s\" % (expected_errors, num_errors, errors[0])\n )\n else:\n self.assertTrue(True, \"Successfully created schema for {}\".format(xsd_file))\n return test_schema\n\n # Optional int argument: []\n if len(sys.argv) > 1:\n test_only = int(sys.argv.pop())\n else:\n test_only = None\n\n tests = {}\n test_num = 0\n for line in fileinput.input(glob.iglob(pathname)):\n line = line.strip()\n if not line or line[0] == '#':\n continue\n\n test_args = get_test_args(line)\n filename = test_args[0]\n try:\n total_errors = int(test_args[1])\n except (IndexError, ValueError):\n total_errors = 0\n\n test_file = os.path.join(os.path.dirname(fileinput.filename()), filename)\n if not os.path.isfile(test_file) or os.path.splitext(test_file)[1].lower() != '.xsd':\n continue\n\n test_func = make_test_schema_function(test_file, total_errors)\n test_name = os.path.join(os.path.dirname(sys.argv[0]), os.path.relpath(test_file))\n test_num += 1\n if test_only is None or test_num == test_only:\n klassname = 'Test_schema_{0:03d}_{1}'.format(test_num, test_name)\n tests[klassname] = type(\n klassname, (XMLSchemaTestCase,),\n {'test_schema_{0:03d}'.format(test_num): test_func}\n )\n\n return tests\n\n\nif __name__ == '__main__':\n pkg_folder = os.path.dirname(os.getcwd())\n sys.path.insert(0, pkg_folder)\n globals().update(create_schema_tests(os.path.join(pkg_folder, \"tests/*/testfiles\")))\n unittest.main()\n","sub_path":"xmlschema/tests/test_schemas.py","file_name":"test_schemas.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"32717742","text":"from util import HttpRequestsUtil\nfrom config import Env_IP\n\nenv_ip = Env_IP.api_test_env\nurl = env_ip + 
'/user/customerPage'\n\n\ndef do_get(token, page, visit_plan, limit, full_name, longitude, latitude):\n params = {\n 'page': page,\n 'visitPlan': visit_plan,\n 'limit': limit,\n 'full_name': full_name,\n 'longitude': longitude,\n 'latitude': latitude\n }\n return HttpRequestsUtil.do_get(url, token, params)\n\n\ndef do_get_without_location(token, page, visit_plan, limit, full_name):\n params = {\n 'page': page,\n 'visitPlan': visit_plan,\n 'limit': limit,\n 'full_name': full_name\n }\n return HttpRequestsUtil.do_get(url, token, params)\n\n\ndef do_get_all(token):\n params = {\n 'limit': 20,\n 'page': 1\n }\n return HttpRequestsUtil.do_get(url, token, params)\n","sub_path":"api/user/customerPage.py","file_name":"customerPage.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"14727926","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport re\nimport random\nimport numpy as np\nfrom datetime import datetime\nimport os\nimport random\nimport sys\nimport threading\n\ntf.app.flags.DEFINE_string('data_directory',\n '/home/qiong/Documents/DATA/places205/data/vision/torralba/deeplearning/images256',\n 'data directory')\n\ntf.app.flags.DEFINE_string('train_file','/home/qiong/Documents/DATA/places205/train_places205.csv',\n 'Training data file')\ntf.app.flags.DEFINE_string('validation_file','/home/qiong/Documents/DATA/places205/val_places205.csv',\n 'Validation data file')\ntf.app.flags.DEFINE_string('output_directory','/home/qiong/Documents/DATA/places205/tmp/',\n 'Output data directory')\n\ntf.app.flags.DEFINE_integer('train_shards', 1960,'Number of shards in training TFRecord files.')\ntf.app.flags.DEFINE_integer('validation_shards', 56,'Number of shards in validation TFRecord files.')\n\ntf.app.flags.DEFINE_integer('num_threads', 8,'Number of threads to preprocess the images.')\n# The labels file contains a list of valid labels are held in this file.\n# Assumes that the file contains entries as such:\n# dog\n# cat\n# flower\n# where each line corresponds to a label. We map each label contained in\n# the file to an integer corresponding to the line number starting from 0.\ntf.app.flags.DEFINE_string('labels_file','/home/qiong/Documents/DATA/places205/places205_labels.txt',\n'Labels file')\n\n\nFLAGS = tf.app.flags.FLAGS\n\ndef _int64_feature(value):\n \"\"\"Wrapper for inserting int64 features into Example proto.\"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef _bytes_feature(value):\n \"\"\"Wrapper for inserting bytes features into Example proto.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _convert_to_example(filename, image_buffer, label, text, height, width):\n \"\"\"Build an Example proto for an example.\n Args:\n filename: string, path to an image file, e.g., '/path/to/example.JPG'\n image_buffer: string, JPEG encoding of RGB image\n label: integer, identifier for the ground truth for the network\n text: string, unique human-readable, e.g. 
'dog'\n height: integer, image height in pixels\n width: integer, image width in pixels\n Returns:\n Example proto\n \"\"\"\n\n colorspace = 'RGB'\n channels = 3\n image_format = 'JPEG'\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': _int64_feature(height),\n 'image/width': _int64_feature(width),\n 'image/colorspace': _bytes_feature(colorspace),\n 'image/channels': _int64_feature(channels),\n 'image/class/label': _int64_feature(label),\n 'image/class/text': _bytes_feature(text),\n 'image/format': _bytes_feature(image_format),\n 'image/filename': _bytes_feature(os.path.basename(filename)),\n 'image/encoded': _bytes_feature(image_buffer)}))\n return example\n\nclass ImageCoder(object):\n \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\"\n\n def __init__(self):\n # Create a single Session to run all image coding calls.\n self._sess = tf.Session()\n\n # Initializes function that converts PNG to JPEG data.\n self._png_data = tf.placeholder(dtype=tf.string)\n image = tf.image.decode_png(self._png_data, channels=3)\n self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)\n\n # Initializes function that decodes RGB JPEG data.\n self._decode_jpeg_data = tf.placeholder(dtype=tf.string)\n self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data,\nchannels=3)\n\n def png_to_jpeg(self, image_data):\n return self._sess.run(self._png_to_jpeg,\n feed_dict={self._png_data: image_data})\n\n def decode_jpeg(self, image_data):\n image = self._sess.run(self._decode_jpeg,\n feed_dict={self._decode_jpeg_data: image_data})\n assert len(image.shape) == 3\n assert image.shape[2] == 3\n return image\n\ndef _is_png(filename):\n \"\"\"Determine if a file contains a PNG format image.\n Args:\n filename: string, path of the image file.\n Returns:\n boolean indicating if the image is a PNG.\n \"\"\"\n return '.png' in filename\n\ndef _process_image(filename, coder):\n \"\"\"Process a single image file.\n Args:\n filename: string, path to an image file e.g., '/path/to/example.JPG'.\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n Returns:\n image_buffer: string, JPEG encoding of RGB image.\n height: integer, image height in pixels.\n width: integer, image width in pixels.\n \"\"\"\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'r') as f:\n image_data = f.read()\n\n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width\n\ndef _process_image_files_batch(coder, thread_index, ranges, name, filenames,\n texts, labels, num_shards):\n \"\"\"Processes and saves list of images as TFRecord in 1 thread.\n Args:\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n thread_index: integer, unique batch to run index is within [0,\nlen(ranges)).\n ranges: list of pairs of integers specifying ranges of each batches to\n analyze in parallel.\n name: string, unique identifier specifying the data set\n filenames: list of strings; each string is a path to an image file\n texts: list of strings; each string is human readable, e.g. 
'dog'\n labels: list of integer; each integer identifies the ground truth\n num_shards: integer number of shards for this data set.\n \"\"\"\n # Each thread produces N shards where N = int(num_shards / num_threads).\n # For instance, if num_shards = 128, and the num_threads = 2, then the first\n # thread would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0],\n ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in xrange(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)\n output_file = os.path.join(FLAGS.output_directory, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in files_in_shard:\n filename = filenames[i]\n label = labels[i]\n text = texts[i]\n\n image_buffer, height, width = _process_image(filename, coder)\n\n example = _convert_to_example(filename, image_buffer, label,\n text, height, width)\n writer.write(example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print('%s [thread %d]: Processed %d of %d images in thread batch.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print('%s [thread %d]: Wrote %d images to %s' %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print('%s [thread %d]: Wrote %d images to %d shards.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n \ndef _process_image_files(name, filenames, texts, labels, num_shards):\n \"\"\"Process and save list of images as TFRecord of Example protos.\n Args:\n name: string, unique identifier specifying the data set\n filenames: list of strings; each string is a path to an image file\n texts: list of strings; each string is human readable, e.g. 'dog'\n labels: list of integer; each integer identifies the ground truth\n num_shards: integer number of shards for this data set.\n \"\"\"\n assert len(filenames) == len(texts)\n assert len(filenames) == len(labels)\n\n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)\n ranges = []\n for i in xrange(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i+1]])\n\n # Launch a thread for each batch.num_shards\n print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a generic TensorFlow-based utility for converting all\n # num_shardsimage codings.\n coder = ImageCoder()\n\n threads = []\n for thread_index in xrange(len(ranges)):\n args = (coder, thread_index, ranges, name, filenames,\n texts, labels, num_shards)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' 
%\n (datetime.now(), len(filenames)))\n sys.stdout.flush()\n\ndef _find_image_files(data_dir, files_list, text_list):\n unique_files = [l.strip() for l in tf.gfile.FastGFile(files_list,'r').readlines()]\n unique_labels = [l.strip() for l in tf.gfile.FastGFile(text_list,'r').readlines()]\n labels = []\n filenames = []\n texts = []\n\n p = re.compile(\"\\s\")\n for line in unique_files:\n myList = p.split(line)\n jpeg_file_path = '%s/%s' % (data_dir, myList[0])\n file = tf.gfile.Glob(jpeg_file_path)\n filenames.extend(file)\n labels.extend([int(myList[1])])\n texts.extend([unique_labels[int(myList[1])]])\n\n #print ('=================================')\n #print (filenames)\n #print (labels)\n #print (texts)\n\n # Shuffle the ordering of all image files in order to guarantee\n # random ordering of the images with respect to label in the\n # saved TFRecord files. Make the randomization repeatable.\n\n shuffled_index = range(len(filenames))\n random.seed(12345)\n random.shuffle(shuffled_index)\n\n filenames = [filenames[i] for i in shuffled_index]\n texts = [texts[i] for i in shuffled_index]\n labels = [labels[i] for i in shuffled_index]\n\n print('Found %d JPEG files across %d labels inside %s.' %(len(filenames),len(unique_labels), data_dir))\n return filenames, texts, labels\n\ndef _process_dataset(name, data_dir, num_shards, files_list, text_list):\n \"\"\"Process a complete data set and save it as a TFRecord.\n Args:\n name: string, unique identifier specifying the data set.\n directory: string, root path to the data set.\n num_shards: integer number of shards for this data set.\n labels_file: string, path to the labels file.\n \"\"\"\n filenames, texts, labels = _find_image_files(data_dir, files_list, text_list)\n _process_image_files(name, filenames, texts, labels, num_shards)\n\ndef main(unused_argv):\n assert not FLAGS.train_shards % FLAGS.num_threads, (\n 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')\n assert not FLAGS.validation_shards % FLAGS.num_threads, (\n 'Please make the FLAGS.num_threads commensurate with '\n 'FLAGS.validation_shards')\n print('Saving results to %s' % FLAGS.output_directory)\n\n # Run it!\n _process_dataset('validation', FLAGS.data_directory,\n FLAGS.validation_shards, FLAGS.validation_file, FLAGS.labels_file)\n _process_dataset('train', FLAGS.data_directory,\n FLAGS.train_shards, FLAGS.train_file, FLAGS.labels_file)\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"inception/data/build_image_data.py","file_name":"build_image_data.py","file_ext":"py","file_size_in_byte":12280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"458912032","text":"import math\n\nN=32\nJ=500\n\nprimes=[2,3,5,7,11,13,17,19,23,29]\n\ntot=N-2\nres=[]\nfor i in range(2**tot):\n\tb= \"1\"+(\"{0:0\"+str(tot)+\"b}\").format(i)+\"1\"\n\tdivisors=[]\n\tfor base in range(2,11):\n\t\tcheck=int(b,base)\n\t\tfor prime in primes:\n\t\t\tif check%prime==0:\n\t\t\t\tdivisors.append(prime)\n\t\t\t\tbreak\n\t\tif len(divisors)==base-1:\n\t\t\tcontinue\n\t\tbreak\n\tif len(divisors)==9:\n\t\tres.append([b]+list(map(str,divisors)))\n\t\tif len(res)==J:\n\t\t\tbreak\n\nprint(\"Case #1:\")\nfor r in res:\n\tprint(\" \".join(r))\n\n\n\n","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_fabrizyo_CoinJam.py","file_name":"16_0_3_fabrizyo_CoinJam.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"235275403","text":"import unittest\nfrom typing import List\nfrom card import Card\nfrom hand_of_cards import HandOfCards\n\nclass TestHandOfCards(unittest.TestCase):\n def setUp(self):\n self.hand: HandOfCards = HandOfCards()\n \n def test_add_cards(self):\n cards = [Card('3', '\\u2665'), Card('2', '\\u2665'), Card('A', '\\u2665')]\n self.hand.add_cards(cards)\n self.assertListEqual(self.hand.cards, cards)\n \n def test_remove_cards_when_empty(self):\n empty_cards = self.hand.remove_cards('10')\n self.assertListEqual(empty_cards, [])\n \n def test_remove_non_existant_cards(self):\n self.hand.cards = [\n Card('3', '\\u2665'), Card('2', '\\u2665'), Card('A', '\\u2665')\n ]\n no_cards = self.hand.remove_cards('7')\n self.assertListEqual(no_cards, [])\n \n def test_remove_existant_cards(self):\n self.hand.cards = [\n Card('3', '\\u2665'), Card('2', '\\u2665'), Card('A', '\\u2665'), \\\n Card('3', '\\u2660')\n ]\n threes = self.hand.remove_cards('3')\n self.assertListEqual(threes, [Card('3', '\\u2665'), Card('3', '\\u2660')])\n self.assertListEqual(self.hand.cards, [Card('2', '\\u2665'), Card('A', '\\u2665')])\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"python-is-easy/projects/go_fish/test_hand_of_cards.py","file_name":"test_hand_of_cards.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"252649120","text":"from datetime import datetime\nfrom airflow import settings\nfrom airflow.models import DagBag\nfrom airflow.operators.dagrun_operator import TriggerDagRunOperator, DagRunOrder\nfrom airflow.plugins_manager import AirflowPlugin\nfrom airflow.utils.decorators import apply_defaults\nfrom airflow.utils.state import State\n\n\nclass TriggerMultiDagRunOperator(TriggerDagRunOperator):\n @apply_defaults\n def __init__(self, op_args=None, op_kwargs=None, *args, **kwargs):\n super(TriggerMultiDagRunOperator, self).__init__(*args, **kwargs)\n self.op_args = op_args or []\n self.op_kwargs = op_kwargs or {}\n\n def execute(self, context):\n session = settings.Session()\n created = False\n for dro in self.python_callable(context, *self.op_args, **self.op_kwargs):\n if not dro or not isinstance(dro, DagRunOrder):\n break\n\n if dro.run_id is None:\n dro.run_id = 'trig__' + datetime.utcnow().isoformat()\n print('TriggerMultiDagRunOperator|dro : ', dro)\n dbag = DagBag(settings.DAGS_FOLDER)\n trigger_dag = dbag.get_dag(self.trigger_dag_id)\n dr = trigger_dag.create_dagrun(\n run_id=dro.run_id,\n state=State.RUNNING,\n conf=dro.payload,\n external_trigger=True\n )\n created = True\n self.log.info(\"Creating DagRun %s\", dr)\n\n if created is True:\n session.commit()\n else:\n self.log.info(\"No DagRun created\")\n session.close()\n\n\nclass MyFirstPlugin(AirflowPlugin):\n name = \"trigger_multi_dag_run_operator\"\n operators = [TriggerMultiDagRunOperator]\n","sub_path":"local_dss_workflow/plugins/operators/multi_dag_trigger_operator.py","file_name":"multi_dag_trigger_operator.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"234081837","text":"import smtplib\n\nsender = 'mdnuraminsifat@gmail.com'\nreceiver = 'nur15-1463@diu.edu.bd'\n\nmessage = \"\"\"Hello Sifat\"\"\"\ntry:\n smtpobj = smtplib.SMTP('localhost.com') # my smtp server is localhost\n smtpobj.sendmail(sender, receiver, message)\n print(\"Successfully sent mail\")\nexcept smtplib.SMTPException:\n print('Error: unable to 
send mail')\n","sub_path":"mail_send/sending email.py","file_name":"sending email.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"298351991","text":"\n# time log(n), space O(1)\nclass Solution(object):\n def findNthDigit(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n k = 1 # k is the number of digits\n while n > 9*10**(k-1)*k:\n n -= 9*10**(k-1)*k\n k += 1\n \n target = 10**(k-1) + n/k - 1 # the n-th digit is either in target or in target+1\n if n%k == 0:\n return target%10\n else:\n target += 1\n times = k - n%k + 1 # the digit counting backwards, wrong: times = k - n%k\n target = target/10**(times-1)\n return target%10\n \n\n\n\"\"\"\nFind the nth digit of the infinite integer sequence 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ...\n\nNote:\nn is positive and will fit within the range of a 32-bit signed integer (n < 231).\n\nExample 1:\n\nInput:\n3\n\nOutput:\n3\nExample 2:\n\nInput:\n11\n\nOutput:\n0\n\nExplanation:\nThe 11th digit of the sequence 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ... is a 0, which is part of the number 10.\n\"\"\"\n","sub_path":"0400. Nth Digit.py","file_name":"0400. Nth Digit.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"529869338","text":"\"\"\"Separate module to handle framework logging\"\"\"\nimport logging\nimport sys\n\n\ndef _get_logger():\n nlogger = logging.getLogger()\n nlogger.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('%(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n nlogger.addHandler(handler)\n return nlogger\n","sub_path":"utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"436448199","text":"import numpy as np\nimport gym\nimport torch\nfrom tqdm import tqdm\nimport dill\nimport os\nimport itertools\nimport time\n\nfrom agent.dqn_agent import DQNAgent, HierarchicalDQNAgent\nfrom agent.policy import get_greedy_epsilon_policy, greedy_action\n\nfrom environment.wrappers import FrozenLakeToCoords\n\nfrom .gridsearch_hierarchical import HRLWrapper\nfrom .model import QFunction\n\nimport utils\n\nclass DummyPolicy():\n def __call__(self,state):\n v0 = (state[:,0] > 1).view(-1,1).float()\n v1 = 1-v0\n return torch.cat((v0,v1),dim=1)\n def parameters(self):\n x = torch.tensor(0)\n return [x]\n\ndef run_trial(gamma, alpha, eps_b, eps_t, tau, directory=None,\n net_structure=[2,3,4], num_options=4,\n env_name='gym_fourrooms:fourrooms-v0', batch_size=32,\n min_replay_buffer_size=1000,\n max_steps=5000, epoch=50, test_iters=1, verbose=False):\n args = locals()\n env = gym.make(env_name)\n env = gym.wrappers.TimeLimit(env,36)\n test_env = gym.make(env_name)\n test_env = gym.wrappers.TimeLimit(test_env,36)\n\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n\n def create_option():\n return HierarchicalDQNAgent(\n action_space=env.action_space,\n observation_space=env.observation_space,\n learning_rate=alpha,\n discount_factor=gamma,\n polyak_rate=tau,\n device=device,\n behaviour_policy=get_greedy_epsilon_policy(eps_b),\n target_policy=get_greedy_epsilon_policy(eps_t),\n q_net=QFunction(layer_sizes=net_structure,input_size=4)\n )\n options = [create_option() for _ in range(num_options)]\n agent = 
DQNAgent(\n action_space=gym.spaces.Discrete(num_options),\n observation_space=env.observation_space,\n learning_rate=alpha,\n discount_factor=gamma,\n polyak_rate=tau,\n device=device,\n behaviour_policy=get_greedy_epsilon_policy(eps_b),\n target_policy=get_greedy_epsilon_policy(eps_t),\n q_net=QFunction(layer_sizes=net_structure,input_size=4,output_size=num_options)\n #q_net=DummyPolicy()\n )\n print_policy(agent)\n\n env = HRLWrapper(env, options, test=False)\n test_env = HRLWrapper(test_env, options, test=True)\n\n def value_function(states):\n action_values = agent.q_net(states)\n optimal_actions = greedy_action(action_values)\n values = [options[o].q_net_target(s).max() for s,o in zip(states,optimal_actions)]\n return torch.tensor(values)\n\n def test(env, iterations, max_steps=np.inf, render=False, record=True, processors=1):\n def test_once(env, max_steps=np.inf, render=False):\n reward_sum = 0\n sa_vals = [[] for _ in range(env.action_space.n)]\n so_vals = []\n option_freq = np.array([0]*num_options)\n obs = env.reset()\n o = agent.act(obs, testing=True)\n for steps in itertools.count():\n if steps > max_steps:\n break\n o = agent.act(obs, testing=True)\n so_vals.append(agent.get_state_action_value(obs,o))\n obs, reward, done, _ = env.step(o)\n sa_vals[o].append(env.last_sa_value)\n reward_sum += reward\n if render:\n env.render()\n if done:\n break\n prob = option_freq/option_freq.sum()\n entropy = -sum([p*np.log(p) if p > 0 else 0 for p in prob])\n return reward_sum, [np.mean(v) for v in sa_vals], np.mean(so_vals), entropy\n rewards = []\n sa_vals = []\n so_vals = []\n entropies = []\n for i in range(iterations):\n r,sav,sov,e = test_once(env, render=render, max_steps=max_steps)\n rewards.append(r)\n sa_vals.append(sav)\n so_vals.append(sov)\n entropies.append(e)\n return rewards, sa_vals, so_vals, entropies\n\n # Create file to save results\n results_file_path = utils.save_results(args,\n {'rewards': [], 'state_action_values': []},\n directory=directory)\n\n rewards = []\n state_action_values = []\n state_option_values = []\n entropies = []\n done = True\n step_range = itertools.count()\n if verbose:\n step_range = tqdm(step_range)\n try:\n for steps in step_range:\n # Run tests\n if steps % epoch == 0:\n r,sa_vals,so_vals,e = test(test_env, test_iters, render=False, processors=1)\n rewards.append(r)\n state_action_values.append(sa_vals)\n state_option_values.append(so_vals)\n entropies.append(e)\n if verbose:\n print_policy(agent)\n tqdm.write('steps %d \\t Reward: %f \\t SA-V: %f \\t SO-V: %f \\t Ent: %f' % (steps, np.mean(r), np.mean(sa_vals), np.mean(so_vals), np.mean(e)))\n data = {'rewards': rewards, \n 'state_action_values': state_action_values,\n 'state_option_values': state_option_values,\n 'entropies': entropies}\n utils.save_results(args, data, file_path=results_file_path)\n\n # Linearly Anneal epsilon\n agent.behaviour_policy = get_greedy_epsilon_policy((1-min(steps/1000000,1))*(1-eps_b)+eps_b)\n for o in options:\n o.behaviour_policy = get_greedy_epsilon_policy((1-min(steps/1000000,1))*(1-eps_b)+eps_b)\n\n # Run step\n if done:\n obs = env.reset()\n action = agent.act(obs)\n\n obs2, reward2, done, _ = env.step(action)\n agent.observe_step(obs, action, reward2, obs2, terminal=done)\n\n # Update weights\n if steps >= min_replay_buffer_size:\n agent.train(batch_size=batch_size,iterations=1)\n if len(options[action].replay_buffer) >= min_replay_buffer_size:\n options[action].train(batch_size=batch_size,iterations=1,value_function=value_function)\n\n # Next time step\n 
obs = obs2\n except ValueError as e:\n if verbose:\n tqdm.write(str(e))\n tqdm.write(\"Diverged\")\n raise\n except KeyboardInterrupt:\n data = {'rewards': rewards, \n 'state_action_values': state_action_values,\n 'state_option_values': state_option_values,\n 'entropies': entropies}\n utils.save_results(args, data, file_path=results_file_path)\n\n return (args, rewards, state_action_values)\n\ndef print_policy(agent):\n states = list(itertools.product(range(13),range(13),[1],[1]))\n vals = agent.q_net(torch.tensor(states).float())\n tqdm.write(repr(vals.argmax(dim=1).view(13,13)))\n\ndef plot(results_dir, plot_dir):\n import matplotlib\n matplotlib.use('Agg')\n from matplotlib import pyplot as plt\n\n results = utils.get_all_results(results_dir)\n\n if not os.path.isdir(plot_dir):\n os.makedirs(plot_dir)\n results = list(results)\n for i,trial in enumerate(results):\n params,data = trial\n params = dict(params)\n y1 = np.mean(data['rewards'],axis=1)\n y2 = np.mean(data['state_action_values'],axis=1)\n y3 = np.mean(data['state_option_values'],axis=1)\n x = list(range(0,y1.shape[0]*params['epoch'],params['epoch']))\n\n # Create figure\n fig, (ax1, ax2) = plt.subplots(1,2)\n fig.set_size_inches(10,4)\n # Plot\n ax1.plot(x,y1,label='Rewards')\n ax1.plot(x,y2,label='State-action values')\n ax1.plot(x,y3,label='State-option values')\n ax1.set_ylim([0,1])\n ax1.set_title('[Insert Title Here]')\n ax1.grid(True,which='both',axis='both',color='grey')\n ax1.legend()\n # Show params\n ax2.set_axis_off()\n for j,(pname,pval) in enumerate(sorted(dict(params).items(), key=lambda x: x[0], reverse=True)):\n ax2.text(0,j/len(params),'%s: %s' % (pname, pval))\n file_name = os.path.join(plot_dir,'%d.png'%i)\n fig.savefig(file_name)\n plt.close(fig)\n\n print('Saved', file_name)\n\ndef run():\n utils.set_results_directory(\n os.path.join(utils.get_results_root_directory(),'hrl'))\n directory = os.path.join(utils.get_results_directory(),__name__)\n plot_directory = os.path.join(utils.get_results_directory(),'plots',__name__)\n\n #run_trial(gamma=1,alpha=0.001,eps_b=0,eps_t=0,tau=0.001,net_structure=(5,5),num_options=8,batch_size=256,epoch=1000,test_iters=10,verbose=True,directory=directory)\n #run_trial(gamma=1,alpha=0.01,eps_b=0,eps_t=0,tau=0.01,net_structure=(),num_options=1,batch_size=10,epoch=10, test_iters=3,verbose=True,directory=directory)\n plot(results_dir=directory,plot_dir=plot_directory)\n","sub_path":"experiments/hrl/long_trial_hierarchical_fourrooms.py","file_name":"long_trial_hierarchical_fourrooms.py","file_ext":"py","file_size_in_byte":8990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"505494001","text":"import json\nimport PyconnectorExceptions\n\ndef handle_msg(message, conn):\n try:\n message.pprint(True)\n raise Exception(\"just a test\")\n except Exception:\n ex_msg = {'error_desc': \"some error happened\",\n 'error_code': \"UnknownException\"}\n raise PyconnectorExceptions.SpecificException(json.dumps(ex_msg))\n","sub_path":"default_handler.py","file_name":"default_handler.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"104528867","text":"from PyQt4 import QtCore,QtGui\nfrom controls import Control\nimport threading as th\nimport numpy as np\nimport time\nfrom graph import Graph, VoltGraph\n\nclass Optimizer(QtGui.QDialog):\n \"\"\"docstring for Optimizer\"\"\"\n def __init__(self,parent,beamline,subset):\n super(Optimizer, 
self).__init__(parent)\n\n self.beamline = beamline\n self.subset = subset\n\n self.controls = {}\n self.voltGraphs = {}\n\n self.graph = Graph(self.beamline)\n self.init_UI()\n\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.update)\n self.timer.start(50)\n\n self.updateGraph = False\n\n def init_UI(self):\n self.layout = QtGui.QGridLayout(self)\n \n i = 0\n for n,v in self.beamline.voltages.items():\n if n in self.subset:\n copy = Control(v)\n self.controls[n] = copy\n self.layout.addWidget(copy,1+2*(i%2),i//2)\n\n self.voltGraphs[n] = VoltGraph(self.beamline,n)\n self.layout.addWidget(self.voltGraphs[n],2+2*(i%2),i//2)\n\n i = i + 1\n\n buttons = QtGui.QDialogButtonBox(\n QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel,\n QtCore.Qt.Horizontal,self)\n buttons.accepted.connect(self.accept)\n buttons.rejected.connect(self.reject)\n self.layout.addWidget(buttons,10,i//2)\n\n self.scanButton = QtGui.QPushButton('Scan')\n self.layout.addWidget(self.scanButton,9,i//2)\n self.scanButton.clicked.connect(self.startScan)\n\n self.stopButton = QtGui.QPushButton('Stop')\n self.layout.addWidget(self.stopButton,9,i//2-1)\n self.stopButton.clicked.connect(self.stopScan)\n\n self.setOptimalButton = QtGui.QPushButton('Set to optimal values')\n self.setOptimalButton.clicked.connect(self.beamline.setToOptimal)\n self.layout.addWidget(self.setOptimalButton,9,i//2-2)\n\n self.layout.addWidget(self.graph,0,0,1,i//2)\n\n self.optimal = QtGui.QLabel()\n self.layout.addWidget(self.optimal,0,i//2,1,1)\n\n\n def update(self): \n for c in self.controls.values():\n c.update()\n\n if self.updateGraph:\n self.graph.updateGraph()\n\n for g in self.voltGraphs.values():\n g.updateGraph()\n\n text = [n + ': ' + str(v) for n,v in self.beamline.optimalSettings.items()]\n text = \"\\n\".join(text)\n self.optimal.setText(text)\n\n def startScan(self):\n self.cont = True\n self.graph.clearPlot()\n self.scanThread = th.Thread(target = self.scan)\n self.scanThread.start()\n\n def stopScan(self):\n self.cont = False\n\n def scan(self):\n self.updateGraph = True\n ra = np.linspace(0,10**4,100)\n for n,v in self.beamline.voltages.items():\n if n in self.subset:\n for r in ra:\n if self.cont:\n v.setpoint = r\n time.sleep(0.5)\n else:\n break\n if not self.cont:\n break\n self.beamline.setToOptimal()\n\n self.updateGraph = False\n\n\n def keyPressEvent(self,e):\n if e.key() == QtCore.Qt.Key_Enter:\n e.ignore()\n else:\n e.accept()\n\n @staticmethod\n def optimize(parent,beamline,subset):\n optimizer = Optimizer(parent,beamline,subset)\n result = optimizer.exec_()\n optimizer.cont = False\n return result==QtGui.QDialog.Accepted\n","sub_path":"optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"384282341","text":"#!/usr/bin/python3\nimport sys\nimport os\nimport csv\n\nfrom anchor import AnchorCfg, VariantCfg, md5_checksum\nfrom encoders import get_encoder\nfrom metrics import VariantData, psnr_stats, ssim_stats, avg, bd_q\n\nfrom typing import List, Iterable\n\ndef to_csv(fout:str, fieldnames:List[str], values:List[dict]):\n with open(fout, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for row in values:\n writer.writerow(row)\n\nmetrics_keys = [\n 'psnr_y',\n 'psnr_u',\n 'psnr_v',\n 'psnr_avg',\n 'mse_y',\n 'mse_u',\n 'mse_v',\n 'mse_avg',\n 'ssim_y',\n 'ssim_u',\n 'ssim_v',\n 'ssim_all'\n]\n\ndef 
compute_variant_metrics(variant:VariantCfg) -> VariantData:\n # bit/s\n avg_bitrate = int(os.path.getsize(variant.bitstream) * 8 / variant.anchor.reference.duration)\n v = []\n for frame in zip(psnr_stats(variant), ssim_stats(variant)):\n f = {}\n for metric in frame:\n f.update(metric)\n v.append(f)\n to_csv(f'{variant.anchor.working_dir / variant.basename}.csv', metrics_keys, v)\n return VariantData(variant.basename, avg_bitrate, **avg(v, *metrics_keys))\n\ndef compute_anchor_metrics(anchor:AnchorCfg):\n data = [compute_variant_metrics(v) for v in anchor.variants]\n if len(data) == 0:\n return\n keys = data[0].data.keys()\n to_csv(f'{anchor.working_dir / anchor.basename}.csv', keys, [m.data for m in data])\n return data\n\ndef encode_anchor(anchor:AnchorCfg, recon=True):\n enc = get_encoder(anchor.encoder_id)\n if enc == None:\n raise Exception(f'unknown encoder: {anchor.encoder_id}')\n for var in anchor.variants:\n enc.encode_variant(var, recon=recon)\n\ndef decode_anchor(anchor:AnchorCfg):\n enc = get_encoder(anchor.encoder_id)\n if enc == None:\n raise Exception(f'unknown encoder: {anchor.encoder_id}')\n for var in anchor.variants:\n enc.decode_variant(var)\n\ndef md5_reconstucted(anchor:AnchorCfg):\n for var in anchor.variants:\n h = md5_checksum(var.reconstructed)\n p = var.reconstructed.parent / f'{var.reconstructed.stem}.yuv.md5'\n with p.open('w') as f:\n f.write(h)\n\ndef md5_bitstream(anchor:AnchorCfg):\n for var in anchor.variants:\n h = md5_checksum(var.bitstream)\n p = var.bitstream.parent / f'{var.bitstream.stem}.md5'\n with p.open('w') as f:\n f.write(h)\n\n\ndef man():\n h = \"\"\"\n usage:\n cmd.py cfg.json [encode] [decode] [metrics]\n \n cfg.json\n a valid anchor configuration\n \"\"\"\n print(h)\n\n\ndef parse_args():\n if len(sys.argv) <= 1:\n return None, False, False, False\n\n if not os.path.exists(sys.argv[1]):\n print(f'config file not found {sys.argv[1]}')\n return None, False, False, False\n \n cfg = sys.argv[1]\n\n if len(sys.argv) == 2:\n return cfg, True, True, True\n\n encode = \"encode\" in sys.argv\n decode = \"decode\" in sys.argv\n metrics = \"metrics\" in sys.argv\n \n return cfg, encode, decode, metrics\n\n\ndef main():\n\n cfg, encode, decode, metrics = parse_args()\n\n if (cfg is None) or not (encode or decode or metrics):\n man()\n return\n \n anchor = AnchorCfg.load(cfg)\n\n if encode:\n encode_anchor(anchor, recon=decode)\n md5_bitstream(anchor)\n\n if decode and not encode:\n decode_anchor(anchor)\n\n if decode:\n md5_reconstucted(anchor)\n \n data = None\n\n if metrics:\n data = compute_anchor_metrics(anchor)\n for var in data:\n print(var.to_string())\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"483054677","text":"import sys\nTASK = sys.argv[1]\nimport json\nfor partition in [\"train\", \"val\"]:\n columns = None\n with open(f\"/u/scr/mhahn/PRETRAINED/SuperGLUE/{TASK}/{partition}.jsonl\", \"r\") as inFile:\n with open(f\"/u/scr/mhahn/PRETRAINED/SuperGLUE/{TASK}/{partition if partition != 'val' else 'dev'}.tsv\", \"w\") as outFile:\n for line in inFile:\n line = json.loads(line.strip())\n if columns is None:\n columns = sorted(list(line))\n print(\"\\t\".join(columns), file=outFile)\n assert columns is not None\n print(\"\\t\".join([str(line[x]) for x in columns]), 
file=outFile)\n","sub_path":"preprocessSuperGLUE2TSV.py","file_name":"preprocessSuperGLUE2TSV.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"538204977","text":"#!/usr/bin/env python3\n\nimport struct\n\npw_bytes = [0x6b, 0xcf, 0x2a, 0x4b, 0x6e, 0x5a, 0xca, 0x0f]\n\nfh = open('enc_pw', 'wb')\nfor b in pw_bytes:\n fh.write(struct.pack(\"B\", b))\nfh.close()\n","sub_path":"boxes/cascade/pw/w.py","file_name":"w.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"458118818","text":"from datetime import datetime\nimport logging\nimport base64\nimport gzip\nimport pickle\nfrom pydiscourse import DiscourseClient\n\nlogger = logging.getLogger()\ndeb = logger.debug\ninfo = logger.info\nwarn = logger.warn\nerr = logger.error\n\ndef generate_post_winners(all_items:list) -> str:\n post='*This post is generated by the rafflebot! All code is open source, please \\\n see https://github.com/vhs/raffle to see how this is generated, and how you can verify the results at home.*\\n\\n'\n post+='
# Raffle Results
\\n\\n'\n for item in all_items:\n post+=f\"**{item['description']}**\\n\\n\"\n for i,entrant in enumerate(item['sorted_winner_list']):\n post+=f\"{i+1}. {entrant['username']} - {entrant['user-item-dice-result'].hex()[:8]}...\\n\"\n post+='\\n\\n'\n return post\n\ndef generate_post_data(all_items:list) -> str:\n post='*This post is generated by the rafflebot! All code is open source, please \\\n see https://github.com/vhs/raffle to see how this is generated, and how you can verify the results at home.*\\n\\n'\n post+='
# Data Dump
\\n\\n'\n\n post+='[details=\"Base64 data\"]\\n`'\n post+=base64.b64encode(gzip.compress(pickle.dumps(all_items))).decode()\n post+='`\\n[/details]'\n return post\n\nclass DiscouseConnection:\n def __init__(self, url, discord_api_key, api_username=\"system\") -> None:\n self._discource_client = DiscourseClient(\n url, api_username=api_username, api_key=discord_api_key\n )\n def make_post(self, topic_id:int, post:str) -> None:\n self._discource_client.create_post(post,topic_id=topic_id)\n\n def get_all_voters(self, post_id, poll_name, option_id):\n results = []\n i = 1\n page = self._discource_client._request(\n \"GET\",\n \"/polls/voters.json\",\n params={\n \"post_id\": post_id,\n \"poll_name\": poll_name,\n \"option_id\": option_id,\n \"page\": i,\n },\n )[\"voters\"][\n option_id\n ] # Hacky way to get voters directly\n results += page\n i += 1\n while len(page) != 0:\n page = self._discource_client._request(\n \"GET\",\n \"/polls/voters.json\",\n params={\n \"post_id\": post_id,\n \"poll_name\": poll_name,\n \"option_id\": option_id,\n \"page\": i,\n },\n )[\"voters\"][option_id]\n results += page\n i += 1\n # Ugh that (^^^) was a lame way of doing this, I was tired,\n # TODO: Make this cooler/cleaner for pagination and the actual request\n return results\n\n def get_all_polls(self, post_id:int, close_time_override=None) -> list:\n assert isinstance(post_id, int)\n topic = self._discource_client.topic_posts(str(post_id))\n all_poll_items = []\n for post in topic[\"post_stream\"][\"posts\"]:\n if \"polls\" not in post:\n continue # Skip if this post doesn't have any polls in it (most will skip, only a few polls per post)\n\n for poll in post[\"polls\"]:\n for item in poll[\"options\"]:\n winnable_item = {}\n winnable_item[\"description\"] = item[\"html\"]\n winnable_item[\"id\"] = item[\"id\"]\n if close_time_override:\n winnable_item[\"close_time\"] = close_time_override\n else:\n try:\n winnable_item[\"close_time\"] = int(\n datetime.fromisoformat(\n poll[\"close\"].replace(\"Z\", \"+00:00\")\n ).timestamp()\n )\n except:\n err(\n \"Problem with close time for poll. Close time is used for hash generation and is needed. \\\n You can specify from command line if needed\"\n )\n exit()\n winnable_item[\"entrants\"] = self.get_all_voters(\n post[\"id\"], poll[\"name\"], item[\"id\"]\n )\n all_poll_items.append(winnable_item)\n return all_poll_items\n","sub_path":"libs/discourse_helper.py","file_name":"discourse_helper.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"589999590","text":"n,m = map(int,input().split())\nstore = []\ncan_num = m\ncost = 0\n\nfor i in range(n):\n store.append(tuple(map(int,input().split())))\n\nstore.sort()\n\nwhile can_num > 0:\n lowest_store = store.pop(0)\n buy_num = lowest_store[1] if lowest_store[1] < can_num else can_num\n cost += buy_num * lowest_store[0]\n can_num -= buy_num\n\nprint(cost)\n\n","sub_path":"ABC/ABC121/C-ABC121.py","file_name":"C-ABC121.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"258489785","text":"\"\"\"Performance Evaluation Module\n\nContains the PerformanceEval class which is used\nto evaluate the performance of a classifier. 
This class\nallows for recall, precision, F1, and accuracy to be\ncalculated.\n\n\"\"\"\nclass PerformanceEval(object):\n \"\"\"Class object used to evaluate classifier performance.\"\"\"\n\n def __init__(self):\n self.TP = 0\n self.FN = 0\n self.FP = 0\n self.TN = 0\n self.total = 0\n\n def add_result(self, prediction, label):\n \"\"\"Add a prediction result to the set.\"\"\"\n self.total += 1\n if abs(prediction - 1.0) < 1E-10:\n if abs(label - 1.0) < 1E-10:\n self.TP += 1\n else:\n self.FP += 1\n else:\n if abs(label - 1.0) < 1E-10:\n self.FN += 1\n else:\n self.TN += 1\n\n def __add__(self, other):\n new = PerformanceEval()\n new.TP = self.TP + other.TP\n new.FN = self.FN + other.FN\n new.FP = self.FP + other.FP\n new.TN = self.TN + other.TN\n new.total = self.total + other.total\n return new\n\n def accuracy(self):\n \"\"\"Returns the accuracy of all classifications\"\"\"\n return float(self.TP + self.TN)/float(self.total)\n\n def recall(self):\n \"\"\"Returns the recall of all classifications\"\"\"\n return float(self.TP)/float(self.TP + self.FN)\n\n def precision(self):\n \"\"\"Returns the precision of all classifications\"\"\"\n return float(self.TP)/float(self.TP + self.FP)\n\n def F1(self):\n \"\"\"Returns the F1 score of all classifications\"\"\"\n return float(2*self.TP)/float(2*self.TP + self.FP + self.FN)\n\n\n","sub_path":"src/final_mini_project/performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"92372783","text":"import rethinkdb as r\n\nfrom pprint import pprint\nfrom bottle import get, post, put, delete, route, run, template, request\nfrom bigchaindb import Bigchain\nb = Bigchain()\n\nconn = r.connect(host=\"localhost\", port=28015, db=\"bigchain\")\n\n# create a test user\ntestuser1_priv, testuser1_pub = b.generate_keys()\n\ndef get_tx_from_block(block):\n tx_id = block.get(\"block\").get(\"transactions\")[0].get(\"id\")\n tx_retrieved = b.get_transaction(tx_id)\n\n return tx_retrieved\n\ndef get_payload_from_tx(tx):\n payload = tx.get(\"transaction\").get(\"data\").get(\"payload\")\n\n return payload\n\n@route('/api/assets', method=\"PUT\")\ndef put_assets():\n body = request.json\n\n if \"activity\" not in body:\n return \"activity is missing.\\n\"\n\n activity = body.get(\"activity\")\n\n # define a digital asset data payload\n asset_payload = {\n \"activity\": activity\n }\n\n # a create transaction uses the operation `CREATE` and has no inputs\n tx = b.create_transaction(b.me, testuser1_pub, None, 'CREATE', payload=asset_payload)\n\n # all transactions need to be signed by the user creating the transaction\n tx_signed = b.sign_transaction(tx, b.me_private)\n\n # write the transaction to the bigchain\n # the transaction will be stored in a backlog where it will be validated,\n # included in a block, and written to the bigchain \n b.write_transaction(tx_signed)\n\n return \"activity \\\"%s\\\" was accepted\" % activity\n\n@route('/api/reliabilities', method=\"GET\")\ndef get_reliabilities():\n blocks = r.table(\"bigchain\").run(conn)\n reliability = 0\n\n for block in blocks:\n tx = get_tx_from_block(block)\n payload = get_payload_from_tx(tx)\n\n if payload.get(\"activity\") == \"消防団\":\n reliability += 100\n else:\n reliability += 1\n\n return '{\"reliability\": %s}' % reliability\n\nrun(host='localhost', 
port=8082)\n","sub_path":"server/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"352526404","text":"# Copyright 2019 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport mock\n\nfrom c7n.actions.webhook import Webhook\nfrom jsonschema.exceptions import ValidationError\nfrom .common import BaseTest\n\n\nclass WebhookTest(BaseTest):\n\n def test_valid_policy(self):\n policy = {\n \"name\": \"webhook-batch\",\n \"resource\": \"ec2\",\n \"actions\": [\n {\n \"type\": \"webhook\",\n \"url\": \"http://foo.com\",\n }\n ],\n }\n\n self.assertTrue(self.load_policy(data=policy, validate=True))\n\n policy = {\n \"name\": \"webhook-batch\",\n \"resource\": \"ec2\",\n \"actions\": [\n {\n \"type\": \"webhook\",\n \"url\": \"http://foo.com\",\n \"batch\": True,\n \"query-params\": {\n \"foo\": \"bar\"\n }\n }\n ],\n }\n\n self.assertTrue(self.load_policy(data=policy, validate=True))\n\n def test_invalid_policy(self):\n # Missing URL parameter\n policy = {\n \"name\": \"webhook-batch\",\n \"resource\": \"ec2\",\n \"actions\": [\n {\n \"type\": \"webhook\"\n }\n ],\n }\n\n with self.assertRaises(ValidationError):\n self.load_policy(data=policy, validate=True)\n\n # Bad method\n policy = {\n \"name\": \"webhook-batch\",\n \"resource\": \"ec2\",\n \"actions\": [\n {\n \"type\": \"webhook\",\n \"url\": \"http://foo.com\",\n \"method\": \"CREATE\"\n }\n ],\n }\n\n with self.assertRaises(ValidationError):\n self.load_policy(data=policy, validate=True)\n\n @mock.patch('c7n.actions.webhook.urllib3.PoolManager.request')\n def test_process_batch(self, request_mock):\n resources = [\n {\n \"name\": \"test_name\",\n \"value\": \"test_value\"\n },\n {\n \"name\": \"test_name\",\n \"value\": \"test_value\"\n },\n {\n \"name\": \"test_name\",\n \"value\": \"test_value\"\n },\n {\n \"name\": \"test_name\",\n \"value\": \"test_value\"\n },\n {\n \"name\": \"test_name\",\n \"value\": \"test_value\"\n }\n ]\n\n data = {\n \"url\": \"http://foo.com\",\n \"batch\": True,\n \"batch-size\": 2,\n \"query-params\": {\n \"foo\": \"resources[0].name\"\n }\n }\n\n wh = Webhook(data=data, manager=self._get_manager())\n wh.process(resources)\n req = request_mock.call_args[1]\n\n # 5 resources with max batch size 2 == 3 calls\n self.assertEqual(3, len(request_mock.call_args_list))\n\n # Check out one of the calls in detail\n self.assertEqual(\"http://foo.com?foo=test_name\", req['url'])\n self.assertEqual(\"POST\", req['method'])\n self.assertEqual({}, req['headers'])\n\n @mock.patch('c7n.actions.webhook.urllib3.PoolManager.request')\n def test_process_batch_body(self, request_mock):\n resources = [\n {\n \"name\": \"test_name\",\n \"value\": \"test_value\"\n }\n ]\n\n data = {\n \"url\": \"http://foo.com\",\n \"batch\": True,\n \"body\": \"resources[].name\",\n \"body-size\": 10,\n \"headers\": 
{\n \"test\": \"`header`\"\n },\n \"query-params\": {\n \"foo\": \"resources[0].name\"\n }\n }\n\n wh = Webhook(data=data, manager=self._get_manager())\n wh.process(resources)\n req = request_mock.call_args[1]\n\n self.assertEqual(\"http://foo.com?foo=test_name\", req['url'])\n self.assertEqual(\"POST\", req['method'])\n self.assertEqual(b'[\"test_name\"]', req['body'])\n self.assertEqual(\n {\"test\": \"header\", \"Content-Type\": \"application/json\"},\n req['headers'])\n\n @mock.patch('c7n.actions.webhook.urllib3.PoolManager.request')\n def test_process_no_batch(self, request_mock):\n resources = [\n {\n \"name\": \"test1\",\n \"value\": \"test_value\"\n },\n {\n \"name\": \"test2\",\n \"value\": \"test_value\"\n }\n ]\n\n data = {\n \"url\": \"http://foo.com\",\n \"query-params\": {\n \"foo\": \"resource.name\"\n }\n }\n\n wh = Webhook(data=data, manager=self._get_manager())\n wh.process(resources)\n req1 = request_mock.call_args_list[0][1]\n req2 = request_mock.call_args_list[1][1]\n\n self.assertEqual(\"http://foo.com?foo=test1\", req1['url'])\n self.assertEqual(\"http://foo.com?foo=test2\", req2['url'])\n\n @mock.patch('c7n.actions.webhook.urllib3.PoolManager.request')\n def test_process_existing_query_string(self, request_mock):\n resources = [\n {\n \"name\": \"test1\",\n \"value\": \"test_value\"\n },\n {\n \"name\": \"test2\",\n \"value\": \"test_value\"\n }\n ]\n\n data = {\n \"url\": \"http://foo.com?existing=test\",\n \"query-params\": {\n \"foo\": \"resource.name\"\n }\n }\n\n wh = Webhook(data=data, manager=self._get_manager())\n wh.process(resources)\n\n req1 = request_mock.call_args_list[0][1]\n req2 = request_mock.call_args_list[1][1]\n\n self.assertIn(\"existing=test\", req1['url'])\n self.assertIn(\"foo=test1\", req1['url'])\n self.assertIn(\"existing=test\", req2['url'])\n self.assertIn(\"foo=test2\", req2['url'])\n\n @mock.patch('c7n.actions.webhook.urllib3.PoolManager.request')\n def test_process_policy_metadata(self, request_mock):\n resources = [\n {\n \"name\": \"test1\",\n \"value\": \"test_value\"\n },\n {\n \"name\": \"test2\",\n \"value\": \"test_value\"\n }\n ]\n\n data = {\n \"url\": \"http://foo.com\",\n \"query-params\": {\n \"policy\": \"policy.name\"\n }\n }\n\n wh = Webhook(data=data, manager=self._get_manager())\n wh.process(resources)\n req1 = request_mock.call_args_list[0][1]\n req2 = request_mock.call_args_list[1][1]\n\n self.assertEqual(\"http://foo.com?policy=webhook_policy\", req1['url'])\n self.assertEqual(\"http://foo.com?policy=webhook_policy\", req2['url'])\n\n def _get_manager(self):\n \"\"\"The tests don't require real resource data\n or recordings, but they do need a valid manager with\n policy metadata so we just make one here to use\"\"\"\n\n policy = self.load_policy({\n \"name\": \"webhook_policy\",\n \"resource\": \"ec2\",\n \"actions\": [\n {\n \"type\": \"webhook\",\n \"url\": \"http://foo.com\"}\n ]})\n\n return policy.resource_manager\n","sub_path":"tests/test_webhook.py","file_name":"test_webhook.py","file_ext":"py","file_size_in_byte":7888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"48364385","text":"import csv\n\nimport fasttext\nfrom pathlib import Path\n\nfrom Intent import Intent\nfrom IntentService import IntentService\nfrom preprocess import removeSpecialCharactersAndToLower as prepInput\n\nrawTraining = Path(\"../classifierTraining.csv\")\nfastTextTraining = Path(\"../fastTextTraining.txt\")\n\n\nclass fasttext_IntentService(IntentService):\n def 
__init__(self):\n        if Path(\"./fastTextClassifierModel.bin\").exists():\n            self.model = fasttext.load_model(\"./fastTextClassifierModel.bin\")\n        else:\n            self.generateTrainingData()\n            self.model = fasttext.train_supervised(\"../fastTextTraining.txt\", wordNgrams=2, dim=300,\n                                                   pretrainedVectors=\"wiki-news-300d-1M.vec\")\n            self.model.save_model(\"./fastTextClassifierModel.bin\")\n\n    def generateTrainingData(self):\n        with rawTraining.open() as input, fastTextTraining.open(\"w\") as output:\n            reader = csv.reader(input)\n            for line in reader:\n                label = line[0]\n                text = prepInput(line[1])\n\n                fastTextLine = \"__label__{} {}\".format(label, text)\n                output.write(fastTextLine + \"\\n\")\n\n    def getIntent(self, text):\n        intentName = self.model.predict(prepInput(text))[0][0].replace(\"__label__\", \"\")\n        return Intent(intentName)\n\n\nif __name__ == \"__main__\":\n    c = fasttext_IntentService()\n    print(c.getIntent(\"I have to go.\"))\n    print(c.model.test(\"../fastTextEvalSet.txt\"))\n","sub_path":"thesis/src/fasttext_IntentService.py","file_name":"fasttext_IntentService.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"515220463","text":"import string\nfrom rdflib import Graph, Literal, BNode, Namespace, RDF, URIRef\nfrom rdflib.namespace import DC, FOAF\n\ng = Graph()\ng.parse(\"theboy.rdf\")\n\n\nimport pprint\n#for stmt in g:\n#\tpprint.pprint(stmt)\n#ns1 = Namespace('http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#Event')\n#g.bind(\"dul\", ns1)\n\ndef updateAmrVarList(strg, amrVarList):\n\tnome = strg.split(\"#\")[-1]\n\tvar = nome[0]\n\tinsert = True\n\tomonimia = False\n\tfor (v,n) in amrVarList:\n\t\tif(v == var and n == nome):\n\t\t\tinsert = False\n\t\tif(v == var and n != nome):\n\t\t\tomonimia = True\n\t\t\n\tif(insert):\n\t\tif(omonimia):\n\t\t\ti = 2\n\t\t\twhile((var+str(i),nome) in amrVarList):\n\t\t\t\ti = i+1\n\t\t\tvar = var+str(i)\n\t\tamrVarList.append((var, nome))\n\treturn amrVarList\n\t\ndef getPrintableAmrName(strg, amrVarList):\n\tnome = strg.split(\"#\")[-1]\n\tprintableName = \"None\"\n\tfor (v,n) in amrVarList:\n\t\tif(n == nome):\n\t\t\tprintableName = v + \" / \" + nome\n\treturn printableName\n\ndef printAMR(verbo, listaArgomenti, indiceArgomenti, chiusura, soloArgomenti, soloVerbo, argomentoVerbo, tabdeep, amrVarList):\n\n\tstringa = \"\"\n\tif(not soloArgomenti):\n\t\tamrVarList = updateAmrVarList(str(verbo), amrVarList)\n\t\tstringa = \"\\n(\"+getPrintableAmrName(str(verbo), amrVarList)\n\tif(not soloVerbo):\n\t\ti = indiceArgomenti\n\t\ttabs = \"\\n\\t\"\n\t\tfor tab in range(tabdeep):\n\t\t\ttabs = tabs + \"\\t\" \n\t\tfor arg in listaArgomenti:\n\t\t\tamrVarList = updateAmrVarList(str(arg), amrVarList)\n\t\t\tstringa = stringa + tabs + \":ARG\" + str(i) + \"(\" + getPrintableAmrName(str(arg), amrVarList) + \")\"\n\t\t\ti = i+1\n\t\tif(argomentoVerbo):\n\t\t\tstringa = stringa[:-1] # remove the last parenthesis\n\tif(chiusura):\n\t\tstringa = stringa + \")\"\n\tprint(stringa)\n\treturn amrVarList\n\ndef recArgs(m, args, counter, bkp, mlist, firstPrint, mchecklist, tabs, amrVarList):\n\ta = []\n\tif(firstPrint):\n\t\tamrVarList = printAMR(m, args, counter, False, False, True, False, 0, amrVarList)\n\tfor x in args:\n\t\tif(str(x) != \"None\"):\n\t\t\ta.append(x)\n\t\t\tif(str(x) in mlist and (not mchecklist[mlist.index(str(x))])):\n\t\t\t\tamrVarList = printAMR(m, a, counter, False, True, False, True, tabs, amrVarList)\n\t\t\t\tcounter = len(a)\n\t\t\t\ta = []\n\t\t\t\tfor (bm,bargs) in bkp:\n\t\t\t\t\tif(str(bm) == mlist[mlist.index(str(x))]):\n\t\t\t\t\t\tmchecklist[mlist.index(str(x))] = True\n\t\t\t\t\t\trecArgs(bm, bargs, 0, bkp, mlist, False, mchecklist, tabs+1, amrVarList)\n\t\t\t\t\t\t\n\tif(len(args) != 0):\n\t\tamrVarList = printAMR(m, a, counter, True, True, False, False, tabs, amrVarList)\n\treturn mchecklist\n\t\t\n#for mainverb in mainverbList:\nqres = g.query(\n\t\"\"\"SELECT DISTINCT ?mainverb ?j_1 ?j_2 ?j_3\n\t\tWHERE {\n\t\t  ?mainverb a ?verb . \n\t\t  ?verb rdfs:subClassOf <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#Event> .\n\t\t  OPTIONAL{?mainverb j.0:Theme ?j_1} .\n\t\t  OPTIONAL{?mainverb j.0:Agent ?j_2} .\n\t\t  OPTIONAL{?mainverb j.0:Experiencer ?j_3}\n\t\t}\"\"\")\nmlist = []\nbkp = []\nmchecklist = []\n\n# check whether there are any corefs\nqres2 = g.query(\n\t\"\"\"SELECT DISTINCT ?x ?y\n\t\tWHERE {\n\t\t  ?x j.1:other_coref ?y\n\t\t}\"\"\")\n\t\t\nfor (m, t, a, e) in qres : \n\t\tmlist.append(str(m))\n\t\targs = []\n\t\tfor x in [t, a, e]:\n\t\t\tif(str(x) != \"None\"):\n\t\t\t\t# if a coref is present\n\t\t\t\tfor (xx, yy) in qres2:\n\t\t\t\t\tif(x == yy):\n\t\t\t\t\t\tx = xx\n\t\t\t\targs.append(x)\n\t\tbkp.append((m,args))\n\t\tmchecklist.append(False)\n\ncounter = 0\nfor (m, t, a, e) in qres : \n\t\t#print(\"MAINVERB:%s\\nTHEME:%s\\nAGENT:%s\\nEXPERIENCER:%s\" %(m, t, a, e))\n\t\targs = []\n\t\tfor x in [t, a, e]:\n\t\t\tif(str(x) != \"None\"):\n\t\t\t\t# if a coref is present\n\t\t\t\tfor (xx, yy) in qres2:\n\t\t\t\t\tif(x == yy):\n\t\t\t\t\t\tx = xx\n\t\t\t\targs.append(x)\n\t\t\n\t\tif(not mchecklist[counter]): # if it has not been processed yet\n\t\t\tmchecklist = recArgs(m, args, 0, bkp, mlist, True, mchecklist, 0, [])\n\t\tmchecklist[counter] = True\n\t\tcounter = counter + 1\n","sub_path":"parser - REGOLE 1+2.py","file_name":"parser - REGOLE 1+2.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"66405284","text":"import pyglet, random, util\n\n\nclass Velocity(object):\n    \"\"\"Holds x and y values correlating to the velocity of an object.\n    \"\"\"\n\n    x = 0\n    y = 0\n\n    def __init__(self, x=0, y=0):\n        self.x = x\n        self.y = y\n\n    @classmethod\n    def random(self, limit=100):\n        return Velocity(random.randrange(-limit, limit), random.randrange(-limit, limit))\n\n    def __div__(self, value):\n        if isinstance(value, Velocity):\n            self.x /= value.x\n            self.y /= value.y\n        else:\n            self.x /= value\n            self.y /= value\n        return self\n\n    def __add__(self, value):\n        if isinstance(value, Velocity):\n            self.x += value.x\n            self.y += value.y\n        else:\n            self.x += value\n            self.y += value\n        return self\n\n    def __radd__(self, value):\n        return self.__add__(value)\n\n    def __mul__(self, value):\n        if isinstance(value, Velocity):\n            self.x *= value.x\n            self.y *= value.y\n        else:\n            self.x *= value\n            self.y *= value\n        return self\n\n    def __rmul__(self, value):\n        return self.__mul__(value)\n\nclass GameObject(pyglet.sprite.Sprite):\n    \"\"\"A generic object for all game objects that need interaction.\n\n    Attributes:\n        velocity = Represents this object's velocity through space\n        dead = Whether or not we should remove this object on a scene's update()\n        new_objects = A list containing any new objects we would like to add to the objects container\n        event_handlers = Any input event handlers we'd like to add to a given object\n    \"\"\"\n\n    velocity = None\n    dead = False\n    new_objects = []\n    event_handlers = []\n\n    def __init__(self, *args, 
**kwargs)\n self.velocity = Velocity()\n\n def update(self, dt):\n if not self.dead and not self.image:\n return\n\n self.x += self.velocity.x * dt\n self.y += self.velocity.y * dt\n\n self.check_bounds()\n\n def check_bounds(self):\n \"\"\"Check to see if the object has left the screen.\n If it does, move it to the opposing side.\n \"\"\"\n\n min_x = -self.image.width/2\n min_y = -self.image.height/2\n max_x = 800 + self.image.width/2\n max_y = 600 + self.image.height/2\n\n if self.x < min_x:\n self.x = max_x\n elif self.x > max_x:\n self.x = min_x\n\n if self.y < min_y:\n self.y = max_y\n elif self.y > max_y:\n self.y = min_y\n\n def collides_with(self, other_object):\n \"\"\"Whether or not two objects have collided.\n It is intended to overload this to determine whether two objects\n should even bother calculating the distance between them.\n \"\"\"\n\n if self.dead or other_object.dead or not self.image:\n return False\n\n collision_distance = self.image.width/2 + other_object.image.width/2\n actual_distance = util.distance(self.position, other_object.position)\n\n return actual_distance <= collision_distance\n","sub_path":"fable/gameobject.py","file_name":"gameobject.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"506008335","text":"#!python3\n\nfrom mypl_util import *\nfrom mypl_token import *\n\nimport mypl_ast\nimport mypl_symbol_table\n\n#import sys\n#import mypl_ast_printer\n#print_visitor = mypl_ast_printer.ASTPrintVisitor(sys.stdout)\n\nclass TypeChecker(mypl_ast.Visitor):\n\n def __init__(self):\n self.sym = mypl_symbol_table.SymbolTable()\n self.ctype = None\n\n def visit_stmt_list(self, stmt_list):\n self.sym.push_environment()\n for stmt in stmt_list.stmts:\n stmt.accept(self)\n self.sym.pop_environment()\n\n # HELPER FUNCTIONS\n # ~~~~~~~~~~~~~~~~\n\n def __gettype_or_fail(self, identifier):\n if self.sym.variable_exists(identifier.lexeme):\n return self.sym.get_variable_type(identifier.lexeme)\n else:\n identifier.error(\"undefined variable '\"+xstr(identifier.lexeme)+\"'\")\n return None\n\n def __checkrel_int(self, rel):\n return (rel.type in [Token.PLUS, Token.MINUS, Token.MULTIPLY, Token.DIVIDE, Token.MODULUS, \\\n Token.EQUAL, Token.LESS_THAN, Token.GREATER_THAN, Token.LESS_THAN_EQUAL, \\\n Token.GREATER_THAN_EQUAL, Token.NOT_EQUAL])\n\n def __checkrel_string(self, rel):\n return rel.type == Token.PLUS\n\n def __checkrel_bool(self, rel):\n return (rel.type in [Token.EQUAL, Token.NOT_EQUAL])\n\n def __checkrel_compare(self, rel):\n return (rel.type in [Token.EQUAL, Token.LESS_THAN, Token.GREATER_THAN, Token.LESS_THAN_EQUAL, \\\n Token.GREATER_THAN_EQUAL, Token.NOT_EQUAL])\n\n # PRINT/READ STATEMENTS\n # ~~~~~~~~~~~~~~~~~~~~~\n\n def visit_print_stmt(self, print_stmt):\n print_stmt.expr.accept(self)\n\n def visit_read_expr(self, read_expr):\n read_expr.expr.accept(self)\n if read_expr.is_read_int:\n self.ctype = Token.INT\n else:\n self.ctype = Token.STRING\n\n # LEN EXPR\n # ~~~~~~~~\n\n def visit_len_expr(self, len_expr):\n len_expr.expr.accept(self)\n\n # ASSIGN STATEMENT\n # ~~~~~~~~~~~~~~~~\n\n def visit_assign_stmt(self, assign_stmt):\n # name and type of variable being modified\n var_name = assign_stmt.lhs.lexeme\n is_index = False\n\n # accept indexed ID (modifing element of variable instead of variable)\n if assign_stmt.index_expr != None:\n assign_stmt.index_expr.accept(self)\n is_index = True\n\n # accept rhs\n assign_stmt.rhs.accept(self)\n\n # get type of 
variable, or add it if it doesn't exist\n if self.sym.variable_exists(var_name):\n var_type = self.sym.get_variable_type(var_name)\n\n if is_index:\n if var_type == Token.ARRAY or var_type == Token.STRING:\n return\n else:\n assign_stmt.first_token().error(\"cannot access index on the type \" + xstr(var_type))\n return\n\n # check if match\n if self.ctype != var_type and var_type != Token.NA and self.ctype != Token.NA:\n assign_stmt.first_token().error(\"expected \" + xstr(var_type) + \" for '\" + \\\n xstr(var_name) + \"', got \" + xstr(self.ctype))\n else:\n self.sym.set_variable_type(var_name, self.ctype)\n else:\n if is_index:\n assign_stmt.first_token().error(\"cannot access index on nonexistent variable, \" + xstr(var_name))\n\n self.sym.add_variable(var_name)\n self.sym.set_variable_type(var_name, self.ctype)\n\n\n # EXPR/ID/VALUE STATEMENTS\n # ~~~~~~~~~~~~~~~~~~~~~~~~\n\n def visit_simple_expr(self, simple_expr):\n term = simple_expr.term\n\n if term.type == Token.ID:\n # variable\n self.ctype = self.__gettype_or_fail(term)\n else:\n # primitive\n self.ctype = term.type\n\n return\n\n def visit_index_expr(self, index_expr):\n # identifier[expr]\n\n # first check if variable exists and is an array\n array_name = index_expr.identifier.lexeme\n array_type = self.__gettype_or_fail(index_expr.identifier)\n\n if array_type != Token.ARRAY and array_type != Token.STRING:\n index_expr.identifier.error(\"expected an array or string type for index access on '\" + xstr(array_name) + \"', got \" + xstr(array_type))\n\n # accept index expressions\n index_expr.expr.accept(self)\n\n # indices should be ints\n if self.ctype != Token.INT and self.ctype != Token.NA:\n index_expr.first_token().error('expected INT, got ' + xstr(self.ctype))\n\n # set current type\n self.ctype = Token.NA\n\n def visit_list_expr(self, list_expr):\n # common_type is the type that all items (item_type) in the list should match\n # it is set to the type of the first item in the list\n common_type = None\n\n # Make sure everything in the array has the same type\n for expr in list_expr.expressions:\n expr.accept(self)\n item_type = self.ctype\n\n if common_type == None:\n common_type = item_type\n elif item_type != common_type:\n expr.first_token().error(\"expected \" + xstr(common_type) + \", got \" + xstr(item_type))\n\n # Set to curren type to array type of common_type\n self.ctype = Token.ARRAY\n\n def visit_complex_expr(self, complex_expr):\n # accept left operand\n complex_expr.first_operand.accept(self)\n left_type = self.ctype\n rel = complex_expr.rel\n\n # accept right operand\n complex_expr.second_operand.accept(self)\n right_type = self.ctype\n\n # Both operands must be of same type\n if left_type != right_type:\n if left_type == Token.STRING and right_type == Token.INT:\n # allow string and int concatenation if int is right-hand side\n self.ctype = Token.STRING\n elif left_type == Token.NA or right_type == Token.NA:\n # If either operand is NA, then ctype should become name\n self.ctype = Token.NA\n else:\n complex_expr.second_operand.first_token().error(\"expected \" + xstr(left_type) + \", got \" + xstr(right_type))\n else:\n # ctype should right now be set to right_type\n # left_type and right_type are the same, so no need to change anything\n pass\n\n if left_type == Token.ARRAY:\n if rel != Token.PLUS:\n rel.error(\"cannot perform \" + rel.type + \" on ARRAY type\")\n elif left_type == Token.INT:\n if self.__checkrel_int(rel) == False:\n rel.error(\"cannot perform \" + rel.type + \" on INT type\")\n elif 
left_type == Token.STRING:\n if self.__checkrel_string(rel) == False:\n rel.error(\"cannot perform \" + rel.type + \" on STRING type\")\n elif left_type == Token.BOOL:\n if self.__checkrel_bool(rel) == False:\n rel.error(\"cannot perform \" + rel.type + \" on BOOL type\")\n\n # BOOLEAN EXPRESSIONS\n # ~~~~~~~~~~~~~~~~~~~\n\n def visit_simple_bool_expr(self, simple_bool_expr):\n simple_bool_expr.expr.accept(self)\n\n if self.ctype != Token.BOOL and self.ctype != Token.NA:\n simple_bool_expr.expr.first_token().error('condition must be of BOOL type, instead got ' + xstr(self.ctype))\n\n def visit_complex_bool_expr(self, complex_bool_expr):\n complex_bool_expr.first_expr.accept(self)\n first_type = self.ctype\n\n complex_bool_expr.second_expr.accept(self)\n second_type = self.ctype\n\n # Both operands must be of same type\n if first_type != second_type:\n complex_bool_expr.second_expr.first_token().error('expected '+xstr(first_type)+', got ' + xstr(second_type))\n\n # Check operator\n if first_type == Token.INT:\n if self.__checkrel_compare(complex_bool_expr.bool_rel) == False:\n complex_bool_expr.bool_rel.error( \\\n 'cannot use ' + complex_bool_expr.bool_rel.type + ' to compare INT types')\n elif first_type == Token.BOOL:\n if self.__checkrel_bool(complex_bool_expr.bool_rel) == False:\n complex_bool_expr.bool_rel.error( \\\n 'cannot use ' + complex_bool_expr.bool_rel.type + ' to compare BOOL types')\n else:\n complex_bool_expr.first_expr.first_token().error('encountered uncomparable type ' + xstr(first_type))\n\n if complex_bool_expr.has_bool_connector:\n # 'second_operand' is either a complex or simple bool expr, both of which\n # will set ctype to BOOL. So no need to check anything here.\n complex_bool_expr.second_operand.accept(self)\n\n self.ctype = Token.BOOL\n\n # IF STATEMENTS\n # ~~~~~~~~~~~~~\n\n def visit_if_stmt(self, if_stmt):\n # IF\n if_stmt.if_part.bool_expr.accept(self)\n # THEN\n if_stmt.if_part.stmt_list.accept(self)\n for elseif in if_stmt.elseifs:\n # ELSE IF\n elseif.bool_expr.accept(self)\n # THEN\n elseif.stmt_list.accept(self)\n if if_stmt.has_else:\n # ELSE\n if_stmt.else_stmts.accept(self)\n\n # LOOP STATEMENTS\n # ~~~~~~~~~~~~~~~\n\n def visit_while_stmt(self, while_stmt):\n # WHILE\n while_stmt.bool_expr.accept(self)\n # DO\n while_stmt.stmt_list.accept(self)\n\n","sub_path":"MyPL/mypl_type_checker.py","file_name":"mypl_type_checker.py","file_ext":"py","file_size_in_byte":9471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"560814465","text":"import tornado\nfrom tornado import gen, httpclient\nfrom tornado.ioloop import IOLoop\nfrom tornado.httpserver import HTTPServer\nfrom tornado.log import app_log\nfrom tornado.options import define, options, parse_command_line\nfrom tornado.web import asynchronous, Application, RequestHandler, URLSpec\n\nclass MainHandler(tornado.web.RequestHandler):\n \"\"\"\n Blocking\n \"\"\"\n def get(self):\n\n self.write(\"Hello, world\")\n\nclass MainHandlerAsync(RequestHandler):\n \"\"\"\n Async\n \"\"\"\n @asynchronous\n @gen.engine\n def get(self):\n\n req = httpclient.HTTPRequest('http://ya.ru', method='GET')\n client = httpclient.AsyncHTTPClient()\n # don't let the yield call confuse you, it's just Tornado helpers to make\n # writing async code a bit easier. 
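The `@asynchronous`/`gen.Task` pattern in the Tornado record that begins just above comes from Tornado's callback era and no longer exists in Tornado 6, where handlers are written as native coroutines. A sketch of the equivalent handler, assuming Tornado 6.x and the same `ya.ru` target as the record:

```python
from tornado import httpclient
from tornado.web import RequestHandler

class MainHandlerAsync(RequestHandler):
    async def get(self):
        client = httpclient.AsyncHTTPClient()
        # fetch() returns a Future; awaiting it yields control to the IOLoop
        response = await client.fetch("http://ya.ru")
        self.write(response.body)
        self.finish("from asynchronous")
```

Note that the record's `__main__` block below also starts the IOLoop twice; the second `start()` call can only run once the first loop has stopped, so one of the two is redundant.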
This is the same as doing\n # client.fetch(req, callback=_some_other_helper_function)\n response = yield gen.Task(client.fetch, req)\n self.write(response.body)\n ### do something with the response (response.body)\n self.finish(\"from asynchronous\")\n\ndef make_app():\n return tornado.web.Application([\n URLSpec(r'/async', MainHandlerAsync),\n (r\"/\", MainHandler),\n ])\n\nif __name__ == \"__main__\":\n app = make_app()\n server = HTTPServer(app)\n app.listen(8888)\n io_loop = IOLoop.instance()\n io_loop.start()\n tornado.ioloop.IOLoop.current().start()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"642514116","text":"'''\n\nInput = raw VCF\nOutput = filtered VCF prints to screen\n- Filtered sites are marked as FAIL_? in the 7th column\n- Sites that pass go on to genotype filtering\n- Filtered out genotypes are changed to './.', all others reported\n\nPossible usage:\n\nMYDIR=/u/home/j/jarobins/project-rwayne\nTABIX=${MYDIR}/utils/programs/htslib-1.3.1/tabix\nINTERSECTBED=${MYDIR}/utils/programs/bedtools2/bin/intersectBed\nBGZIP=${MYDIR}/utils/programs/htslib-1.3.1/bgzip\nCPGandREPEATS=${MYDIR}/utils/beds/CpG_and_repeat_filter_cf31_fixed_sorted.bed\n\npython ${MYDIR}/utils/scripts/filtervcf/filterVCF_010717.py fox_15_joint_chr38_trim_annot.vcf.gz | \\\n${INTERSECTBED} -v -sorted -header -a stdin -b ${CPGandREPEATS} | \\\n${BGZIP} > fox_15_joint_chr38_trim_annot_filtered.vcf.gz; \\\n${TABIX} -p vcf fox_15_joint_chr38_trim_annot_filtered.vcf.gz\n\nNOTE: Will recalculate AC, AF, AN.\n\n'''\n\nimport sys\nimport gzip\nimport re\n\nvcf_file = sys.argv[1]\ninVCF = gzip.open(vcf_file, 'r')\n\n#outVCF=open(vcf_file[:-7]+'_filtered.vcf', 'w')\n\nminD=6\n\nmaxD={\n'SNI1_88_F':34,\n'SCA_88_F':49,\n'SCZ_88_M':30,\n'SMI_88_F':44,\n'SNI2_88_F':28,\n'SRO_88_F':29,\n'SCL_88_F':36,\n'GF_SAMO_F':33,\n'GF_GOGA_M':54,\n'SCA_05_M':42,\n'SCL_09_F':39,\n'SCZ_08_F':40,\n'SMI_08_M':41,\n'SNI_00_M':43,\n'SRO_08_M':42}\n\nsamples=[]\nfor line in inVCF:\n\tif line.startswith('##'):\n\t\tpass\n\telse:\n\t\tfor i in line.split()[9:]: samples.append(i)\n\t\tbreak\n\ninVCF.seek(0)\n\n# Filter to check for excess heterozygosity\n#def HETfilter(sample,GT_entry):\n#\tfield=GT_entry.split(':')\n#\tif field[0]=='0/1': return '.'\n\n# Filter to be applied to individual genotypes\ndef GTfilter(sample,GT_entry):\n\tfield=GT_entry.split(':')\n\tif field[0]=='./.': return GT_entry\n\telse:\n\t\tif field[3]!='.' and float(field[3])>=20.0 and field[2]!='.' and minD<=int(field[2])<=maxD[sample]: return GT_entry\n#\t\tif field[3]!='.' 
and field[2]!='.': return GT_entry\n\t\telse: return './.:' + ':'.join(field[1:])\n\nfor line0 in inVCF:\n\n### Write header lines\n\tif line0.startswith('#'): sys.stdout.write(line0); continue\n\n### For all other lines:\n\tline=line0.strip().split('\\t')\n\n### Site filtering:\n\n### Reference must not be N\n\tif line[3]=='N': sys.stdout.write('%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:6]), 'FAIL_refN', '\\t'.join(line[7:])) ); continue\n\n### Alternate allele must not be \n\tif line[4]=='': sys.stdout.write('%s\\t%s\\t%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:4]), '.', line[5] , 'FAIL_bad_alt', '\\t'.join(line[7:])) ); continue\n\n### Only accept sites with minimum QUAL value (min QUAL 30 for variant sites)\n\tif line[5]=='.': sys.stdout.write('%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:6]), 'FAIL_QUAL', '\\t'.join(line[7:])) ); continue\n\tif float(line[5])<30.0: sys.stdout.write('%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:6]), 'FAIL_QUAL', '\\t'.join(line[7:])) ); continue\n\n## Access INFO field data\n\tINFO=line[7].split(';')\n\tf=dict(s.split('=') for s in INFO)\n\n### Only accept sites that are monomorphic or simple SNPs\n\tif f['VariantType'] not in ('NO_VARIATION', 'SNP'): sys.stdout.write('%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:6]), 'FAIL_bad_mut', '\\t'.join(line[7:])) ); continue\n\n### Only accept sites within permissible depth range\n#\tif int(re.search('(?<=DP=)[^;]+', INFO).group(0))>396 or int(re.search('(?<=DP=)[^;]+', INFO).group(0))396: sys.stdout.write('%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:6]), 'FAIL_DP', '\\t'.join(line[7:])) ); continue\n\n### For variant sites: check strand bias\n# Note: previous filter was \"only accept sites with QUAL>=50, and at least 2 observations on each of the F and R strands for alternate alleles\"\n\tif line[4]!='.':\n#\t\tif float(f['FS'])>60.0 or float(f['SOR'])>4.0: sys.stdout.write('%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:6]), 'FAIL_altstrand', '\\t'.join(line[7:])) ); continue\n\t\tif float(f['FS'])>60.0: sys.stdout.write('%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:6]), 'FAIL_altstrand', '\\t'.join(line[7:])) ); continue\n#\t\tif float(f['SOR'])>4.0: sys.stdout.write('%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:6]), 'FAIL_altstrand', '\\t'.join(line[7:])) ); continue\n### Get AB value for use in later het. filtering\n\t\tif 'ABHet' in f: AB=float(f['ABHet'])\n\n### Genotype filtering:\n\n\tmissing,excesshet=0,0\n\n\tGT_list=[]\n\tfor i in range(0,len(samples)):\n\t\tGT=GTfilter(samples[i],line[i+9])\n#\t\tGT=line[i+9]\n\t\tif GT[:3]=='./.': \n\t\t\tmissing+=1\n\t\t\tGT_list.append(GT)\n\t\telse:\n\t\t\tif GT[:3]=='0/1':\n\t\t\t\texcesshet+=1\n\t\t\t\tif 0.2<=AB<=0.8: GT_list.append(GT)\n#\t\t\t\tGT_list.append(GT)\n\t\t\t\telse: \n\t\t\t\t\tGT_list.append('./.' + GT[3:])\n\t\t\t\t\tmissing+=1\n\t\t\telse: GT_list.append(GT)\n\n### Filter out sites with more than 25% missing/failing genotypes, and more than 66% het. 
genotypes\n# Note: previous filter was for > 2/8 missing and > 5/8 hets\n\tif missing>5: sys.stdout.write('%s\\t%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:6]), 'FAIL_missing', '\\t'.join(line[7:9]), '\\t'.join(GT_list)) ); continue\n\tif excesshet>10: sys.stdout.write('%s\\t%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:6]), 'FAIL_excesshet', '\\t'.join(line[7:9]), '\\t'.join(GT_list)) ); continue\n\n### Recalculate INFO fields\n\tREF=2*[x[:3] for x in GT_list].count('0/0') + [x[:3] for x in GT_list].count('0/1')\n\tALT=2*[x[:3] for x in GT_list].count('1/1') + [x[:3] for x in GT_list].count('0/1')\t\n\tf['AC']=ALT\n\tf['AN']=REF+ALT\n\tf['AF']=round(float(ALT)/(float(REF)+float(ALT)), 4)\n\n### Write out new line\n\tsys.stdout.write('%s\\t%s\\t%s\\t%s\\n' % ('\\t'.join(line[0:7]), ';'.join('{0}={1}'.format(key, val) for key, val in sorted(f.items())), line[8], '\\t'.join(GT_list)) )\n\t\t\n\ninVCF.close()\n#outVCF.close()\n\nexit()\n\n","sub_path":"1-MapToreRerence/11_FilterVCF/old/filterVCF_010717.py","file_name":"filterVCF_010717.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"427682906","text":"# -*- coding: utf-8 -*-\n\nAUTHOR = 'My Company'\nSITENAME = 'My Company'\nSITEURL = ''\n\nTHEME = 'theme'\n\nPATH = 'content'\n\nTIMEZONE = 'Australia/Sydney'\n\nDEFAULT_LANG = 'en'\n\n# Cache key\nimport hashlib\nfrom datetime import datetime\nCACHE_KEY = hashlib.md5(datetime.utcnow().isoformat().encode('utf8')).hexdigest()\n\n# Plugins\nPLUGIN_PATHS = ['./plugins']\nPLUGINS = ['category_page']\n\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (\n ('Pelican', 'http://getpelican.com/'),\n ('Python.org', 'http://python.org/'),\n ('Jinja2', 'http://jinja.pocoo.org/'),\n ('You can modify those links in your config file', '#'),\n)\n\n# Social widget\nSOCIAL = (\n ('Telephone', 'tel:+61123456789'),\n ('Fax', 'tel:+61324568'),\n ('Facebook', '#'),\n ('Twitter', '#'),\n)\n\n\nDEFAULT_PAGINATION = False\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"549300369","text":"\"\"\"\n @author : acoto\n @since : 18/9/19\n @Description : eval forms for climate\n\"\"\"\nfrom lxml import etree\nfrom formshare.models.formshare import Odkform\n\ndef requiredFields():\n fields = {\n \"livestock_repeat\": {\n \"fields\": {\n \"livestock_repeat_rowid\": {\"odktype\": \"\"},\n \"live_rep_number\": {\"odktype\": \"calculate\"},\n \"live_name\": {\"odktype\": \"select one\"},\n \"live_label\": {\"odktype\": \"calculate\"},\n \"live_number\": {\"odktype\": \"integer\"},\n \"bee_number\": {\"odktype\": \"integer\"},\n \"tipo_ganaderia\": {\"odktype\": \"select one\"},\n \"tipo_aves\": {\"odktype\": \"select one\"},\n \"tiene_pastos\": {\"odktype\": \"select one\"},\n \"pasto_total\": {\"odktype\": \"decimal\"},\n \"pasto_total_unit\": {\"odktype\": \"select one\"},\n \"pasto_piso\": {\"odktype\": \"decimal\"},\n \"pasto_piso_unit\": {\"odktype\": \"select one\"},\n \"pasto_mejorado\": {\"odktype\": \"decimal\"},\n \"pasto_mejorado_unit\": {\"odktype\": \"select one\"},\n \"pasto_corte\": {\"odktype\": \"decimal\"},\n 
\"pasto_corte_unit\": {\"odktype\": \"select one\"},\n \"ensilaje\": {\"odktype\": \"decimal\"},\n \"ensilaje_unit\": {\"odktype\": \"select one\"},\n \"fecha_pasto\": {\"odktype\": \"date\"},\n \"rowuuid\": {\"odktype\": \"\"}\n\n }\n }, \"crop_repeat\": {\n \"fields\": {\n \"crop_repeat_rowid\": {\"odktype\": \"\"},\n \"crop_rep_number\": {\"odktype\": \"calculate\"},\n \"crop_name\": {\"odktype\": \"select one\"},\n \"crop_label\": {\"odktype\": \"calculate\"},\n \"crop_planted\": {\"odktype\": \"decimal\"},\n \"crop_yield_units\": {\"odktype\": \"select one\"},\n \"crop_irrigated\": {\"odktype\": \"select one\"},\n \"fecha_siembra\": {\"odktype\": \"date\"},\n \"rowuuid\": {\"odktype\": \"\"}\n }\n }, \"maintable\": {\n \"fields\": {\n \"surveyid\": {\"odktype\": \"\"},\n \"originid\": {\"odktype\": \"\"},\n \"_submitted_by\": {\"odktype\": \"text\"},\n \"_xform_id_string\": {\"odktype\": \"text\"},\n \"_submitted_date\": {\"odktype\": \"datetime\"},\n \"_geopoint\": {\"odktype\": \"geopoint\"},\n \"_dummy\": {\"odktype\": \"text\"},\n \"interviewername\": {\"odktype\": \"text\"},\n \"deviceid\": {\"odktype\": \"deviceid\"},\n \"starttime_auto\": {\"odktype\": \"start\"},\n \"starttime_calculated\": {\"odktype\": \"calculate\"},\n \"participation\": {\"odktype\": \"select one\"},\n \"provincia\": {\"odktype\": \"select one\"},\n \"canton\": {\"odktype\": \"select one\"},\n \"distrito\": {\"odktype\": \"select one\"},\n \"caserio_nombre\": {\"odktype\": \"text\"},\n \"respondentname\": {\"odktype\": \"text\"},\n \"i_d\": {\"odktype\": \"text\"},\n \"respondentsex\": {\"odktype\": \"select one\"},\n \"respondent_is_head\": {\"odktype\": \"select one\"},\n \"household_position\": {\"odktype\": \"select one\"},\n \"household_type\": {\"odktype\": \"select one\"},\n \"casa_productor\": {\"odktype\": \"select one\"},\n \"direccion_de_la_casa\": {\"odktype\": \"text\"},\n \"numero_casa\": {\"odktype\": \"integer\"},\n \"numero_movil\": {\"odktype\": \"integer\"},\n \"work_away\": {\"odktype\": \"select one\"},\n \"children_under_4\": {\"odktype\": \"integer\"},\n \"children_4to10\": {\"odktype\": \"integer\"},\n \"children_4to10_educacion\": {\"odktype\": \"select one\"},\n \"males11to24\": {\"odktype\": \"integer\"},\n \"females11to24\": {\"odktype\": \"integer\"},\n \"males25to50\": {\"odktype\": \"integer\"},\n \"females25to50\": {\"odktype\": \"integer\"},\n \"malesover50\": {\"odktype\": \"integer\"},\n \"femalesover50\": {\"odktype\": \"integer\"},\n \"adultsover65\": {\"odktype\": \"integer\"},\n \"adultsover65_pension\": {\"odktype\": \"select one\"},\n \"land_tenure\": {\"odktype\": \"select all that apply\"},\n \"landowned\": {\"odktype\": \"decimal\"},\n \"unitland_owned\": {\"odktype\": \"select one\"},\n \"land_ownership\": {\"odktype\": \"select all that apply\"},\n \"landrentin\": {\"odktype\": \"decimal\"},\n \"unitland_rentin\": {\"odktype\": \"select one\"},\n \"landrentout\": {\"odktype\": \"decimal\"},\n \"unitland_rentout\": {\"odktype\": \"select one\"},\n \"landcultivated\": {\"odktype\": \"decimal\"},\n \"unitland\": {\"odktype\": \"select one\"},\n \"areaunits_other\": {\"odktype\": \"text\"},\n \"agua_finca\": {\"odktype\": \"select one\"},\n \"agua_disponible\": {\"odktype\": \"select one\"},\n \"grow_crops\": {\"odktype\": \"select one\"},\n \"crops\": {\"odktype\": \"select all that apply\"},\n \"crop_count\": {\"odktype\": \"calculate\"},\n \"crops_other1\": {\"odktype\": \"text\"},\n \"crops_other2\": {\"odktype\": \"text\"},\n \"crops_other3\": {\"odktype\": 
\"text\"},\n \"livestock_owners\": {\"odktype\": \"select one\"},\n \"livestock\": {\"odktype\": \"select all that apply\"},\n \"livestock_count\": {\"odktype\": \"calculate\"},\n \"tiene_ensilaje\": {\"odktype\": \"select one\"},\n \"ensilaje\": {\"odktype\": \"decimal\"},\n \"ensilaje_unit\": {\"odktype\": \"select one\"},\n \"offfarm_incomes_any\": {\"odktype\": \"select one\"},\n \"offfarm_income_ag\": {\"odktype\": \"select one\"},\n \"prop_onfarm\": {\"odktype\": \"select one\"},\n \"prop_crops\": {\"odktype\": \"select one\"},\n \"gps\": {\"odktype\": \"geopoint\"},\n \"endtime_auto\": {\"odktype\": \"end\"},\n \"endtime_calculated\": {\"odktype\": \"calculate\"},\n \"rowuuid\": {\"odktype\": \"\"}\n }\n }\n }\n return fields\n\n\ndef getFormId(self,schema):\n result=self.request.dbsession.query(Odkform.form_id).filter(Odkform.form_schema==schema).first()\n\n return result.form_id\n\ndef validateForm(create_file):\n myTables = requiredFields()\n tree = etree.parse(create_file)\n root = tree.getroot()\n\n err = []\n\n if root.find(\".//table[@name='crop_repeat']\") is None and root.find(\".//table[@name='livestock_repeat']\") is None:\n return False, \"Este usuario solo puede subir formularios aptos para sequia\"\n\n for k in myTables.keys():\n table = root.find(\".//table[@name='\" + k + \"']\")\n if table is not None:\n t_fields = list(myTables[k][\"fields\"].keys())\n for i in table.iterchildren():\n if i.tag == \"field\":\n if i.attrib[\"name\"] in t_fields:\n t_fields.remove(i.attrib[\"name\"])\n odktype = myTables[k][\"fields\"][i.attrib[\"name\"]][\"odktype\"]\n if str(i.attrib[\"odktype\"]) != str(odktype):\n err.append(\"En el campo %s el tipo de dato debe ser: %s\\n\" % (i.attrib[\"name\"], odktype))\n if len(t_fields) is not 0:\n err.append(\"En la tabla %s faltan los siguientes campos: %s\\n\" % (k, \", \".join(t_fields)))\n else:\n err.append(\"No se encuentra la tabla: %s\\n\" % k)\n\n if len(err) is 0:\n return True, \"\"\n else:\n return False, \" - \".join(err)\n","sub_path":"ext_climate/processes/evaluateForm.py","file_name":"evaluateForm.py","file_ext":"py","file_size_in_byte":7674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"519562545","text":"\"\"\"The WaveBlocks Project\n\nThis file contains some ready made potentials in several variables\nand with several separate energy levels. This is a pure data file\nwithout any code. To load the potentials, use the methods of\n:py:class:`BlockFactory`.\n\n@author: R. Bourquin\n@copyright: Copyright (C) 2010, 2011, 2012 R. 
Bourquin\n@license: Modified BSD License\n\"\"\"\n\n####################################################################\n# Potentials in one dimension and with one energy level (D=1, N=1) #\n####################################################################\n\n# Free particle\nfree_particle = {}\nfree_particle[\"variables\"] = [\"x\"]\nfree_particle[\"potential\"] = \"c\"\nfree_particle[\"defaults\"] = {\"c\":\"0\"}\n\n# Simple harmonic potential 1D\nquadratic = {}\nquadratic[\"variables\"] = [\"x\"]\nquadratic[\"potential\"] = \"1/2 * sigma * x**2\"\nquadratic[\"defaults\"] = {\"sigma\":\"1/2\"}\n\n# Perturbed harmonic potential\npert_quadratic = {}\npert_quadratic[\"variables\"] = [\"x\"]\npert_quadratic[\"potential\"] = \"1/2 * sigma * x**2 + 1/2 * delta**2 * x**2\"\npert_quadratic[\"defaults\"] = {\"sigma\":0.05, \"delta\":0.2}\n\n# A simple fourth order anharmonic potential\nquartic = {}\nquartic[\"variables\"] = [\"x\"]\nquartic[\"potential\"] = \"1/4 * sigma * x**4\"\nquartic[\"defaults\"] = {\"sigma\":0.05}\n\n# A potential consisting of a cosine wave\ncos_osc = {}\ncos_osc[\"variables\"] = [\"x\"]\ncos_osc[\"potential\"] = \"a * (1 - cos(b*x))\"\ncos_osc[\"defaults\"] = {\"a\":0.07, \"b\":1.0}\n\n# A potential consisting of a hyperbolic cosine\ncosh_osc = {}\ncosh_osc[\"variables\"] = [\"x\"]\ncosh_osc[\"potential\"] = \"a * cosh(b * x)\"\ncosh_osc[\"defaults\"] = {\"a\":\"1\", \"b\":\"1\"}\n\n# The Morse potential\nmorse = {}\nmorse[\"variables\"] = [\"x\"]\nmorse[\"potential\"] = \"D * (1 - exp(-a*(x-x0)))**2\"\nmorse[\"defaults\"] = {\"D\":3.0, \"a\":0.3, \"x0\":0.0}\n\n# A double well potential\ndouble_well = {}\ndouble_well[\"variables\"] = [\"x\"]\ndouble_well[\"potential\"] = \"sigma * (x**2 - 1)**2\"\ndouble_well[\"defaults\"] = {\"sigma\":1.0}\n\n# The Eckart potential\neckart = {}\neckart[\"variables\"] = [\"x\"]\neckart[\"potential\"] = \"sigma * cosh(x/a)**(-2)\"\neckart[\"defaults\"] = {\"sigma\":100*3.8088*10**(-4), \"a\":1.0/(2.0*0.52918)}\n\n# A smooth unitstep like wall\nwall = {}\nwall[\"variables\"] = [\"x\"]\nwall[\"potential\"] = \"atan(sigma*x) + pi/2\"\nwall[\"defaults\"] = {\"sigma\":10.0}\n\n# A narrow 'V'-like potential\nv_shape = {}\nv_shape[\"variables\"] = [\"x\"]\nv_shape[\"potential\"] = \"1/2 * sqrt(tanh(x)**2+4*delta**2)\"\nv_shape[\"defaults\"] = {\"delta\":0.2}\n\n\n#####################################################################\n# Potentials in two dimensions and with one energy level (D=2, N=1) #\n#####################################################################\n\n# Simple harmonic potential 2D\nquadratic_2d = {}\nquadratic_2d[\"variables\"] = [\"x\", \"y\"]\nquadratic_2d[\"potential\"] = \"1/2 * (sigmax * x**2 + sigmay * y**2)\"\nquadratic_2d[\"defaults\"] = {\"sigmax\":\"1/2\", \"sigmay\":\"1/2\"}\n\n# A potential consisting of a cosine wave part in 2D\ncos_osc_2d = {}\ncos_osc_2d[\"variables\"] = [\"x\", \"y\"]\ncos_osc_2d[\"potential\"] = \"ax * (1 - cos(bx*x)) + ay * (1 - cos(by*y))\"\ncos_osc_2d[\"defaults\"] = {\"ax\":\"1\", \"bx\":\"1\", \"ay\":\"1\", \"by\":\"1\"}\n\n# A potential consisting of a hyperbolic cosine\ncosh_osc_2d = {}\ncosh_osc_2d[\"variables\"] = [\"x\", \"y\"]\ncosh_osc_2d[\"potential\"] = \"a * cosh(b * sqrt(x**2+y**2))\"\ncosh_osc_2d[\"defaults\"] = {\"a\":\"1\", \"b\":\"1\"}\n\n# A potential consisting of a hyperbolic cosine\ncorral_rotsym_2d = {}\ncorral_rotsym_2d[\"variables\"] = [\"x\", \"y\"]\ncorral_rotsym_2d[\"potential\"] = \"atan(sigma*(sqrt(x**2+y**2) - R)) + 
pi/2\"\ncorral_rotsym_2d[\"defaults\"] = {\"sigma\":\"10\", \"R\":\"8\"}\n\n# A potential consisting of circular pit of radius R\ncircle_pit_2d = {}\ncircle_pit_2d[\"variables\"] = [\"x\", \"y\"]\ncircle_pit_2d[\"potential\"] = \"atan(sigma*(sqrt(x**2+y**2) - R)) + pi/2\"\ncircle_pit_2d[\"defaults\"] = {\"sigma\":\"10\", \"R\":\"8\"}\n\n# A potential consisting of a ring like corral\ncorral_ring = {}\ncorral_ring[\"variables\"] = [\"x\", \"y\"]\ncorral_ring[\"potential\"] = \"sqrt(delta**2 + tanh(sqrt(x**2 + y**2) - R)**2*tanh(sqrt(x**2 + y**2) + R)**2)/2\"\ncorral_ring[\"defaults\"] = {\"delta\":\"1/32\", \"R\":\"3\"}\n\n\n\n\n#######################################################################\n# Potentials in three dimensions and with one energy level (D=3, N=1) #\n#######################################################################\n\n# Simple harmonic potential 3D\nquadratic_3d = {}\nquadratic_3d[\"variables\"] = [\"x\", \"y\", \"z\"]\nquadratic_3d[\"potential\"] = \"1/2 * (sigmax * x**2 + sigmay * y**2 + sigmaz * z**2)\"\nquadratic_3d[\"defaults\"] = {\"sigmax\":\"1/2\", \"sigmay\":\"1/2\", \"sigmaz\":\"1/2\"}\n\n\n\n\n#####################################################################\n# Potentials in one dimension and with two energy levels (D=1, N=2) #\n#####################################################################\n\n# Double harmonic potential for two components\ntwo_quadratic = {}\ntwo_quadratic[\"variables\"] = [\"x\"]\ntwo_quadratic[\"potential\"] = [[\"1/2*sigma*x**2\", \"0\" ],\n [\"0\", \"1/2*sigma*x**2\"]]\ntwo_quadratic[\"defaults\"] = {\"sigma\":0.05}\n\n# Double quartic anharmonic potential for two components\ntwo_quartic = {}\ntwo_quartic[\"variables\"] = [\"x\"]\ntwo_quartic[\"potential\"] = [[\"1/4*sigma*x**4\", \"0\" ],\n [\"0\", \"1/8*sigma*x**4\"]]\ntwo_quartic[\"defaults\"] = {\"sigma\":\"1\"}\n\n# A potential with a single avoided crossing\ndelta_gap = {}\ndelta_gap[\"variables\"] = [\"x\"]\ndelta_gap[\"potential\"] = [[\"1/2 * tanh(x)\", \"delta\" ],\n [\"delta\", \"-1/2 * tanh(x)\"]]\n\n# Diagonalized single avoided crossing\ndelta_gap_diag = {}\ndelta_gap[\"variables\"] = [\"x\"]\ndelta_gap_diag[\"potential\"] = [[\"sqrt(delta**2 + tanh(x)**2/4)\", \"0\" ],\n [\"0\", \"-sqrt(delta**2 + tanh(x)**2/4)\"]]\n\n# A potential with two avoided crossings in series\ntwo_crossings = {}\ntwo_crossings[\"variables\"] = [\"x\"]\ntwo_crossings[\"potential\"] = [[\"tanh(x-rho)*tanh(x+rho)/2\", \"delta/2\" ],\n [\"delta/2\", \"-tanh(x-rho)*tanh(x+rho)/2\"]]\ntwo_crossings[\"defaults\"] = {\"rho\":3.0}\n\n\n######################################################################\n# Potentials in two dimensions and with two energy levels (D=2, N=2) #\n######################################################################\n\ndelta_gap_rotsym = {}\ndelta_gap_rotsym[\"variables\"] = [\"x\", \"y\"]\ndelta_gap_rotsym[\"potential\"] = [[\"tanh(sqrt(x**2 + y**2))/2\", \"delta\"],\n [\"delta\" , \"-tanh(sqrt(x**2 + y**2))/2\"]]\n\nconic = {}\nconic[\"variables\"] = [\"x\", \"y\"]\nconic[\"potential\"] = [[\"x\", \"y\"],\n [\"y\", \"-x\"]]\n\nconic_avoided = {}\nconic_avoided[\"variables\"] = [\"x\", \"y\"]\nconic_avoided[\"potential\"] = [[\"x\", \"sqrt(y**2+delta**2)\"],\n [\"sqrt(y**2+delta**2)\", \"-x\" ]]\nconic_avoided[\"defaults\"] = {\"delta\":1.0}\n\nconic_avoided_c = {}\nconic_avoided_c[\"variables\"] = [\"x\", \"y\"]\nconic_avoided_c[\"potential\"] = [[\"x\", \"y + I*delta\"],\n [\"y - I*delta\", \"-x\" 
]]\n\n\n\n\n########################################################################\n# Potentials in three dimensions and with two energy levels (D=3, N=2) #\n########################################################################\n\n\n\n\n#######################################################################\n# Potentials in one dimension and with three energy levels (D=1, N=3) #\n#######################################################################\n\n# Decoupled harmonic potentials for three components\nthree_quadratic = {}\nthree_quadratic[\"variables\"] = [\"x\"]\nthree_quadratic[\"potential\"] = [[\"1/2 * sigma * x**2\", \"0\", \"0\" ],\n [\"0\", \"1/2 * sigma * x**2\", \"0\" ],\n [\"0\", \"0\", \"1/2 * sigma * x**2\"]]\nthree_quadratic[\"defaults\"] = {\"sigma\":0.05}\n\n# A potential with three energy levels and multiple crossings\nthree_levels = {}\nthree_levels[\"variables\"] = [\"x\"]\nthree_levels[\"potential\"] = [[\"tanh(x+rho) + tanh(x-rho)\", \"delta1\", \"delta2\" ],\n [\"delta1\", \"-tanh(x+rho)\", \"0\" ],\n [\"delta2\", \"0\", \"1 - tanh(x-rho)\"]]\nthree_levels[\"defaults\"] = {\"rho\":3.0}\n\n\n########################################################################\n# Potentials in two dimensions and with three energy levels (D=2, N=3) #\n########################################################################\n\n\n##########################################################################\n# Potentials in three dimensions and with three energy levels (D=3, N=3) #\n##########################################################################\n\n\n\n\n######################################################################\n# Potentials in one dimension and with four energy levels (D=1, N=4) #\n######################################################################\n\n# Decoupled harmonic potentials for four components\nfour_quadratic = {}\nfour_quadratic[\"variables\"] = [\"x\"]\nfour_quadratic[\"potential\"] = [[\"1/2 * sigma * x**2\", \"0\", \"0\", \"0\" ],\n [\"0\", \"1/2 * sigma * x**2\", \"0\", \"0\" ],\n [\"0\", \"0\", \"1/2 * sigma * x**2\", \"0\" ],\n [\"0\", \"0\", \"0\", \"1/2 * sigma * x**2\"]]\nfour_quadratic[\"defaults\"] = {\"sigma\":0.05}\n\n# Harmonic and higher order anharmonic potentials for four components\nfour_powers = {}\nfour_powers[\"variables\"] = [\"x\"]\nfour_powers[\"potential\"] = [[\"1/2 * sigma * x**2\", \"0\", \"0\", \"0\" ],\n [\"0\", \"1/4 * sigma * x**4\", \"0\", \"0\" ],\n [\"0\", \"0\", \"1/6 * sigma * x**6\", \"0\" ],\n [\"0\", \"0\", \"0\", \"1/8 * sigma * x**8\"]]\nfour_powers[\"defaults\"] = {\"sigma\":0.05}\n\n\n\n\n######################################################################\n# Potentials in one dimension and with five energy levels (D=1, N=5) #\n######################################################################\n\n# Decoupled harmonic potential for five components\nfive_quadratic = {}\nfive_quadratic[\"variables\"] = [\"x\"]\nfive_quadratic[\"potential\"] = [[\"1/2 * sigma * x**2\", \"0\", \"0\", \"0\", \"0\" ],\n [\"0\", \"1/2 * sigma * x**2\", \"0\", \"0\", \"0\" ],\n [\"0\", \"0\", \"1/2 * sigma * x**2\", \"0\", \"0\" ],\n [\"0\", \"0\", \"0\", \"1/2 * sigma * x**2\", \"0\" ],\n [\"0\", \"0\", \"0\", \"0\", \"1/2 * sigma * x**2\"]]\nfive_quadratic[\"defaults\"] = {\"sigma\":0.05}\n","sub_path":"src/WaveBlocksND/PotentialLibrary.py","file_name":"PotentialLibrary.py","file_ext":"py","file_size_in_byte":11543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
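Each entry in the potential library that closes above is a plain dictionary of sympy-parsable strings plus default parameter values, which is what lets :py:class:`BlockFactory` turn them into callables. A minimal round-trip sketch using only `sympy` and the `quadratic` entry; the real loading API lives elsewhere in WaveBlocks, so this is purely illustrative:

```python
import sympy

# the "quadratic" entry from the library above
variables = ["x"]
potential = "1/2 * sigma * x**2"
defaults = {"sigma": "1/2"}

# parse the expression and substitute the default parameter values
expr = sympy.sympify(potential).subs(
    {name: sympy.sympify(value) for name, value in defaults.items()}
)

# compile the symbolic expression into a fast numerical callable
V = sympy.lambdify(sympy.symbols(variables), expr, "numpy")
print(V(2.0))  # 1/2 * 1/2 * 2**2 == 1.0
```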
+{"seq_id":"646671781","text":"from abc import ABCMeta\nfrom typing import Type, TypeVar\n\nfrom odmantic.model import Model\n\nModelType = TypeVar(\"ModelType\")\n\n\nclass BaseEngineException(Exception, metaclass=ABCMeta):\n \"\"\"Base Exception raised by the engine while operating on a model.\"\"\"\n\n def __init__(self, message: str, model: Type[Model]):\n self.model: Type[Model] = model\n super().__init__(message)\n\n\nclass DocumentNotFoundError(BaseEngineException):\n \"\"\"The targetted document has not been found by the engine.\n\n Attributes:\n instance: the instance that has not been found\n \"\"\"\n\n def __init__(self, instance: Model):\n self.instance: Model = instance\n super().__init__(\n f\"Document not found for : {type(instance).__name__}. \"\n f\"Instance: {self.instance}\",\n type(instance),\n )\n","sub_path":"odmantic/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"562822676","text":"##\n# Programación en Python\n# ===========================================================================\n##\n# La columna 3 del archivo `data.csv` contiene una fecha en formato\n# `YYYY-MM-DD`. Imprima la cantidad de registros por cada mes separados\n# por comas, tal como se muestra a continuación.\n##\n# Rta/\n# 01,3\n# 02,4\n# 03,2\n# 04,4\n# 05,3\n# 06,3\n# 07,5\n# 08,6\n# 09,3\n# 10,2\n# 11,2\n# 12,3\n##\n# >>> Escriba su codigo a partir de este punto <<<\n##\nimport csv\n\n# Definicion de variables\nruta = './data.csv'\narchivo = []\ncolumna = []\nmes = []\nconteo = {}\n\n# Lectura de archivo\nwith open(ruta, 'r') as f:\n x = csv.reader(f, delimiter='\\t', quoting=csv.QUOTE_NONE)\n # se lee una linea a la vez\n for r in x:\n archivo.append(r)\n\n# Definicion de objetos de trabajo\n# columna = [z[2] for z in archivo[0:]]\nfor z in (archivo[0:]):\n columna.append(z[2])\n# mes = [z[5:7] for z in columna[0:]]\nfor z in columna[0:]:\n mes.append(z[5:7])\n\n# Cuenta mes\nfor numero in mes:\n conteo[numero] = mes.count(numero)\n# Ordena diccionario\n# [print(key, conteo[key], sep=',') for key in sorted(conteo.keys())]\nfor key in sorted(conteo.keys()):\n print(key, conteo[key], sep=',')\n","sub_path":"03-python=1/q04=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"599869656","text":"def solution(N, number):\n\tif N == number:\n\t\treturn 1\n\n\tdp = [set() for _ in range(9)]\n\tfor i in range(1, 9):\n\t\tdp[i].add(int(str(N) * i))\n\n\tfor i in range(2, 9):\n\t\tfor j in range(1, i):\n\t\t\tfor a in dp[j]:\n\t\t\t\tfor b in dp[i - j]:\n\t\t\t\t\tdp[i].add(a + b)\n\t\t\t\t\tdp[i].add(a * b)\n\t\t\t\t\tdp[i].add(a - b)\n\t\t\t\t\tif b:\n\t\t\t\t\t\tdp[i].add(a // b)\n\t\tif number in dp[i]:\n\t\t\treturn i\n\n\treturn -1\n\n\nif __name__ == '__main__':\n\tN, number = map(int, input().split())\n\tprint(solution(N, number))","sub_path":"Programmers/MakeN.py","file_name":"MakeN.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"366417792","text":"from unittest import TestCase\nimport requests\n\nimport main\n\n\nclass MainTests(TestCase):\n def test_result(self):\n self.assertEqual(main.is_spam(\"spam spam http://bit.ly/2yTkW52\", [\"www.filekok.com\"], 1), False)\n self.assertEqual(main.is_spam(\"spam spam 
http://bit.ly/2yTkW52\", [\"goo.gl\"], 1), True)\n self.assertEqual(main.is_spam(\"spam spam http://bit.ly/2yTkW52\", [\"tvtv24.com\"], 2), True)\n self.assertEqual(main.is_spam(\"spam spam http://bit.ly/2yTkW52\", [\"www.filekok.com\"], 2), False)\n self.assertEqual(main.is_spam(\"spam spam http://bit.ly/2yTkW52\", [\"www.filekok.com\"], 3), True)\n\n def test_extract_url_in_content(self):\n self.assertSetEqual(main.extract_url_in_content(\"spam spam http://bit.ly/2yTkW52\"), {\"http://bit.ly/2yTkW52\",})\n self.assertSetEqual(main.extract_url_in_content(\"spam spam http://bit.ly/2yTkW52 spam http://www.filekok.com\"),\n {\"http://bit.ly/2yTkW52\", \"http://www.filekok.com\"})\n\n def test_get_redirected_url(self):\n resp = requests.get(\"http://bit.ly/2yTkW52\")\n self.assertEqual(main.get_redirection_url(resp), [\"https://goo.gl/nVLutc\",\n \"http://tvtv24.com/view.php?id=intro&no=58&query=\"\n \"%EC%96%B4%EC%84%9C%EC%99%80!%20%ED%95%9C%EA%B5%AD%EC%9D%80\"\n \"%20%EC%B2%98%EC%9D%8C%EC%9D%B4%EC%A7%80%20E09%20170921\"])\n\n resp = requests.get(\"https://goo.gl/nVLutc\")\n self.assertEqual(main.get_redirection_url(resp), [\"http://tvtv24.com/view.php?id=intro&no=58&query=\"\n \"%EC%96%B4%EC%84%9C%EC%99%80!%20%ED%95%9C%EA%B5%AD%EC%9D%80\"\n \"%20%EC%B2%98%EC%9D%8C%EC%9D%B4%EC%A7%80%20E09%20170921\"])\n\n def test_count_redirection_depth(self):\n # 2단계 리다이렉트\n resp = requests.get(\"http://bit.ly/2yTkW52\")\n count = main.count_redirection_depth(resp)\n self.assertEqual(count, 2)\n\n # 1단계 리다이렉트\n resp = requests.get(\"https://goo.gl/nVLutc\")\n count = main.count_redirection_depth(resp)\n self.assertEqual(count, 1)\n\n # 리다이렉트 없음\n resp = requests.get(\"http://tvtv24.com/view.php?id=intro&no=58&query=\"\n \"%EC%96%B4%EC%84%9C%EC%99%80!%20%ED%95%9C%EA%B5%AD%EC%9D%80\"\n \"%20%EC%B2%98%EC%9D%8C%EC%9D%B4%EC%A7%80%20E09%20170921\")\n count = main.count_redirection_depth(resp)\n self.assertEqual(count, 0)\n\n def test_get_a_url_in_html(self):\n self.assertEqual(main.get_url_in_html(\n \"\"\"\n \n \n \n \n \n \n 어서와! 한국은 처음이지 E09 170921\n \n \n \n \n \n \n \n \n \n \n
\n [tag-stripped HTML body elided: Korean file-sharing page for \"어서와! 한국은 처음이지 E09 170921\", containing a download-details table (number, uploader, date 2018-12-22, rating, download count, server status), a file list whose download links point at www.fileok.com and www.fileok2.com, seven user comments dated 2018-12-22, and a site disclaimer]\n
\n \n \n \n \n \n \n \n \n \n \"\"\"),\n {\"http://www.fileok.com\", \"http://www.fileok2.com\"})\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":13201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"367540565","text":"from collections import deque\nfrom sys import stdin\n\ninput = stdin.readline\n\ndef solution(N):\n # 0 ~ 2번 피보나치 결과\n arr = [0, 1, 1]\n # 3번 부터 for 문 돌려서 arr 에 쌓아 놓기\n for i in range(3, N + 1):\n arr.append(arr[i - 1] + arr[i - 2])\n return arr[N]\n\n \nif __name__ == '__main__':\n N = int(input())\n print(solution(N))\n","sub_path":"Coding_Solve/20210817/4150.py","file_name":"4150.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"447625737","text":"#encoding: utf-8\nfrom OpenOrange import *\n\nParentEmployeeWindow = SuperClass(\"EmployeeWindow\",\"AddressableWindow\",__file__)\n\nclass EmployeeWindow(ParentEmployeeWindow):\n \n def afterEdit(self, fieldname):\n ParentEmployeeWindow.afterEdit(self, fieldname)\n bt = self.getRecord()\n if (fieldname == \"CashPaid\"):\n bt.pasteCashPaid()\n\n\n def afterEditRow(self, fieldname, rowfieldname, rownr):\n ParentEmployeeWindow.afterEditRow(self, fieldname, rowfieldname, rownr)\n bt = self.getRecord()\n if (fieldname == \"EmployeeSalaryRows\"):\n btrow = bt.EmployeeSalaryRows[rownr]\n if (rowfieldname == \"SalaryType\"):\n btrow.pasteSalaryType(bt)\n\n def afterDeleteRow(self, detailfieldname, rownr):\n bt = self.getRecord()\n if detailfieldname == \"EmployeeRows\":\n bt.sumUp()\n","sub_path":"extra/PayRoll/windows/EmployeeWindow.py","file_name":"EmployeeWindow.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"523829680","text":"import json\nfrom copy import deepcopy\n\nimport requests\nfrom falcon.uri import parse_query_string\n\nfrom service.conf_reader import ConfReader\nfrom service.conf_reader import Singleton\n\n\ndef validate_token(func):\n \"\"\"\n Decorator that forces the validation of the token\n :param func: FUnction to decorate\n \"\"\"\n\n def func_wrapper(*args, **kwargs):\n Authenticate.validate_token()\n return func(*args, **kwargs)\n\n return func_wrapper\n\n\nclass Authenticate(metaclass=Singleton):\n \"\"\"\n Class to manage the authentication process.\n It is a singleton so only one entity manages this process.\n \"\"\"\n\n __TOKEN__ = None\n\n @staticmethod\n def get_token():\n return Authenticate.__TOKEN__\n\n @staticmethod\n def get_header():\n Authenticate.validate_token()\n return {'X-Auth-Token': Authenticate.__TOKEN__}\n\n @staticmethod\n def validate_token():\n Authenticate.__authenticate__(validate=True)\n\n @staticmethod\n def __authenticate__(validate=False):\n \"\"\"\n Group of functions to authenticate the user on the MONASCA. 
This method can also validate the current token\n acquiring a new one if needed.\n :param validate: Validates the token and gets a new one if needed\n \"\"\"\n\n def __get_auth_obj__():\n return json.loads(ConfReader().get('MONASCA_KEYSTONE', 'auth_obj', raw=True))\n\n def __build_path__():\n base_path = conf_obj.get('keystone')\n return base_path + 'auth/tokens' if base_path.endswith('/') else base_path + '/auth/tokens'\n\n def __auth__(auth_obj, path):\n resp = requests.post(path, json=auth_obj, headers={'Content-Type': 'application/json'},\n timeout=ConfReader().get('MONASCA', 'timeout'))\n\n if resp.status_code == 201:\n Authenticate.__TOKEN__ = resp.headers['X-Subject-Token']\n else:\n raise ValueError(\"Can't obtain master authentication to retrieve metrics: status code {} and message\"\n \"{}\".format(resp.status_code, resp.json()))\n\n def __validate__(path):\n if not Authenticate.get_token():\n __auth__(__get_auth_obj__(), __build_path__())\n resp = requests.get(path, headers={'X-Auth-Token': Authenticate.get_token(),\n 'X-Subject-Token': Authenticate.get_token()},\n timeout=ConfReader().get('MONASCA', 'timeout'))\n\n if resp != 200:\n __auth__(__get_auth_obj__(), path)\n\n conf_obj = ConfReader().get_section_dict('MONASCA_KEYSTONE')\n if not validate:\n __auth__(__get_auth_obj__(), __build_path__())\n else:\n __validate__(__build_path__())\n\n\ndef paginate(func):\n \"\"\"\n Decorator to force the pagination rewrite\n :param func:\n :return:\n \"\"\"\n\n def func_wrapper(raw_obj, uri, req):\n if uri:\n links = deepcopy(raw_obj)\n pagination_links(links, uri, req)\n resource = func(raw_obj, uri, req)\n resource['links'] = links.get('links')\n else:\n resource = func(raw_obj, uri, req)\n return resource\n\n return func_wrapper\n\n\ndef pagination_links(raw_obj, uri, req):\n \"\"\"\n Writes the pagination with the new URI\n :param raw_obj: The object containing the pagination objects\n :param uri: The URI string that originated the request\n \"\"\"\n\n if 'links' not in raw_obj:\n return\n\n for link in raw_obj.get('links'):\n\n if link.get('rel') == 'next':\n qs = parse_query_string(link.get('href').rsplit('?')[-1])\n # Collect offset and limit\n offset = qs.get('offset', None)\n limit = qs.get('limit', None)\n\n # Collect base uri\n if '?' 
in uri:\n uri = req.uri.split('?')[0]\n else:\n uri = req.uri\n\n # Insert limit and offset for pagination\n uri = uri + '?'\n if offset:\n uri = uri + 'offset=' + offset + '&'\n if limit:\n uri = uri + 'limit=' + limit + '&'\n\n # Remove old limit and offset\n req.context.get('query_parameters').pop('offset', None)\n req.context.get('query_parameters').pop('limit', None)\n\n # Append remaining parameters\n query_string = '&'.join(\n ['{}={}'.format(key, value) for key, value in req.context.get('query_parameters').items()])\n\n uri = uri + query_string\n link['href'] = uri\n print(uri)\n\n elif link.get('rel') == 'self':\n link['href'] = req.uri\n","sub_path":"MonitoringService/service/monasca_helper.py","file_name":"monasca_helper.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"552506561","text":"# Se importa el control requerido automáticamente\n\nfrom state import state\nimport wpilib \n\nif state[\"Controller\"] == \"PacificRim\":\n\timport PacificRim as Controller_inputs\n\nelif state[\"Controller\"] == \"ControlPiko\":\n\timport ControlPiko as Controller_inputs\n\nelif state[\"Controller\"] == \"ControlPelon\":\n\timport ControlPelon as Controller_inputs\n\n\n\ndef read_control_inputs(control_type):\n\n\tif control_type == \"PacificRim\":\n\n\t\tread_chasis_inputs(0)\n\t\tread_abilities_inputs(1)\n\n\telif control_type == \"ControlPiko\" or control_type == \"ControlPelon\":\n\n\t\tread_abilities_inputs(0)\n\t\tread_chasis_inputs(0)\n\n\telse:\n\n\t\tprint (\"Non-existent control type\")\n\t\twpilib.DriverStation.reportWarning(str(\"Non-existent control type\"),True)\n\n\ndef read_chasis_inputs(control_port):\n\n\tchasis_controller = wpilib.Joystick(control_port)\n\n\tx = chasis_controller.getX()\n\tstate[\"mov_x\"] = x\n\n\ty = chasis_controller.getY()\n\tstate[\"mov_y\"] = y\n\n\tz = chasis_controller.getRawAxis(4)\n\tstate[\"mov_z\"] = z\n\n\talign_button = chasis_controller.getRawButton(Controller_inputs.accomodate)\n\tstate[\"align_activated\"] = align_button\n\n\t# button_2 = chasis_controller.getRawButton(Controller_inputs.turbo)\n\t# state[\"turbo_activated\"] = button_2\n\n\ndef read_abilities_inputs(control_port):\n\n\tabilities_controller = wpilib.Joystick(control_port)\n\n\t# Codewide button breaker\n\n\tbutton_breaker = abilities_controller.getRawButton(Controller_inputs.button_breaker)\n\tstate[\"codewide_breaker\"] = button_breaker\n\n\t# botones del elevador y predeterminados\n\n\tPOV = wpilib.interfaces.GenericHID(control_port)\n\n\teje_t = abilities_controller.getZ()\n\teje_z =abilities_controller.getThrottle()\n\n\tbutton_medio_piston = abilities_controller.getRawButton(Controller_inputs.up_platform_middle_piston)\n\tbutton_alto_piston = abilities_controller.getRawButton(Controller_inputs.up_platform_high_piston)\n\n\n\tbutton_2 = abilities_controller.getRawButton(Controller_inputs.turbo)\n\tstate[\"turbo_activated\"] = button_2\n\n\t# Uso de los botones\n\n\n\tif POV.getPOV() == 180 and state[\"Controller\"] == \"PacificRim\" or state[\"Controller\"] == \"ControlPiko\" and eje_t > 0:\n\t\tstate[\"lift_motor\"] = 0.5\n\n\telif POV.getPOV() == 0 and state[\"Controller\"] == \"PacificRim\" or state[\"Controller\"] == \"ControlPiko\" and eje_z > 0:\n\t\tstate[\"lift_motor\"] = -1\n\telse:\n\t\tstate[\"lift_motor\"] = 0\n\n\t\n\tif button_medio_piston:\n\n\t\tstate[\"position\"] = \"media\"\n\t\tstate[\"mechanism\"] = \"piston\"\n\n\telif 
button_alto_piston:\n\n\t\tstate[\"position\"] = \"high\"\n\t\tstate[\"mechanism\"] = \"piston\"\n\n\n\t#Inputs de Solenoides, pistones, wheelers y subir o bajar garra (los cuales se quitaron del robot asi que ya solo queda lo del piston)\n\n\timpulsor_on = abilities_controller.getRawButton(Controller_inputs.on_and_off_impulsor)\n\tstate[\"impulsor_on\"] = impulsor_on\n\n\tturn_piston_on = abilities_controller.getRawButton(Controller_inputs.on_and_off_piston)\n\t\n\timpulsor_on_button = abilities_controller.getRawButton(Controller_inputs.manual_impulsor_on)\n\n\timpulsor_off_button = abilities_controller.getRawButton(Controller_inputs.manual_impulsor_off)\n\n\n\t#Configuracion para el uso de pistones e impulsores\n\n\tif turn_piston_on or state[\"timer_piston\"] != 0:\n\t\tstate[\"timer_piston\"] += 1\n\t\tif state[\"timer_piston\"] < 35: \n\t\t\tstate[\"piston_activated\"] = True\n\t\telif state[\"timer_piston\"] < 60:\n\t\t\tstate[\"piston_activated\"] = False\n\t\telse:\n\t\t\tstate[\"timer_piston\"] = 0\n\n\n\tif impulsor_on_button:\n\n\t\tstate[\"impulsor_situation_front\"] = 1\n\t\tstate[\"impulsor_situation_trasero\"] = 1\n\n\tif impulsor_off_button:\n\n\t\tstate[\"impulsor_situation_front\"] = 2\n\t\tstate[\"impulsor_situation_trasero\"] = 2\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"oi.py","file_name":"oi.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"75665912","text":"from functools import partial\nfrom operator import eq, ne\n\n\nIRRELEVANT = object()\n\n\nclass ChangeWatcher(object):\n\n #: Used in the equality failure message during exit of the contexdt manager\n #: to explain why the at ext check failed.\n EQUALITY_FAILURE_SIGN = {ne: '!=', eq: '=='}\n\n def __init__(self, compare, thing, *args, **kwargs):\n self.thing = thing\n self.compare = compare\n\n self.args = args\n self.kwargs = kwargs\n\n self.expected_before = kwargs.pop('before', IRRELEVANT)\n self.expected_after = kwargs.pop('after', IRRELEVANT)\n\n def __enter__(self):\n self.before = self.__apply()\n\n if not self.expected_before is IRRELEVANT:\n check = self.compare(self.before, self.expected_before)\n assert not check, self.__precondition_failure_msg_for('before')\n\n def __exit__(self, exec_type, exec_value, traceback):\n if exec_type is not None:\n return False # reraises original exception\n\n self.after = self.__apply()\n\n if not self.expected_after is IRRELEVANT:\n check = self.compare(self.after, self.expected_after)\n assert not check, self.__precondition_failure_msg_for('after')\n\n at_exist_check = self.compare(self.before, self.after)\n assert at_exist_check, self.__equality_failure_message\n\n def __apply(self):\n return self.thing(*self.args, **self.kwargs)\n\n @property\n def __equality_failure_message(self):\n return 'Expected before %r %s %r after' % (\n self.before,\n self.EQUALITY_FAILURE_SIGN[self.compare],\n self.after\n )\n\n def __precondition_failure_msg_for(self, condition):\n return '%s value did not change (%s)' % (\n condition,\n getattr(self, condition)\n )\n\n\nclass AssertsMixin(object):\n assertChanges = partial(ChangeWatcher, ne)\n assertDoesNotChange = partial(\n ChangeWatcher,\n eq,\n before=IRRELEVANT,\n after=IRRELEVANT\n )\n","sub_path":"exam/asserts.py","file_name":"asserts.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"108992906","text":"import connexion\nimport 
json\n\n\n\nfrom common.logging import get_new_request_id, log_exception, setup_logging\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom flask import redirect, request, Response\nfrom gunicorn_config import HOST_PORT, LOG_LEVEL, LOG_REQUESTS, LOG_RESPONSES\nfrom structlog import get_logger\nfrom werkzeug.routing import RequestRedirect\n\n\n# Set up logging\nsetup_logging(LOG_LEVEL)\nlogger = get_logger()\n\nlogger.info('Loading Stock Screening Service')\n\n\n# Configure app\napplication = connexion.FlaskApp(__name__)\napplication.add_api('./swagger/swagger.yaml')\n\n\n@application.route('/')\ndef redirect_to_ui():\n \"\"\"\n redirects base path to Swagger UI\n :return: redirect\n \"\"\"\n\n return redirect(\"/ui/\")\n\n\n@application.app.before_request\ndef before_request():\n \"\"\"\n append UUIDs to all requests and log request\n \"\"\"\n\n request.id = get_new_request_id()\n\n # Get request data\n if request.view_args:\n path_variables = deepcopy(request.view_args)\n # Remove filename key to prevent it from sticking around\n path_variables.pop('filename', None)\n else:\n path_variables = {}\n if request.args:\n query_parameters = request.args.to_dict()\n else:\n query_parameters = {}\n if request.mimetype == 'application/json':\n request_data = request.get_json(silent=True)\n else:\n request_data = {}\n\n logger = get_logger(requestId=request.id,\n requestMethod=request.method,\n requestPath=request.path,\n remoteIPAddress=request.remote_addr,\n **path_variables,\n **query_parameters)\n\n if LOG_REQUESTS:\n logger.debug('Screening request', screeningRequest=str(request_data))\n\n request.start_time = datetime.now()\n\n\n@application.app.after_request\ndef after_request(response):\n \"\"\"\n log service response\n\n :param response: Response\n :rtype: Response\n \"\"\"\n\n logger = get_logger()\n\n if response.mimetype == 'application/json':\n response_data = response.get_json(silent=True)\n else:\n response_data = {}\n\n if LOG_RESPONSES:\n logger.debug('Screening response', statusCode=response.status_code, screeningResponse=str(response_data))\n\n return response\n\n\n@application.app.errorhandler(Exception)\ndef internal_server_error(ex):\n \"\"\"\n log all unhandled exceptions\n\n :param ex: exception\n :return: 500 response\n \"\"\"\n\n # Handle redirects\n if isinstance(ex, RequestRedirect):\n return ex\n else:\n log_exception(ex)\n return Response(json.dumps({'message': f'Unhandled exception in Screening application: {ex}'},\n ensure_ascii=False, sort_keys=True), status=500, mimetype='application/json')\n\n\nif __name__ == '__main__':\n application.run(port=HOST_PORT)\n","sub_path":"src/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"393684369","text":"from datetime import datetime\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\nfrom django.http import HttpResponseRedirect\n\nfrom wallapp.models import Post\n\n\n# Create your views here.\nclass HomeView(generic.ListView):\n model = Post\n template_name = 'wallapp/home.html'\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(HomeView, self).get_context_data(**kwargs)\n # Add in a QuerySet of all the books\n context['post_list'] = Post.objects.order_by('-post_date')\n context['first_name'] = self.request.user.first_name\n 
context['liked_list'] = Post.objects.filter(\n likers__username=self.request.user.username)\n return context\n\n\nclass PostView(generic.View):\n\n def post(self, request):\n content = request.POST['post_content']\n if content != '':\n post_content = Post(\n creator=request.user,\n content=content,\n post_date=datetime.now()\n )\n post_content.save()\n return redirect('home')\n\n\nclass LikeView(generic.View):\n\n def get(self, request, post_id):\n post_object = get_object_or_404(Post, pk=post_id)\n if post_object.likers.all().filter(\n pk=request.user.pk).exists():\n post_object.likers.remove(request.user)\n else:\n post_object.likers.add(request.user)\n\n post_object.save()\n return redirect('home')\n\n\nclass DeleteView(generic.View):\n\n def get(self, request, post_id):\n post_object = get_object_or_404(Post, pk=post_id)\n post_object.delete()\n return redirect('home')\n\n\nclass EditView(generic.View):\n\n def post(self, request, post_id):\n post_object = get_object_or_404(Post, pk=post_id)\n post_content = request.POST['edited_post_content']\n post_object.content = post_content\n post_object.save()\n return redirect('home')\n","sub_path":"src/facebook/wallapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"477019572","text":"from bs4 import BeautifulSoup\nimport time\nimport urllib.request\nimport http\n\n\ndef get_page_count(html):\n soup = BeautifulSoup(html, \"html.parser\")\n pagination = soup.find('div', class_='layout__body-pagination')\n if pagination is None:\n return 1\n else:\n return (pagination.find_all('ul',class_='toggle-menu toggle-menu_pagination')[-1].find_all('li')[-1].find('a')['href'][1:-1].split('/')[-1].replace('page', ''))\n\n\ndef get_html(url):\n try:\n response = urllib.request.urlopen(url)\n html = response.read()\n except urllib.error.URLError as e:\n print('Ошибка при открытии страницы: ', e)\n print('URLError url={url}: '.format(url=url))\n html = None\n time.sleep(1)\n except http.client.IncompleteRead as e:\n print('Ошибка при открытии страницы: ', e)\n print('IncompleteRead url={url}: '.format(url=url))\n html = None\n time.sleep(1)\n except http.client.IncompleteRead as e:\n print('Ошибка при открытии страницы: ', e)\n print('IncompleteRead url={url}: '.format(url=url))\n html = None\n time.sleep(1)\n except http.client.RemoteDisconnected as e:\n print('Ошибка при открытии страницы: ', e)\n print('RemoteDisconnected url={url}: '.format(url=url))\n html = None\n time.sleep(1)\n except ConnectionResetError as e:\n print('Ошибка при открытии страницы: ', e)\n print('ConnectionResetError url={url}: '.format(url=url))\n html = None\n time.sleep(1)\n return html\n\n\ndef get_content_full(html):\n if html is None:\n return None\n else:\n page = BeautifulSoup(html, \"html.parser\", from_encoding='UTF-8').find('div', class_='content html_format')\n content_full = ''\n for txt in page:\n content_full = content_full + str(txt)\n return content_full","sub_path":"parse/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"632639732","text":"from sklearn.datasets import make_moons\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\n\nN_SAMPLES = 2000\nX,y=make_moons(n_samples=N_SAMPLES,noise=0.25,random_state=100)\nX_train ,X_test,y_train,y_test = 
train_test_split(X,y,test_size = 400,random_state=42)\n\nprint(X_train)\nprint(X_test)\nprint(y_test)\n\ndef make_plot(X, y, plot_name, file_name, XX=None, YY=None, preds=None):\n plt.figure()\n axes = plt.gca()\n axes.set_xlim([x_min,x_max])\n axes.set_ylim([y_min, y_max])\n axes.set(xlabel=\"$x_1$\", ylabel=\"$x_2$\")\n if (XX is not None and YY is not None and preds is not None):\n plt.contourf(XX, YY, preds.reshape(XX.shape), 25,alpha=0.08,cmap=cm.Spectral)\n plt.contour(XX, YY, preds.reshape(XX.shape), levels=[.5],cmap=\"Greys\",vmin=0, vmax=.6)\n markers = ['o' if i == 1 else 's' for i in y.ravel()]\n # 绘制正负样本\n mscatter(X[:, 0], X[:, 1], c=y.ravel(), s=20, cmap=plt.cm.Spectral, edgecolors='none', m=markers)\n plt.savefig(OUTPUT_DIR + '/' + file_name)\n make_plot(X, y, None, \"dataset.svg\")","sub_path":"noto/tensorflow-learning/P9-moon区别,普通神经网络待优化代码.py","file_name":"P9-moon区别,普通神经网络待优化代码.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"451344197","text":"from src.utilities.utils import FileOperation\nfrom src.utilities.model_response import CustomResponse\nfrom src.utilities.model_response import Status\nfrom src.errors.errors_exception import WorkflowkeyError\nfrom src.errors.errors_exception import FileErrors\nfrom src.errors.errors_exception import ServiceError\nfrom src.errors.error_validator import ValidationResponse\nfrom anuvaad_auditor.loghandler import log_info\nfrom anuvaad_auditor.loghandler import log_exception\nfrom anuvaad_auditor.errorhandler import post_error_wf\nimport time\nimport config\nimport copy\nimport threading\nfrom src.kafka_module.producer import Producer\nimport src.utilities.app_context as app_context\n###################################\nfrom src.services.main import TesseractOCR as Service\n#####################################\n\nfile_ops = FileOperation()\n\n\nclass Response(object):\n def __init__(self, json_data, DOWNLOAD_FOLDER):\n self.json_data = json_data\n self.DOWNLOAD_FOLDER = DOWNLOAD_FOLDER\n\n def workflow_response(self, task_id, task_starttime, debug_flush=False):\n\n app_context.init()\n app_context.application_context = {}\n\n input_files, workflow_id, jobid, tool_name, step_order = file_ops.json_input_format(self.json_data)\n log_info(\"workflow_response started the response generation\", app_context.application_context)\n error_validator = ValidationResponse(self.DOWNLOAD_FOLDER)\n try:\n error_validator.wf_keyerror(jobid, workflow_id, tool_name, step_order)\n error_validator.inputfile_list_error(input_files)\n output_file_response = list()\n for i, item in enumerate(input_files):\n input_filename, in_file_type, identifier = file_ops.accessing_files(item['file'])\n self.json_data['taskID'] = task_id\n app_context.application_context = self.json_data\n #debug_flush = True\n if debug_flush == False:\n ############################\n response = Service(app_context=app_context)\n ##############################\n if response['code'] == 200:\n \n output_filename_json = file_ops.writing_json_file(i, response['rsp'], self.DOWNLOAD_FOLDER)\n file_res = file_ops.one_filename_response(output_filename_json)\n output_file_response.append(file_res)\n task_endtime = eval(str(time.time()).replace('.', '')[0:13])\n response_true = CustomResponse(Status.SUCCESS.value, jobid, task_id)\n response_success = response_true.success_response(workflow_id, task_starttime, task_endtime, tool_name, step_order, output_file_response)\n response = 
copy.deepcopy(response_success)\n log_info(\"successfully generated response for workflow\", app_context.application_context)\n \n return response\n else:\n post_error_wf(response['code'], response['message'], app_context.application_context, None)\n return None\n else:\n log_info('flushing queue data, not handling file {}'.format(input_files), app_context.application_context)\n post_error_wf(400, 'flushing queue data, not handling file {}'.format(input_files), app_context.application_context, None)\n return None\n\n \n except WorkflowkeyError as e:\n response_custom = CustomResponse(Status.ERR_STATUS.value, jobid, task_id)\n response_custom.status_code['message'] = str(e)\n response = file_ops.error_handler(response_custom.status_code, \"WORKFLOWKEY-ERROR\", True)\n log_exception(\"workflow_response workflow key error: key value missing\", app_context.application_context, e)\n response = copy.deepcopy(response)\n return response\n except FileErrors as e:\n response_custom = CustomResponse(Status.ERR_STATUS.value, jobid, task_id)\n response_custom.status_code['message'] = e.message\n response = file_ops.error_handler(response_custom.status_code, e.code, True)\n log_exception(\"workflow_response some error occured while validating file\", app_context.application_context, e)\n response = copy.deepcopy(response)\n return response\n except ServiceError as e:\n response_custom = CustomResponse(Status.ERR_STATUS.value, jobid, task_id)\n response_custom.status_code['message'] = str(e)\n response = file_ops.error_handler(response_custom.status_code, \"SERVICE_ERROR\", True)\n log_exception(\"workflow_response Something went wrong during ocr.\", app_context.application_context, e)\n response = copy.deepcopy(response)\n return response\n\n def nonwf_response(self):\n log_info(\"non workflow response started the response generation\", app_context.application_context)\n input_files = self.json_data['input']['inputs']\n app_context.init()\n app_context.application_context = self.json_data\n error_validator = ValidationResponse(self.DOWNLOAD_FOLDER)\n try:\n error_validator.inputfile_list_error(input_files)\n # output_file_response = list()\n # for item in input_files:\n # input_filename, in_file_type, identifier = file_ops.accessing_files(item['file'])\n # output_json_data = DocumentStructure(None, input_filename)\n # output_filename_json = file_ops.writing_json_file(i, output_json_data, self.DOWNLOAD_FOLDER)\n # file_res = file_ops.one_filename_response(input_filename, output_filename_json, in_file_type)\n # output_file_response.append(file_res)\n response_true = Status.SUCCESS.value\n #response_true['output'] = output_file_response\n\n output_json_data = Service(app_context=app_context)\n output_filename_json = file_ops.writing_json_file( 0,output_json_data, self.DOWNLOAD_FOLDER)\n response_true = file_ops.one_filename_response( output_filename_json)\n\n log_info(\"non workflow_response successfully generated response for rest server\", app_context.application_context)\n response_true = copy.deepcopy(response_true)\n return response_true\n\n except FileErrors as e:\n response_custom = Status.ERR_STATUS.value\n response_custom['message'] = e.message\n response = file_ops.error_handler(response_custom, e.code, False)\n log_exception(\"non workflow_response some error occured while validating file\", app_context.application_context, e)\n response = copy.deepcopy(response)\n return response\n except ServiceError as e:\n response_custom = Status.ERR_STATUS.value\n response_custom['message'] = str(e)\n 
response = file_ops.error_handler(response_custom, \"SERVICE_ERROR\", False)\n log_exception(\"non workflow_response Something went wrong during ocr.\", app_context.application_context, e)\n response = copy.deepcopy(response)\n return response\n\n ","sub_path":"anuvaad-etl/anuvaad-extractor/document-processor/ocr/tesseract/src/resources/response_gen.py","file_name":"response_gen.py","file_ext":"py","file_size_in_byte":7428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"260731648","text":"#! /usr/bin/python3\n\nimport subprocess as sp\nimport random\nimport time\n\nnotes = [\"C\", \"Db\", \"D\", \"Eb\", \"E\", \"F\", \"F#\", \"G\", \"Ab\", \"A\", \"Bb\", \"B\"]\nnotes2 = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"Gb\", \"G\", \"G#\", \"A\", \"Bb\", \"B\"]\ntypes = [\" Minor Chord Broken\", \" Dominant 7th Chord Broken\", \" Dominant 7th Chord Solid\", \" Diminished 7th Chord Broken\", \" Diminished 7th Chord Solid\", \" Minor Arpeggio\"]\ntypes2 = [\" Harmonic minor scale\", \" Melodic minor scale\"]\n\nwhile True:\n sp.call(\"clear\", shell=True)\n scale = random.choice([random.choice(notes) + random.choice([\" Major Scale\", \" Major Chord Broken\", \" Major Arpeggio\"]), random.choice(notes2) + (random.choice(types+types2))])\n print(scale)\n time.sleep(35)\n","sub_path":"usefulprograms/scaleChooser.py","file_name":"scaleChooser.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"434511307","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Q1.Create a program that asks the user to enter name and their age and then print out a message addressed to them that tells them the year that they will turn 100 years old.\n\n# In[1]:\n\n\nname = input(\" Please enter your name: \")\nage = int( input(\"Please enter your age: \") )\nfin = 2021 - age + 100\nprint(f\"{name},you will turn 100 years old in the year {fin}.\")\n\n\n# Now do the following:\n# 1. Add on to the previous program by asking the user for another number and printing out that many copies of the previous message.\n# 2. Print out that many copies of the previous message on separate lines. (\n\n# In[2]:\n\n\nname = input(\"Enter your name: \")\nage = int( input(\"Enter your age: \") )\ni = int(input(\"How many times do you want the output to be displayed? \"))\nfin = 2021 - age + 100\nfor j in range(i):\n print(f\" {name}, you will turn 100 years old in the year {fin}.\",end='\\t')\n\n\n# In[4]:\n\n\nname = input(\"Enter your name: \")\nage = int( input(\"Enter your age: \") )\ni = int(input(\"How many times do you want the output to be displayed? \"))\nfin = 2021 - age + 100\n\nfor j in range(i):\n print(f\"{name}, you will turn 100 years old in the year {fin}.\")\n\n\n# 2. Take a list, say for example this one:\n# a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n# and write a program that prints out all the elements of the list that are less than 5.\n\n# In[5]:\n\n\na = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nfor i in a:\n if i < 5:\n print(i)\n\n\n# Also do the following:\n# 1. Instead of printing the elements one by one, make a new list that has all the elements less than 5 from this list in it and print out this new list.\n# 2. Write this in one line of Python.\n# 3. 
Ask the user for a number and return a list that contains only elements from the original list a that are smaller than that number given by the user.\n\n# In[6]:\n\n\n# 2.1\na = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nnew = []\nfor i in a:\n if i < 5:\n new.append(i)\nprint(new)\n\n\n# In[7]:\n\n\n# 2.2\nprint( [x for x in a if x < 5] ) \n\n\n# In[8]:\n\n\n# 2.3\na = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nn = int(input(\"Enter number: \"))\nnew = []\nfor i in a:\n if i < n:\n new.append(i)\nprint(new)\n\n\n# 3. Write a program that asks the user how many Fibonacci numbers to generate and then generates them. Take this opportunity to think about how you can use functions. Make sure to ask the user to enter the number of numbers in the sequence to generate.(Hint: The Fibonacci seqence is a sequence of numbers where the next number in the sequence is the sum of the previous two numbers in the sequence. The sequence looks like this: 1, 1, 2, 3, 5, 8, 13, ...)\n\n# In[9]:\n\n\ndef fibonacci(n):\n if n <= 1:\n return n\n else:\n return(fibonacci(n-1) + fibonacci(n-2))\n\nn = int(input(\"Enter the number of term: \"))\n\nprint(\"Sequence is:\")\nfor i in range(n):\n print(fibonacci(i))\n\n\n# 4. Write a program (function!) that takes a list and returns a new list that contains all the elements of the first list minus all the duplicates.\n# Extra: Write two different functions to do this - one using a loop and constructing a list, and another using sets.\n\n# In[10]:\n\n\ndef removingDuplicate(l):\n n = []\n \n for i in l:\n if i not in n:\n n.append(i)\n return n\n\nfin = removingDuplicate( [1,1,2,3, 4, 4, 5, 5,5] )\nprint(fin)\n\n\n# In[11]:\n\n\n# 3.1\ndef removingDuplicates(l):\n return list(set(l))\n\nprint( removingDuplicates([1,1,2,3, 4, 4, 5, 5,5]) )\n\n\n# \n# 5. Ask the user for a number and determine whether the number is prime or not. 
(For those who have forgotten, a prime number is a number that has no divisors) Use functions.\n\n# In[12]:\n\n\ndef primeOrNot(n):\n if n <= 1:\n return \"Not prime\"\n else:\n \n for i in range(2, n):\n if n % i == 0:\n return \"Not prime\"\n else: \n return \"Prime\"\n \nn = int( input(\"Enter a number: \") )\nprint( primeOrNot(n) )\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Divyam Bhayana Assignment 1.py","file_name":"Divyam Bhayana Assignment 1.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"30611266","text":"from .credentials import CredentialStore, requires_credentials, TOKEN_STORE\nfrom .gigya import Gigya\nfrom .schedule import ChargeSchedule, ChargeMode\nfrom collections import namedtuple\nfrom functools import lru_cache\n\nimport datetime\nimport dateutil.tz\nimport jwt\nimport logging\nimport os\nimport requests\nimport simplejson\n\n\nDEFAULT_ROOT_URL = 'https://api-wired-prod-1-euw1.wrd-aws.com'\n_log = logging.getLogger('pyze.api.kamereon')\n\n\nclass AccountException(Exception):\n def __init__(self, message):\n super().__init__(message)\n\n\nclass CachingAPIObject(object):\n def _clear_all_caches(self):\n cached_funcs = [f for f in dir(self) if hasattr(f, 'cache_clear')]\n for func in cached_funcs:\n f.cache_clear()\n\n\nclass Kamereon(CachingAPIObject):\n def __init__(\n self,\n api_key=None,\n credentials=None,\n gigya=None,\n country='GB',\n root_url=DEFAULT_ROOT_URL\n ):\n\n self._root_url = root_url\n self._credentials = credentials or CredentialStore()\n self._country = country\n self._gigya = gigya or Gigya(credentials=self._credentials)\n self._session = requests.Session()\n if api_key:\n self.set_api_key(api_key)\n\n @staticmethod\n def print_multiple_account_warning(accounts):\n print(\"WARNING: Multiple Kamereon accounts found:\")\n for acc in accounts:\n print('- {}'.format(acc['accountId']))\n print('Using the first of these. 
If that\\'s not correct (perhaps you can\\'t see your vehicle)')\n print('or to silence this message, run `pyze set-account` or set the KAMEREON_ACCOUNT_ID')\n print('environment variable to the account you want to use i.e.')\n print(' KAMEREON_ACCOUNT_ID=abcdef123456789 pyze ...')\n print('API users may instead call Kamereon#set_account_id().')\n print('This setting will persist until you next log in.')\n\n def set_api_key(self, api_key):\n self._credentials.store('kamereon-api-key', api_key, None)\n\n def get_account_id(self):\n if 'KAMEREON_ACCOUNT_ID' in os.environ:\n self.set_account_id(os.environ['KAMEREON_ACCOUNT_ID'])\n if 'kamereon-account' in self._credentials:\n return self._credentials['kamereon-account']\n\n accounts = self.get_accounts()\n\n if len(accounts) == 0:\n raise AccountException('No Kamereon accounts found!')\n if len(accounts) > 1:\n Kamereon.print_multiple_account_warning(accounts)\n\n account = accounts[0]\n self._clear_all_caches()\n self._credentials['kamereon-account'] = (account['accountId'], None)\n return account['accountId']\n\n @requires_credentials('gigya', 'gigya-person-id', 'kamereon-api-key')\n def get_accounts(self):\n response = self._session.get(\n '{}/commerce/v1/persons/{}?country={}'.format(\n self._root_url,\n self._credentials['gigya-person-id'],\n self._country\n ),\n headers={\n 'apikey': self._credentials['kamereon-api-key'],\n 'x-gigya-id_token': self._gigya.get_jwt_token()\n }\n )\n\n response.raise_for_status()\n response_body = response.json()\n _log.debug('Received Kamereon accounts response: {}'.format(response_body))\n\n return response_body.get('accounts', [])\n\n def set_account_id(self, account_id):\n self._credentials['kamereon-account'] = (account_id, None)\n\n @requires_credentials('gigya', 'gigya-person-id', 'kamereon-api-key')\n def get_token(self):\n if 'kamereon' in self._credentials:\n return self._credentials['kamereon']\n\n response = self._session.get(\n '{}/commerce/v1/accounts/{}/kamereon/token?country={}'.format(\n self._root_url,\n self.get_account_id(),\n self._country\n ),\n headers={\n 'apikey': self._credentials['kamereon-api-key'],\n 'x-gigya-id_token': self._gigya.get_jwt_token()\n }\n )\n\n response.raise_for_status()\n response_body = response.json()\n _log.debug('Received Kamereon token response: {}'.format(response_body))\n\n token = response_body.get('accessToken')\n if token:\n decoded = jwt.decode(token, options={'verify_signature': False, 'verify_aud': False})\n self._credentials['kamereon'] = (token, decoded['exp'])\n self._clear_all_caches()\n return token\n else:\n raise AccountException(\n 'Unable to obtain a Kamereon access token! 
Response included keys {}'.format(\n ', '.join(response_body.keys())\n )\n )\n\n @lru_cache(maxsize=1)\n @requires_credentials('kamereon-api-key')\n def get_vehicles(self):\n response = self._session.get(\n '{}/commerce/v1/accounts/{}/vehicles?country={}'.format(\n self._root_url,\n self.get_account_id(),\n self._country\n ),\n headers={\n 'apikey': self._credentials['kamereon-api-key'],\n 'x-gigya-id_token': self._gigya.get_jwt_token(),\n 'x-kamereon-authorization': 'Bearer {}'.format(self.get_token())\n }\n )\n\n response.raise_for_status()\n response_body = response.json()\n _log.debug('Received Kamereon vehicles response: {}'.format(response_body))\n\n return response_body\n\n\nclass Vehicle(object):\n def __init__(self, vin, kamereon=None):\n self._vin = vin\n self._kamereon = kamereon or Kamereon()\n self._root_url = self._kamereon._root_url\n\n @requires_credentials('kamereon-api-key')\n def _request(self, method, endpoint, **kwargs):\n return self._kamereon._session.request(\n method,\n endpoint,\n headers={\n 'Content-type': 'application/vnd.api+json',\n 'apikey': self._kamereon._credentials['kamereon-api-key'],\n 'x-gigya-id_token': self._kamereon._gigya.get_jwt_token(),\n 'x-kamereon-authorization': 'Bearer {}'.format(self._kamereon.get_token())\n },\n **kwargs\n )\n\n def _get(self, endpoint):\n response = self._request(\n 'GET',\n '{}/commerce/v1/accounts/kmr/remote-services/car-adapter/v1/cars/{}/{}'.format(\n self._root_url,\n self._vin,\n endpoint\n )\n )\n\n response.raise_for_status()\n json = response.json()\n _log.debug('Received Kamereon vehicle response: {}'.format(json))\n return json['data']['attributes']\n\n def _post(self, endpoint, data):\n _log.debug('POSTing with data: {}'.format(data))\n response = self._request(\n 'POST',\n '{}/commerce/v1/accounts/kmr/remote-services/car-adapter/v1/cars/{}/{}'.format(\n self._root_url,\n self._vin,\n endpoint\n ),\n json={\n 'data': data\n }\n )\n\n response.raise_for_status()\n json = response.json()\n _log.debug('Received Kamereon vehicle response: {}'.format(json))\n return json\n\n def battery_status(self):\n return self._get('battery-status')\n\n def hvac_status(self):\n return self._get('hvac-status')\n\n def charge_mode(self):\n raw_mode = self._get('charge-mode')['chargeMode']\n if hasattr(ChargeMode, raw_mode):\n return getattr(ChargeMode, raw_mode)\n else:\n return raw_mode\n\n def mileage(self):\n return self._get('cockpit')\n\n # Not (currently) implemented server-side\n def lock_status(self):\n return self._get('lock-status')\n\n # Not (currently) implemented server-side\n def location(self):\n return self._get('location')\n\n def charge_schedule(self):\n return ChargeSchedule(\n self._get('charge-schedule')\n )\n\n def notification_settings(self):\n return self._get('notification-settings')\n\n def charge_history(self, start, end):\n if not isinstance(start, datetime.datetime):\n raise RuntimeError('`start` should be an instance of datetime.datetime, not {}'.format(start.__class__))\n if not isinstance(end, datetime.datetime):\n raise RuntimeError('`end` should be an instance of datetime.datetime, not {}'.format(end.__class__))\n\n return self._get(\n 'charges?start={}&end={}'.format(\n start.strftime('%Y%m%d'),\n end.strftime('%Y%m%d')\n )\n ).get('charges', [])\n\n def charge_statistics(self, start, end, period='month'):\n if not isinstance(start, datetime.datetime):\n raise RuntimeError('`start` should be an instance of datetime.datetime, not {}'.format(start.__class__))\n if not isinstance(end, 
datetime.datetime):\n raise RuntimeError('`end` should be an instance of datetime.datetime, not {}'.format(end.__class__))\n if period not in PERIOD_FORMATS.keys():\n raise RuntimeError('`period` should be one of `month`, `day`')\n\n return self._get(\n 'charge-history?type={}&start={}&end={}'.format(\n period,\n start.strftime(PERIOD_FORMATS[period]),\n end.strftime(PERIOD_FORMATS[period])\n )\n )['chargeSummaries']\n\n def hvac_history(self, start, end):\n if not isinstance(start, datetime.datetime):\n raise RuntimeError('`start` should be an instance of datetime.datetime, not {}'.format(start.__class__))\n if not isinstance(end, datetime.datetime):\n raise RuntimeError('`end` should be an instance of datetime.datetime, not {}'.format(end.__class__))\n\n return self._get(\n 'hvac-sessions?start={}&end={}'.format(\n start.strftime('%Y%m%d'),\n end.strftime('%Y%m%d')\n )\n ).get('hvacSessions', [])\n\n def hvac_statistics(self, start, end, period='month'):\n if not isinstance(start, datetime.datetime):\n raise RuntimeError('`start` should be an instance of datetime.datetime, not {}'.format(start.__class__))\n if not isinstance(end, datetime.datetime):\n raise RuntimeError('`end` should be an instance of datetime.datetime, not {}'.format(end.__class__))\n if period not in PERIOD_FORMATS.keys():\n raise RuntimeError('`period` should be one of `month`, `day`')\n\n return self._get(\n 'hvac-history?type={}&start={}&end={}'.format(\n period,\n start.strftime(PERIOD_FORMATS[period]),\n end.strftime(PERIOD_FORMATS[period])\n )\n )['hvacSessionsSummaries']\n\n # Actions\n\n def ac_start(self, when=None, temperature=21):\n\n attrs = {\n 'action': 'start',\n 'targetTemperature': temperature\n }\n\n if when:\n\n if not isinstance(when, datetime.datetime):\n raise RuntimeError('`when` should be an instance of datetime.datetime, not {}'.format(when.__class__))\n\n attrs['startDateTime'] = when.astimezone(\n dateutil.tz.tzutc()\n ).strftime(\n \"%Y-%m-%dT%H:%M:%SZ\"\n )\n\n return self._post(\n 'actions/hvac-start',\n {\n 'type': 'HvacStart',\n 'attributes': attrs\n }\n )\n\n def cancel_ac(self):\n return self._post(\n 'actions/hvac-start',\n {\n 'type': 'HvacStart',\n 'attributes': {\n 'action': 'cancel'\n }\n }\n )\n\n def set_charge_schedule(self, schedule):\n if not isinstance(schedule, ChargeSchedule):\n raise RuntimeError('Expected schedule to be instance of ChargeSchedule, but got {} instead'.format(schedule.__class__))\n schedule.validate()\n\n data = {\n 'type': 'ChargeSchedule',\n 'attributes': schedule\n }\n\n return self._post(\n 'actions/charge-schedule',\n simplejson.loads(simplejson.dumps(data, for_json=True))\n )\n\n def set_charge_mode(self, charge_mode):\n if not isinstance(charge_mode, ChargeMode):\n raise RuntimeError('Expceted charge_mode to be instance of ChargeMode, but got {} instead'.format(charge_mode.__class__))\n\n data = {\n 'type': 'ChargeMode',\n 'attributes': {\n 'action': charge_mode.name\n }\n }\n\n return self._post(\n 'actions/charge-mode',\n data\n )\n\n\nPERIOD_FORMATS = {\n 'day': '%Y%m%d',\n 'month': '%Y%m'\n}\n","sub_path":"src/pyze/api/kamereon.py","file_name":"kamereon.py","file_ext":"py","file_size_in_byte":12926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"404873896","text":"import yaml\nimport os\n\ndef get(path):\n # this is dirty and no-pythonic\n\n path_stack = path.split(\".\")\n\n stream = file('{}/config/base.yaml'.format(os.environ['PYTHONPATH'])[1:], 'r')\n config_dict = yaml.load(stream)\n 
recur = config_dict\n for key in path_stack:\n if not key or not recur:\n raise Exception('wrong path')\n recur = recur.get(key)\n return recur\n\n\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"47087356","text":"import numpy as np\nimport keras\nfrom keras.layers import Conv2D, MaxPooling2D, Input, Dense, Flatten, Dropout\nfrom keras.models import Model\nfrom keras.datasets import mnist\n\nclass SharedVisionModel:\n '''\n Implementing a model that will train to classify whether two MNIST digits\n are the same or different\n '''\n def __init__(self, model_name='sample_model', epochs=4, batch_size=32, train_size=60000, test_size=18000):\n self._model_name = model_name\n self._epochs = epochs\n self._batch_size = batch_size\n self._train_size = train_size\n self._test_size = test_size\n self._train = None\n self._test = None\n self._classification_model = None\n self._create_data_set()\n self._define_model()\n return\n\n def _create_data_set(self):\n '''\n Create data set for pairs of images and whether the two digits match or not\n '''\n def get_img_pairs_and_labels(imgs, labels, desired_size):\n '''\n creates image pairs with appropriate label vector\n '''\n half_desired_size = int(desired_size/2)\n data = {}\n num_img = imgs.shape[0]\n #reshape to dim 4\n imgs = np.reshape(imgs, (-1, 28, 28, 1))\n\n #get num_img random digits between 0 and num_img-1\n digit_a_idx = np.random.choice(num_img-1, desired_size*10)\n digit_b_idx = np.random.choice(num_img-1, desired_size*10)\n digit_a = np.take(imgs, digit_a_idx, axis=0)\n digit_b = np.take(imgs, digit_b_idx, axis=0)\n\n #get labels as 0 when digits don't match, 1 otherwise\n labels_a = np.take(labels, digit_a_idx, axis=0)\n labels_b = np.take(labels, digit_b_idx, axis=0)\n labels = (labels_a == labels_b).astype(int)\n\n #need to make data even in terms of digits that match vs don't match\n idx_false = np.nonzero(labels)[0]\n mask_true = np.ones(len(labels))\n mask_true[idx_false] = 0\n idx_true = np.nonzero(mask_true)[0]\n idx_false = idx_false[0:half_desired_size]\n idx_true = idx_true[0:half_desired_size]\n idx_images = np.concatenate((idx_true, idx_false))\n data['labels'] = labels[idx_images]\n data['digit_a'] = digit_a[idx_images]\n data['digit_b'] = digit_b[idx_images]\n\n #make labels that will be used as the secondary output by the vision model\n data['digit_a_labels'] = np.zeros((half_desired_size*2, 10))\n data['digit_b_labels'] = np.zeros((half_desired_size*2, 10))\n data['digit_a_labels'][np.arange(half_desired_size*2), labels_a[idx_images]] = 1\n data['digit_b_labels'][np.arange(half_desired_size*2), labels_b[idx_images]] = 1\n return data\n\n #load MNIST data\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n #create pairs of images with label\n self._train = get_img_pairs_and_labels(x_train, y_train, desired_size=self._train_size)\n self._test = get_img_pairs_and_labels(x_test, y_test, desired_size=self._test_size)\n return\n\n def _define_model(self):\n '''\n define the model to be trained\n Original code taken from:\n https://keras.io/getting-started/functional-api-guide/\n '''\n # First, define the vision modules\n digit_input = Input(shape=(28, 28, 1))\n hidden_layer = Conv2D(32, (3, 3), activation='relu')(digit_input)\n hidden_layer = Conv2D(64, (3, 3), activation='relu')(hidden_layer)\n hidden_layer = MaxPooling2D((2, 2))(hidden_layer)\n hidden_layer = 
Dropout(0.25)(hidden_layer)\n hidden_layer = Flatten()(hidden_layer)\n hidden_layer = Dense(128, activation='relu')(hidden_layer)\n hidden_layer = Dropout(0.25)(hidden_layer)\n out = Dense(10, activation='softmax')(hidden_layer)\n\n vision_model = Model(digit_input, out)\n\n # Then define the tell-digits-apart model\n digit_a = Input(shape=(28, 28, 1))\n digit_b = Input(shape=(28, 28, 1))\n\n # The vision model will be shared, weights and all\n out_a = vision_model(digit_a)\n out_b = vision_model(digit_b)\n\n out = keras.layers.Dot(axes=1)([out_a, out_b])\n\n self._classification_model = Model(inputs=[digit_a, digit_b], outputs=[out, out_a, out_b])\n self._classification_model.compile( optimizer=keras.optimizers.Adadelta(),\n loss='binary_crossentropy',\n metrics=['accuracy'],\n loss_weights = [0.1, 1, 1]\n )\n return\n\n def test_single_train_input(self):\n '''\n debugging tool, prints out the layer outputs to make sure they are correct\n\n :return: None\n '''\n print('#############################')\n print('Reference')\n print('Digit A Labels: {}'.format(self._test['digit_a_labels']))\n print('Digit B Labels: {}'.format(self._test['digit_b_labels']))\n print('Classification Label: {}'.format(self._test['labels']))\n output = self._classification_model.predict([self._test['digit_a'], self._test['digit_b']])\n print('#############################')\n print('Predictions')\n print('Digit A Labels: {}'.format(output[1]))\n print('Digit B Labels: {}'.format(output[2]))\n print('Classification Label: {}'.format(output[0]))\n print('#############################')\n return\n\n\n def train_model(self):\n '''\n train the model\n '''\n self._classification_model.fit([self._train['digit_a'], self._train['digit_b']],\n [self._train['labels'], self._train['digit_a_labels'], self._train['digit_b_labels']],\n epochs=self._epochs,\n batch_size=self._batch_size,\n validation_data=([self._test['digit_a'], self._test['digit_b']],\n [self._test['labels'], self._test['digit_a_labels'], self._test['digit_b_labels']]),\n )\n return\n\n def save_model(self, save_path=''):\n '''\n save the model\n '''\n self._classification_model.save(save_path+self._model_name+'.h5')\n print('Model saved to '+save_path+self._model_name+'.h5')\n return\n\n def evaluate_model(self):\n '''\n Evaluate model using test set data\n :return:\n '''\n\n score = self._classification_model.evaluate([self._test['digit_a'], self._test['digit_b']],\n [self._test['labels'], self._test['digit_a_labels'], self._test['digit_b_labels']])\n print('Test loss: {}'.format(score[0]))\n print('Test accuracy: {}'.format(score[1]))\n return\n\n def load_model(self, model_path):\n '''\n load a model\n '''\n self._classification_model = keras.models.load_model(model_path)\n return\n\nif __name__ == '__main__':\n MNISTDigitCompare = SharedVisionModel()\n MNISTDigitCompare.train_model()\n MNISTDigitCompare.evaluate_model()\n MNISTDigitCompare.save_model()\n","sub_path":"mnist_shared_vision_model.py","file_name":"mnist_shared_vision_model.py","file_ext":"py","file_size_in_byte":7445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"608431073","text":"from cv2 import cv2\nimg = cv2.imread('data\\lena.jpg',1)\nimg = cv2.resize(img,(512,512))\ng_blur = cv2.GaussianBlur(img,(3,3),0)\nblur = cv2.blur(img,(3,3))\nbilatblur = cv2.bilateralFilter(img,10,50,50)\nmedian = 
cv2.medianBlur(img,5)\ncv2.imshow('original',img)\ncv2.imshow('blur',blur)\ncv2.imshow('g_blur',g_blur)\ncv2.imshow('bilat_blur',bilatblur)\ncv2.imshow('median',median)\ncv2.waitKey(0)","sub_path":"read_write.py","file_name":"read_write.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"22526861","text":"import pathlib\nimport re\nfrom setuptools import find_packages, setup\n\n\nhere = pathlib.Path(__file__).parent\nfname = here / \"neuro_flow\" / \"__init__.py\"\n\n\nwith fname.open(encoding=\"utf8\") as fp:\n try:\n version = re.findall(r'^__version__ = \"([^\"]+)\"$', fp.read(), re.M)[0]\n except IndexError:\n raise RuntimeError(\"Unable to determine version.\")\n\nsetup(\n name=\"neuro-flow\",\n version=version,\n description=\"Pipelines system for neu.ro\",\n author=\"Neu.ro Team\",\n author_email=\"team@neu.ro\",\n license=\"Apache License, version 2.0\",\n url=\"https://neu.ro/\",\n python_requires=\">=3.6.0\",\n include_package_data=True,\n install_requires=[\n \"neuro-cli>=21.4.2\",\n \"pyyaml>=5.4\",\n \"funcparserlib>=0.3\",\n 'dataclasses>=0.5; python_version<\"3.7\"',\n \"humanize>=0.5.1\",\n 'backports-datetime-fromisoformat>=1.0.0; python_version<\"3.7\"',\n 'async_exit_stack>=1.0.1; python_version<\"3.7\"',\n \"neuro-extras>=20.12.16\",\n \"graphviz>=0.14\",\n \"yarl>=1.6.2,<1.7\",\n \"multidict>=5.0,<6.0\",\n \"rich~=10.0\",\n ],\n packages=find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Information Technology\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n entry_points={\"console_scripts\": [\"neuro-flow=neuro_flow.cli:main\"]},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"78365461","text":"\"\"\"Script which uses google's define to get the dictionary and thesaurus information of a word.\"\"\"\nimport argparse\nimport json\n\nimport requests\n\n\nclass TerminalColours:\n \"\"\"Define the colours for printing to the terminal.\"\"\"\n\n HEADER = \"\\033[95m\"\n OKBLUE = \"\\033[94m\"\n OKGREEN = \"\\033[92m\"\n WARNING = \"\\033[93m\"\n FAIL = \"\\033[91m\"\n ENDC = \"\\033[0m\"\n\n\nclass Interpretation:\n def __init__(self, keyword, more, word, phonetic, meanings):\n \"\"\"Contains a single interpretation of the a particular word\"\"\"\n self.keyword = keyword\n self.more = more\n self.word = word\n self.phonetic = phonetic\n self.meanings = meanings\n\n def output(self):\n \"\"\"Outputs the stored word, meanings and phonetics to the terminal.\"\"\"\n head = \"{} . {} . 
{}\".format(self.keyword, self.word, self.phonetic)\n print(\"{}{}{}\".format(TerminalColours.HEADER, head, TerminalColours.ENDC))\n for k, type in self.meanings.items():\n print(\"{}{}{}\".format(TerminalColours.HEADER, k, TerminalColours.ENDC))\n for i, meaning in enumerate(type):\n print()\n # Only output the first 2 definitions for each group, unless more is provided.\n if i > 1 and not self.more:\n break\n for base_colour, group in [\n (TerminalColours.OKGREEN, \"definition\"),\n (TerminalColours.OKBLUE, \"synonyms\"),\n (TerminalColours.OKBLUE, \"antonyms\"),\n ]:\n if group in meaning.keys():\n vals = meaning.get(group, \"\")\n if isinstance(vals, list):\n # Just output the first 5 arguments\n if not self.more:\n vals = vals[:5]\n vals = \", \".join(vals)\n print(\"{} - {}: {}{}\".format(base_colour, group, vals, TerminalColours.ENDC))\n print()\n\n\nclass DictionarySearch:\n \"\"\"The main definition class. Parses the google search output for the important elements.\"\"\"\n\n def __init__(self, keyword, more=False):\n self.text = None\n self.keyword = keyword\n self.more = more\n self.interpretations = []\n\n def get_request(self):\n \"\"\"\n Gets a request from an unofficial google dictionary api of the set keyword.\n \"\"\"\n try:\n req = requests.get(f\"https://api.dictionaryapi.dev/api/v1/entries/en/{str(self.keyword)}\", timeout=10)\n if req.status_code == requests.codes.ok:\n self.text = req.text\n req.close()\n else:\n raise requests.exceptions.RequestException(\"Request return code not OK.\")\n except requests.exceptions.RequestException:\n for site in [\"https://mydictionaryapi.appspot.com/\", \"https://googledictionaryapi.eu-gb.mybluemix.net/\"]:\n try:\n req = requests.get(site, params={\"define\": str(self.keyword)}, timeout=10)\n if req.status_code == requests.codes.ok:\n self.text = req.text\n req.close()\n break\n except requests.exceptions.RequestException:\n pass\n if self.text is None:\n raise IOError(\"Could not get query form unofficial google dictionary api.\")\n\n def parse_text(self):\n \"\"\"Parses the text into json.\"\"\"\n j = json.loads(self.text)\n if not isinstance(j, list):\n j = [j]\n for each in j:\n self.interpretations.append(\n Interpretation(\n self.keyword,\n self.more,\n each.get(\"word\", self.keyword),\n each.get(\"phonetic\", \"\"),\n each.get(\"meaning\"),\n )\n )\n\n def output(self):\n \"\"\"Write all interpretations to the console.\"\"\"\n for interpretation in self.interpretations:\n interpretation.output()\n\n def __repr__(self):\n return __name__\n\n\ndef main():\n \"\"\"\n Runs the main loop, searching google for the definition and printing the result out with colouring.\n \"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"search\", nargs=\"+\", help=\"type the word you want to search here\")\n parser.add_argument(\n \"--more\",\n dest=\"more\",\n action=\"store_true\",\n required=False,\n default=False,\n help=\"Get more synonyms and antonyms.\",\n )\n args = parser.parse_args()\n searchword = \" \".join(args.search)\n d = DictionarySearch(searchword, args.more)\n d.get_request()\n d.parse_text()\n d.output()\n if not args.more:\n print(\"Find more synonyms and antonyms by running with --more.\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"google_define.py","file_name":"google_define.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"318756727","text":"import unittest\n\ndef is_unique(word):\n #with 
data structures:\n # chars = list(word)\n # unique_chars = set(word)\n # return len(chars)==len(unique_chars)\n #-------------------------------------\n\n #with no additional data structures\n for c in word:\n if c in word[word.index(c)+1:]:\n return False\n return True\n\n\nclass Test(unittest.TestCase):\n dataT = [('abcd'), ('s4fad'), ('')]\n dataF = [('23ds2'), ('hb 627jh=j ()')]\n\n def test_is_unique(self):\n # true check\n for test_string in self.dataT:\n actual = is_unique(test_string)\n self.assertTrue(actual)\n # false check\n for test_string in self.dataF:\n actual = is_unique(test_string)\n self.assertFalse(actual)\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"Chapter1/1_Is Unique/IsUnique.py","file_name":"IsUnique.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"387907528","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport ssl\nimport http\nimport json\n\n\n\ndef get_arrival_info(routeId: str, stationId: str) -> dict:\n now = datetime.datetime.now()\n date_time = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n conn = http.client.HTTPSConnection(\"www.gbis.go.kr\", context=ssl._create_unverified_context())\n\n payload = f\"cmd=searchBusStationJson&\" \\\n f\"stationId={stationId}\"\n\n headers = {\n 'Content-Type': \"application/x-www-form-urlencoded\",\n 'charset': \"UTF-8\"\n }\n\n conn.request(\"POST\", \"/gbis2014/schBusAPI.action\", payload, headers)\n\n res = conn.getresponse()\n data = res.read()\n res = dict(json.loads(data.decode(\"utf-8\")))\n res_bus_list = res['result']['busArrivalInfo']","sub_path":"api/lib/global_api.py","file_name":"global_api.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"175086264","text":"import cv2\nimport numpy as np\nimport os\n\nartists = ['Vincent_van_Gogh', 'Pablo_Picasso', 'Edgar_Degas']\nX = []\ny = []\n\nfor artist in artists:\n for image in os.listdir(os.path.join(\"data/\", artist)):\n img_normal = cv2.imread('data/' + artist + '/' + image)\n img_resized = cv2.resize(img_normal, (512,512))\n X.append(img_resized)\n if artist == 'Vincent_van_Gogh':\n y.append(0)\n elif artist == 'Pablo_Picasso':\n y.append(1)\n elif artist == 'Edgar_Degas':\n y.append(2)\n\n_X = np.array(X)\n_y = np.array(y)\n\nnp.save('data_npy/data.npy', _X)\nnp.save('data_npy/labels.npy', _y)\n","sub_path":"read_images.py","file_name":"read_images.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"262256991","text":"def highest_product(liste):\n if not isNumbers(liste):\n raise ValueError(\"The list can only contain integers\")\n # liste.sort()\n i = 0\n while i < len(liste):\n for n in range(len(liste)-1):\n if liste[n] > liste[n+1]:\n temp_n = liste[n]\n liste[n] = liste[n+1]\n liste[n+1] = temp_n\n i += 1\n print(liste)\n return max(liste[0]*liste[1]*liste[-1],liste[-1]*liste[-2]*liste[-3])\n\ndef isNumbers(liste):\n number = True\n for i in liste:\n if not type(i) == int:\n number = False\n return number\n\nprint(highest_product([1, 10, 2, 6, 5, 3])) # example // 300\nprint(highest_product([1,2,-3,-4])) # negative integers // 24\nprint(highest_product([\"hei\"])) # non-integer // 
error\n","sub_path":"highest_product.py","file_name":"highest_product.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"587292986","text":"from django.conf.urls import url\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns=[\nurl('^$',views.index,name='index'),\nurl(r'^imagedetails/(\\d+)',views.imagedetails,name ='imagedetails'),\nurl(r'^new/image$',views.new_image,name='new-image'),\nurl(r'^new/comment$',views.new_comment,name='new-comment'),\n\nurl(r'^create/profile$',views.create_profile,name='create-profile'),\nurl(r'^profile/',views.profile,name='profile'),\nurl(r'^profiledetails/(\\d+)',views.profiledetails,name ='profiledetails'),\n\nurl(r'^edit/profile$',views.edit_profile,name='edit-profile'),\nurl(r'^search/', views.search_results, name='search_results')\n\n\n\n]\n\nif settings.DEBUG:\n urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)\n","sub_path":"Instaclone/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"322653448","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\n# theta goes from 0 to 2pi\ntheta = np.linspace(0, 2*np.pi, 100)\n\n# the radius of the circle\nr = np.sqrt(9)\n\n# compute x1 and x2\nx1 = r*np.cos(theta)\nx2 = r*np.sin(theta)\n\n# create the figure\nfig, ax = plt.subplots(1)\nax.plot(x1, x2)\nax.set_aspect(1)\nplt.show()","sub_path":"Learning/plotting_circle.py","file_name":"plotting_circle.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"346513313","text":"# -*- coding: utf-8 -*-\n\nimport logging\nfrom five import grok\nfrom Products.Archetypes.interfaces import IObjectInitializedEvent, IObjectEditedEvent\nimport subprocess\n\nfrom observatorio.conteudo.content import IPublicacao\n\nlogger = logging.getLogger('observatorio.conteudo')\n\n\ndef _cria_capa_publicacao(object):\n \"\"\"\n \"\"\"\n pdf = str(object.getArquivo())\n gs_cmd = [ \"gs\",\n \"-q\",\n \"-sDEVICE=png16m\",\n \"-dGraphicsAlphaBits=4\",\n \"-dSAFER\",\n \"-dBATCH\",\n \"-dNOPAUSE\",\n \"-dFirstPage=1\",\n \"-dLastPage=1\",\n \"-sOutputFile=%stdout\",\n \"-\",\n ]\n png = None\n gs_process = subprocess.Popen(gs_cmd,stdout=subprocess.PIPE,stdin=subprocess.PIPE,)\n gs_process.stdin.write(pdf)\n png = gs_process.communicate()[0]\n gs_process.stdin.close()\n return_code = gs_process.returncode\n if return_code == 0:\n logger.info(\"Ghostscript processou uma pagina do arquivo pdf.\")\n else:\n logger.warn(\"O processo Ghostscript nao terminou corretamente! Error Code: %d\" % (return_code))\n png = None\n if png:\n object.setImage(png)\n\n\n@grok.subscribe(IPublicacao, IObjectInitializedEvent)\ndef cria_capa_publicaca_inclusao(object, event):\n _cria_capa_publicacao(object)\n\n\n@grok.subscribe(IPublicacao, IObjectEditedEvent)\ndef cria_capa_publicacao_edicao(object, event):\n _cria_capa_publicacao(object)\n","sub_path":"src/observatorio/conteudo/eventos.py","file_name":"eventos.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"11114040","text":"# Copyright 2021 MosaicML. 
All Rights Reserved.\n\nimport os\n\nimport pytest\nimport torch.distributed as dist\nfrom _pytest.monkeypatch import MonkeyPatch\n\nfrom composer.core.logging import Logger, LogLevel\nfrom composer.core.state import State\nfrom composer.loggers.file_logger import FileLoggerBackend\nfrom composer.loggers.logger_hparams import FileLoggerBackendHparams\n\n\n@pytest.fixture\ndef log_file_name(ddp_tmpdir: str) -> str:\n return os.path.join(ddp_tmpdir, \"output.log\")\n\n\n@pytest.fixture\ndef log_destination(log_file_name: str) -> FileLoggerBackend:\n return FileLoggerBackendHparams(\n every_n_batches=3,\n every_n_epochs=2,\n log_level=LogLevel.BATCH,\n filename=log_file_name,\n buffer_size=1,\n flush_every_n_batches=1,\n ).initialize_object()\n\n\ndef test_file_logger(dummy_state: State, log_destination: FileLoggerBackend, monkeypatch: MonkeyPatch,\n log_file_name: str):\n dummy_state.step = 2\n dummy_state.epoch = 2\n logger = Logger(dummy_state, backends=[log_destination])\n monkeypatch.setattr(dist, \"get_rank\", lambda: 0)\n log_destination.training_start(dummy_state, logger)\n logger.metric_fit({\"metric\": \"fit\"}) # should print\n logger.metric_epoch({\"metric\": \"epoch\"}) # should print\n logger.metric_batch({\"metric\": \"batch\"}) # should print\n logger.metric_verbose({\"metric\": \"verbose\"}) # should NOT print, since we're on the BATCH log level\n dummy_state.epoch = 3\n logger.metric_epoch({\"metric\": \"epoch1\"}) # should NOT print, since we print every 2 epochs\n dummy_state.epoch = 4\n dummy_state.step = 3\n log_destination.batch_end(dummy_state, logger)\n logger.metric_epoch({\"metric\": \"epoch2\"}) # should print\n logger.metric_batch({\"metric\": \"batch1\"}) # should NOT print, since we print every 3 steps\n log_destination.batch_end(dummy_state, logger)\n log_destination.training_end(dummy_state, logger)\n with open(log_file_name, 'r') as f:\n assert f.readlines() == [\n '[FIT][step=2]: { \"metric\": \"fit\", }\\n',\n '[EPOCH][step=2]: { \"metric\": \"epoch\", }\\n',\n '[BATCH][step=2]: { \"metric\": \"batch\", }\\n',\n '[EPOCH][step=3]: { \"metric\": \"epoch2\", }\\n',\n ]\n\n\nclass TestCoreLogger:\n\n @pytest.mark.parametrize(\"rank\", [0, 1])\n def test_deferred(self, dummy_state_without_rank: State, log_file_name: str, monkeypatch: MonkeyPatch,\n log_destination: FileLoggerBackend, rank: int):\n dummy_state = dummy_state_without_rank\n dummy_state.step = 2\n dummy_state.epoch = 0\n logger = Logger(dummy_state, backends=[log_destination])\n logger.metric_batch({\"metric\": \"before_training_start\"})\n monkeypatch.setattr(dist, \"get_rank\", lambda: rank)\n log_destination.training_start(dummy_state, logger)\n logger.metric_batch({\"metric\": \"after_training_start\"})\n log_destination.batch_end(dummy_state, logger)\n log_destination.training_end(dummy_state, logger)\n if rank == 0:\n with open(log_file_name, 'r') as f:\n assert f.readlines() == [\n '[BATCH][step=2]: { \"metric\": \"before_training_start\", }\\n',\n '[BATCH][step=2]: { \"metric\": \"after_training_start\", }\\n',\n ]\n return\n else:\n assert rank == 1\n assert not os.path.exists(log_file_name), \"nothing should be logged on rank 1\"\n\n def test_deep_copy(self, dummy_state_without_rank: State, log_destination: FileLoggerBackend,\n monkeypatch: MonkeyPatch, log_file_name: str):\n # This test ensures that the logger deepcopies the logged metric when using deferred logging\n dummy_state = dummy_state_without_rank\n dummy_state.step = 2\n dummy_state.epoch = 0\n logger = Logger(dummy_state, 
backends=[log_destination])\n metric_data = [[\"hello\"]]\n logger.metric_batch({\"metric\": metric_data})\n metric_data[0] = [\"world\"]\n monkeypatch.setattr(dist, \"get_rank\", lambda: 0)\n log_destination.training_start(dummy_state, logger)\n logger.metric_batch({\"metric\": metric_data})\n log_destination.batch_end(dummy_state, logger)\n log_destination.training_end(dummy_state, logger)\n with open(log_file_name, 'r') as f:\n assert f.readlines() == [\n '[BATCH][step=2]: { \"metric\": [[\"hello\"]], }\\n',\n '[BATCH][step=2]: { \"metric\": [[\"world\"]], }\\n',\n ]\n","sub_path":"tests/test_logger.py","file_name":"test_logger.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"632076094","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# Author: leeyoshinari\r\nimport os\r\nimport re\r\nimport time\r\nfrom threading import Lock\r\nfrom git import Repo\r\nfrom sendEmail import sendMsg\r\nfrom logger import logger, cfg, handle_exception\r\n\r\n\r\nclass Testing(object):\r\n def __init__(self):\r\n self.lock = Lock()\r\n\r\n def run(self, case_email_path):\r\n \"\"\"\r\n 执行测试任务\r\n :param case_email_path: 列表,第一个元素是测试用例文件路径,第二个元素是收件人的txt文件路径\r\n :return:\r\n \"\"\"\r\n try:\r\n if int(cfg.getConfig('is_git')):\r\n logger.info('准备从git上拉取最新版本')\r\n repo = Repo(cfg.getConfig('git_path'))\r\n remote = repo.remote()\r\n remote.pull()\r\n logger.info('从git上拉取版本成功')\r\n except Exception as err:\r\n logger.error(err)\r\n\r\n file_name = None\r\n error_msg = None\r\n case_path = case_email_path[0]\r\n email_path = case_email_path[1]\r\n build_path = os.path.join(case_path, 'build.xml')\r\n logger.info(f'开始执行测试任务{build_path}')\r\n\r\n try:\r\n start_time = time.strftime('%Y-%m-%d %H:%M:%S')\r\n res = os.popen('ant -f {}'.format(build_path)).readlines() # 执行测试,并等待测试完成\r\n logger.debug(res)\r\n length = len(res)\r\n for i in range(length-1, -1, -1):\r\n if 'Failed' in res[i]: # 如果有失败日志,打印出\r\n error_msg = '{}\\n{}'.format(res[i], res[i-1])\r\n logger.error(error_msg)\r\n break\r\n if 'xslt' in res[i] and 'Processing' in res[i] and 'to' in res[i]: # 获取测试报告文件名\r\n line = res[i].strip()\r\n logger.debug(line)\r\n if '/' in line:\r\n file_name = line.split('/')[-1]\r\n else:\r\n file_name = line.split('\\\\')[-1]\r\n logger.info(file_name)\r\n break\r\n\r\n del res\r\n if file_name:\r\n logger.info('测试任务执行完成')\r\n time.sleep(2)\r\n msg = self.parse_html(file_name, case_path) # 重组html\r\n\r\n sendMsg(msg['fail_case'], email_path, failure_num=msg['failure_num']) # 发送邮件\r\n\r\n string = f\"{start_time},{build_path},{msg['total_num']},{msg['failure_num']}\\n\"\r\n self.lock.acquire()\r\n logger.info(f'写测试记录到本地, {string}')\r\n with open(os.path.join(case_path, cfg.getConfig('record_name')), 'a', encoding='utf-8') as f:\r\n f.write(string)\r\n self.lock.release()\r\n logger.info('测试完成')\r\n else:\r\n error_msg = 'html格式的测试报告未找到'\r\n except Exception as err:\r\n error_msg = err\r\n logger.error(err)\r\n\r\n if error_msg:\r\n logger.error(f'测试任务执行失败,失败信息:{error_msg}')\r\n html = f'' \\\r\n f'
异常提醒:{build_path} 测试任务执行失败,请重新执行! 失败信息:{error_msg}' \\\r\n                   f'此邮件自动发出,请勿回复。
'\r\n try:\r\n sendMsg(html, email_path, is_path=False)\r\n except Exception as err:\r\n logger.error(err)\r\n\r\n @handle_exception()\r\n def parse_html(self, file_name, case_path):\r\n \"\"\"\r\n 提取自动生成的测试报告中的一些信息,重组测试报告用于邮件发送\r\n :param case_path: 测试用例路径\r\n :param file_name: 测试报告名称\r\n :return:\r\n \"\"\"\r\n all_case = os.path.join(cfg.getConfig('report_path'), file_name) # 完整的测试报告路径\r\n fail_case = os.path.join(cfg.getConfig('report_path'), 'send_' + file_name) # 处理好用于邮件发送的测试报告路径\r\n logger.info('开始处理html测试报告{}'.format(all_case))\r\n with open(all_case, 'r', encoding='utf-8') as f:\r\n htmls = f.readlines()\r\n\r\n html = ''\r\n for line in htmls:\r\n html += line.strip()\r\n\r\n # 提取用例总数,成功率数据\r\n case_num = re.findall('响应时间最大值.*(\\d+)(\\d+)'\r\n '\\d{1,3}.\\d+%', html)[0]\r\n total_num = [int(case_num[0])]\r\n failure_num = [int(case_num[1])]\r\n\r\n # 提取出概览和失败用例,用于邮件发送\r\n # res = re.findall('(.*?)
所有用例', html)[0]\r\n        res = html.split('
所有用例')[0]\r\n url = 'http://{}:{}/testReport/{}'.format(cfg.getConfig('host'), cfg.getConfig('port'), file_name)\r\n logger.info(f'详细测试报告跳转链接为 {url}')\r\n # 添加完整测试报告路径跳转链接\r\n jump_url = f'如需查看详细测试结果,请点我'\r\n\r\n # 添加历史数据\r\n self.lock.acquire()\r\n with open(os.path.join(case_path, cfg.getConfig('record_name')), 'r', encoding='utf-8') as f:\r\n history = f.readlines()\r\n self.lock.release()\r\n for line in history:\r\n datas = line.split(',')\r\n total_num.append(int(datas[-2]))\r\n failure_num.append(int(datas[-1]))\r\n ratio = 100 - round(100 * sum(failure_num) / sum(total_num), 2)\r\n history = f'
历史数据概览' \\\r\n                  f'累计执行次数' \\\r\n                  f'累计执行用例数累计执行失败用例数执行成功率{len(total_num)}{sum(total_num)}{sum(failure_num)}{ratio}%
'\r\n\r\n res1 = re.sub('(.*?)', jump_url+history, res)\r\n # 添加尾巴\r\n res = res1 + '
此邮件自动发出,请勿回复。
'\r\n # 写到本地\r\n with open(fail_case, 'w', encoding='utf-8') as f:\r\n f.writelines(res)\r\n\r\n logger.info('html测试报告处理完成')\r\n del htmls, html, res, res1, history\r\n return {'all_case': all_case, 'fail_case': fail_case, 'total_num': total_num[0], 'failure_num': failure_num[0]}\r\n","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"253500092","text":"import os\r\nimport shutil\r\nimport time\r\n\r\n#create zip file from source folder\r\nzip_source=r'C:\\Users\\claudius.ibine\\Desktop\\backup\\source'\r\nshutil.make_archive('test_backup','zip',zip_source)\r\ntime.sleep(5)\r\n\r\n#copy files to destination\r\nsource=r'C:\\Users\\claudius.ibine\\Desktop\\backup.zip'\r\ndestination=r'C:\\Users\\claudius.ibine\\Desktop\\backup\\destination'\r\ndef backup(source, destination):\r\n for subdir, dirs, files in os.walk(source):\r\n for file in files:\r\n print(os.path.join(subdir, file))\r\n shutil.copy2(os.path.join(subdir, file), destination)\r\n\r\n\r\n","sub_path":"Python/General/Backup-Computer.py","file_name":"Backup-Computer.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"51980921","text":"\nclass Board:\n rows = 6\n columns = 7\n\n def __init__(self):\n self.board = [[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]]\n\n # columns go from 1 to 7 here, hence the \"column - 1\"\n def add_chip(self, player, column):\n empty_row = self.get_top_column(column - 1)\n self.set_board_position(empty_row, column - 1, player)\n\n def set_board_position(self, row, column, player):\n self.board[row][column] = player\n\n def set_board(self, board):\n self.board = board\n\n def get_board(self):\n return self.board\n\n def get_top_column(self, column):\n current_row = 0\n while self.board[current_row][column] == 0:\n current_row += 1\n if current_row == 6:\n break\n return current_row - 1\n\n def get_board_position(self, row, column):\n return self.board[row][column]\n\n def get_diagonal_start(self, row, column):\n while column != 0 and row != 5:\n column -= 1\n row += 1\n return [row, column]\n\n def get_anti_diagonal_start(self, row, column):\n while column != 0 and row != 0:\n column -= 1\n row -= 1\n return [row, column]\n\n def is_possible_move(self, row, column):\n if (row == 0 and self.get_board_position(row, column) == 0) \\\n or (row != 0 and row == self.get_top_column(column)):\n return True\n return False\n\n def is_possible_move_column(self, column):\n if self.get_board_position(0, column) == 0:\n return True\n return False\n\n def top_board_spaces(self):\n highest_positions = []\n top = 5\n for column in range(0, self.columns):\n if self.get_top_column(column) < top:\n highest_positions = []\n top = self.get_top_column(column)\n highest_positions += [column]\n elif self.get_top_column(column) == top:\n highest_positions += [column]\n return highest_positions\n\n def bottom_board_spaces(self):\n lowest_positions = []\n bottom = 0\n for column in range(0, self.columns):\n if self.get_top_column(column) > bottom:\n lowest_positions = []\n bottom = self.get_top_column(column)\n lowest_positions += [column]\n elif self.get_top_column(column) == bottom:\n lowest_positions += [column]\n return 
lowest_positions\n","sub_path":"Board/Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"422137384","text":"from typing import Dict\n\nfrom flask import Blueprint, current_app as app\n\nfrom feedrsub.database import db\nfrom feedrsub.ingestion.parsers.parser_factory import ParserFactory\nfrom feedrsub.ingestion.subscriber import Subscriber\nfrom feedrsub.ingestion.tasks import task_parse_notification\nfrom feedrsub.models.feed import Feed\nfrom feedrsub.models.feedstats import FeedStats\nfrom feedrsub.utils.signals import notification_received\n\ningestion_blueprint = Blueprint(\n 'ingestion',\n __name__,\n template_folder='templates',\n static_folder='static',\n url_prefix='/push'\n)\n\nfrom feedrsub.ingestion.views import verification, notification\n\nsubscriber = Subscriber()\n\ndef when_notification_received(sender,\n feed: Feed,\n content_type: str,\n content: str,\n encoding: str='UTF-8',\n parsed: bool=False,\n stats: Dict=None,\n headers: Dict=None):\n \"\"\"\n Called when 'notification-received' signal sent.\n\n :param headers: Response headers from Feed fetch, or Request headers from Feed notification\n :param stats: Dictionary of Statistics about current Feed parsing operation\n :param feed: Feed the notification is for.\n :type feed: Feed object\n :param content_type: Valid HTTP content-type, should be 'application/json',\n 'application/rss+xml', or 'application/atom+xml'\n :type content_type: str\n :param content: The data received in the notification\n :type content: str\n :param encoding: Character encoding of content\n :type encoding: str\n :param parsed: Boolean telling if content is parsed or raw\n :type parsed: bool\n \"\"\"\n app.logger.info(\n 'Creating Parse Task for %s, Content-Type: %s, Encoding: %s',\n feed, content_type, encoding)\n\n try:\n if isinstance(content, bytes):\n content = content.decode(encoding)\n except UnicodeDecodeError as e:\n app.logger.exception(\n 'Failed to decode notification for for %s, Content-Type: %s, Encoding: %s, Content: %s: %s',\n feed, content_type, encoding, content, e)\n FeedStats.save_stats(stats)\n db.session.commit()\n return\n\n task_parse_notification.delay(feed.id, content_type, content, encoding, parsed, stats=stats, headers=headers)\n\n\ndef register_ingestion_blueprint_and_signals(app):\n \"\"\"Registers blueprint to app and connects signals\"\"\"\n app.register_blueprint(ingestion_blueprint)\n notification_received.connect(when_notification_received)\n\n\ndef disconnect_ingestion_signals():\n \"\"\"Disconnects signals for this blueprint\"\"\"\n notification_received.disconnect(when_notification_received)\n","sub_path":"feedrsub/ingestion/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"37434468","text":"import numpy as np\nfrom scanorama import *\nfrom sklearn.preprocessing import normalize, LabelEncoder\nimport sys\n\nfrom process import load_names, merge_datasets, save_datasets\n\n\nNAMESPACE = 'tian'\n\ndata_names = [\n 'data/Tian/sc_10x_3cl',\n 'data/Tian/sc_10x_5cl',\n 'data/Tian/sc_CEL-seq2',\n 'data/Tian/sc_Drop-seq'\n]\n\nif __name__ == '__main__':\n datasets, genes_list, n_cells = load_names(data_names)\n\n datasets_dimred, datasets, genes = correct(\n datasets, genes_list, ds_names=data_names, hvg=2000, \n return_dimred=True\n )\n\n 
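# editorial sketch, not part of the original script: with return_dimred=True,\n    # scanorama's correct() is expected to hand back the panorama embeddings,\n    # the batch-corrected expression matrices, and the shared gene list, in\n    # that order; save_datasets below persists the corrected data.\n    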
save_datasets(datasets, genes, data_names)\n","sub_path":"4_Batch_correction/Code/scanorama/Tian/tian.py","file_name":"tian.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"580779465","text":"import json\nfrom time import sleep\n\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\n\n\nclass BuscaLocalidades:\n SIGLAS = iter(['AC', 'AL', 'AM', 'AP', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MG', 'MS', 'MT', 'PA', 'PB',\n 'PE', 'PI', 'PR', 'RJ', 'RN', 'RO', 'RR', 'RS', 'SC', 'SE', 'SP', 'TO'])\n\n def __init__(self, browser, url_page):\n self.browser = browser\n self.url = url_page\n self.estados_dict = {}\n self.__count_id = 1\n\n def acessar_site(self):\n self.browser.get(self.url)\n sleep(2)\n\n def salvar_localidades(self):\n estados = self.browser.find_element_by_class_name(\"f1col\")\n try:\n sigla_atual = next(self.SIGLAS)\n estados.send_keys(sigla_atual)\n except StopIteration:\n print('.:: Fim da Consulta ::.')\n exit()\n\n btn_buscar = self.browser.find_element_by_class_name(\"btn2,float-right\")\n btn_buscar.click()\n sleep(2)\n tabela_localidade = navegador.find_elements_by_tag_name('tbody')\n\n result = tabela_localidade[1].text.split('\\n')[1:]\n for i in range(len(result)):\n primeiro_hifen = result[i].find('-')\n faixa = result[i][primeiro_hifen - 5: primeiro_hifen + 16]\n localidade = result[i][:primeiro_hifen - 6]\n self.estados_dict[sigla_atual] = self.estados_dict.get(sigla_atual,\n {localidade: {'ID': self.__count_id,\n 'Faixa de CEP': faixa}})\n self.estados_dict[sigla_atual][localidade] = self.estados_dict[sigla_atual].get(localidade,\n {'ID': self.__count_id,\n 'Faixa de CEP': faixa})\n self.__count_id += 1\n\n with open('dados.jsonl', 'w') as f:\n json.dump(self.estados_dict, f, ensure_ascii=False)\n\n navegador.back()\n self.salvar_localidades()\n\n\nif __name__ == '__main__':\n url = 'http://www.buscacep.correios.com.br/sistemas/buscacep/buscaFaixaCep.cfm'\n options = ChromeOptions()\n options.add_argument(\"--headless\")\n navegador = Chrome(options=options)\n buscacep = BuscaLocalidades(navegador, url)\n buscacep.acessar_site()\n buscacep.salvar_localidades()\n","sub_path":"busca_localidades.py","file_name":"busca_localidades.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"46288524","text":"__author__ = 'stephen.leigh'\n\nimport sys\nfrom selenium import webdriver\nfrom WikipediaAutomation import Person\n\n\nclass UserInterface:\n \"\"\"Create object that holds information external to the person objects.\n\n Hold the browser selection, list of names and object dictionary\n (saves passing these things between functions).\n \"\"\"\n\n def __init__(self, browser):\n self.browser = browser\n self.persondict = {}\n self.namelist = []\n\n def which_list(self):\n input_selection = input(\n \"Select from the following:\\n\\n \"\n \"1 Input list in Python form\\n \"\n \"2 Input list items one by one\\n \"\n \"3 Select from pre-loaded lists\\n\"\n )\n if input_selection in [\"1\", \"2\", \"3\"]:\n return input_selection\n else:\n print(\"Input not recognised!\\n\")\n self.which_list()\n\n def open_browser(self, list_selection):\n \"\"\"Take list selection, browser selection, and go to create person objects.\"\"\"\n if list_selection == \"1\":\n self.listcopy()\n elif list_selection == \"2\":\n self.listenter()\n elif 
list_selection == \"3\":\n self.demolists()\n self.choose_browser_prompt()\n self.browser.get(\"http://www.wikipedia.org\")\n self.create_persons()\n\n def create_persons(self):\n \"\"\"Create a Person object for each entry in the list of names, and add the objects\n to a dictionary of person objects\n \"\"\"\n for name in self.namelist:\n createdperson = Person(name, None, None, None, None, self.browser)\n self.persondict[self.namelist.index(name)] = createdperson\n createdperson.find_person()\n if name == self.namelist[-1]: # i.e. last entry in list\n self.endgame()\n\n def endgame(self):\n \"\"\"Final summary of what couldn't be found\"\"\"\n self.browser.close()\n print(\"\\n\")\n for personobject in self.persondict:\n eachperson = self.persondict[personobject]\n if eachperson.birth is False:\n print(eachperson.name, \"- birthday not found\")\n if eachperson.altName is not None:\n print(\"\\t(tried page '%s')\" % eachperson.altName)\n if eachperson.death is False:\n if eachperson.age is False:\n print(eachperson.name, \"- deathday not found\")\n if eachperson.altName is not None:\n print(\"\\t(tried page '%s')\" % eachperson.altName)\n playagain = input(\n \"\\nEnter 1 to run the program again, or any other key to quit:\\n\"\n )\n if playagain == \"1\":\n self.browser = None\n self.persondict = {}\n self.namelist = []\n ui_instance = UserInterface(None)\n ui_instance.open_browser(ui_instance.which_list())\n else:\n sys.exit()\n\n def choose_browser_prompt(self):\n browser_selection = input(\n \"\\nSelect a browser:\\n\\n\"\n \"\\t1 Firefox\\n\"\n \"\\t2 Chrome with this webdriver path:\\t\"\n \"C:\\Program Files (x86)\\Google\\Chrome\\Application\\Plugins\\chromedriver.exe\\n\"\n \"\\t3 Chrome with custom webdriver path\\n\"\n )\n if browser_selection == \"1\":\n self.browser = webdriver.Firefox()\n elif browser_selection == \"2\":\n self.browser = webdriver.Chrome(\n \"C:\\Program Files (x86)\\Google\\Chrome\\Application\\Plugins\\chromedriver.exe\"\n )\n elif browser_selection == \"3\":\n chromedriver_path = input(\"Enter path to chromedriver.exe \\\n (e.g. C:\\Program Files (x86)\\Google\\Chrome\\Application\\Plugins\\chromedriver.exe):\\n\")\n self.browser = webdriver.Chrome(chromedriver_path)\n else:\n print(\"Input not recognised!\")\n self.choose_browser_prompt()\n\n def listcopy(self):\n \"\"\"Take a list in Python format\"\"\"\n try:\n inserted_list = eval(input(\"Copy list here:\\n\\n\"))\n assert isinstance(inserted_list, list)\n self.namelist = inserted_list\n except (TypeError, NameError, AssertionError):\n print(\"Not a recognised list! 
Try again\\n\")\n self.listcopy()\n\n def listenter(self):\n \"\"\"Build a list entry by entry\"\"\"\n if len(self.namelist) > 0:\n print(\"\\nCurrent list:\")\n for entry in self.namelist:\n print(\"\\t\" + entry)\n entry = input(\n \"\\nTo submit the list, enter 1\\n\"\n \"To delete the last entry, enter 0\\n\"\n \"Enter list article here:\\n\"\n )\n if entry == \"0\":\n if len(self.namelist) > 0:\n self.namelist.pop()\n else:\n print(\"No entries to delete!\\n\")\n self.listenter()\n elif entry == \"1\":\n if len(self.namelist) > 0:\n return\n else:\n print(\"List is empty!\")\n self.listenter()\n else:\n self.namelist.append(entry)\n self.listenter()\n\n def demolists(self):\n \"\"\"Select a pre-coded demo list.\n There have been 14 Clements, 13 Leos, etc., hence the third entry in the tuples\n \"\"\"\n nametuplelist = [\n (\"1\", \"Pope Clement\", 14, \"Popes named Clement\"),\n (\"2\", \"Pope Leo\", 13, \"Popes named Leo\"),\n (\"3\", \"Pope Gregory\", 16, \"Popes named Gregory\")\n ]\n numerals = [\"I\", \"II\", \"III\", \"IV\", \"V\", \"VI\", \"VII\", \"VIII\", \"IX\", \"X\", \"XI\",\n \"XII\", \"XIII\", \"XIV\", \"XV\", \"XVI\"]\n print(\"Select from one of our Papal demonstration lists:\\n\")\n for x in nametuplelist:\n print(x[0], x[3]) # lists the index number and description of the tuple\n demochoice = input()\n\n try: # this part constructs the namelist by marrying the nametuplelist and numerals\n assert demochoice in [item[0] for item in nametuplelist]\n selectedtuple = nametuplelist[int(demochoice)-1] # points to the selected tuple\n for romnum in numerals:\n if numerals.index(romnum) < selectedtuple[2]:\n self.namelist.append(selectedtuple[1] + \" \" + romnum)\n # ensures only goes up to the numeral limit stated in tuple\n return\n except (ValueError, AssertionError):\n print(\"Input not recognised! Try again\\n\\n\")\n self.demolists()\n\n\nif __name__ == \"__main__\":\n instance = UserInterface(None)\n instance.open_browser(instance.which_list())\n","sub_path":"UserInterface.py","file_name":"UserInterface.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"476266020","text":"\"\"\"La ley de Chargaff dice que en el ADN de un organismo la cantidad de Adenina es la \nmisma que la de Tiamina, y la de Citosina es la misma que la de Guanina. Dada una \nsecuencia de nucleótidos del estilo de ATTACCAGTACA... podemos comprobar si cumple\ndicha ley de la siguiente forma:\na=(Na-Nt)/(Na+Nt) c=Nc-Ng/Nc+Ng\nContamos la cantidad de A, T, C y G presentes en la cadena y calculamos los coeficientes\ndonde NX indica la cantidad de nucleótidos del tipo X presentes en la secuencia.\nPartiremos de una cadena que contiene una cantidad indeterminada de caracteres, que \nsolo pueden ser A, T, G ó C. 
Calcula a partir de dicha cadena los coeficientes a y c\"\"\"\n#ingresar la cadena\n#validar cadena\n\ndef cadena_Valida(sec):\n cadena_valida = \"aAcCgGtT\"\n valida=True\n for c in sec:\n if c not in cadena_valida:\n valida=False\n print(valida)\n return valida \n\ndef coeficiente_a(a,t):\n a_=(a-t)/(a+t) \n return(a_)\n\ndef coeficiente_c(c,g):\n c_=(c-g)/(c+g) \n return(c_)\n \ncadena_atcg = input(\"Ingrese su secuencia de nucleòtidos con los caracteres apropiados \").upper()\n\nvalidez= cadena_Valida(cadena_atcg)\nwhile (not validez):\n print(\"Estas ingresando un caracter no válido, sòlo puedes ingresar A,C,G,T \")\n cadena_atcg = input(\"Ingrese su secuencia de nucleòtidos con los caracteres apropiados \").upper()\n\nlista_nucleotidos= list(cadena_atcg)\n\n\nadenina = lista_nucleotidos.count(\"A\")\ntiamina= lista_nucleotidos.count(\"T\")\ncitosina = lista_nucleotidos.count(\"C\")\nguadina= lista_nucleotidos.count(\"G\")\n\ncoe_a= coeficiente_a(adenina,tiamina)\n\ncoe_c = coeficiente_c(citosina,guadina)\n\nprint(adenina,tiamina,citosina,guadina)\nprint(\"el coeficiente a de la secuencia \", cadena_atcg ,\"es\",coe_a, \"y el coeficiente c es \", coe_c )","sub_path":"eje 11 guia3.py","file_name":"eje 11 guia3.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"121711079","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport hashlib\nimport json\nimport collections\nfrom os import listdir\nfrom os.path import isfile, join\nimport time\nimport shutil\n\n\ndictionary = collections.OrderedDict()\ntimestamp = time.strftime(\"%Y-%m-%d %H:%M:%S %z\")\n\n# Makes sure the proper parameters were passed in.\n#\n# sys.argv contains arguments passed in to Sync. \n# sys.argv[0] is the program name\n# sys.argv[1] is the 1st directory\n# sys.argv[2] is the 2nd directory\n# If more than 3, it should throw a usage error.\ndef setup():\n\tusageError = \"Usage: sync directory1 directory2\"\n\tif (len(sys.argv) != 3):\n\t\tprint(usageError)\n\t\tsys.exit()\n\tdir1 = sys.argv[1]\n\tdir2 = sys.argv[2]\n\tif os.path.isdir(dir1):\n\t\tif not os.path.isdir(dir2):\n\t\t\t# dir1 is a directory but dir2 is not -- create dir2\n\t\t\tos.makedirs(dir2)\n\telse: \n\t\tif os.path.isdir(dir2):\n\t\t\t# dir2 is a directory but dir1 is not -- create dir1\n\t\t\tos.makedirs(dir1)\n\t\telse:\n\t\t\t# neither are directories -- throw error and exit\n\t\t\tprint(usageError)\n\t\t\tsys.exit()\n\treturn True\n\n\n# Creates sync file.\ndef createSyncFile(file_name, dirPath):\n\td = getJSONfromDir(dirPath);\n\trewriteSync(d, dirPath)\n\n\n# Creates a .sync file in dirPath and in each subdirectory in dirPath.\ndef makeSyncs(dirPath):\n\tdirs = [x[0] for x in os.walk(dirPath)]\n\tfor x in dirs:\n\t\tif (os.path.isfile(os.path.join(x, \".sync\"))):\n\t\t\t# Sync files already exist in this directory.\n\t\t\t# Get sync information from sync file. \n\t\t\tgetJSONfromSync(x)\n\t\t\t# now compare and update if needed\n\t\t\tupdateSync(x) \n\t\telse:\n\t\t\t# Sync files do not already exist in this directory.\n\t\t\t# Get sync information from directory. 
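\n\t\t\t# (editor's sketch, illustrative file names: the fresh .sync written below\n\t\t\t# is JSON like {\"a.txt\": [[\"2021-01-01 00:00:00 +0000\", \"<sha256 hex>\"]]},\n\t\t\t# i.e. one [timestamp, hash] history list per file, as built by getJSONfromDir.)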
\n\t\t\tcreateSyncFile(os.path.join(x, \".sync\"), x)\n\n\n# Creates a hash of line using the SHA-256 hashing algorithm.\ndef makeHash(line):\n\thash_object = hashlib.sha256(bytes(line, encoding='utf-8'))\n\treturn hash_object.hexdigest()\n\n\n# Gets the modification time of a file at filePath.\ndef getFileModificationTime(filePath):\n\tmt = os.path.getmtime(filePath) # in seconds\n\treturn time.strftime(\"%Y-%m-%d %H:%M:%S %z\", time.gmtime(mt))\n\n\ndef setFileModTime(filePath, timestamp):\n\tx = time.strptime(str(timestamp), \"%Y-%m-%d %H:%M:%S %z\")\n\tt = time.mktime(x)\n\tos.utime(filePath, (t, t))\n\n\n# Gets the contents of a file at filePath.\ndef readFile(filePath):\n\tf = open(filePath, 'r')\n\treturn f.read()\n\n\n# Creates a JSON dictionary by looking at the directory dirPath.\ndef getJSONfromDir(dirPath):\n\tfiles = [ f for f in listdir(dirPath) if isfile(join(dirPath, f)) ]\n\td = collections.OrderedDict()\n\tfor f in files:\n\t\tif (not f.startswith('.')):\n\t\t\tdata = readFile(os.path.join(dirPath, f))\n\t\t\thashVal = makeHash(data)\n\t\t\td[f] = [[timestamp, hashVal]] \n\t\t\tdictionary[os.path.join(dirPath, f)] = [f, [timestamp, hashVal]]\n\treturn d\n\n\n# Creates JSON info by reading the sync file in dirPath.\ndef getJSONfromSync(dirPath):\n\tj = json.loads(readFile(os.path.join(dirPath, \".sync\")))\n\tkeys = j.keys()\n\tfor i in keys:\n\t\tdictionary[os.path.join(dirPath, i)] = [i, j[i]]\n\n\n# Returns the most recent hash of the file. \ndef getHash(dirPath, file_name):\n\treturn dictionary[os.path.join(dirPath, file_name)][1][0][1]\n\n# Returns the second most recent hash of the file. \ndef secondHash(dirPath, file_name):\n\treturn dictionary[os.path.join(dirPath, file_name)][1][1][1]\n\n# Returns the most recent timestamp of the file. 
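\n# (editorial example grounded in the accessors above: if dictionary[p] holds\n# [\"f.txt\", [[t1, h1], [t0, h0]]] with the newest entry first, then getHash\n# reads [1][0][1] == h1, getTimestamp below reads [1][0][0] == t1, and\n# secondHash reads [1][1][1] == h0, the previous hash.)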
\ndef getTimestamp(dirPath, file_name):\n\treturn dictionary[os.path.join(dirPath, file_name)][1][0][0]\n\n# Returns the old collections of [time, hash].\ndef getOldSyncs(dirPath, file_name):\n\treturn dictionary[os.path.join(dirPath, file_name)][1]\n\n\n# Updates the sync information for the file.\n# Returns an array so that the info can be added to a local dictionary for printing to .sync \ndef addEntryToSync(dirPath, file_name, timestamp, hash_digest):\n\tprevious = getOldSyncs(dirPath, file_name)\n\tentries = [[timestamp, hash_digest]]\n\tif (isinstance(previous[0], str)):\n\t\tentries.append(previous)\n\telse:\n\t\tfor entry in previous:\n\t\t\tentries.append(entry)\n\tdictionary[os.path.join(dirPath, file_name)] = [file_name, entries]\n\treturn entries\n\n\n# writes a sync file using information in a dictionary d\ndef rewriteSync(d, dirPath):\n\tsyncFile = open(os.path.join(dirPath, \".sync\"), \"w+\")\n\ts = json.dumps(d, sort_keys=True, indent=4, separators=(',', ': '))\n\tsyncFile.write(s)\n\n\n# updates an old sync file with new information from the directory\ndef updateSync(dirPath):\n\td = collections.OrderedDict() \n\tfiles = [f for f in listdir(dirPath) if isfile(join(dirPath, f))]\n\tfor f in files:\n\t\tif (not f.startswith('.')):\n\t\t\thashFromDir = makeHash(readFile(os.path.join(dirPath, f)))\n\t\t\tif (os.path.join(dirPath, f) in dictionary):\n\t\t\t\thashFromSync = getHash(dirPath, f)\n\t\t\t\tif (hashFromSync == hashFromDir):\n\t\t\t\t\t# file hasn't changed\n\t\t\t\t\td[f] = getOldSyncs(dirPath, f)\n\t\t\t\telse:\n\t\t\t\t\t# file updated\n\t\t\t\t\td[f] = addEntryToSync(dirPath, f, timestamp, hashFromDir)\n\t\t\telse: \n\t\t\t\t# file created\n\t\t\t\td[f] = [[timestamp, hashFromDir]]\n\t\t\t\tdictionary[os.path.join(dirPath, f)] = [f, [timestamp, hashFromDir]]\n\tj = json.loads(readFile(os.path.join(dirPath, \".sync\")))\n\tkeys = j.keys()\n\tfor i in keys:\n\t\tif (i not in files):\n\t\t\tif getHash(dirPath, i) != \"deleted\":\n\t\t\t\t# file recently deleted\n\t\t\t\td[i] = addEntryToSync(dirPath, i, timestamp, \"deleted\")\n\t\t\telse:\n\t\t\t\t# file previously deleted\n\t\t\t\td[i] = getOldSyncs(dirPath, i) \n\trewriteSync(d, dirPath) \n\t\n\n\ndef sync(dir1, dir2):\n\td1 = collections.OrderedDict()\n\td2 = collections.OrderedDict()\n\tfiles_dir1_j = json.loads(readFile(os.path.join(dir1, \".sync\")))\n\tfiles_dir2_j = json.loads(readFile(os.path.join(dir2, \".sync\")))\n\tfiles_dir1 = files_dir1_j.keys()\n\tfiles_dir2 = files_dir2_j.keys()\n\tfor i in files_dir1:\n\t\tif not i in files_dir2:\n\t\t\t# file created -- create file in dir2\n\t\t\tshutil.copyfile(os.path.join(dir1, i), os.path.join(dir2, i))\n\t\t\td1[i] = files_dir1_j[i]\n\t\t\td2[i] = [files_dir1_j[i][0]]\n\t\t\tsetFileModTime(os.path.join(dir2, i), getFileModificationTime(os.path.join(dir1, i)))\n\t\telif (files_dir1_j[i][0][1] == files_dir2_j[i][0][1]):\n\t\t\t# files same -- change mod time to earliest one\n\t\t\tt1 = time.strptime(files_dir1_j[i][0][0], \"%Y-%m-%d %H:%M:%S %z\") \n\t\t\tt2 = time.strptime(files_dir2_j[i][0][0], \"%Y-%m-%d %H:%M:%S %z\")\n\t\t\tif (t1 < t2):\n\t\t\t\td1[i] = files_dir1_j[i]\n\t\t\t\td2[i] = addEntryToSync(dir2, i, files_dir1_j[i][0][0], files_dir1_j[i][0][1])\n\t\t\telif (t2 < t1):\n\t\t\t\td1[i] = addEntryToSync(dir1, i, files_dir2_j[i][0][0], files_dir1_j[i][0][1])\n\t\t\t\td2[i] = files_dir2_j[i]\n\t\t\telse:\n\t\t\t\td1[i] = files_dir1_j[i]\n\t\t\t\td2[i] = files_dir2_j[i]\n\t\telif (files_dir1_j[i][0][1] == \"deleted\"):\n\t\t\tif (len(getOldSyncs(dir2, 
i)) > 1):\n\t\t\t\tif (secondHash(dir2, i) == \"deleted\"):\n\t\t\t\t\t# file was recreated in dir2-- recreate it in dir1\n\t\t\t\t\td1[i] = addEntryToSync(dir1, i, timestamp, files_dir2_j[i][0][1])\n\t\t\t\t\td2[i] = files_dir2_j[i]\n\t\t\t\t\tshutil.copyfile(os.path.join(dir2, i), os.path.join(dir1, i))\n\t\t\telse:\n\t\t\t\t# file deleted in dir1 -- delete in dir2\n\t\t\t\tos.remove(os.path.join(dir2, i))\n\t\t\t\td1[i] = files_dir1_j[i]\n\t\t\t\td2[i] = addEntryToSync(dir2, i, timestamp, \"deleted\")\n\t\telif (files_dir2_j[i][0][1] == \"deleted\"):\n\t\t\tif (len(getOldSyncs(dir1, i)) > 1):\n\t\t\t\tif (secondHash(dir1, i) == \"deleted\"):\n\t\t\t\t\t# file was recreated in dir1 -- recreate it in dir2\n\t\t\t\t\td1[i] = files_dir1_j[i]\n\t\t\t\t\td2[i] = addEntryToSync(dir2, i, timestamp, files_dir1_j[i][0][1])\n\t\t\t\t\tshutil.copyfile(os.path.join(dir1, i), os.path.join(dir2, i))\n\t\t\telse:\n\t\t\t\t# file deleted in dir2 -- delete in dir1\n\t\t\t\tos.remove(os.path.join(dir1, i))\n\t\t\t\td2[i] = files_dir2_j[i]\n\t\t\t\td1[i] = addEntryToSync(dir1, i, timestamp, \"deleted\")\n\t\telif (files_dir1_j[i][0][1] != files_dir2_j[i][0][1]):\n\t\t\tunique = True\n\t\t\tif (len(getOldSyncs(dir2, i)) > 1):\n\t\t\t\tif (files_dir1_j[i][0][1] == secondHash(dir2, i)):\n\t\t\t\t\t# copy dir2 to dir1\n\t\t\t\t\td1[i] = addEntryToSync(dir1, i, timestamp, files_dir2_j[i][0][1])\n\t\t\t\t\td2[i] = files_dir2_j[i]\n\t\t\t\t\tshutil.copyfile(os.path.join(dir2, i), os.path.join(dir1, i))\n\t\t\t\t\tunique = False\n\t\t\tif (len(getOldSyncs(dir1, i)) > 1):\n\t\t\t\tif (files_dir2_j[i][0][1] == secondHash(dir1, i)):\n\t\t\t\t\t# copy dir1 to dir2\n\t\t\t\t\td2[i] = addEntryToSync(dir2, i, timestamp, files_dir1_j[i][0][1])\n\t\t\t\t\td1[i] = files_dir1_j[i]\n\t\t\t\t\tshutil.copyfile(os.path.join(dir1, i), os.path.join(dir2, i))\n\t\t\t\t\tunique = False\n\t\t\tif (unique):\n\t\t\t\t# hashes are both unique -- check mod times\n\t\t\t\tpath1 = os.path.join(dir1, i)\n\t\t\t\tpath2 = os.path.join(dir2, i)\n\t\t\t\tt1 = getFileModificationTime(path1)\n\t\t\t\tt2 = getFileModificationTime(path2)\n\t\t\t\tif (t1 < t2):\n\t\t\t\t\t# file in dir1 is older -- use dir2's version\n\t\t\t\t\td1[i] = addEntryToSync(dir1, i, timestamp, files_dir2_j[i][0][1])\n\t\t\t\t\td2[i] = files_dir2_j[i]\n\t\t\t\t\tshutil.copyfile(os.path.join(dir2, i), os.path.join(dir1, i))\n\t\t\t\telse:\n\t\t\t\t\t# file in dir2 is older -- use dir1's version\n\t\t\t\t\td2[i] = addEntryToSync(dir2, i, timestamp, files_dir1_j[i][0][1])\n\t\t\t\t\td1[i] = files_dir1_j[i]\n\t\t\t\t\tshutil.copyfile(os.path.join(dir1, i), os.path.join(dir2, i))\n\tfor i in files_dir2:\n\t\tif not i in files_dir1: \n\t\t\t# file created -- create file in dir1\n\t\t\tshutil.copyfile(os.path.join(dir2, i), os.path.join(dir1, i))\n\t\t\td2[i] = files_dir2_j[i]\n\t\t\td1[i] = [files_dir2_j[i][0]]\n\t\t\tsetFileModTime(os.path.join(dir1, i), getFileModificationTime(os.path.join(dir2, i)))\n\trewriteSync(d1, dir1)\n\trewriteSync(d2, dir2)\n\n\ndef copySubDirs(dir1, dir2):\n\tsubs1 = []\n\tsubs2 = []\n\teverything1 = os.listdir(dir1)\n\tfor x in everything1:\n\t\tif os.path.isdir(os.path.join(dir1, x)):\n\t\t\tsubs1.append(x)\n\teverything2 = os.listdir(dir2)\n\tfor y in everything2:\n\t\tif os.path.isdir(os.path.join(dir2, y)):\n\t\t\tsubs2.append(y)\n\tfor i in subs1:\n\t\tif i not in subs2:\n\t\t\tos.makedirs(os.path.join(dir2, i))\n\tfor j in subs2:\n\t\tif j not in subs1:\n\t\t\tos.makedirs(os.path.join(dir1, j))\n\tfor f in subs1:\n\t\tpath1 = os.path.join(dir1, 
f)\n\t\tpath2 = os.path.join(dir2, f)\n\t\tcopySubDirs(path1, path2)\n\n\ndef syncAll(dir1, dir2):\n\tsync(dir1, dir2)\n\tsubs1 = []\n\tsubs2 = []\n\teverything1 = os.listdir(dir1)\n\tfor x in everything1:\n\t\tif os.path.isdir(os.path.join(dir1, x)):\n\t\t\tsubs1.append(x)\n\teverything2 = os.listdir(dir2)\n\tfor y in everything2:\n\t\tif os.path.isdir(os.path.join(dir2, y)):\n\t\t\tsubs2.append(y)\n\tfor f in subs1:\n\t\tpath1 = os.path.join(dir1, f)\n\t\tpath2 = os.path.join(dir2, f)\n\t\tsyncAll(path1, path2)\n\t\t\n\n# Run Program\n#\n# Make sure the proper parameters were passed in and create directories if needed.\nx = setup()\n#\n# Create any subdirectories that don't exist. \ncopySubDirs(sys.argv[1], sys.argv[2])\n#\n# Create sync files if they don't exist and add all file information to the dictionary.\nmakeSyncs(sys.argv[1])\nmakeSyncs(sys.argv[2])\n#\n# Sync the two directories.\nsyncAll(sys.argv[1], sys.argv[2])\n\n\n\n\n","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":10645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"617543998","text":"\nfrom django.urls import path\nfrom . import views\n\nurlpatterns =[\n\n\tpath('blog/', views.index, name=\"index\" ),\n path('blog//', views.post_detail, name=\"post_detail\"),\n path('blog/new/', views.post_new, name= \"post_new\"),\n path('blog//edit/', views.edit_post, name=\"edit_post\"),\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"450094967","text":"import sys\nsys.path.insert(1 , 'D:/GP/sounds')\nfrom sounds.speak_ import speak,speak_ar,play\n\nmoods = ['kidding','hide' , 'angry', 'sad', \n 'neutral','happy', 'surprise','fear', 'disgust']\nmood_song = [\"انت شكلك فرحان و كلامك زعلان انت بتهزر\",\n \"شكلك متضايق بس كلامك فرحان لا تخبي عليا\",\n \"هدي نفيك لا تغضب\",\n \"لا تحزن ضحكتك بالدنيا\",\n \"\",\n \"شكلك فرحان يارب دايما\",\n \"لمازا انت متفاجىء\",\n \"لا تخاف انا بجانبك\",\n \"لماذا انت مشمئز\" ]\n\n\ndef all_emotions(mood):\n print(\"MOOOOOOOOOOOOOOOOOOOOOOOODDDDDDDDDDDD\")\n print(mood)\n if(mood == 'neutral'):\n pass\n else:\n for i in range(len(moods)):\n if( moods[i] == mood ):\n speak_ar(mood_song[i])\n \n return\n","sub_path":"Batot/emotions.py","file_name":"emotions.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"298834934","text":"#coding=utf8\n\nfrom sqlalchemy import MetaData, Table, select, func\n# import string\n# from datetime import datetime, timedelta\nimport pandas as pd\n# import os\n# import sys\nimport logging\nfrom . 
import database\n\nfrom dateutil.parser import parse\n\nlogger = logging.getLogger(__name__)\n\ndef find(globalid):\n db = database.connection('base')\n metadata = MetaData(bind=db)\n t = Table('ra_fund', metadata, autoload=True)\n\n columns = [\n t.c.globalid,\n t.c.ra_code,\n t.c.ra_name,\n t.c.ra_type,\n t.c.ra_type_calc,\n t.c.ra_regtime,\n t.c.ra_volume,\n ]\n\n s = select(columns).where(t.c.globalid == globalid)\n\n return s.execute().first()\n\ndef load(globalids=None, codes=None):\n db = database.connection('base')\n metadata = MetaData(bind=db)\n t = Table('ra_fund', metadata, autoload=True)\n\n columns = [\n t.c.globalid,\n t.c.ra_code,\n t.c.ra_name,\n t.c.ra_type,\n t.c.ra_type_calc,\n t.c.ra_regtime,\n t.c.ra_volume,\n ]\n\n s = select(columns)\n if globalids is not None:\n s = s.where(t.c.globalid.in_(globalids))\n\n if codes is not None:\n s = s.where(t.c.ra_code.in_(codes))\n\n df = pd.read_sql(s, db)\n\n return df\n\n\ndef find_type_fund(ra_type):\n\n db = database.connection('base')\n metadata = MetaData(bind=db)\n t = Table('ra_fund', metadata, autoload=True)\n\n columns = [\n t.c.globalid,\n t.c.ra_code,\n t.c.ra_name,\n t.c.ra_type,\n t.c.ra_type_calc,\n t.c.ra_regtime,\n t.c.ra_volume,\n ]\n\n s = select(columns).where(t.c.ra_type == ra_type)\n\n df = pd.read_sql(s, db)\n\n return df\n","sub_path":"asset_allocation_chengtong/shell/db/base_ra_fund.py","file_name":"base_ra_fund.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"584260637","text":"# Python 測試 BeautifulSoup\n#解讀 遠端 網頁資料, 都是使用 html.parser 解析器\n\nprint('------------------------------------------------------------')\t#60個\nprint('準備工作')\n\nimport re\nimport os\nimport sys\nimport csv\nimport time\nimport json\nimport urllib\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\ndef get_html_data1(url):\n print('取得網頁資料: ', url)\n resp = requests.get(url) # 用 requests 的 get 方法把網頁抓下來\n\n # 檢查 HTTP 回應碼是否為 requests.codes.ok(200)\n if resp.status_code != requests.codes.ok:\n print('讀取網頁資料錯誤, url: ', resp.url)\n return None\n else:\n return resp\n\ndef get_soup_from_url(url):\n html_data = get_html_data1(url)\n if html_data == None:\n print('無法取得網頁資料')\n sys.exit(1)\t#立刻退出程式\n\n html_data.encoding = 'UTF-8' # 或是 unicode 也可, 指定編碼方式\n soup = BeautifulSoup(html_data.text, \"html.parser\") # 解析原始碼\n #soup = BeautifulSoup(html_data.text, \"lxml\") # 指定 lxml 作為解析器\n #print(soup.prettify()) #prettify()這個函數可以將DOM tree以比較美觀的方式印出。\n #pprint.pprint(html_data.text)\n print(\"取得網頁標題\", soup.title)\n return soup\n \nprint('------------------------------------------------------------')\t#60個\nprint('BeautifulSoup 測試 1')\n\n\n# Python 測試 BeautifulSoup Yahoo電影 台北票房榜\n\nimport ssl\nfrom urllib import request, parse\n\n#urlopen https時需要驗證一次SSL證書,\n#當網站目標使用自簽名的證書時就會跳出錯誤\n#使用SSL module把證書驗證改成不需要驗證\n#context = ssl._create_unverified_context()\n\nurl = 'https://movies.yahoo.com.tw/chart.html'\nreq_obj = request.Request(url)\n\n''' 有問題\n#with request.urlopen(req_obj,context=context) as res_obj:\nwith request.urlopen(req_obj) as res_obj:\n html_data = res_obj.read()\n html_data = html_data.decode('utf-8')\n print(html_data)\n soup = BeautifulSoup(html_data, 'html.parser')\n print(soup.prettify())\n \n rows = soup.find_all('div', class_ = 'tr')\n\n colname = list(rows.pop(0).stripped_strings)\n contents = []\n for row in rows:\n thisweek_rank = row.find_next('div' , attrs={'class' : 'td'})\n updown = 
thisweek_rank.find_next('div')\n lastweek_rank = updown.find_next('div')\n\n if thisweek_rank.string == str(1):\n movie_title = lastweek_rank.find_next('h2')\n else:\n movie_title = lastweek_rank.find_next('div' , attrs={'class' : 'rank_txt'})\n\n release_date = movie_title.find_next('div' , attrs={'class' : 'td'})\n trailer = release_date.find_next('div' , attrs={'class' : 'td'})\n\n if trailer.find('a') is None:\n trailer_address = ''\n else:\n trailer_address = trailer.find('a')['href']\n\n starts = row.find('h6' , attrs={'class' : 'count'})\n lastweek_rank = lastweek_rank.string if lastweek_rank.string else ''\n\n c = [thisweek_rank.string , lastweek_rank , movie_title.string , release_date.string , trailer_address , starts.string]\n print('加入: ', c)\n contents.append(c)\n\nprint(contents)\n'''\n\nprint('------------------------------------------------------------')\t#60個\nprint('BeautifulSoup 測試 2')\n\nfilename = 'C:/_git/vcs/_1.data/______test_files2/kkbox_songs.csv'\n\n# KKBOX華語新歌日榜\nurl = \"https://kma.kkbox.com/charts/api/v1/daily?category=390&lang=tc&limit=50&terr=tw&type=newrelease\"\n\n# 取得歌曲資訊json檔\nhtml_data = requests.get(url)\n# print(html_data.status_code)\n# print(html_data.text)\n\n# 將json字串轉為Python的字典型態\ndata = json.loads(html_data.text)\nsong_list = data[\"data\"][\"charts\"][\"newrelease\"]\n\n#印10筆資料就好\ncnt = 0\nwith open(filename, 'w', newline = '', encoding = \"big5\") as csvfile:\n # 建立 CSV 檔寫入器\n writer = csv.writer(csvfile)\n # 寫入一列資料\n writer.writerow([\"排名\", \"歌名\", \"作者\", \"發行日期\", \"連結\"])\n # 取得每首歌的排名、曲名、連結、作者、時間\n for song in song_list:\n song_rank = song[\"rankings\"][\"this_period\"]\n song_name = song[\"song_name\"]\n song_url = song[\"song_url\"]\n song_artist = song[\"artist_name\"]\n song_timestamp = int(song[\"release_date\"])\n # 從timestamp轉為日期格式\n song_date = time.strftime(\"%Y-%m-%d\", time.localtime(song_timestamp))\n\n print(\"排名:\", song_rank)\n print(\"歌名:\", song_name)\n print(\"作者:\", song_artist)\n print(\"發行��期:\", song_date)\n print(\"連結:\", song_url)\n\n writer.writerow([song_rank, song_name, song_artist.encode('utf-8'), song_date, song_url])\n\n # # 從歌曲連結取得歌詞\n # song_response = requests.get(song_url)\n # soup = BeautifulSoup(song_response.text, \"html.parser\")\n # lyric = soup.find(\"div\", class_=\"lyrics\").text\n # print(\"歌詞:\", lyric)\n\n print(\"-\" * 30)\n cnt += 1\n if cnt == 10:\n break;\n\nprint('將資料寫入檔案 : ' + filename)\nprint('OK')\n\n\n\nprint('------------------------------------------------------------')\t#60個\nprint('BeautifulSoup 測試 3')\n\n\n# Python 測試 BeautifulSoup 好樂迪 K歌排行\nimport ssl\nfrom urllib import request, parse\nimport pandas as pd\n\n#urlopen https時需要驗證一次SSL證書,\n#當網站目標使用自簽名的證書時就會跳出錯誤\n#使用SSL module把證書驗證改成不需要驗證\n#context = ssl._create_unverified_context()\n# 使用 ssl 模組,避免遇到 CERTIFICATE_VERIFY_FAILED 錯誤\ncontext = ssl._create_unverified_context()\n\n# 給好樂迪的網址建立 Request\nurl = 'https://www.holiday.com.tw/SongInfo/SongList.aspx'\nreq_obj = request.Request(url)\n''' 有問題\nsong_list = []\n# 發送 request\nwith request.urlopen(req_obj,context=context) as res_obj:\n # 將 response 讀回並用 utf8 decode \n\tresp = res_obj.read().decode('utf-8')\n # 使用 html.parser\n\tsoup = BeautifulSoup(resp , 'html.parser')\n # 用 find 找到 id 為 ctl00_ContentPlaceHolder1_dgSong 的 table 標籤,並回傳 table 內所有的 tr 內容\n\trank_table = soup.find('table',id='ctl00_ContentPlaceHolder1_dgSong').find_all('tr')\n\n #由於要避開 table 的第一列 tr 資料以及最後一列 tr 資料,所以取 [1:-2] \n\tfor rt in rank_table[1:-2]:\n # 找到所有的 td 並取得第 5 個 td(index 是 4)\n\t\tsong_name = 
rt.find_all('td')[4]\n # 找到第一個 a 這個標籤,因為只有歌手的資料被 a tag 包住\n\t\tsinger = rt.find('a')\n # 把歌曲跟歌手的資料轉成 string 並去前後空白塞到一個 song_list\n\tsong_list.append([song_name.string.strip(),singer.string.strip()])\n\n# 把 song_list 使用 pandas 模組轉成 dataframe 用於後面資料分析\ndf = pd.DataFrame(song_list,columns=['song','singer'])\nprint(df)\n'''\n\nprint('------------------------------------------------------------')\t#60個\nprint('BeautifulSoup 測試 4')\n\n\n \n'''\n參考 https://ithelp.ithome.com.tw/articles/10186119\n\nBeautifulSoup 套件 是 Python 上的 網頁解析工具\nrequests 套件允許我們發送與接收有機及草飼的 HTTP/1.1 請求(這真的是美式幽默。)\n'''\n\nimport numpy as np\nimport pandas as pd\n\n\nurl = \"https://www.ptt.cc/bbs/NBA/index.html\" # PTT NBA 板\n\nprint('01. 印出網頁資料')\nresponse = requests.get(url) # 用 requests 的 get 方法把網頁抓下來\nhtml_doc = response.text # text 屬性就是 html 檔案\nsoup = BeautifulSoup(response.text, \"lxml\") # 指定 lxml 作為解析器\nprint(soup.prettify()) # 把排版後的 html 印出來\n\nprint('02. 一些 BeautifulSoup 的屬性或方法')\nresponse = requests.get(url) # 用 requests 的 get 方法把網頁抓下來\nhtml_doc = response.text # text 屬性就是 html 檔案\nsoup = BeautifulSoup(response.text, \"lxml\") # 指定 lxml 作為解析器\n# 一些屬性或方法\nprint(soup.title) # 把 tag 抓出來\nprint(\"---\")\nprint(soup.title.name) # 把 title 的 tag 名稱抓出來\nprint(\"---\")\nprint(soup.title.string) # 把 title tag 的內容欻出來\nprint(\"---\")\nprint(soup.title.parent.name) # title tag 的上一層 tag\nprint(\"---\")\nprint(soup.a) # 把第一個 抓出來\nprint(\"---\")\nprint(soup.find_all('a')) # 把所有的 抓出來\n\n#Beautiful Soup 幫我們將 html 檔案轉換為 bs4 的物件,像是標籤(Tag),\n#標籤中的內容(NavigableString)與 BeautifulSoup 物件本身。\n\nprint('03.')\nresponse = requests.get(url) # 用 requests 的 get 方法把網頁抓下來\nhtml_doc = response.text # text 屬性就是 html 檔案\nsoup = BeautifulSoup(response.text, \"lxml\") # 指定 lxml 作為解析器\n\nprint(type(soup.a))\nprint(\"---\")\nprint(soup.a.name) # 抓標籤名 a\nprint(\"---\")\nprint(soup.a['id']) # 抓的 id 名稱\n\nprint('04. 標籤中的內容(NavigableString)')\n\nresponse = requests.get(url) # 用 requests 的 get 方法把網頁抓下來\nhtml_doc = response.text # text 屬性就是 html 檔案\nsoup = BeautifulSoup(response.text, \"lxml\") # 指定 lxml 作為解析器\n\nprint(type(soup.a.string))\nprint(\"---\")\nsoup.a.string\n\nprint('05. BeautifulSoup')\n\nresponse = requests.get(url) # 用 requests 的 get 方法把網頁抓下來\nhtml_doc = response.text # text 屬性就是 html 檔案\nsoup = BeautifulSoup(response.text, 'lxml') # 指定 lxml 作為解析器\n\ntype(soup)\n\nprint('06. 爬樹')\n\n#DOM(Document Object Model)的樹狀結構���念在使用 BeautifulSoup 扮演至關重要的角色,所以我們也要練習爬樹。\nprint('06a. 往下爬')\n#從標籤中回傳更多資訊。\n\nresponse = requests.get(url) # 用 requests 的 get 方法把網頁抓下來\nhtml_doc = response.text # text 屬性就是 html 檔案\nsoup = BeautifulSoup(response.text, \"lxml\") # 指定 lxml 作為解析器\n\nprint(soup.body.a.contents)\nprint(list(soup.body.a.children))\nprint(soup.body.a.string)\n\nprint('06b. 往上爬')\n#回傳上一階層的標籤。\n\nresponse = requests.get(url) # 用 requests 的 get 方法把網頁抓下來\nhtml_doc = response.text # text 屬性就是 html 檔案\nsoup = BeautifulSoup(response.text, \"lxml\") # 指定 lxml 作為解析器\n\nprint(soup.title)\nprint(\"---\")\nprint(soup.title.parent)\n\nprint('06c. 往旁邊爬')\n#回傳同一階層的標籤。\n\nresponse = requests.get(url) # 用 requests 的 get 方法把網頁抓下來\nhtml_doc = response.text # text 屬性就是 html 檔案\nsoup = BeautifulSoup(response.text, \"lxml\") # 指定 lxml 作為解析器\n\nfirst_a_tag = soup.body.a\nnext_to_first_a_tag = first_a_tag.next_sibling\nprint(first_a_tag)\nprint(\"---\")\nprint(next_to_first_a_tag)\nprint(\"---\")\nprint(next_to_first_a_tag.previous_sibling)\n\nprint('07a. 
搜尋')\n#這是我們主要使用 BeautifulSoup 套件來做網站解析的方法。\n#find() 方法\n#find_all() 方法\n\nresponse = requests.get(url) # 用 requests 的 get 方法把網頁抓下來\nhtml_doc = response.text # text 屬性就是 html 檔案\nsoup = BeautifulSoup(response.text, \"lxml\") # 指定 lxml 作為解析器\n\nprint(soup.find(\"a\")) # 第一個 \nprint(\"---\")\nprint(soup.find_all(\"a\")) # 全部 \n\nprint('07b. 可以在第二個參數 class_= 加入 CSS 的類別。')\n\nresponse = requests.get(url) # 用 requests 的 get 方法把網頁抓下來\nhtml_doc = response.text # text 屬性就是 html 檔案\nsoup = BeautifulSoup(response.text, \"lxml\") # 指定 lxml 作為解析器\n\nprint(soup.find(\"div\", class_= \"r-ent\"))\n\nprint('08.')\n#BeautifulSoup 牛刀小試\n\n'''\n大略照著官方文件練習了前面的內容之後,我們參考Tutorial of PTT crawler來應用 BeautifulSoup 把 PTT NBA 版首頁資訊包含推文數,作者 id,文章標題與發文日期搜集下來。\n\n我們需要的資訊都放在 CSS 類別為 r-ent 的
print('08.')\n#a first real workout for BeautifulSoup\n\n'''\nAfter roughly practicing the material above along the official documentation, we follow the Tutorial of PTT crawler to apply BeautifulSoup and collect the front-page information of the PTT NBA board, including push counts, author ids, post titles and post dates.\n\nThe information we need all lives in the <div> elements whose CSS class is r-ent.\n'''\nresponse = requests.get(url)\nhtml_doc = response.text # the text attribute is the html document\nsoup = BeautifulSoup(response.text, \"lxml\") # specify lxml as the parser\n\nposts = soup.find_all(\"div\", class_ = \"r-ent\")\nprint(posts)\ntype(posts)\n\n\n#Note that this posts object is a ResultSet; we usually loop over it to pull out each element inside, so let's practice with the author ids first.\n\nprint('09.')\n\nresponse = requests.get(url)\nhtml_doc = response.text # the text attribute is the html document\nsoup = BeautifulSoup(response.text, \"lxml\") # specify lxml as the parser\n\nauthor_ids = [] # create an empty list to hold the author ids\nposts = soup.find_all(\"div\", class_ = \"r-ent\")\nfor post in posts:\n    author_ids.extend(post.find(\"div\", class_ = \"author\"))\n\nprint(author_ids)\n\nprint('10. #next, write in the push counts, post titles and post dates as well.')\n\nresponse = requests.get(url)\nhtml_doc = response.text # the text attribute is the html document\nsoup = BeautifulSoup(response.text, \"lxml\") # specify lxml as the parser\n\nauthor_ids = [] # create an empty list to hold the author ids\nrecommends = [] # create an empty list to hold the push counts\npost_titles = [] # create an empty list to hold the post titles\npost_dates = [] # create an empty list to hold the post dates\n\nposts = soup.find_all(\"div\", class_ = \"r-ent\")\nfor post in posts:\n    try:\n        author_ids.append(post.find(\"div\", class_ = \"author\").string) \n    except:\n        author_ids.append(np.nan)\n    try:\n        post_titles.append(post.find(\"a\").string)\n    except:\n        post_titles.append(np.nan)\n    try:\n        post_dates.append(post.find(\"div\", class_ = \"date\").string)\n    except:\n        post_dates.append(np.nan)\n\n# the push count hides in a span inside the div, so it is handled separately\nrecommendations = soup.find_all(\"div\", class_ = \"nrec\")\nfor recommendation in recommendations:\n    try:\n        recommends.append(int(recommendation.find(\"span\").string))\n    except:\n        recommends.append(np.nan)\n\nprint(author_ids)\nprint(recommends)\nprint(post_titles)\nprint(post_dates)\n\nprint('11. ')\n#After checking that the results are all fine, we can put these lists into a dictionary and then convert it into a data frame.\n\nresponse = requests.get(url)\nhtml_doc = response.text # the text attribute is the html document\nsoup = BeautifulSoup(response.text, \"lxml\") # specify lxml as the parser\n\nauthor_ids = [] # create an empty list to hold the author ids\nrecommends = [] # create an empty list to hold the push counts\npost_titles = [] # create an empty list to hold the post titles\npost_dates = [] # create an empty list to hold the post dates\n\nposts = soup.find_all(\"div\", class_ = \"r-ent\")\nfor post in posts:\n    try:\n        author_ids.append(post.find(\"div\", class_ = \"author\").string) \n    except:\n        author_ids.append(np.nan)\n    try:\n        post_titles.append(post.find(\"a\").string)\n    except:\n        post_titles.append(np.nan)\n    try:\n        post_dates.append(post.find(\"div\", class_ = \"date\").string)\n    except:\n        post_dates.append(np.nan)\n\n# the push count hides in a span inside the div, so it is handled separately\nrecommendations = soup.find_all(\"div\", class_ = \"nrec\")\nfor recommendation in recommendations:\n    try:\n        recommends.append(int(recommendation.find(\"span\").string))\n    except:\n        recommends.append(np.nan)\n    \nptt_nba_dict = {\"author\": author_ids,\n \"recommends\": recommends,\n \"title\": post_titles,\n \"date\": post_dates\n}\n\nptt_nba_df = pd.DataFrame(ptt_nba_dict)\nptt_nba_df\n\n'''\nold\n\nprint('BeautifulSoup test 4')\n\nurl = \"https://www.ptt.cc/bbs/NBA/index.html\" # the PTT NBA board\nhtml_data = requests.get(url) # fetch the page with the get method of requests\nhtml_doc = html_data.text # the text attribute is the html document\nsoup = BeautifulSoup(html_data.text, \"lxml\") # specify lxml as the parser\n#print(soup.prettify()) # print the pretty-printed html\n\n# some attributes and methods\nprint(soup.title) # grab the title tag\nprint(\"---\")\nprint(soup.title.name) # grab the name of the title tag\nprint(\"---\")\nprint(soup.title.string) # grab the contents of the title tag\nprint(\"---\")\nprint(soup.title.parent.name) # the tag one level above the title tag\nprint(\"---\")\nprint(soup.a) # grab the first <a> tag\nprint(\"---\")\nprint(soup.find_all('a')) # grab all the <a> tags\n\n\n\n'''
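# --- Editor's annotation (illustrative sketch, not part of the original record) ---
# Sections 08-11 above repeat the same try/except pattern four times. A compact
# variant under the same page-layout assumptions; safe_text(), _nrec_span() and
# ptt_nba_df() are names invented for this sketch:
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup

def safe_text(tag):
    # the tag's string, or NaN when the tag (or its string) is missing
    return tag.string if tag is not None and tag.string is not None else np.nan

def _nrec_span(row):
    nrec = row.find("div", class_="nrec")
    return nrec.find("span") if nrec is not None else None

def ptt_nba_df(url="https://www.ptt.cc/bbs/NBA/index.html"):
    soup = BeautifulSoup(requests.get(url).text, "lxml")
    rows = soup.find_all("div", class_="r-ent")
    return pd.DataFrame({
        "author": [safe_text(r.find("div", class_="author")) for r in rows],
        "recommends": pd.to_numeric([safe_text(_nrec_span(r)) for r in rows], errors="coerce"),
        "title": [safe_text(r.find("a")) for r in rows],
        "date": [safe_text(r.find("div", class_="date")) for r in rows],
    })
# --- End of annotation ---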
print('------------------------------------------------------------')\t#60 chars\nprint('BeautifulSoup test 5a')\n\n#official website of the e-happy (文淵閣) studio\nurl = 'http://www.e-happy.com.tw'\nhtml = requests.get(url)\nhtml.encoding=\"utf-8\"\n#print(html.text) #many\n\nprint('BeautifulSoup test 5b')\n#official website of the e-happy (文淵閣) studio\nurl = 'http://www.e-happy.com.tw'\nhtml = requests.get(url)\nhtml.encoding=\"utf-8\"\n\nhtmllist = html.text.splitlines() #split the page data line by line into a list\n''' many\nfor row in htmllist:\n    print(row)\n'''\n\nprint('BeautifulSoup test 5c')\n\n#official website of the e-happy (文淵閣) studio\nurl = 'http://www.e-happy.com.tw'\nhtml = requests.get(url)\nhtml.encoding=\"utf-8\"\n\nsoup = BeautifulSoup(html.text,\"html.parser\")\nlinks = soup.find_all(\"a\") # read the <a> tags\nfor link in links:\n    href = link.get(\"href\") # read the contents of the href attribute\n    # check that the value is not None and that it starts with http://\n    if href != None and href.startswith(\"http://\"): \n        print(href)\n\n\n\n\n\n\n\nprint('------------------------------------------------------------')\t#60 chars\nprint('BeautifulSoup test 6')\n\ndef get_html_data1(url):\n    print('fetching page data: ', url)\n    resp = requests.get(url)\n    # check whether the HTTP status code is requests.codes.ok (200)\n    if resp.status_code != requests.codes.ok:\n        print('error reading page data, url: ', resp.url)\n        return None\n    else:\n        return resp\n\nprint('BeautifulSoup test 7')\n\n#url = 'https://pornav.co/'\nurl = 'https://www.deviantart.com/'\n\nhtml_data = get_html_data1(url)\nif html_data:\n    soup = BeautifulSoup(html_data.text, 'html.parser')\n    #print(soup.prettify()) #the prettify() function prints the DOM tree in a prettier format.\n\n    print(\"page title\", soup.title)\n\n    print('searching the page for jpg image links')\n    ''' many\n    regex = re.compile('.*\\.jpg')\n    imglist = soup.find_all(\"img\", {\"src\":regex})\n    for img in imglist:\n        print(img[\"src\"])\n    '''\n    \nelse:\n    print('could not fetch the page data')\n\n\n\nprint('BeautifulSoup test assignment complete')\n\n\n\n\n\n\n\n\n\n\n\n\nprint('------------------------------------------------------------')\t#60 chars\nprint('BeautifulSoup test 7')\n\ndef get_html_data1(url):\n    print('fetching page data: ', url)\n    resp = requests.get(url)\n    # check whether the HTTP status code is requests.codes.ok (200)\n    if resp.status_code != requests.codes.ok:\n        print('error reading page data, url: ', resp.url)\n        return None\n    else:\n        return resp\n\nprint('#fetch the Central Weather Bureau satellite image')\n\n\nurl = 'https://www.cwb.gov.tw/V8/C/W/OBS_Sat.html'\n\n#TBD\n\n'''\nhtml_data = get_html_data1(url)\nif html_data:\n    soup = BeautifulSoup(html_data.text, 'html.parser')\n    print(soup.prettify()) #the prettify() function prints the DOM tree in a prettier format.\n\n    print(\"page title\", soup.title)\n\n    print('searching the page for jpg image links')\n    \n    regex = re.compile('.*\\.jpg')\n    imglist = soup.find_all(\"img\", {\"src\":regex})\n    for img in imglist:\n        print(img[\"src\"])\n    \nelse:\n    print('could not fetch the page data')\n'''\n
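# --- Editor's annotation (illustrative sketch, not part of the original record) ---
# The satellite-image test above is left as TBD. A generic way to save one
# image with requests; save_image() is a name invented here, and stream=True
# avoids loading a large file into memory all at once:
import requests

def save_image(img_url, path):
    resp = requests.get(img_url, stream=True, timeout=10)
    resp.raise_for_status()                       # fail loudly on HTTP errors
    with open(path, "wb") as f:
        for chunk in resp.iter_content(chunk_size=8192):
            f.write(chunk)
# --- End of annotation ---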
print('------------------------------------------------------------')\t#60 chars\nprint('BeautifulSoup test 8')\n\n\n\nimport requests\nimport os\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\n\n#a stock-photo site\nurl = 'https://www.dreamstime.com/free-images_pg1'\n\nhtml = requests.get(url)\nhtml.encoding=\"utf-8\"\n\nsp = BeautifulSoup(html.text, 'html.parser')\n\n# create an images directory to store the pictures\nimages_dir=\"images/\"\nif not os.path.exists(images_dir):\n    os.mkdir(images_dir)\n    \n# get all the <a> and <img> tags\nall_links=sp.find_all(['a','img']) \nfor link in all_links:\n    # read the contents of the src and href attributes\n    src=link.get('src')\n    href = link.get('href')\n    attrs=[src,href]\n    for attr in attrs:\n        # keep only .jpg and .png files\n        if attr != None and ('.jpg' in attr or '.png' in attr):\n            # set the full path of the image file\n            full_path = attr \n            filename = full_path.split('/')[-1] # get the image file name\n            print(full_path)\n            # save the picture\n            try:\n                image = urlopen(full_path)\n                f = open(os.path.join(images_dir,filename),'wb')\n                f.write(image.read())\n                f.close()\n            except:\n                print(\"{} could not be read!\".format(filename))\n\n\n\nprint('------------------------------------------------------------')\t#60 chars\nprint('BeautifulSoup test 9')\n\n\n\n\n\nprint('\\n\\nBeautifulSoup test assignment complete\\n')\n\n","sub_path":"_4.python/urllib_requests_BeautifulSoup/BeautifulSoup03.py","file_name":"BeautifulSoup03.py","file_ext":"py","file_size_in_byte":20810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"316799116","text":"from CMGTools.H2TauTau.proto.plotter.categories_common import categories_common\nfrom CMGTools.H2TauTau.proto.plotter.cut import Cut\n\npt1 = 18\npt2 = 20\n\n\n# NEW one - to be implemented as soon as trees are there\ninc_sig_tau = Cut(\n    '!veto_dilepton && !veto_thirdlepton && !veto_otherlepton && l2_byCombinedIsolationDeltaBetaCorrRaw3Hits<1.5 && l2_againstMuon3>1.5 && l2_againstElectronMVA5>0.5 && l2_pt>{pt2}'.format(pt2=pt2))\n\ninc_sig_mu = Cut('l1_reliso05<0.1 && l1_muonid_medium>0.5 && l1_pt>{pt1}'.format(pt1=pt1))\n\ninc_sig = inc_sig_mu & inc_sig_tau\n\ncat_Inc_RlxMuIso = str(inc_sig).replace('l1_reliso05<0.1', 'l1_reliso05<1.0')\ncat_Inc_RlxTauIso = str(inc_sig).replace('l2_threeHitIso<1.5', 'l2_threeHitIso<10.0')\ncat_Inc_RlxMuTauIso = str(inc_sig).replace('l1_reliso05<0.1', 'l1_reliso05<0.5').replace('l2_threeHitIso<1.5', 'l2_threeHitIso<10.0')\ncat_Inc_AntiMuTauIso = str(inc_sig).replace('l1_reliso05<0.1', 'l1_reliso05>0.1').replace('l2_looseMvaIso>0.5', 'l2_looseMvaIso<0.5')\n\ncat_Inc_AntiMuIso = str(inc_sig).replace('l1_reliso05<0.1', 'l1_reliso05>0.1')\ncat_Inc_AntiTauIso = str(inc_sig).replace('l2_threeHitIso<1.5', 'l2_threeHitIso>1.5 && l2_threeHitIso<5.0')\n\ncat_Inc = str(inc_sig)\n\ncategories = {\n    'Xcat_Inc_RlxMuIsoX': cat_Inc_RlxMuIso,\n    'Xcat_Inc_RlxTauIsoX': cat_Inc_RlxTauIso,\n    'Xcat_IncX': cat_Inc,\n    'Xcat_Inc_AntiTauIsoX': cat_Inc_AntiTauIso,\n}\n\ncategories.update(categories_common)\n","sub_path":"CMGTools/H2TauTau/python/proto/plotter/categories_TauMu.py","file_name":"categories_TauMu.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"314262168","text":"#\n# enum.py -- Class to implement enumerated types.\n#\n# Copyright (c) 2008, Blakita Software, LLC\n#\n# Brian St. Pierre, \n#\n# Permission to use, copy, modify, and distribute this software and its\n# documentation for any purpose, without fee, and without a written agreement\n# is hereby granted, provided that the above copyright notice and this\n# paragraph and the following two paragraphs appear in all copies.\n#\n# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT,\n# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST\n# PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,\n# EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN \"AS IS\"\n# BASIS, AND THE AUTHOR HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE,\n# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.\n#\n# Change History\n# 1 - 2008-12-14 - Written\n#\n\n\n'''This provides an enumerated type. Derive from Enumeration and set\n_enum_ to a list of names. 
The type will have these names\navailable as attributes on the type.\n\n>>> class EnumTest(Enumeration):\n... _enum_ = ['foo', 'bar', 'rab', 'oof']\n...\n>>> et = EnumTest()\n>>> et2 = EnumTest()\n>>> et == et2\nTrue\n>>> id(et) == id(et2)\nTrue\n>>> et.foo\n\n>>> et.foo == 0\nTrue\n>>> et.foo == 1 - 1\nTrue\n>>> et.foo + 1 == et.bar\nTrue\n>>> et.bar\n\n>>> 3 in et\nTrue\n>>> \"blech\" in et2\nFalse\n>>> \"foo\" in et\nTrue\n>>> EnumTest.foo\n\n>>> EnumTest.bar\n\n>>> EnumTest.rab\n\n>>> EnumTest.oof\n\n>>> EnumTest.oof.name\n'oof'\n>>> EnumTest.oof.name = 'foo'\nTraceback (most recent call last):\n File \"\", line 1, in ?\n et.foo = 6\nAttributeError: '_EnumerationValue' object attribute 'name' is read-only\n>>> EnumTest.oof.name\n'oof'\n>>> 0 in EnumTest\nTrue\n>>> 4 in EnumTest\nFalse\n>>> et.zzz = 6\nTraceback (most recent call last):\n File \"\", line 1, in ?\n et.zzz\nAttributeError: 'EnumTest' object has no attribute 'zzz'\n>>> et.foo = 6\nTraceback (most recent call last):\n File \"\", line 1, in ?\n et.foo = 6\nAttributeError: 'EnumTest' object attribute 'foo' is read-only\n'''\n\n\nclass _EnumerationValue(int):\n '''Enumeration values are integers with a \"name\" property.'''\n __slots__ = ['name', '_enum']\n\n def __new__(cls, name, enum_name, *args, **kwargs):\n instance = int.__new__(cls, *args, **kwargs)\n int.__setattr__(instance, 'name', name)\n int.__setattr__(instance, '_enum', enum_name)\n return instance\n\n def __repr__(self):\n return '<%s: %s=%d>' % (self._enum, self.name, int(self))\n\n def __setattr__(self, name, value):\n raise AttributeError(\n \"'_EnumerationValue' object attribute '%s' is read-only\" % (\n name))\n\n\nclass Enumeration(object):\n\n class __MetaEnumeration(type):\n\n def __new__(cls, name, bases, dct):\n # Force slots to be empty so nobody messes with the\n # enumeration and instances stay lightweight.\n dct['__slots__'] = []\n\n return type.__new__(cls, name, bases, dct)\n\n def __init__(cls, name, bases, dct):\n type.__init__(cls, name, bases, dct)\n\n # Create the single instance.\n cls.instance = cls.__new__(cls, first_time=True)\n\n # Create the enumerated attributes on the type.\n cls._values = []\n for value, member_name in enumerate(cls._enum_):\n ev = _EnumerationValue(member_name, name, value)\n setattr(cls, member_name, ev)\n cls._values.append(ev)\n\n def __contains__(cls, name_or_value):\n return cls.instance.__contains__(name_or_value)\n\n __metaclass__ = __MetaEnumeration\n __slots__ = []\n\n _enum_ = []\n\n def __new__(cls, *args, **kwargs):\n if kwargs.pop('first_time', False):\n return object.__new__(cls, *args, **kwargs)\n return cls.instance\n\n def __contains__(self, name_or_value):\n if type(name_or_value) == str:\n # Test for membership in our string-based list of enum\n # identifiers.\n return name_or_value in self._enum_\n else:\n # Test for membership in our list of values.\n return name_or_value in self._values\n\n @classmethod\n def name(cls, value):\n '''Return the name associated with value. 
Raises IndexError if\n value is not a member.\n\n If your value was obtained from the enumeration (ie is not a\n plain old int), you can also use \"value.name\" to get the\n name.'''\n return cls._enum_[value]\n\n\ndef _test():\n import doctest\n doctest.testmod()\n\n class ET(Enumeration):\n _enum_ = ['a', 'b', 'c']\n\n e = Enumeration()\n et = ET()\n\n assert('a' in et)\n assert('a' in ET)\n\n try:\n e.zzz = 6\n assert(False), \"Expected AttributeError\"\n except AttributeError:\n pass\n try:\n et.zzz = 6\n assert(False), \"Expected AttributeError\"\n except AttributeError:\n pass\n try:\n et.a = 6\n assert(False), \"Expected AttributeError (r/o)\"\n except AttributeError:\n pass\n\n # Bug: could't get enum members off the class until you\n # instantiated it once.\n\n class ET2(Enumeration):\n _enum_ = ['x', 'y', 'z']\n assert(ET2.x == 0)\n assert(ET2.z == 2)\n\n\nif __name__ == \"__main__\":\n _test()\n","sub_path":"enum.py","file_name":"enum.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"29563239","text":"\"\"\"\n Automate the conversion of plotting of data.\n\n\"\"\"\nimport json\nimport sys\n\nfrom convertPostDataToh5df import convertPostDataToExcel\nfrom convertDatatoH5df import convertDataToExcel\nfrom plotting.plotAttendance import plotAttendance\nfrom plotting.plotAttendanceTime import plotAttendanceTime\nfrom plotting.plotModality import plotModality\nimport gc\n\ndef convertAndPlotData(rawFile, h5File):\n \n jsonFilename = rawFile\n jsonDataFile = open(jsonFilename, 'r')\n eventData = json.load(jsonDataFile)\n jsonDataFile.close()\n \n ### Will convert the json files to h5 files.\n # convertDataToExcel(eventData, h5File)\n plot=False\n plotAttendance(h5File, plot=plot)\n plotAttendanceTime(h5File, plot=plot)\n plotModality(h5File, plot=plot)\n \n \n \ndef processData(fileData):\n \n print(\"fileData: \", fileData)\n postRaw = fileData['postDataRawFiles']\n postH5 = fileData['postDataH5Files']\n for d in range(len(postRaw)):\n print( \"processing data: \", postRaw[d])\n convertAndPlotData(postRaw[d], postH5[d])\n gc.collect()\n \n \nif __name__ == '__main__':\n \n \"\"\"\n \n \"\"\"\n \n jsonFilename = sys.argv[1]\n jsonDataFile = open(jsonFilename, 'r')\n fileDatas = json.load(jsonDataFile)\n jsonDataFile.close()\n \n processData(fileDatas)\n","sub_path":"processData.py","file_name":"processData.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"490685105","text":"import mock\nimport socket\nimport os\nfrom buildapi.lib import mq\nfrom buildapi.model import init_buildapi_model, buildapidb\nfrom unittest import TestCase, SkipTest\nimport sqlalchemy\nfrom sqlalchemy.orm import sessionmaker\nimport json\nfrom kombu.pools import connections\nfrom kombu import Connection\nfrom kombu import Exchange\nfrom kombu import Producer\nfrom kombu import Queue\n\n\nclass Base(object):\n\n cons = None\n\n def setUp(self):\n self.consumed = []\n self.engine = sqlalchemy.create_engine(\"sqlite:///:memory:\")\n init_buildapi_model(self.engine)\n\n hostname = os.environ.get('AMQP_HOSTNAME', 'localhost')\n userid = os.environ.get('AMQP_USERID', 'buildapi-test')\n password = os.environ.get('AMQP_PASSWORD', 'buildapi-test')\n self.config = {\n 'mq.heartbeat_interval': '5',\n 'mq.kombu_url': 'amqp://%s:%s@%s//buildapi-test' % (userid, password, hostname),\n 'mq.exchange': 'buildapi-test',\n 
'mq.queue.web': 'buildapi-test-q1',\n 'mq.queue.agent': 'buildapi-test-q2',\n }\n\n # this is the first place we try to connect, so if we get a socket\n # error here, then the rabbitmq server isn't present, so skip the test.\n try:\n self._cleanup()\n except socket.error:\n raise SkipTest(\"could not reach rabbitmq server on %s\" % hostname)\n\n def tearDown(self):\n self._cleanup()\n\n def _cleanup(self):\n self.delete_queue('buildapi-test-q1')\n self.delete_queue('buildapi-test-q2')\n self.delete_exchange('buildapi-test')\n\n def _connect(self):\n return connections[Connection(self.config['mq.kombu_url'])].acquire(block=True)\n\n def delete_queue(self, queue_name):\n with self._connect() as conn:\n queue = Queue(queue_name)\n try:\n queue(conn).declare()\n except Exception:\n # probably no such queue\n return\n queue(conn).delete()\n\n def delete_exchange(self, exch_name):\n with self._connect() as conn:\n exch = Exchange(exch_name)\n try:\n exch(conn).declare()\n except Exception:\n # probably no such exchange\n return\n exch(conn).delete()\n\n def get_message(self, queue_name):\n with self._connect() as conn:\n queue = Queue(queue_name)\n msg = queue(conn).get()\n if msg:\n msg.ack()\n return msg\n\n def declare(self, thing):\n with self._connect() as conn:\n thing(conn).declare()\n\n def send_message(self, exchange_name, routing_key, message):\n with self._connect() as conn:\n pub = Producer(conn)\n pub.publish(message,\n exchange=Exchange(exchange_name),\n routing_key=routing_key)\n\n def patch_consumer_to_stop(self, cons):\n # monkey-patch receive to also stop the consumer after the message is received\n old_receive = cons.receive\n def receive(message_data, message):\n old_receive(message_data, message)\n cons.should_stop = True\n cons.receive = receive\n\n\nclass TestReliableConsumer(Base, TestCase):\n\n def test_consume_one(self):\n testcase = self\n\n class MyReliableConsumer(mq.ReliableConsumer):\n routing_key = 'requests'\n queue_name_config = 'mq.queue.web'\n\n def receive(self, body, message):\n self.got_body = body\n self.should_stop = True\n message.ack()\n\n def on_iteration(self):\n testcase.send_message('buildapi-test',\n 'requests',\n {\"request\": \"body\"})\n super(MyReliableConsumer, self).on_iteration()\n\n cons = MyReliableConsumer(self.config)\n cons.run()\n self.assertEqual(cons.got_body, {\"request\": \"body\"})\n\n def test_consume_until_idle(self):\n class MyReliableConsumer(mq.ReliableConsumer):\n routing_key = 'requests'\n queue_name_config = 'mq.queue.web'\n got_messages = 0\n\n def receive(self, body, message):\n self.got_messages += 1\n message.ack()\n\n cons = MyReliableConsumer(self.config)\n self.declare(cons.queue)\n self.send_message('buildapi-test', 'requests', 'one')\n self.send_message('buildapi-test', 'requests', 'two')\n self.send_message('buildapi-test', 'requests', 'three')\n cons.run_until_idle()\n self.assertEqual(cons.got_messages, 3)\n\n\nclass TestReliablePublisher(Base, TestCase):\n\n def test_consume_one(self):\n\n class MyReliablePublisher(mq.ReliablePublisher):\n routing_key = 'test-message'\n\n pub = MyReliablePublisher(self.config)\n\n exchange = Exchange('buildapi-test', type='topic')\n self.declare(exchange)\n queue = Queue('buildapi-test-q1', exchange=exchange, routing_key='test-message')\n self.declare(queue)\n\n pub.send(message_data={'message': 'body'})\n msg = self.get_message('buildapi-test-q1')\n self.assertEqual(msg.payload, {'message': 'body'})\n\n\nclass TestRequester(Base, TestCase):\n\n def make_publisher(self, 
messages=1):\n self.pub = mq.LoggingJobRequestPublisher(self.engine, self.config)\n self.pub._clock = lambda: 123456\n\n # set up the queue to listen for it\n exchange = Exchange('buildapi-test', type='topic')\n self.declare(Queue('buildapi-test-q1',\n exchange=exchange,\n routing_key='requests'))\n\n def assertJobRequest(self, action, what):\n body = {u'request_id': 1, u'when': 123456}\n body.update(what)\n\n self.assertEqual(self.get_message('buildapi-test-q1').payload, {\n u'action': action,\n u'body': body,\n u'who': u'me'},\n )\n reqs = [dict(r) for r in\n self.engine.execute('select * from jobrequests')]\n for req in reqs:\n req['what'] = json.loads(req['what'])\n self.assertEqual(reqs, [{\n u'id': 1,\n u'action': action,\n u'what': what,\n u'complete_data': None,\n u'completed_at': None,\n u'when': 123456,\n u'who': u'me',\n }])\n\n def test_reprioritizeRequest(self):\n self.make_publisher()\n self.assertEqual(self.pub.reprioritizeRequest(who='me', brid=10, priority=20),\n dict(status='OK', request_id=1))\n self.assertJobRequest('reprioritize', dict(priority=20, brid=10))\n\n def test_cancelRequest(self):\n self.make_publisher()\n self.assertEqual(self.pub.cancelRequest(who='me', brid=10),\n dict(status='OK', request_id=1))\n self.assertJobRequest('cancel_request', dict(brid=10))\n\n def test_cancelBuild(self):\n self.make_publisher()\n self.assertEqual(self.pub.cancelBuild(who='me', bid=10),\n dict(status='OK', request_id=1))\n self.assertJobRequest('cancel_build', dict(bid=10))\n\n def test_rebuildBuild(self):\n self.make_publisher()\n self.assertEqual(self.pub.rebuildBuild(who='me', bid=10, priority=20),\n dict(status='OK', request_id=1))\n self.assertJobRequest('rebuild_build', dict(bid=10, priority=20))\n\n def test_rebuildRequest(self):\n self.make_publisher()\n self.assertEqual(self.pub.rebuildRequest(who='me', brid=10, priority=20),\n dict(status='OK', request_id=1))\n self.assertJobRequest('rebuild_request', dict(brid=10, priority=20))\n\n def test_cancelRevision(self):\n self.make_publisher()\n self.assertEqual(self.pub.cancelRevision(who='me', branch='branch1', revision='abcd'),\n dict(status='OK', request_id=1))\n self.assertJobRequest('cancel_revision', dict(branch=u'branch1', revision='abcd'))\n\n def test_newBuildAtRevision(self):\n self.make_publisher()\n self.assertEqual(self.pub.newBuildAtRevision(who='me', branch='branch1', revision='abcd'),\n dict(status='OK', request_id=1))\n self.assertJobRequest('new_build_at_revision', dict(branch=u'branch1', revision='abcd'))\n\n def test_newPGOBuildAtRevision(self):\n self.make_publisher()\n self.assertEqual(self.pub.newPGOBuildAtRevision(who='me', branch='branch1', revision='abcd', priority=2),\n dict(status='OK', request_id=1))\n self.assertJobRequest('new_pgobuild_at_revision', dict(branch=u'branch1', revision='abcd', priority=2))\n\n def test_newNightlyAtRevision(self):\n self.make_publisher()\n self.assertEqual(self.pub.newNightlyAtRevision(who='me', branch='branch1', revision='abcd', priority=2),\n dict(status='OK', request_id=1))\n self.assertJobRequest('new_nightly_at_revision', dict(branch=u'branch1', revision='abcd', priority=2))\n\n def test_send_msg_db_error(self):\n self.make_publisher(0)\n with mock.patch.object(self.pub, 'session') as sess:\n sess.side_effect = RuntimeError\n self.assertEqual(self.pub.cancelRequest(who='me', brid=10),\n {'msg': \"Couldn't create JobRequest row\", 'status': 'FAILED'})\n\n def test_send_msg_send_error(self):\n self.make_publisher(0)\n with 
mock.patch.object(mq.JobRequestPublisher, 'send_msg') as sess:\n sess.side_effect = RuntimeError\n self.assertEqual(self.pub.cancelRequest(who='me', brid=10),\n {'msg': \"Couldn't send message to broker\", 'status': 'FAILED'})\n \n def test_done_acks(self):\n cons = mq.LoggingJobRequestDoneConsumer(self.engine, self.config)\n cons._clock = lambda: 123456\n self.patch_consumer_to_stop(cons)\n\n # cheat and declare the queue so that it receives our produced message\n self.declare(cons.queue)\n\n # add a fake job request\n r = buildapidb.JobRequest(action='act', who='me', when=123456, what='json')\n s = sessionmaker(bind=self.engine)()\n s.add(r)\n s.commit()\n\n # ack it\n msg = {'body': 'action result', 'request_id': r.id}\n self.send_message('buildapi-test', 'finished', msg)\n\n # receive the ack\n cons.run()\n\n # verify that it was acked\n reqs = [dict(r) for r in\n self.engine.execute('select * from jobrequests')]\n self.assertEqual(reqs, [{\n u'what': u'json',\n u'who': u'me',\n u'when': 123456,\n u'complete_data': u'{\"body\": \"action result\", \"request_id\": 1}',\n u'completed_at': 123456,\n u'action': u'act',\n u'id': 1},\n ])\n\n\nclass TestWorker(Base, TestCase):\n\n def make_consumer(self):\n cons = mq.JobRequestConsumer(self.config)\n self.patch_consumer_to_stop(cons)\n return cons\n \n def make_publisher(self):\n pub = mq.JobRequestDonePublisher(self.config)\n # set up the queue to listen for it\n exchange = Exchange('buildapi-test', type='topic')\n self.declare(Queue('buildapi-test-q1',\n exchange=exchange,\n routing_key='finished'))\n return pub\n\n def test_consumer_callback(self):\n cons = self.make_consumer()\n\n def cb(message_data, message):\n self.consumed = message_data\n cons.register_callback(cb)\n\n # cheat and declare the queue so that it receives our produced message\n self.declare(cons.queue)\n # send a message\n self.send_message('buildapi-test', 'requests', {'hello': 'world'})\n # receive it\n cons.run()\n # and see that the callback ran\n self.assertEqual(self.consumed, {'hello': 'world'})\n\n def test_producer_ack_msg(self):\n pub = self.make_publisher()\n msg = {'body': {'hello': 'world'}, 'request_id': 1}\n pub.ack_msg(msg)\n got_msg = self.get_message('buildapi-test-q1')\n self.assertEqual(got_msg.payload, msg)\n\nclass TestRoundTrip(Base, TestCase):\n\n def test_round_trip(self):\n req_pub = mq.LoggingJobRequestPublisher(self.engine, self.config)\n req_pub._clock = lambda: 123456\n\n req_cons = mq.JobRequestConsumer(self.config)\n self.patch_consumer_to_stop(req_cons)\n self.declare(req_cons.queue)\n\n done_pub = mq.JobRequestDonePublisher(self.config)\n\n done_cons = mq.LoggingJobRequestDoneConsumer(self.engine, self.config)\n done_cons._clock = lambda: 123999\n self.patch_consumer_to_stop(done_cons)\n self.declare(done_cons.queue)\n\n # build a fake agent\n @req_cons.register_callback\n def agent_receive_message(message_data, message):\n msg = {'body': 'result of %s' % (message_data['action'])}\n msg['request_id'] = message_data['body']['request_id']\n done_pub.ack_msg(msg)\n\n # run it!\n req_pub.cancelRequest('me', 1234)\n req_cons.run()\n done_cons.run()\n reqs = [dict(r) for r in\n self.engine.execute('select * from jobrequests')]\n self.assertEqual(reqs, [{\n u'action': u'cancel_request',\n u'what': u'{\"brid\": 1234}',\n u'who': u'me',\n u'when': 123456,\n u'complete_data': u'{\"body\": \"result of cancel_request\", \"request_id\": 1}',\n u'completed_at': 123999,\n u'id': 1},\n 
])\n","sub_path":"buildapi/tests/test_mq.py","file_name":"test_mq.py","file_ext":"py","file_size_in_byte":13635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"267028202","text":"import openpyxl\nimport pprint\n\n'''\n A program to split a string on given marker strings; here the string is split on weekday names.\n'''\n\nstring = 'Mon 11:00AM - 12:00PM, 2:00PM - 4:00PMTue 11:00AM - 12:00PM, 2:00PM - 4:00PMWed 11:00AM - 12:00PM, 2:00PM - 4:00PMThu 11:00AM - 12:00PM, 2:00PM - 4:00PMFri 11:00AM - 12:00PM, 2:00PM - 4:00PMSat 11:00AM - 12:00PM, 2:00PM - 4:00PM'\n\ndays_list = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']\n\ndays_index = {}\n\n# Find the index of each splitter string in the given string\ndef find_days_index(string):\n    for day in days_list:\n        days_index[day] = string.find(day)\n\n# after splitting the string, find the index of the next splitter so that the relevant substring can be extracted; the relevant substring lies between two splitters.\ndef index_of_next_day(string):\n    nearest_index = 99999\n    nearest_day = ''\n    for day in days_list:\n        index = string.find(day)\n        # if any splitter exists in the given string, process the string and extract the relevant data.\n        if index != -1:\n            if nearest_index > index:\n                nearest_index = index\n                nearest_day = day\n    if nearest_day != '':\n        return string.split(nearest_day)[0]\n    else:\n        return string\n\n\ndef dayswise_time_slots(string):\n    day_timing_map = {}\n    if string:\n        find_days_index(string)\n        for day in days_list:\n            if string.find(day)>-1:\n                sub_str = string.split(day)[1]\n                #print(sub_str)\n                sub_str = index_of_next_day(sub_str.strip())\n                #print(day +sub_str)\n                day_timing_map[day] = sub_str\n    return day_timing_map\n\n\ndef insert_into_excel():\n    col_timing_map = {'Mon':'K','Tue':'L','Wed':'M','Thu':'N','Fri':'O','Sat':'P','Sun':'Q'}\n    excel = openpyxl.load_workbook('C:\\\\Users\\\\gaa8664\\\\Desktop\\\\for Slots.xlsx')\n    sheet = excel.get_sheet_by_name('Sheet1')\n    timing_sheet = excel.create_sheet(title=\"timing_sheet1\")\n    timing_sheet.append(['GAA']+days_list)\n    for x in range(2,sheet.max_row):\n        val = sheet['J'+str(x)]\n        timing_map = dayswise_time_slots(val.value)\n        '''first_col = 'A'+str(x)\n        timing_sheet[first_col] = sheet[first_col].value'''\n        if timing_map:\n            for key in timing_map:\n                sheet_index = col_timing_map[key]+str(x)\n                print(sheet_index)\n                print(sheet[sheet_index].value)\n                if not sheet[sheet_index].value:\n                    sheet[sheet_index] = ' '.join([key,timing_map[key]])\n\n    excel.save('C:\\\\Users\\\\gaa8664\\\\Desktop\\\\for Slots.xlsx')\n\ninsert_into_excel()\n","sub_path":"LearningPython/utility/string_split_day_week.py","file_name":"string_split_day_week.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"501129590","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Initialising\n\n# In[69]:\n\n\nimport os\nimport requests\nimport pandas as pd\n\n### Initialise the download process\n\nthisdir = \"dissertation/nsfw data downloader\"\ndestination = \"dissertation/nsfw data downloader/image_data\"\ncompleted_url_path = \"dissertation/nsfw data downloader/completed_urls.csv\"\n\nif os.path.isdir(destination) == False:\n    os.makedirs(destination)\n\ntry:\n    df = pd.read_csv(completed_url_path)\nexcept:\n    df = pd.DataFrame(columns = ['URLs'])\n\n\n# ### Listing all url files\n\n# In[26]:\n\n\nf_list = []\nfor r, d, f in os.walk(thisdir):\n    for file in f:\n        if \".txt\" in file:\n            
f_list.append(os.path.join(r, file))\n\n\n# ### Downloading the images with classification labels as names\n\n# In[67]:\n\n\np_urls = []\nc_urls_0 = list(df['URLs'])\nf_list = list(set(f_list).difference(c_urls_0))\nc_urls = []\n\nfor i in f_list:\n    u_list = list(open(i))\n    cnt = 0\n    for j in u_list:\n        j = j.rstrip('\\n')  # str.replace returns a new string; without reassignment the newline was never removed\n        cnt += 1\n        if i.split('/')[-2] != 'nsfw data downloader':\n            file_name = i.split('/')[-2] + '_' + str(cnt)\n        if cnt % 100 == 0:\n            print('URLs processed: ',cnt)\n        try:\n            img_data = requests.get(j).content\n            with open(destination+'/'+file_name+'.jpg', 'wb') as handler:\n                handler.write(img_data)\n        except:\n            p_urls.append(j)\n        c_urls.append(j)\n\ndf = df.append(pd.DataFrame(c_urls, columns = ['URLs']))\ndf.to_csv(completed_url_path,index = False)\n\n","sub_path":"Video Censoring using Deep Learning/nsfw data downloader/NSFW Downloader Script.py","file_name":"NSFW Downloader Script.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"403227415","text":"#!venv/bin/python\nimport argparse\nimport logging\nimport asyncio\nimport motor.motor_asyncio\nfrom aiohttp import web\n\nfrom config import config, logger_configure\nimport message_queue.delivery_handlers\nfrom message_queue.connect import QueueListener\nfrom notification import handlers as nh, processing as np\nfrom currency.daemon import CurrencyUpdateDaemon\n\n__author__ = 'Kostel Serhii'\n\n\n_log = logging.getLogger('xop.main')\n\n\nasync def shutdown(app):\n    \"\"\"\n    Close connections, stop daemons and all processes.\n    :param app: web server app\n    \"\"\"\n    _log.info('Stopping XOPay Notify Service...')\n\n    queue_connect = app.get('queue_connect')\n    if queue_connect:\n        await queue_connect.close()\n\n    currency_daemon = app.get('currency_daemon')\n    if currency_daemon:\n        currency_daemon.stop()\n\n    _log.info('Shutdown tasks')\n    tasks = asyncio.Task.all_tasks()\n    if tasks:\n        for task in tasks:\n            task.cancel()\n        try:\n            await asyncio.wait(tasks)\n        except Exception:\n            pass\n\n    _log.info('XOPay Notify Service Stopped!')\n\n\ndef register_handlers(app):\n    \"\"\"\n    Register server handlers with urls.\n    :param app: web server app\n    \"\"\"\n    url_prefix = '/api/notify/{API_VERSION}'.format(**config)\n\n    app.router.add_route('GET', url_prefix + '/notifications', nh.notifications_list)\n    app.router.add_route('POST', url_prefix + '/notifications', nh.notification_create)\n    app.router.add_route('GET', url_prefix + '/notifications/{notify_id}', nh.notification_detail)\n    app.router.add_route('PUT', url_prefix + '/notifications/{notify_id}', nh.notification_update)\n    app.router.add_route('DELETE', url_prefix + '/notifications/{notify_id}', nh.notification_delete)\n\n\ndef create_app(loop=None):\n    \"\"\"\n    Create server application and all necessary services.\n    :param loop: async main loop\n    \"\"\"\n\n    app = web.Application(loop=loop)\n    app['config'] = config\n\n    register_handlers(app)\n\n    motor_client = motor.motor_asyncio.AsyncIOMotorClient()\n    db = motor_client[config['DB_NAME']]\n    app['db'] = db\n\n    notify_processor = np.NotifyProcessing(\n        db=db,\n        admin_base_url=config['ADMIN_BASE_URL']\n    )\n    notify_processor.start()\n    app['notify_processor'] = notify_processor\n\n    queue_connect = QueueListener(\n        queue_handlers=[\n            (config['QUEUE_TRANS_STATUS'], message_queue.delivery_handlers.transaction_queue_handler),\n            (config['QUEUE_EMAIL'], message_queue.delivery_handlers.email_queue_handler),\n            (config['QUEUE_SMS'], 
message_queue.delivery_handlers.sms_queue_handler),\n (config['QUEUE_REQUEST'], notify_processor.request_queue_handler),\n ],\n connect_parameters=config\n )\n queue_connect.start()\n app['queue_connect'] = queue_connect\n\n currency_daemon = CurrencyUpdateDaemon(\n admin_base_url=config['ADMIN_BASE_URL'],\n update_hours=config['CURRENCY_UPDATE_HOURS'],\n timezone=config['CURRENCY_TIMEZONE']\n )\n currency_daemon.start()\n app['currency_daemon'] = currency_daemon\n\n return app\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='XOPay Notify Service.', allow_abbrev=False)\n parser.add_argument('--config', default='debug', help='load config: [debug, production] (default \"debug\")')\n\n args = parser.parse_args()\n config.load_config(args.config)\n\n logger_configure(config)\n\n web_app = create_app()\n web_app.on_shutdown.append(shutdown)\n\n _log.info('Starting XOPay Notify Service...')\n if config['DEBUG']:\n _log.warning('Debug mode is active!')\n\n web.run_app(web_app, host='127.0.0.1', port=config['PORT'])\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"145272187","text":"\nimport com.ihsan.foundation.pobjecthelper as phelper\nimport time, sys, os\n\ndef Simpan(config, parameter, returnpacket):\n status = returnpacket.CreateValues(['Err',''])\n recParam = parameter.uipart.GetRecord(0)\n config.BeginTransaction()\n helper = phelper.PObjectHelper(config)\n param = {'tipe':'NonReg'}\n try:\n addBio = helper.CreatePObject('Customer')\n addBio.CustomerName = recParam.Nama\n addBio.AddressStreet = recParam.Jalan\n addBio.AddressKecamatan = recParam.Kecamatan\n addBio.AddressCity = recParam.Kota\n addBio.IdPropinsi = recParam.GetFieldByName('LPropinsi.IdPropinsi')\n addMustahiq = helper.CreatePObject('MustahiqPersonal')\n addMustahiq.CustomerId = addBio.CustomerId\n \n tambah = helper.CreatePObject('MustahiqProject', param)\n tambah.MustahiqId=addMustahiq.MustahiqId\n tambah.ProductId=recParam.GetFieldByName('LProduct.ProductId')\n\n config.Commit()\n except:\n config.Rollback()\n status.Err = str (sys.exc_info()[1])\n\n","sub_path":"dialogs/NonReguler/fAddDataMustahiq_data.py","file_name":"fAddDataMustahiq_data.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"549430138","text":"# coding: utf-8\nimport re\n\ndef cipher(string):\n encryptioned = ''\n\n for i,w in enumerate(string):\n if re.compile(r'^[a-z]+$').match(w):\n encryptioned = encryptioned + str(219 - ord(w))\n else:\n encryptioned = encryptioned + w\n\n return encryptioned\n\ndef decode(string):\n decoded_string = ''\n\n i = 0\n while i < len(string):\n if str.isdigit(string[i]):\n num = 0\n for j in range(3):\n num = int(string[i:i+j+1])\n if num > 96:\n i += j\n break\n decoded_string = decoded_string + chr(219-num)\n i += 1\n else:\n decoded_string = decoded_string + string[i]\n i += 1\n\n return decoded_string\n\nif __name__ == '__main__':\n string = 'ABCanz'\n\n encoded = cipher(string)\n decoded = decode(encoded)\n\n print(string)\n print(encoded)\n print(decoded)\n","sub_path":"source/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"536135568","text":"\nfout = open(\"viticulosa.py\", \"w\")\nfin = open(\"main.py\")\nfor linea in 
fin:\n    if linea.startswith('#include'):\n        args = linea.split()\n        variable = args[1]\n        archivo = args[2]\n        include = open(archivo)\n        archivo_texto = include.read()\n        include.close()\n        fout.write('{0} = \"\"\"{1}\"\"\"\\n'.format(variable, archivo_texto))\n    else:\n        fout.write(linea)\nfin.close()\nfout.close()","sub_path":"light/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"638207450","text":"# Silence output of tensorflow/keras about GPU status\nimport os\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\nimport tensorflow as tf\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n    tf.config.experimental.set_memory_growth(gpu, True)\n\n\n# Keras imports\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D\nfrom keras.optimizers import Adam\nfrom keras import metrics\nfrom keras.constraints import max_norm\n\n\ndef get_model(tp):\n    model = Sequential()\n    model.add(\n        Conv2D(\n            tp[\"filter_1\"],\n            (3, 3),\n            padding=\"valid\",\n            activation=\"relu\",\n            input_shape=(32, 32, 1),\n        )\n    )\n    model.add(MaxPooling2D((2, 2)))\n    model.add(\n        Conv2D(\n            tp[\"filter_2\"],\n            (3, 3),\n            padding=\"valid\",\n            activation=\"relu\",\n            kernel_constraint=max_norm(3),\n            bias_constraint=max_norm(3),\n        )\n    )\n    model.add(MaxPooling2D((2, 2)))\n    model.add(\n        Conv2D(\n            tp[\"filter_3\"],\n            (3, 3),\n            padding=\"valid\",\n            activation=\"relu\",\n            kernel_constraint=max_norm(3),\n            bias_constraint=max_norm(3),\n        )\n    )\n    model.add(MaxPooling2D((2, 2)))\n\n    model.add(Flatten())\n    model.add(Dropout(tp[\"dropout_0\"]))\n    model.add(\n        Dense(\n            tp[\"dense_units_1\"],\n            activation=\"relu\",\n            kernel_constraint=max_norm(3),\n            bias_constraint=max_norm(3),\n        )\n    )\n    model.add(Dropout(tp[\"dropout_1\"]))\n    model.add(\n        Dense(\n            tp[\"dense_units_2\"],\n            activation=\"relu\",\n            kernel_constraint=max_norm(3),\n            bias_constraint=max_norm(3),\n        )\n    )\n    model.add(Dropout(tp[\"dropout_2\"]))\n    model.add(\n        Dense(\n            tp[\"dense_units_3\"],\n            activation=\"relu\",\n            kernel_constraint=max_norm(3),\n            bias_constraint=max_norm(3),\n        )\n    )\n    model.add(Dense(1, activation=\"sigmoid\"))\n\n    model.compile(\n        loss=\"binary_crossentropy\",\n        optimizer=Adam(lr=tp[\"learning_rate\"]),\n        metrics=[\"accuracy\", metrics.AUC(name=\"auc\")],\n    )\n    \n    return model\n","sub_path":"CNN_training/.ipynb_checkpoints/get_model-checkpoint.py","file_name":"get_model-checkpoint.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"261612644","text":"# coding = utf-8\n\"\"\"\n@Author: Idiot\n@Time: 2019/7/22 20:38\n@Software: PyCharm\n@File: lesson40.py\n@Version: V1.0\n@Desc: the producer-consumer problem with multiple threads\n\"\"\"\n\"\"\"\nUsing queue (the queue module)\nimport queue\nq = queue.Queue() #create a queue\nq.put(1) #add an element to the queue\nq.put(2) #add an element to the queue\nq.put(3) #add an element to the queue\nq.get() #read the first element of the queue\n1\nq.get() #read the first element of the queue\n2\nq.get() #read the first element of the queue\n3\n\"\"\"\nfrom threading import Thread, current_thread\nimport time\nimport random\nfrom queue import Queue\n\n# define the length of the queue\nqueue = Queue(5)\n\n\n# producer class: after a random amount of time, it adds a number to the queue\nclass ProducerThread(Thread):\n    def run(self):\n        name = current_thread().getName()\n        nums = range(100)\n        global queue\n        while True:  # infinite loop\n            # pick a number at random\n            num = random.choice(nums)\n            queue.put(num)\n            print('producer %s produced data %s' % (name, num))\n            t = random.randint(1, 3)\n            time.sleep(t)\n            print('producer %s slept for %s seconds' % (name, t))\n\n\n
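# --- Editor's annotation (illustrative sketch, not part of the original record) ---
# The consumer below calls queue.task_done() but nothing ever calls queue.join().
# task_done() pairs with join(): join() blocks until every put() item has been
# marked done. A minimal sketch of that handshake (demo_q/demo_worker are invented):
from queue import Queue
from threading import Thread

demo_q = Queue(5)                 # put() blocks once 5 items are pending

def demo_worker():
    while True:
        item = demo_q.get()
        # ... process item ...
        demo_q.task_done()        # one task_done() per completed get()

Thread(target=demo_worker, daemon=True).start()
for n in range(10):
    demo_q.put(n)
demo_q.join()                     # returns once all 10 items are processed
# --- End of annotation ---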
# during puts and gets the threads do not wait for each other to finish; they proceed in parallel\nclass ConsumerThread(Thread):\n    def run(self):\n        # get the name of the consumer thread\n        name = current_thread().getName()\n        global queue\n        while True:\n            num = queue.get()\n            queue.task_done()\n            print('consumer %s consumed data %s' % (name, num))\n            t = random.randint(1, 5)\n            time.sleep(t)\n            print('consumer %s slept for %s seconds' % (name, t))\n\n\n# next, start different numbers of producers and consumers\np1 = ProducerThread(name='p1')\np1.start() # start the thread\nc1 = ConsumerThread(name='c1')\nc1.start()\nc2 = ConsumerThread(name='c2')\nc2.start()\n\n\"\"\"\nSample run:\nproducer p1 produced data 90\nconsumer c1 consumed data 90\nconsumer c1 slept for 1 seconds\nproducer p1 slept for 2 seconds\nproducer p1 produced data 88\nconsumer c2 consumed data 88\nproducer p1 slept for 3 seconds\nproducer p1 produced data 26\nconsumer c1 consumed data 26\nproducer p1 slept for 1 secondsconsumer c1 slept for 1 seconds\nFrom the output above we can see that\nwhen there are more consumers than producers, the consumers wait for the producers.\n\"\"\"","sub_path":"Section7/lesson40/lesson40.py","file_name":"lesson40.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"533857905","text":"# %load q01_load_data/build.py\n# Default imports\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\npath = './data/house_prices_multivariate.csv'\n\n# Write your solution here\ndef load_data(path,test_size=0.33,randomState=9):\n    df = pd.read_csv(path)\n#     X = df.iloc[:,:-1] \n    y_data = df.pop('SalePrice')\n    X_train,X_test,y_train,y_test = train_test_split(df,y_data,test_size = test_size,random_state = randomState)\n    return df,X_train,X_test,y_train,y_test\n\ndata,X_train,X_test,y_train,y_test = load_data(path,0.33,9)\n# print(data,X_test,y_train,y_test)\n\n\n","sub_path":"q01_load_data/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"137130772","text":"import pandas as pd\nimport numpy as np\nimport psycopg2\nimport time\nimport pdfkit\nimport webbrowser\nimport base64\nimport collections\nimport unicodedata\nimport plotly\nimport plotly.graph_objects as go\nfrom datetime import datetime as dt\nfrom datetime import timedelta as td\n\n# Contains the credentials for the postgresql connection\n#coneccion = psycopg2.connect(user=\"postgres\",password=\"ferrari1\",host=\"localhost\",port=\"5432\",database=\"shm_puentes\")\n\nconeccion = psycopg2.connect(user=\"clandero\",password=\"219UNIx2\",host=\"152.74.52.187\",port=\"5432\",database=\"prototipo_shm\")\nesquema = ['public.','inventario_puentes.','llacolen.']\n\n#Function that builds the dataframe used by the OHLC chart; the value in the avg column is also used for the histogram\n#The function needs a start date, from which the date ranges are computed\n#The frequency, which is the interval used to generate the date range (12s, 288s, 2016s and 4032s)\n#And finally the sensor name\ndef datos_ace(fecha_inicio,freq,sensor):\n    if ((str(fecha_inicio).split(sep=' ')[0]== '2008-01-01') or (str(fecha_inicio).split(sep=' ')[0]== '2008-04-01')) and (str(fecha_inicio).split(sep=' ')[1] == '00:00:00'):\n        new_fecha = pd.read_sql_query(\"SELECT fecha FROM \"+str(esquema[2])+str(sensor)+\" ORDER BY fecha ASC LIMIT 1\",coneccion)['fecha'][0]\n        new_hora = str(new_fecha).split(sep=' ')[1]\n        fecha_inicio = dt.strptime(str(str(fecha_inicio).split(sep=' ')[0]+' '+new_hora),'%Y-%m-%d %H:%M:%S')\n    periodo = 301 #Number of dates to generate; it is 301 because pairs of dates are needed to compute the values\n    avg_,min_,max_,open_,close_ = [],[],[],[],[]\n    rango_horas = list(pd.date_range(fecha_inicio, 
periods=periodo, freq=freq).strftime('%Y-%m-%d %H:%M:%S'))\n    for i in range(len(rango_horas)-1):\n        #database queries\n        #query1 returns the avg, min and max of a time range\n        query1 = (\"SELECT avg(lectura) as \"+str(sensor)+\", min(lectura) as min, max(lectura) as max \"\n                 \"FROM \"+str(esquema[2])+str(sensor)+\" \"\n                 \"Where fecha between '\"+str(rango_horas[i])+\"' and '\"+str(rango_horas[i+1])+\"' ;\")\n        #query2 returns the first database element at the start of the time range and the last element at its end\n        query2 = (\"(SELECT lectura as open \"\n                 \"FROM \"+str(esquema[2])+str(sensor)+\" \"\n                 \"Where fecha ='\"+str(rango_horas[i])+\"' \"\n                 \"Order BY fecha ASC LIMIT 1)\"\n                 \"UNION ALL\"\n                 \"(SELECT lectura as close \"\n                 \"FROM \"+str(esquema[2])+str(sensor)+\" \"\n                 \"Where fecha ='\"+str(rango_horas[i+1])+\"' \"\n                 \"Order BY fecha DESC LIMIT 1)\")\n        tmp = pd.read_sql_query(query1,coneccion)\n        tmp1 = pd.read_sql_query(query2,coneccion)\n        #checks: if for some reason no value exists at the requested date, it is replaced by 0\n        if (tmp.empty):\n            avg_.append(0)\n            min_.append(0)\n            max_.append(0)\n        elif (tmp1.empty):\n            open_.append(0)\n            close_.append(0)\n        elif(len(tmp1['open']) < 2):\n            close_.append(0)\n            open_.append(tmp1['open'][0])\n        else:\n            avg_.append(tmp[sensor][0])\n            min_.append(tmp['min'][0])\n            max_.append(tmp['max'][0])\n            open_.append(tmp1['open'][0])\n            close_.append(tmp1['open'][1]) \n    rango_horas.pop(0) # Drop the leftover date so that only 300 dates remain, each marking the start of a range\n    #Build the dataframe with all the extracted values\n    new_df = pd.DataFrame(list(zip(list(rango_horas), avg_,min_,max_,open_,close_)),columns =['fecha', sensor,'min','max','open','close'])\n    return new_df\n\n# Function that computes the date of the last detected peak\ndef obtener_fecha_alerta(df,peaks_inf,peaks_sup,peaks_ini,peaks_fin):\n    list_df = df['fecha'].tolist()\n    tmp = []\n    for i in peaks_inf:\n        tmp.append(list_df[i])\n    for j in peaks_sup:\n        tmp.append(list_df[j])\n    for k in peaks_ini:\n        tmp.append(list_df[k])\n    for l in peaks_fin:\n        tmp.append(list_df[l])\n    tmp.sort(reverse = True)\n    if len(tmp) == 0:\n        return 'N/A'\n    else:\n        return str(tmp[0])\n\n# Function that finds all the peaks that lie beyond a control line, whether lower or upper\ndef peak_(df,linea_control):\n    lista = df.tolist()\n    peaks = []\n    for i in range(len(lista)):\n        if(float(lista[i]) >= float(linea_control)):\n            peaks.append(i)\n    return peaks\n\n
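# --- Editor's annotation (illustrative sketch, not part of the original record) ---
# peak_() above scans the Series in pure Python; a vectorized equivalent with
# numpy returns the same indices for numeric data (peak_np is a name invented here):
import numpy as np
import pandas as pd

def peak_np(serie: pd.Series, umbral: float) -> list:
    # indices of all entries at or above the threshold
    return np.flatnonzero(serie.to_numpy(dtype=float) >= float(umbral)).tolist()
# --- End of annotation ---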
def lineas_control(tipo,df,linea_control_inf,linea_control_sup):\n    \n    if tipo == 'inf':\n        trace_linea_inf = []\n        y = []\n        #Loop that adds a straight line at the entered value\n        for i in range(len(df[\"fecha\"])):\n            y.append(float(linea_control_inf))\n        trace_linea_inf.append(go.Scattergl(x=df[\"fecha\"], y=y, mode='lines',line=dict(color='purple'),name='Línea Inferior',showlegend=False))\n        # Peak computation\n        if(float(linea_control_inf) < 0):\n            \n            peak = []\n            columnas = ['min','max','open','close']\n            # the peaks are computed for each data type contained in the OHLC chart\n            peaks_inf = peak_(-(df['min']),-(linea_control_inf))\n            peaks_sup = peak_(-(df['max']),-(linea_control_inf))\n            peaks_ini = peak_(-(df['open']),-(linea_control_inf))\n            peaks_fin = peak_(-(df['close']),-(linea_control_inf))\n\n            peak.append(peaks_inf)\n            peak.append(peaks_sup)\n            peak.append(peaks_ini)\n            peak.append(peaks_fin) \n\n            alert_inf = len(peaks_inf) + len(peaks_sup) + len(peaks_ini) + len(peaks_fin)\n            alert_inf = str(alert_inf) + \" peaks\"\n            #Gets the date of the last detected peak\n            fecha_peak_inf = obtener_fecha_alerta(df,peaks_inf,peaks_sup,peaks_ini,peaks_fin)\n            \n            #Mark the detected peaks on the OHLC chart\n            for peak,col in zip(peak,columnas):\n                trace_linea_inf.append(go.Scatter(\n                    x=[df[\"fecha\"][j]for j in list(peak)],\n                    y=[df[col][j]for j in list(peak)],\n                    mode='markers',\n                    name= 'Peak',\n                    marker=dict(\n                        size=8,\n                        color='red',\n                        symbol='cross'\n                    ),\n                    showlegend=False\n                ))\n        return trace_linea_inf,alert_inf,fecha_peak_inf\n\n    elif tipo == 'sup':\n        trace_linea_sup = []\n        y = []\n        #Loop that adds a straight line at the entered value\n        for i in range(len(df[\"fecha\"])):\n            y.append(float(linea_control_sup))\n        trace_linea_sup.append(go.Scattergl(x=df[\"fecha\"], y=y, mode='lines',line=dict(color='purple'),name='Línea Superior',showlegend=False))\n        # Peak computation\n        if(float(linea_control_sup) > 0):\n            \n            peak = []\n            columnas = ['min','max','open','close']\n            # the peaks are computed for each data type contained in the OHLC chart\n            peaks_inf = peak_(df['min'],linea_control_sup)\n            peaks_sup = peak_(df['max'],linea_control_sup)\n            peaks_ini = peak_(df['open'],linea_control_sup)\n            peaks_fin = peak_(df['close'],linea_control_sup)\n\n            peak.append(peaks_inf)\n            peak.append(peaks_sup)\n            peak.append(peaks_ini)\n            peak.append(peaks_fin) \n\n            alert_sup = len(peaks_inf) + len(peaks_sup) + len(peaks_ini) + len(peaks_fin)\n            alert_sup = str(alert_sup) + \" peaks\"\n            #Gets the date of the last detected peak\n            fecha_peak_sup = obtener_fecha_alerta(df,peaks_inf,peaks_sup,peaks_ini,peaks_fin)\n            \n            #Mark the detected peaks on the OHLC chart\n            for peak,col in zip(peak,columnas):\n                trace_linea_sup.append(go.Scatter(\n                    x=[df[\"fecha\"][j]for j in list(peak)],\n                    y=[df[col][j]for j in list(peak)],\n                    mode='markers',\n                    name= 'Peak',\n                    marker=dict(\n                        size=8,\n                        color='red',\n                        symbol='cross'\n                    ),\n                    showlegend=False\n                ))\n        return trace_linea_sup,alert_sup,fecha_peak_sup\n\n\n#Function that generates the data ranges for the circular histogram\ndef rangos (tmp1):\n\tvalores = list(tmp1.keys())\n\tf,inicial,final = [],[],[]\n\tfor i in range(8):\n\t\tif (i < 4):\n\t\t\tf.append(min(valores)+((-1)*(min(valores)/4)*i))\n\t\telse:\n\t\t\tf.append(max(valores)-((max(valores)/4)*(i-4)))\n\tf = sorted(f)\n\tfor i in range(7):\n\t\tif((i+1)%2==0):\n\t\t\tinicial.append(f[i])\n\t\t\tfinal.append(f[i+1])\n\t\telse:\n\t\t\tinicial.append(f[i])\n\t\t\tfinal.append(f[i+1])\n\treturn inicial,final\n\n#function that assigns the data in each range for the circular histogram\ndef datos_por_rango(df,inicial,final):\n\trr,tt,v,c = [],[],[],[]\n\tcount,value,media = 0,0,0\n\tfor ini,end in zip(inicial,final):\n\t\tfor i, row in df.iterrows():\n\t\t\tif (row['dir_viento'] >= float(ini) and row['dir_viento']<=float(end)):\n\t\t\t\tvalue = value + row['vel_viento']\n\t\t\t\tcount = count + 1\n\t\tv.append(value)\n\t\tc.append(count)\n\t\tvalue = 0\n\t\tcount = 0\n\tfor i in range(0,len(c)):\n\t\tif(c[i] != 0):\n\t\t\tmedia = v[i]/c[i]\n\t\trr.append(media)\n\t\tmedia = 0\n\tfor ini,end in zip(inicial,final):\n\t\tmedia = (ini + end) / 2\n\t\ttt.append(media)\n\treturn rr,tt\n\n
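# --- Editor's annotation (illustrative sketch, not part of the original record) ---
# datos_por_rango() above averages wind speed per direction sector with nested
# loops; pandas can express the same idea with cut() + groupby(). This sketch
# assumes ascending, non-overlapping sector edges (medias_por_sector is invented):
import pandas as pd

def medias_por_sector(df: pd.DataFrame, bordes: list) -> pd.Series:
    sectores = pd.cut(df["dir_viento"], bins=bordes)          # bin each direction
    return df.groupby(sectores, observed=False)["vel_viento"].mean()
# --- End of annotation ---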
#Depending on the available sensors, show the option to switch the visualization type between 1-sensor and varios-sensores\ndef cantidad_sensores_visualizar(tipo_sensor):\n    if tipo_sensor == 'acelerometro':\n        if len(nombres_ace().keys()) > 1:\n            cantidad_sensores = {\"1 Sensor\":\"1-sensor\",\"Varios Sensores\":\"varios-sensores\"}\n        else:\n            cantidad_sensores = {\"1 Sensor\":\"1-sensor\"}\n    else:\n        cantidad_sensores = {\"1 Sensor\":\"1-sensor\"}\n    return dict(cantidad_sensores)\n\n# Function that, depending on how many days are available in the database, returns the options selectable in the RadioItem\ndef ventana_tiempo(tipo):\n    if tipo == 0:\n        ventana_tiempo = {\"1 Hora\":\"12S\"}\n    elif tipo == 1:\n        ventana_tiempo = {\"1 Hora\":\"12S\",\"1 Día\":\"288S\"}\n    elif tipo == 2:\n        ventana_tiempo = {\"1 Hora\":\"12S\",\"1 Día\":\"288S\",\"7 Días\":\"2016S\"}\n    elif tipo == 3:\n        ventana_tiempo = {\"1 Hora\":\"12S\",\"1 Día\":\"288S\",\"7 Días\":\"2016S\",\"14 Días\":\"4032S\"}\n    return dict(ventana_tiempo)\n\n# Function used to generate the available-hour options shown in the RangeSlider\ndef crear_hora(hora):\n    hora_new = '00:00:00'\n    if hora == 24:\n        hora_new = '00:00:00'\n    else:\n        for i in range(24):\n            if hora == i and hora < 10:\n                hora_new = '0'+str(i)+':00:00'\n            elif hora == i and hora > 9:\n                hora_new = str(i)+':00:00'\n    return hora_new\n\n# Function that returns the names of the accelerometers available for selection in the Dropdown\ndef nombres_ace():\n    df = pd.read_sql_query(\"SELECT DISTINCT nombre_tabla FROM \"+str(esquema[1])+\"sensores_instalados WHERE nombre_tabla like '%.acelerometro%';\",coneccion)\n    nombres = df['nombre_tabla'].tolist()\n    nombres = sorted(nombres, key=lambda x: int(\"\".join([i for i in x if i.isdigit()])))\n    nombres_sensores = {}\n    for nom in nombres:\n        nombres_sensores[nom.split('.')[1]] = str(nom.split('.')[1])\n    return dict(nombres_sensores)\n\n# Function that returns the names of the weather stations available for selection in the Dropdown\ndef nombres_ws():\n    nombres_ws = {\"weather-station_1\": \"Weather Station 1\"}\n    return dict(nombres_ws)\n\n# Function that returns the names of the strain gauges available for selection in the Dropdown\ndef nombres_sg():\n    nombres_sg = {\"strain-gauge_1\": \"Strain Gauge 1\"}\n    return dict(nombres_sg)\n\n# Function that returns the names of the inclinometers available for selection in the Dropdown\ndef nombres_in():\n    nombres_in = {\"inclinometro_1\": \"Inclinómetro 1\"}\n    return dict(nombres_in)\n\n# Function that returns the names of the LVDTs available for selection in the Dropdown\ndef nombres_lvdt():\n    nombres_lvdt = {\"lvdt_1\": \"LVDT 1\"}\n    return dict(nombres_lvdt)\n\n#Function that removes accents\ndef elimina_tildes(cadena):\n    sin = ''.join((c for c in unicodedata.normalize('NFD',cadena) if unicodedata.category(c) != 'Mn'))\n    return sin\n\n# Function that returns the sensor-type names available for selection in the Dropdown\ndef tipos_sensores():\n    df = pd.read_sql_query(\"SELECT DISTINCT nombre FROM \"+str(esquema[1])+\"tipos_de_sensor;\",coneccion)\n    tipos = df['nombre'].tolist()\n    tipos.sort(reverse=False)\n    tipos_sensores = {}\n    #for tipo in tipos:\n    #    tipos_sensores[str(elimina_tildes(tipo)).lower().replace(' ', '-') ] = str(tipo)\n    tipos_sensores[str(elimina_tildes(tipos[0])).lower().replace(' ', '-') ] = str(tipos[0])\n    return dict(tipos_sensores)\n\n
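# --- Editor's annotation (illustrative sketch, not part of the original record) ---
# elimina_tildes() (used by tipos_sensores above) strips accents by
# NFD-decomposing the text and dropping combining marks (category 'Mn').
# A quick check of that behavior:
import unicodedata
assert ''.join(c for c in unicodedata.normalize('NFD', 'Inclinómetro')
               if unicodedata.category(c) != 'Mn') == 'Inclinometro'
# --- End of annotation ---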
# Function that returns the earliest date that exists in the database\ndef fecha_inicial(tipo_sensor):\n    if tipo_sensor == 'acelerometro':\n        return pd.read_sql_query(\"SELECT fecha FROM \"+str(esquema[2])+\"acelerometro_5493257 ORDER BY fecha ASC LIMIT 1 \",coneccion)['fecha'][0]\n    elif tipo_sensor == 'weather-station':\n        return pd.read_sql_query(\"SELECT fecha FROM \"+str(esquema[0])+\"temperatura ORDER BY id_lectura ASC LIMIT 1 \",coneccion)['fecha'][0]\n\n# Function that returns the latest date that exists in the database\ndef fecha_final(tipo_sensor):\n    if tipo_sensor == 'acelerometro':\n        fecha = pd.read_sql_query(\"SELECT fecha FROM \"+str(esquema[2])+\"acelerometro_5493257 ORDER BY fecha DESC LIMIT 1 \",coneccion)['fecha'][0]\n        if fecha == None:\n            fecha = dt (2008,1,16,23,18,28)\n        return fecha\n    elif tipo_sensor == 'weather-station':\n        fecha = pd.read_sql_query(\"SELECT fecha FROM \"+str(esquema[0])+\"temperatura ORDER BY id_lectura DESC LIMIT 1 \",coneccion)['fecha'][0]\n        if fecha == None:\n            fecha = dt (2008,4,1,0,38,36)\n        return fecha\n\n#Function that counts the number of days between 2 dates\ndef dias_entre_fechas(fecha_ini,fecha_fin):\n    return abs(fecha_fin - fecha_ini).days\n\n
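# --- Editor's annotation (illustrative sketch, not part of the original record) ---
# dias_entre_fechas() relies on timedelta.days of the absolute difference,
# which truncates partial days; two quick checks of that convention:
from datetime import datetime as dt
assert abs(dt(2008, 1, 15) - dt(2008, 1, 1)).days == 14
assert abs(dt(2008, 1, 2, 23) - dt(2008, 1, 1)).days == 1   # 1 day 23 h -> 1
# --- End of annotation ---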
\"+str(esquema[2])+str(sensor)+\" \"\n \"Where fecha between '\"+str(rango_horas[i])+\"' and '\"+str(rango_horas[i+1])+\"' ;\")\n df = pd.read_sql_query(query1,coneccion)[sensor]\n for j in range(1):\n tmp.append(df[j])\n rango_horas.pop(0)\n ultimo = rango_horas[-1]\n new_df = pd.DataFrame(list(zip(list(rango_horas), tmp)),columns =['fecha', str(ultimo)])\n return new_df,ultimo\n\n#Funcion que retorna dependiendo de la frecuencia seleccionada su equivalente para incluirlo en la descripcion del grafico boxplot\ndef titulo_box(freq):\n if freq == '12S':\n freq = '5 min'\n elif freq == '288S':\n freq = '2 horas'\n elif freq == '2016S':\n freq = '12 horas'\n elif freq == '4032S':\n freq = '24 horas'\n return freq\n\n#Funcion que retorna dependiendo de la frecuencia seleccionada su equivalente para incluirlo en la descripcion del grafico OHLC\ndef titulo_OHLC(freq):\n if freq == '12S':\n freq = '1 hora'\n elif freq == '288S':\n freq = '1 dia'\n elif freq == '2016S':\n freq = '7 dias'\n elif freq == '4032S':\n freq = '14 dias'\n return freq\n\n#Funcion que retorna dependiendo de la frecuencia seleccionada su equivalente para incluirlo en la descripcion del grafico histograma\ndef titulo_freq_datos(freq):\n if freq == '12S':\n freq = '12 seg'\n elif freq == '288S':\n freq = '4 min y 48 seg'\n elif freq == '2016S':\n freq = '33 min y 36 seg'\n elif freq == '4032S':\n freq = '1 hr, 7 min y 12 seg'\n return freq\n\n#Funcion que retorna el rango de fecha para incluirlo en el titulo de los graficos\ndef fecha_titulo(fecha_inicial,freq):\n if freq == '12S':\n sum_fecha = td(hours=1)\n elif freq == '288S':\n sum_fecha = td(days=1)\n elif freq == '2016S':\n sum_fecha = td(days=7)\n elif freq == '4032S':\n sum_fecha = td(days=14)\n fecha_final = str(fecha_inicial + sum_fecha)\n fecha_inicial = str(fecha_inicial)\n return fecha_inicial,fecha_final\n\n#Funcion para generar los reportes, se tiene una plantilla en html, la cual se transforma en pdf\ndef generar_reportes(fig_principal,fig_sec1,fig_sec2,valor_promedio,valor_max,valor_min,fecha_valor_max,fecha_valor_min,num_valor_max,num_valor_min,alert_sup,alert_inf,fecha_alert_sup,fecha_alert_inf,sensor,sensor_multi,fecha,ventana_tiempo,valor_linea_control_sup,valor_linea_control_inf,hora,cantidad_sensores):\n \n #Transforma las figuras (graficos generados) en uri, para poder ser visualizados en html \n #Escritorio\n def fig_to_uri(fig):\n return base64.b64encode(fig.to_image(format=\"png\")).decode('utf-8')\n\n #Transforma el logo en uri, para poder ser visualizados en html\n with open(\"./assets/SHM-logo2.bmp\", \"rb\") as imageFile:\n logo = base64.b64encode(imageFile.read()).decode('utf-8')\n \n # se guardan los garficos en formato uri en una lista\n #escritorio\n graficos = [fig_to_uri(fig_principal),fig_to_uri(fig_sec1),fig_to_uri(fig_sec2)]\n\n #heroku\n #graficos = [fig_principal.to_html(config={\"displayModeBar\": False}),fig_sec1.to_html(config={\"displayModeBar\": False}),fig_sec2.to_html(config={\"displayModeBar\": False})]\n\n # si es mas de 1 sensor en la visualizacion, se guardan los nombres en un string\n sensores_multi = ''\n if cantidad_sensores != '1-sensor':\n for sen in sensor_multi: \n sensores_multi += str(sen) + ','\n\n meses = ['','Enero','Febrero','Marzo','Abril','Mayo','Junio','Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n fecha_datos = str(fecha).split(sep='T')[0]\n fecha_max = str(fecha_valor_max).split(sep=' ')[0]\n fecha_min = str(fecha_valor_min).split(sep=' ')[0]\n dia_datos = 
str(fecha_datos).split(sep='-')[2]\n dia_max = str(fecha_max).split(sep='-')[2]\n dia_min = str(fecha_min).split(sep='-')[2]\n mes_datos = str(meses[int(str(fecha_datos).split(sep='-')[1])])\n mes_max = str(meses[int(str(fecha_max).split(sep='-')[1])])\n mes_min = str(meses[int(str(fecha_min).split(sep='-')[1])])\n ano_datos = str(fecha_datos).split(sep='-')[0]\n ano_max = str(fecha_max).split(sep='-')[0]\n ano_min = str(fecha_min).split(sep='-')[0]\n\n if valor_linea_control_sup != None and valor_linea_control_inf != None:\n fecha_inf = str(fecha_alert_inf).split(sep=' ')[0]\n fecha_sup = str(fecha_alert_sup).split(sep=' ')[0]\n dia_inf = str(fecha_inf).split(sep='-')[2]\n dia_sup = str(fecha_sup).split(sep='-')[2]\n mes_inf = str(meses[int(str(fecha_inf).split(sep='-')[1])])\n mes_sup = str(meses[int(str(fecha_sup).split(sep='-')[1])])\n ano_inf = str(fecha_inf).split(sep='-')[0]\n ano_sup = str(fecha_sup).split(sep='-')[0]\n elif valor_linea_control_inf != None:\n fecha_inf = str(fecha_alert_inf).split(sep=' ')[0]\n dia_inf = str(fecha_inf).split(sep='-')[2]\n mes_inf = str(meses[int(str(fecha_inf).split(sep='-')[1])])\n ano_inf = str(fecha_inf).split(sep='-')[0]\n elif valor_linea_control_sup != None:\n fecha_sup = str(fecha_alert_sup).split(sep=' ')[0]\n dia_sup = str(fecha_sup).split(sep='-')[2]\n mes_sup = str(meses[int(str(fecha_sup).split(sep='-')[1])])\n ano_sup = str(fecha_sup).split(sep='-')[0]\n \n \n ##Plantilla html\n encabezado_multi = (\n ''\n ''\n ''\n ''\n ''\n ''\n ''\n '
\"Logo '\n ' Datos Recientes '\n ' Plataforma Monitoreo Salud Estructural '\n ' '\n ' Datos obtenidos de los sensores \"'+str(sensores_multi)+'\", la ventana de tiempo seleccionada para las visualizaciones es de \"'+str(titulo_OHLC(ventana_tiempo))+'\", el dia '+str(dia_datos)+' de '+str(mes_datos)+' de '+str(ano_datos)+' desde las '+str(crear_hora(int(hora)))+' a las '+str(crear_hora(int(hora) + 1))+' . '\n ' '\n )\n\n #heroku\n '''\n img = (''\n ' {image} '\n '')\n\n img2 = (''\n ' {image} '\n '')\n\t'''\n #Escritorio\n \n img = (''\n ' \"Gráfico '\n '')\n img2 = (''\n ' \"Gráfico '\n '')\n \n encabezado = (\n ''\n ''\n ''\n ''\n ''\n ''\n ''\n ' \"Logo '\n ' Datos Recientes '\n ' Plataforma Monitoreo Salud Estructural '\n ' '\n ' Datos obtenidos del sensor \"'+str(sensor)+'\" la ventana de tiempo seleccionada para las visualizaciones es de \"'+str(titulo_OHLC(ventana_tiempo))+'\", el dia '+str(dia_datos)+' de '+str(mes_datos)+' de '+str(ano_datos)+' desde las '+str(crear_hora(int(hora)))+' a las '+str(crear_hora(int(hora) + 1))+' . '\n ' '\n )\n\n resumen = (\n ' Resumen de Indicadores '\n ' Valor promedio: '+str(valor_promedio)+' ' \n ' Valor máximo: '+str(valor_max)+', Repeticiones: '+str(num_valor_max)[10:len(str(num_valor_max))]+', Fecha de última repetición: '+str(dia_max)+'-'+str(mes_max)+'-'+str(ano_max)+' '+str(fecha_valor_max).split(sep=' ')[1]+' '\n ' Valor mínimo: '+str(valor_min)+', Repeticiones: '+str(num_valor_min)[10:len(str(num_valor_min))]+', Fecha de última repetición: '+str(dia_min)+'-'+str(mes_min)+'-'+str(ano_min)+' '+str(fecha_valor_min).split(sep=' ')[1]+' '\n ' '\n )\n resumen_multi = (\n ' Resumen de Indicadores '\n ' (Datos del último sensor seleccionado) '\n ' Valor promedio: '+str(valor_promedio)+' ' \n ' Valor máximo: '+str(valor_max)+', Repeticiones: '+str(num_valor_max)[10:len(str(num_valor_max))]+', Fecha de última repetición: '+str(dia_max)+'-'+str(mes_max)+'-'+str(ano_max)+' '+str(fecha_valor_max).split(sep=' ')[1]+' '\n ' Valor mínimo: '+str(valor_min)+', Repeticiones: '+str(num_valor_min)[10:len(str(num_valor_min))]+', Fecha de última repetición: '+str(dia_min)+'-'+str(mes_min)+'-'+str(ano_min)+' '+str(fecha_valor_min).split(sep=' ')[1]+' '\n ' '\n )\n \n #version con indicadores descritos en parrafos\n '''\n resumen = (\n ' Resumen de Indicadores '\n ' Los datos seleccionados tienen un valor promedio de '+str(valor_promedio)+', además de un valor máximo de '+str(valor_max)+', que se repite '+str(num_valor_max)[10:len(str(num_valor_max))]+' vez y su última repetición fue el '+str(dia_max)+' de '+str(mes_max)+' de '+str(ano_max)+' a las '+str(fecha_valor_max).split(sep=' ')[1]+' y por último un valor mínimo de '+str(valor_min)+' que se repite '+str(num_valor_min)[10:len(str(num_valor_min))]+' vez y su última repetición fue el '+str(dia_min)+' de '+str(mes_min)+' de '+str(ano_min)+' a las '+str(fecha_valor_min).split(sep=' ')[1]+'. '\n ' '\n )\n resumen_multi = (\n ' Resumen de Indicadores '\n ' (Datos del último sensor seleccionado) '\n ' Los datos seleccionados tienen un valor promedio de '+str(valor_promedio)+', además de un valor máximo de '+str(valor_max)+', que se repite '+str(num_valor_max)[10:len(str(num_valor_max))]+' vez y su última repetición fue el '+str(dia_max)+' de '+str(mes_max)+' de '+str(ano_max)+' a las '+str(fecha_valor_max).split(sep=' ')[1]+' y por último un valor mínimo de '+str(valor_min)+' que se repite '+str(num_valor_min)[10:len(str(num_valor_min))]+' vez y su última repetición fue el '+str(dia_min)+' de '+str(mes_min)+' de '+str(ano_min)+' a las '+str(fecha_valor_min).split(sep=' ')[1]+'. '\n ' '\n )\n '''\n linea = (' Líneas de control ')\n linea_multi = (\n ' Líneas de control '\n ' (Datos de todos los sensores seleccionados) '\n )\n if valor_linea_control_sup != None and valor_linea_control_inf != None:\n linea_sup = (\n ' Valor de línea de control superior: '+str(valor_linea_control_sup)+' '\n ' Peaks superiores: '+str(alert_sup)+' ' \n ' Fecha último peak superior detectado: '+str(dia_sup)+'-'+str(mes_sup)+'-'+str(ano_sup)+' '+str(fecha_alert_sup).split(sep=' ')[1]+' '\n )\n linea_inf = (\n ' Valor de línea de control inferior: '+str(valor_linea_control_inf)+' '\n ' Peaks inferiores: '+str(alert_inf)+' ' \n ' Fecha último peak inferior detectado: '+str(dia_inf)+'-'+str(mes_inf)+'-'+str(ano_inf)+' '+str(fecha_alert_inf).split(sep=' ')[1]+' '\n )\n #version con lineas de control descritos en parrafos\n '''\n linea_sup = (' Línea de control superior ubicada en el valor '+str(valor_linea_control_sup)+', existen '+str(alert_sup)+' que superan este umbral y el último peak detectado fue el '+str(dia_sup)+' de '+str(mes_sup)+' de '+str(ano_sup)+' a las '+str(fecha_alert_sup).split(sep=' ')[1]+'.')\n linea_inf = (' Línea de control inferior ubicada en el valor '+str(valor_linea_control_inf)+', existen '+str(alert_inf)+' que superan este umbral y el último peak detectado fue el '+str(dia_inf)+' de '+str(mes_inf)+' de '+str(ano_inf)+' a las '+str(fecha_alert_inf).split(sep=' ')[1]+'.')\n '''\n elif valor_linea_control_sup != None:\n linea_sup = (\n ' Valor de línea de control superior: '+str(valor_linea_control_sup)+' '\n ' Peaks superiores: '+str(alert_sup)+' ' \n ' Fecha último peak superior detectado: '+str(dia_sup)+'-'+str(mes_sup)+'-'+str(ano_sup)+' '+str(fecha_alert_sup).split(sep=' ')[1]+' '\n )\n #version con lineas de control descritos en parrafos\n #linea_sup = (' Línea de control superior ubicada en el valor '+str(valor_linea_control_sup)+', existen '+str(alert_sup)+' que superan este umbral y el último peak detectado fue el '+str(dia_sup)+' de '+str(mes_sup)+' de '+str(ano_sup)+' a las '+str(fecha_alert_sup).split(sep=' ')[1]+'.')\n elif valor_linea_control_inf != None:\n linea_inf = (\n ' Valor de línea de control inferior: '+str(valor_linea_control_inf)+' '\n ' Peaks inferiores: '+str(alert_inf)+' ' \n ' Fecha último peak inferior detectado: '+str(dia_inf)+'-'+str(mes_inf)+'-'+str(ano_inf)+' '+str(fecha_alert_inf).split(sep=' ')[1]+' '\n )\n #version con lineas de control descritos en parrafos\n #linea_inf = (' Línea de control inferior ubicada en el valor '+str(valor_linea_control_inf)+', existen '+str(alert_inf)+' que superan este umbral y el último peak detectado fue el '+str(dia_inf)+' de '+str(mes_inf)+' de '+str(ano_inf)+' a las '+str(fecha_alert_inf).split(sep=' ')[1]+'.')\n\n #heroku\n '''\n fecha = (\n ' Reporte del obtenido el '+str(time.strftime(\"%d/%m/%y\"))+' a las '+str(time.strftime(\"%H:%M:%S\"))+' '\n 'Guardar/Imprimir Reporte'\n ''\n '')\n '''\n #escritorio\n \n fecha = (\n ' Reporte del obtenido el '+str(time.strftime(\"%d/%m/%y\"))+' a las '+str(time.strftime(\"%H:%M:%S\"))+'
'\n ''\n '')\n \n\n #Se agregan las imagenes a la plantilla html\n imagenes = ''\n tmp = 1\n for image in graficos:\n if tmp == 3:\n _ = img2\n else: \n _ = img\n _ = _.format(image=image)\n imagenes += _\n tmp += 1\n\n \n #Dependiendo de la situacion se modifica la plantilla html, tanto como para agregar contenido o para quitarlo \n if cantidad_sensores != '1-sensor':\n reporte = encabezado_multi + resumen_multi + imagenes + fecha\n if valor_linea_control_sup != None and valor_linea_control_inf != None:\n reporte = encabezado_multi + resumen_multi + linea_multi + linea_sup + linea_inf + imagenes + fecha\n elif valor_linea_control_inf != None:\n reporte = encabezado_multi + resumen_multi + linea_multi + linea_inf + imagenes + fecha\n elif valor_linea_control_sup != None:\n reporte = encabezado_multi + resumen_multi + linea_multi + linea_sup + imagenes + fecha\n \n else:\n reporte = encabezado + resumen + imagenes + fecha\n\n if valor_linea_control_sup != None and valor_linea_control_inf != None:\n reporte = encabezado + resumen + linea + linea_sup + linea_inf + imagenes + fecha\n elif valor_linea_control_inf != None:\n reporte = encabezado + resumen + linea + linea_inf + imagenes + fecha\n elif valor_linea_control_sup != None:\n reporte = encabezado + resumen + linea + linea_sup + imagenes + fecha\n\n #Funcion que transforma el html en pdf para la version de escritorio o servidor con linux (necesita instalar dependencias extra no compatibles con heroku)\n pdfkit.from_string(reporte,'reporte.pdf')\n\n #Forma de generar el reporte para la version del subsistema montado en heroku\n #text_file = open(\"reporte.html\", \"w\")\n #n = text_file.write(reporte)\n #text_file.close()\n\n #Funcion que abre el pdf recien creado\n \n #en chrome\n #webbrowser.get('google-chrome').open_new_tab('reporte.html') #heroku\n #webbrowser.get('google-chrome').open_new_tab('reporte.pdf') #escritorio\n \n #en navegador o lector de pdf por defecto\n #webbrowser.open_new_tab('reporte.html')#heroku\n webbrowser.open_new_tab('reporte.pdf')#escritorio","sub_path":"Versiones/Version-Conexion DB-UdeC/dataframe.py","file_name":"dataframe.py","file_ext":"py","file_size_in_byte":38868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"627887426","text":"\"\"\"\n--------------------------------------------------------------------------------\nserial-apriori.py\nA parallel apriori algorithm implementation in python for CP431/CP631 term project\nThe program finds and prints out the association rules in a given dataset. 
It takes the min support\nand min confidence values as parameters\n--------------------------------------------------------------------------------\nAuthors: Elizabeth Gorbonos, Omer Tal, Tianran Wang\n--------------------------------------------------------------------------------\n\"\"\"\n\n\nimport os\nimport argparse\nimport datetime\nimport logging\n\nfrom utils.itemset import ItemSet, ItemSetTree\nfrom utils.dataset import load_dataset\nfrom utils.arule import RuleGenerator\nfrom utils.candidates import *\n\ndef main():\n parser = argparse.ArgumentParser(description='Find association rules in a dataset')\n parser.add_argument('-d', '--dataset', help='dataset file', default='ds1.txt')\n parser.add_argument('-s', '--support', type=float, help='minimum support for frequent item sets', default='0.01')\n parser.add_argument('-c', '--confidence', type=float, help='minimum confidence for association rules', default='0.6')\n parser.add_argument('-v', '--verbose', action='store_true', help='include verbose output', default=False)\n args = parser.parse_args()\n\n # set logging level\n FORMAT = '%(asctime)-15s: %(message)s'\n if args.verbose:\n logging.basicConfig(format=FORMAT, datefmt=\"%Y-%m-%d %H:%M:%S\", level=logging.DEBUG)\n else:\n logging.basicConfig(format=FORMAT, datefmt=\"%Y-%m-%d %H:%M:%S\", level=logging.INFO)\n\n logging.info(\"Running apriori with support {}% and confidence {}%\".format(args.support * 100, args.confidence * 100))\n\n # load dataset\n transactions = load_dataset(os.path.join('datasets', args.dataset))\n dataset_size = len(transactions)\n\n step = 0\n level_itemsets = []\n itemset_tree = None\n next_candidates = []\n\n # benchmark run time\n start_t = datetime.datetime.now()\n\n # while next level has candidates or step 0\n while (len(next_candidates) > 0 or step == 0):\n step += 1\n logging.debug(\"step {}: starting\".format(step))\n\n # count the candidate\n candidates = count_candidates(transactions, itemset_tree)\n logging.debug(\"step {}: counted candidates supports\".format(step))\n\n # filter candidates\n filtered = filter_candidates(candidates, args.support, dataset_size)\n logging.debug(\"step {}: filtered candidates\".format(step))\n\n # store the current levels frequent itemsets\n level_itemsets.append(filtered)\n\n # generate next level candidate\n next_candidates = generate_next_level_candidates(level_itemsets[step - 1], step)\n logging.debug(\"step {}: generated next step candidates\".format(step))\n\n # build the candidate tree\n itemset_tree = ItemSetTree(next_candidates)\n logging.debug(\"step {}: created tree\".format(step))\n\n logging.debug(\"Found {} levels.\".format(len(level_itemsets)))\n for i in range(len(level_itemsets)):\n logging.debug(\"Level {} - {} frequent itemsets\".format(i+1, len(level_itemsets[i])))\n for i in range(len(level_itemsets)):\n logging.debug(\"Level {}: {}\".format(i+1, \" \".join(x for x in level_itemsets[i])))\n\n # Generate and print rules\n RuleGenerator(level_itemsets).generate_rules(args.confidence)\n\n end_t = datetime.datetime.now()\n logging.info(\"Serial apriori took {}\".format(end_t - start_t))\n\nif __name__ == \"__main__\":\n main()","sub_path":"final/serial-apriori.py","file_name":"serial-apriori.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"300680930","text":"# All the imports go here\r\nimport numpy as np\r\nfrom cv2 import cv2\r\n# import sys\r\nfrom os import path\r\n\r\n# Initializing the face 
and eye cascade classifiers from xml files\r\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\neye_cascade = cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')\r\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\r\nif path.exists(\"trainer/trainer.yml\"):\r\n train_dataset = recognizer.read('trainer/trainer.yml') #load trained model\r\nelse:\r\n train_dataset = False\r\nid = 2 #two persons\r\nnames = ['','Arpita','sutanuka','unknown'] #key in names, start from the second place, leave first empty\r\nfont = cv2.FONT_HERSHEY_SIMPLEX\r\nface_id = input('\\n enter user id end press ==> ')\r\n\r\n# face_id = sys.argv[1]\r\n# Variable store execution state\r\nfirst_read = True\r\n\r\n# Starting the video capture\r\ncap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\r\nret, img = cap.read()\r\ncount =0\r\nwhile (ret):\r\n ret, img = cap.read()\r\n # Coverting the recorded image to grayscale\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n # Applying filter to remove impurities\r\n gray = cv2.bilateralFilter(gray, 5, 1, 1)\r\n\r\n # Detecting the face for region of image to be fed to eye classifier\r\n faces = face_cascade.detectMultiScale(gray, 1.3, 5, minSize=(200, 200))\r\n if (len(faces) > 0):\r\n for (x, y, w, h) in faces:\r\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n id, confidence = recognizer.predict(gray[y:y + h, x:x + w])\r\n # roi_face is face which is input to eye classifier\r\n roi_face = gray[y:y + h, x:x + w]\r\n roi_face_clr = img[y:y + h, x:x + w]\r\n eyes = eye_cascade.detectMultiScale(roi_face, 1.3, 5, minSize=(50, 50))\r\n\r\n # Examining the length of eyes object for eyes\r\n if (len(eyes) >= 2):\r\n # Check if program is running for detection\r\n if (first_read):\r\n cv2.putText(img,\r\n \"Eye detected press s\",\r\n (70, 70),\r\n cv2.FONT_HERSHEY_PLAIN, 3,\r\n (0, 255, 0), 2)\r\n\r\n else:\r\n count += 1\r\n cv2.putText(img,\r\n \"Eyes open!\", (70, 70),\r\n cv2.FONT_HERSHEY_PLAIN, 2,\r\n (255, 255, 255), 2)\r\n # print('click ' + str(count) + ' photo' + ' new face')\r\n if (train_dataset)and train_dataset!= False:\r\n id, confidence = recognizer.predict(gray[y:y + h, x:x + w])\r\n if (confidence > \"30%\"):\r\n cv2.putText(img, 'Already in the dataset', (x + 50, y + w + 20), font, 1, (255, 255, 0), 2)\r\n else:\r\n print('click ' + str(count) + ' photo' + confidence +' new face' + id)\r\n print(confidence)\r\n cv2.imwrite(\"dataset/User.\" + str(face_id) + '.' + str(count) + \".jpg\",\r\n gray[y:y + h, x:x + w])\r\n else:\r\n cv2.putText(img, 'New face was detected', (x + 50, y + w + 20), font, 1,\r\n (255, 255, 0), 1)\r\n print('click ' + str(count) + ' photo' + ' new face',confidence,id)\r\n cv2.imwrite(\"dataset/User.\" + str(face_id) + '.' 
+ str(count) + \".jpg\",\r\n gray[y:y + h, x:x + w])\r\n\r\n else:\r\n if (first_read):\r\n # To ensure if the eyes are present before starting\r\n cv2.putText(img,\r\n \"No eyes detected\", (70, 70),\r\n cv2.FONT_HERSHEY_PLAIN, 3,\r\n (0, 0, 255), 2)\r\n else:\r\n # This will print on console and restart the algorithm\r\n # print(\"Blink detected--------------\")\r\n # count += 1\r\n # print('click ' + str(count) + ' photo' + ' new face')\r\n # if (train_dataset)and train_dataset!= False:\r\n # id, confidence = recognizer.predict(gray[y:y + h, x:x + w])\r\n # if (confidence > \"30%\"):\r\n # cv2.putText(img, 'Already in the dataset', (x + 50, y + w + 20), font, 1, (255, 255, 0), 2)\r\n # else:\r\n # print('click ' + str(count) + ' photo' + ' new face')\r\n # print(confidence)\r\n # cv2.imwrite(\"dataset/User.\" + str(face_id) + '.' + str(count) + \".jpg\",\r\n # gray[y:y + h, x:x + w])\r\n # else:\r\n # cv2.putText(img, 'New face was detected', (x + 50, y + w + 20), font, 1,\r\n # (255, 255, 0), 1)\r\n # print('click ' + str(count) + ' photo' + ' new face')\r\n # cv2.imwrite(\"dataset/User.\" + str(face_id) + '.' + str(count) + \".jpg\",\r\n # gray[y:y + h, x:x + w])\r\n cv2.waitKey(30)\r\n first_read = True\r\n\r\n else:\r\n cv2.putText(img,\r\n \"No face detected\", (100, 100),\r\n cv2.FONT_HERSHEY_PLAIN, 3,\r\n (0, 255, 0), 2)\r\n\r\n # Controlling the algorithm with keys\r\n cv2.imshow('img', img)\r\n a = cv2.waitKey(1)\r\n if (a == ord('q')):\r\n break\r\n elif (a == ord('s') and first_read):\r\n # This will start the detection\r\n first_read = False\r\n elif count >= 30: # Take 30 face sample and stop video\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"demo_two.py","file_name":"demo_two.py","file_ext":"py","file_size_in_byte":6005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"584867247","text":"'''\n'''\nfrom pathlib import Path\nimport unittest\n\nfrom vvc import config\nfrom vvc.detector import faster_rcnn, yolo_v3\nfrom vvc.vvc import VVC\nfrom tests.test_video import output_folder\n\nclass OtherTestCase(unittest.TestCase):\n \n tm_videos = ['Ch4_20181121071359_640x480.mp4', 'Ch4_20181121073138_640x480.mp4']\n tm_folder = str(Path(config.base_folder).joinpath('Videos').joinpath('Otros'))\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n \n def test_yolo_naive_tm_person(self):\n config.video_folder = self.tm_folder\n \n detector = yolo_v3.YOLOV3('YOLOv3')\n \n counter = VVC(detector)\n \n for video_name in self.tm_videos:\n print(config.video_folder)\n print(video_name)\n counter.count(video_name, \n frame_rate_factor=1, \n filter_tags=['person'],\n show_obj_id=False)\n \n def test_yolo_naive_tm_workers(self):\n \n tm_workers_folder = Path(config.base_folder).joinpath('Videos/TM/TrabajadoresYPolicias')\n video_filder = str(tm_workers_folder.joinpath('Videos'))\n output_folder = str(tm_workers_folder.joinpath('vvc'))\n tm_workers_videos = ['Ch1_20181113075540_1min.mp4', 'Ch2_20181110121206_1min.mp4',\n 'Ch2_20181112171900_1min.mp4', 'Ch2_20181113171816_1min.mp4',\n 'Ch3_20181115065141_1min.mp4', 'Ch4_20181117115137_1min.mp4',\n 'Ch4_20181119065543_1min.mp4', 'Ch4_20181119164606_1min.mp4']\n \n config.video_folder = video_filder\n config.output_folder = output_folder\n detector = yolo_v3.YOLOV3('TM-YOLOv3')\n \n counter = VVC(detector)\n \n for video_name in tm_workers_videos:\n print(config.video_folder)\n print(video_name)\n counter.count(video_name, \n frame_rate_factor=1, \n 
filter_tags=['tu_llave', 'seg'],\n show_obj_id=False)\n \n \n","sub_path":"tests/test_others.py","file_name":"test_others.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"80338874","text":"#!/bin/python3\n\nimport re\n\n\nif __name__ == '__main__':\n N = int(input()) # Solved without using regular expressions\n\n firstNames = []\n\n for N_itr in range(N):\n firstNameEmailID = input().split()\n\n firstName = firstNameEmailID[0]\n emailID = firstNameEmailID[1]\n\n if emailID[-10:] == '@gmail.com':\n firstNames.append(firstName)\n\n firstNames.sort()\n\n for name in firstNames:\n print(name)\n","sub_path":"30_days_of_python/28_regex_patterns_and_intro_to_databases.py","file_name":"28_regex_patterns_and_intro_to_databases.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"606615191","text":"from flask.ext.wtf import Form\nfrom wtforms import BooleanField, SubmitField\nfrom wtforms.ext.sqlalchemy.orm import model_form\nfrom kulukin import models, db\n\n\ndef cap_obj_name(model):\n return model.name.capitalize()\n\n\nShelfForm = model_form(models.Shelf,\n db_session=db.session,\n base_class=Form,\n exclude=['contents'],\n field_args={\n 'owner':\n {'get_label': 'name'},\n 'contributors':\n {'get_label': 'name'}\n }\n )\n\nShelfForm.submit_button = SubmitField(label='Submit')\n\nUserForm = model_form(models.User,\n db_session=db.session,\n base_class=Form,\n field_args={'roles':\n {'get_label': cap_obj_name}})\n\n\ndef attr_format(attr):\n type_name = attr.type.name.capitalize()\n content = attr.content\n return '%s: %s'.format(type_name, content)\n\nItemForm = model_form(models.Item,\n db_session=db.session,\n base_class=Form,\n field_args={\n 'attrs':\n {'get_label':\n attr_format}\n }\n )\n\n\nAttrTypeForm = model_form(models.AttrType,\n db_session=db.session,\n base_class=Form)\n\n\ndef attr_types_for_item_attr(attr):\n return models.AttrType.query.with_parent(attr.item)\n\nAttrForm = model_form(models.Attr,\n db_session=db.session,\n base_class=Form,\n field_args={'type':\n {'get_label': cap_obj_name,\n 'query_factory':\n attr_types_for_item_attr}\n })\nRoleForm = model_form(models.Role,\n db_session=db.session,\n base_class=Form)\n\n\nItemTypeForm = model_form(models.ItemType,\n db_session=db.session,\n base_class=Form,\n field_args={'role':\n {'get_label': cap_obj_name},\n 'attr_types':\n {'get_label': cap_obj_name}\n })\n\n\ndef managed_shelves_for_instance(item_instance):\n return models.ItemInstance.query.with_parent(item_instance.owner)\n\nItemInstanceForm = model_form(models.ItemInstance,\n db_session=db.session,\n base_class=Form,\n field_args={'shelf':\n {'get_label': cap_obj_name,\n 'query_factory':\n managed_shelves_for_instance}\n })\n\n\nclass ConfirmDeleteForm(Form):\n confirm = BooleanField('I am sure I want to delete this.')\n","sub_path":"kulukin/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"503655771","text":"\n\nfrom xai.brain.wordbase.verbs._dampen import _DAMPEN\n\n#calss header\nclass _DAMPENS(_DAMPEN, ):\n\tdef __init__(self,): \n\t\t_DAMPEN.__init__(self)\n\t\tself.name = \"DAMPENS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"dampen\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/verbs/_dampens.py","file_name":"_dampens.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"94681719","text":"from rest_framework import serializers\nfrom .models import Playlist\n\n\nclass PlaylistSerializer(serializers.Serializer):\n title = serializers.CharField(allow_blank=True, required=False)\n\n class Meta:\n model = Playlist\n\n def create(self, validated_data):\n import pdb; pdb.set_trace()\n\n if not validated_data['title']:\n # query the youtube URL data and get the title that way\n self.title = 'Untitled'\n print(self.title)\n self.save()\n","sub_path":"playlist/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"134981267","text":"import cv2\nimport numpy as np\nimport os\n\nface_cascade = cv2.CascadeClassifier('./data/extern/haarcascade_frontalface_default.xml')\n\ndef face_extractor(img):\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n \n if faces == ():\n return None\n\n for (x,y,w,h) in faces:\n x=x-10\n y=y-10\n cropped_face = img[y:y+h+50, x:x+w+50]\n\n return cropped_face\n\nsource_images_dir = './data/raw/messi'\ntraining_images_dir = './data/train/messi/'\n\ncount = 0\n\nfor image in os.listdir(source_images_dir):\n image_path = os.path.join(source_images_dir, image)\n frame = cv2.imread(image_path)\n \n if face_extractor(frame) is not None :\n count += 1\n face = cv2.resize(face_extractor(frame), (400, 400))\n file_name_path = training_images_dir + str(count) + '.jpg'\n cv2.imwrite(file_name_path, face)\n print(\"Face found\")\n \n else:\n print(\"Face not found\")\n pass\n\n if cv2.waitKey(1) == 13:\n break\n\ncv2.destroyAllWindows() \nprint(\"Collecting Samples Complete\")","sub_path":"collectFaces.py","file_name":"collectFaces.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"12338332","text":"import os\n\nimport numpy as np\n\nfrom utils.read_dataset import read_digits_csv\n\n\"\"\"\n test with test set\n\"\"\"\n\n\ndef run_with_test_set(clf):\n from utils.read_dataset import custom_read_csv\n test_dataset = custom_read_csv(dir_path + '/../dataset/digit_dataset/optdigits_test.csv', feature_size)\n feature_test = test_dataset[0:test_dataset.shape[0], 0:feature_size]\n label_test = test_dataset[0:test_dataset.shape[0], feature_size]\n label_predict = clf.predict(feature_test)\n from sklearn.metrics import precision_score\n precision = precision_score(label_test, label_predict, average='weighted')\n from sklearn.metrics import recall_score\n recall = recall_score(label_test, label_predict, average='weighted')\n f1 = 2 * (precision * recall) / (precision + recall)\n from sklearn.metrics import accuracy_score\n accuracy = accuracy_score(label_test, label_predict)\n print(str(precision) + '\\t' + str(recall) + '\\t' + str(f1) + '\\t' + str(accuracy))\n\n\nif __name__ == '__main__':\n\n \"\"\"\n read dataset\n \"\"\"\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n feature_size = 64\n feature_list, label_list = read_digits_csv(dir_path + '/../dataset/digit_dataset/optdigits_raining.csv', feature_size)\n from sklearn.model_selection import train_test_split\n feature_train, feature_test, label_train, label_test = train_test_split(\n feature_list , label_list, train_size=0.95)\n\n \"\"\"\n 
train neural network\n \"\"\"\n\n for layer_size in range(10,150,10):\n\n from sklearn.neural_network import MLPClassifier\n train_features = np.array([[]])\n clf = MLPClassifier(activation='relu'\n ,solver='adam',\n max_iter=2000,\n hidden_layer_sizes=(layer_size,layer_size),\n learning_rate='constant',\n learning_rate_init=0.001)\n clf.fit(feature_train, label_train)\n\n run_with_test_set(clf)\n\n # \"\"\"\n # cross validation\n # \"\"\"\n #\n # label_predict = clf.predict(feature_test)\n #\n # from sklearn.metrics import precision_score\n # precision = precision_score(label_test, label_predict, average='weighted')\n #\n # from sklearn.metrics import recall_score\n # recall = recall_score(label_test, label_predict, average='weighted')\n #\n # f1 = 2 * (precision * recall) / (precision + recall)\n #\n # from sklearn.metrics import accuracy_score\n # accuracy = accuracy_score(label_test, label_predict)\n #\n # print('layer size:', layer_size)\n # print(str(precision)+'\\t'+ str(recall)+'\\t' +str(f1)+'\\t'+ str(accuracy))\n # print()","sub_path":"neural_network/train_evaluate_digit_nn.py","file_name":"train_evaluate_digit_nn.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"395426065","text":"from cards import mouseClicked\nfrom cards import selectedCardFunc\nfrom Combat_screen import setup\ndef setup():\n pass\n \ndef attack_5():\n damage = 5\n if turn == 'player':\n hpMonster = hpMonster - 5\n if turn == 'monster':\n hpPlayer = hpPlayer - 5\ndef attack_10():\n damage = 10\n if turn == 'player':\n hpMonster = hpMonster - 10\n if turn == 'monster':\n hpPlayer = hpPlayer - 19\ndef block_5():\n block = 5\n if turn == 'player':\n playerBlock = playerBlock + 5\n if turn == 'monster':\n monsterBlock = monsterBlock + 5\ndef block_10():\n block = 10\n if turn == 'player':\n playerBlock = playerBlock + 10\n if turn == 'monster':\n monsterBlock = monsterBlock + 10\ndef cleanse():\n if turn == 'player':\n poisonCountPlayer = 0\n corruptionCountPlayer = 0\n \n if turn == 'monster':\n poisonCountMonster = 0\n corruptionCountMonster = 0\n \ndef corruption():\n corruptionIsPlayed = True \n \ndef fireball():\n fireBallIsPlayed == True\n \ndef heal():\n if turn == 'player':\n playerHp = playerHp + 15\n if turn == 'monster':\n monsterHp = monsterHp + 15\n \ndef magicMissile():\n magicDmg = 5\n if turn == 'player':\n hpMonster = hpMonster - 5\n if turn == 'monster':\n hpPlayer = hpPlayer - 5\ndef piercing_5():\n pierceDmg = 5\n if turn == 'player':\n hpMonster = hpMonster - 5\n if turn == 'monster':\n hpPlayer = hpPlayer - 5\ndef poison():\n poisonIsPlayed = True\n\n\ndef draw():\n if selectedCard == 'attack_5':\n attack_5()\n if selectedCard == 'attack_10':\n attack_10()\n if selectedCard == 'block_5':\n block_5()\n if selectedCard == 'block_10':\n block_10()\n if selectedCard == 'cleanse': \n cleanse()\n if selectedCard == 'corruption':\n corruption()\n if selectedCard == 'fireball':\n fireball()\n if selectedCard == 'heal':\n heal()\n if selectedCard == 'magic_missile':\n magicMissile()\n if selectedCard == 'piercing':\n piercing_5()\n if selectedCard == 'poison':\n poison()\n \n \n \n \n \n \n","sub_path":"Gui/attackCards.py","file_name":"attackCards.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"18869703","text":"# -*- coding: ISO-8859-1 -*-\nimport json\nfrom calendar import monthrange\nfrom datetime 
import date, datetime\nfrom django.views.generic.base import TemplateView\n\nfrom tsm.core import constants, util\nfrom tsm.core.mixins.core_mixin_login import CoreMixinLoginRequired\nfrom tsm.core.mixins.core_mixin_base import CoreMixinDispatch\nfrom tsm.core.mixins.core_mixin_json import JSONResponseMixin\n\nfrom tsm.acesso.models.usuario import Usuario\nfrom tsm.equipe.models.membro import Membro\nfrom tsm.equipe.models.membrometa import MembroMeta\nfrom tsm.equipe.models.tipometa import TipoMeta\n\nfrom tsm.oportunidade.widgets import Widgets\nfrom tsm.oportunidade.models.receita import Receita\n\nclass DashboardIndex(CoreMixinLoginRequired, TemplateView, CoreMixinDispatch):\n \"\"\"\n Dashboard\n \"\"\"\n template_name = 'dashboard_index.html'\n\n def getListView(self, *args, **kwargs):\n \"\"\"\n Retorna a lista dos tipos de visão permitidos no Dashboard (Filial, GAR, EAR)\n e os tipos de meta\n \"\"\"\n request = kwargs['request']\n listOpt = []\n listOptMeta = []\n resp_filial = [] #array com os responsaveis por filiais\n\n if request.user.has_perm('equipe.list_all_members'):\n # pega as filiais\n filiaisList = Usuario.objects.get(id=request.user.id, is_active=True).filiais.all()\n for item in filiaisList:\n nome = item.nome\n if item.responsavel:\n nome = nome + ' - ' + item.responsavel.first_name + ' ' + item.responsavel.last_name\n resp_filial.append(item.responsavel.id)\n\n listOpt.append({\n \"id\" : item.pk,\n \"nome\" : nome,\n \"tipo\" : 'filial',\n })\n\n if filiaisList:\n # pega os líderes (GAR) e responsáveis (EAR)\n membrosList = Membro.objects \\\n .filter(usuario__filiais__in=filiaisList) \\\n .exclude(usuario__id__in=resp_filial) \\\n .order_by('usuario__first_name', 'usuario__last_name') \\\n .distinct()\n\n for item in membrosList:\n listOpt.append({\n \"id\" : item.usuario.id,\n \"nome\" : item.usuario.first_name + ' ' + item.usuario.last_name,\n \"tipo\" : 'usuario',\n })\n\n # pega os tipos de meta\n tiposList = TipoMeta.objects.filter(filial__id__in=filiaisList)\n for item in tiposList:\n listOptMeta.append({\n \"id\" : item.pk,\n \"nome\" : item.nome,\n })\n else:\n widget = Widgets(request.user, {\"id\":request.user.id,\"tipo\":\"usuario\"}, \"\")\n membros = widget.getMembros()\n\n membrosList = Membro.objects.filter(usuario__id__in=membros).order_by('usuario__first_name', 'usuario__last_name')\n for item in membrosList:\n listOpt.append({\n \"id\" : item.usuario.id,\n \"nome\" : item.usuario.first_name + ' ' + item.usuario.last_name,\n \"tipo\" : 'usuario',\n })\n\n #verifica se usuário é um membro de equipe\n membroEquipe = Membro.objects.filter(usuario__id=request.user.id)\n if membroEquipe:\n #verifica se membro tem meta visível pra ele\n membroMeta = MembroMeta.objects \\\n .filter(membro__id=membroEquipe[0].id) \\\n .filter(is_Visible=True) \\\n .filter(mesVigencia=date.today().strftime('%m')) \\\n .filter(anoVigencia=date.today().year)\n\n filterMeta = 0\n if membroMeta:\n filterMeta = membroMeta[0].tipometa.id\n else:\n paramMeta = util.getParamByName(constants.PAR_TPMETA,request.user.id)\n if paramMeta:\n filterMeta = int(paramMeta)\n\n # pega os tipos de meta\n tiposList = TipoMeta.objects.filter(id=filterMeta)\n for item in tiposList:\n listOptMeta.append({\n \"id\" : item.pk,\n \"nome\" : item.nome,\n })\n\n return listOpt, listOptMeta\n\n\n def get(self, request, *args, **kwargs):\n receitas = Usuario.objects.get(id=request.user.id).receitas.all()\n listOpt, listOptMeta = self.getListView(request=request)\n\n hoje = date.today()\n diaIni = 
datetime.strptime(str(hoje.year)+'-'+hoje.strftime('%m')+'-01','%Y-%m-%d')\n diaFim = datetime.strptime(str(hoje.year)+'-'+hoje.strftime('%m')+'-'+str(monthrange(hoje.year,hoje.month)[1]),'%Y-%m-%d')\n\n return self.render_to_response(self.get_context_data(\n receitas = receitas,\n listOpt = listOpt,\n listOptMeta = listOptMeta,\n diaIni=diaIni.strftime('%d/%m/%Y'),\n diaFim=diaFim.strftime('%d/%m/%Y'),\n ))\n\nclass DashboardGetData(CoreMixinLoginRequired, JSONResponseMixin, TemplateView):\n \"\"\"\n Retorna os indicadores\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n receitas = []\n for receita in request.GET.getlist('receitas[]'):\n receitas.append(int(receita))\n\n instanceWg = Widgets(\n request.user,\n {\n 'id': int(request.GET['visao']),\n 'tipo':request.GET['tipovisao']\n },\n int(request.GET['tipometa']),\n request.GET['diaini'],\n request.GET['diafim'],\n receitas\n )\n instanceWg.setup()\n\n data = {\n \"ponderado\" : instanceWg.prospeccaoData(),\n \"evolucao\" : instanceWg.heatData(),\n \"gross\" : instanceWg.heatGross(),\n \"linearidade\": instanceWg.linearidadeData(),\n \"compromisso\": instanceWg.entregaData(),\n }\n\n return self.render_to_response(data)\n","sub_path":"tsm/oportunidade/views/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"134395496","text":"import setuptools\nimport os\nimport os.path\n\nSCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\ndef get_version():\n print(SCRIPT_DIR)\n version_file = os.path.join(SCRIPT_DIR, \"cpg-version\")\n print(version_file)\n if not os.path.exists(version_file):\n set_version_filename = str(os.path.join(SCRIPT_DIR, \"setVersion\"))\n print(set_version_filename)\n os.system(set_version_filename)\n output = open(version_file).readline()\n version = output.replace(\"'\", \"\").replace(\"\\\\n\", \"\").replace(\"v\", \"\").replace(\"b\", \"\").replace(\"\\n\", \"\")\n print(version)\n return version\n\n\ndependencies=[\"requests\"]\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n setuptools.setup(\n name='cpgclientlib',\n version=get_version(),\n scripts=['cpg-version', 'cpg-create', 'cpg-query'],\n author=\"Fabian Yamaguchi\",\n author_email=\"fabs@shiftleft.io\",\n description=\"A client library for CPG servers\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n\n url=\"https://github.com/ShiftLeftSecurity/codepropertygraph\",\n packages=setuptools.find_packages(),\n install_requires=dependencies,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n )\n","sub_path":"cpgclientlib/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"208305461","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\n\nclass PinSpiderSpider(scrapy.Spider):\n name = 'pin_spider'\n allowed_domains = ['www.pinterest.com']\n start_urls = ['https://www.pinterest.com/pin/664140276275266310']\n\n def start_requests(self):\n urls = ['https://www.pinterest.com/pin/664140276275266310']\n for url in urls:\n req = scrapy.Request(url, callback=self.parse, meta={'use_selenium': True})\n yield req\n\n def parse(self, response):\n data_str = 
response.xpath('/html/body/div[2]/div/div/div[1]/div[2]/div[2]/div[1]/div/div/div[2]/div[2]/div/div[2]/div/div/div/div/div/div[1]/a/img').extract()\n compile_title_href = re.compile(r'\"(.*?)\"')\n for data_single in data_str:\n # print(data_single)\n result = re.findall(compile_title_href, data_single)\n # print(result)\n result_list = list(result[0])\n print(result_list[0])\n print(result_list[1])\n\n # print(data_str)\n # print(response)\n # compile_title_href = re.compile(r'\"Eyes]>')\n # result = re.findall(compile_title_href, response)\n # print(result)\n\n","sub_path":"pin/spiders/pin_spider.py","file_name":"pin_spider.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"626381999","text":"import json\nfrom functools import lru_cache\n\nimport toml\n\n\nfrom nessus import (\n list_policies,\n list_policy_templates,\n create_scan,\n create_policy,\n policy_details,\n)\n\n\n@lru_cache(maxsize=1)\ndef set_policy():\n policies = list_policies()\n policy_id = None\n\n for policy in policies[\"policies\"]:\n if policy[\"name\"] == \"standard_scan\":\n policy_id = policy[\"id\"]\n\n if not policy_id:\n print(\"No policies exist.\")\n policy_id = create_gds_scan_policy()\n\n return policy_id\n\n\n@lru_cache(maxsize=1)\ndef advanced_dynamic_policy_template_uuid():\n templates = list_policy_templates()\n return next(\n template\n for template in templates[\"templates\"]\n if template[\"title\"] == \"Advanced Dynamic Scan\"\n )[\"uuid\"]\n\n\ndef create_gds_scan_policy():\n with open(\"scan_config/standard_scan_template.json\", \"r\") as f:\n policy = json.load(f)\n policy[\"uuid\"] = advanced_dynamic_policy_template_uuid()\n policy = create_policy(policy)\n return policy[\"policy_id\"]\n\n\ndef create_scan_config(scan):\n return {\n \"uuid\": advanced_dynamic_policy_template_uuid(),\n \"settings\": {\n \"name\": scan[\"name\"],\n \"enabled\": scan[\"enabled\"],\n \"rrules\": f\"FREQ={scan['rrules.freq']};INTERVAL={scan['rrules.interval']};BYDAY={scan['rrules.byday']}\",\n \"policy_id\": set_policy(),\n \"starttime\": scan[\"starttime\"],\n \"timezone\": \"Europe/London\",\n \"text_targets\": scan[\"text_targets\"],\n \"agent_group_id\": [],\n },\n }\n\n\ndef create_gds_scans(config):\n for scan in config.values():\n create_scan_config(scan)\n create_scan(create_scan_config(scan))\n\n\ndef load_scan_config():\n with open(\"scan_config/scan.toml\", \"r\") as f:\n return toml.load(f)\n\n\ndef main():\n print(\"Scheduling scans...\")\n config = load_scan_config()\n create_gds_scans(config)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"schedule_scans.py","file_name":"schedule_scans.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"331690147","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport re\nimport threading\n\n\nheaders = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0\"}\n\n\ndef search(keyword):\n url='http://search.people.com.cn/cnpeople/search.do?pageNum={}&keyword={}&siteName=news&facetFlag=true&nodeType=belongsId&nodeId=0'\n page=1\n while True:\n try:\n 
html=requests.get(url.format(page,keyword),headers=headers,timeout=30).text\n except:\n print(page,'failed')\n continue\n try:\n table=BeautifulSoup(html,'lxml').find('div',{'class':['fr','w800']}).find_all('ul')\n except:\n break\n if len(table)==0:\n break\n f=open('urls.txt','a')\n for ul in table:\n try:\n title=ul.find('a').get_text()\n news_url=ul.find('a').get('href')\n except:\n continue\n try:\n date=re.findall('(\\d+-\\d+-\\d+)',str(ul))[0]\n except:\n date='-'\n f.write(str([title,date,news_url])+'\\n')\n f.close()\n print(page,'ok')\n page+=1\n\ndef news_content(url):\n html=requests.get(url,headers=headers,timeout=30).text.encode('iso-8859-1').decode('gbk','ignore')\n soup=BeautifulSoup(html,'lxml')\n ok=False\n try:\n text=soup.find('div',{'class':'text_con_left'}).find('div',{'class':'box_con'}).get_text()\n ok=True\n except:\n pass\n if not ok:\n try:\n text=soup.find('div',{'class':'show_text'}).get_text()\n ok=True\n except:\n pass\n if not ok:\n try:\n text=soup.find('div',{'id':'p_content'}).get_text()\n ok=True\n except:\n pass\n if not ok:\n try:\n text=soup.find('div',{'class':'text_show'}).get_text()\n ok=True\n except:\n pass\n if not ok:\n try:\n text=soup.find('div',{'class':'text_box'}).get_text()\n ok=True\n except:\n pass\n if not ok:\n try:\n text=soup.find('body').get_text()\n ok=True\n except:\n pass\n return text\n\nclass NewsGet(threading.Thread):\n def __init__(self,infor):\n super(NewsGet,self).__init__()\n self.infor=infor\n self.url=self.infor[-1]\n\n def run(self):\n self.ok=True\n try:\n text=news_content(self.url)\n if text!=False:\n self.infor.append(text)\n else:\n self.ok=False\n except:\n self.ok=False\n\ndef load_urls():\n items=[]\n for line in open('./urls.txt','r'):\n item=eval(line)\n items.append(item)\n if len(items)==10:\n yield items\n items.clear()\n else:\n continue\n yield items\n\ndef main():\n count=0\n for items in load_urls():\n threadings=[]\n for item in items:\n work=NewsGet(item)\n work.setDaemon(True)\n threadings.append(work)\n for work in threadings:\n work.start()\n for work in threadings:\n work.join()\n f=open('result.txt','a')\n for work in threadings:\n if work.ok==False:\n failed=open('failed.txt','a')\n failed.write(str(work.infor)+'\\n')\n failed.close()\n continue\n f.write(str(work.infor)+'\\n')\n count+=1\n f.close()\n print(count,'ok')\n\nmain()\n","sub_path":"Nyspider/news_get/people.com.cn/people.com.cn.py","file_name":"people.com.cn.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"24787864","text":"import urllib2\nimport re\nfrom bs4 import BeautifulSoup\n#encoding = utf-8\nclass Crawler:\n def main(self):\n page = urllib2.urlopen('http://192.168.10.30', timeout=10)\n data = page.read()\n # print data\n soup = BeautifulSoup(data, \"html.parser\")\n # print len(data)\n # print soup\n for tag in soup.find_all(\"td\"):\n if len(tag.string)>15:\n print (tag.string)\n\nif __name__ == '__main__':\n me=Crawler()\n me.main()","sub_path":"zxc.py","file_name":"zxc.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"101343777","text":"# from scipy.cluster import hierarchy # scipy.cluster is not yet in tcrregex folder\n# from scipy.spatial import distance\nimport logging\n\n#from .all_genes import all_genes\nfrom .all_genes import all_genes as all_genes_default\nfrom . import cdr3s_human\nfrom . import basic\nfrom . 
import html_colors\n\nlogger = logging.getLogger('util.py')\n\ndef get_rep( gene, organism ):\n assert gene.startswith('TR')\n vj = gene[3]\n if vj == 'V':\n rep = cdr3s_human.all_loopseq_representative[ organism ][ gene ]\n else:\n rep = cdr3s_human.all_jseq_representative[ organism ][ gene ]\n return rep\n\n\ndef get_mm1_rep( gene, organism ):\n assert gene.startswith('TR')\n vj = gene[3]\n if vj == 'V':\n rep = cdr3s_human.all_loopseq_representative_mm1[ organism ][ gene ]\n else:\n rep = cdr3s_human.all_jseq_representative[ organism ][ gene ]\n return rep\n\n\ndef get_rep_ignoring_allele( gene, organism ):\n rep = get_rep( gene, organism )\n rep = rep[:rep.index('*')]\n return rep\n\ndef tree_sort( old_l, distances, return_leaves=True ): ## average linkage\n assert len(distances) == len(old_l)\n\n if len(old_l)==1:\n leaves = [0]\n else:\n y = distance.squareform( distances, checks=True )\n Z = hierarchy.average( y )\n #c,coph_dists = hierarchy.cophenet(Z,y)\n leaves = hierarchy.leaves_list( Z )\n\n new_l = [ old_l[x] for x in leaves ]\n\n if not return_leaves:\n return new_l\n else:\n return new_l, leaves\n\ndef get_top_genes( blast_hits_string ):\n hits = dict( [ ( x.split(':')[0], int( x.split(':')[1] ) ) for x in blast_hits_string.split(';') ] )\n top_score = max( hits.values() )\n return { x for x, y in hits.items() if y >= top_score }\n\ndef get_top_reps( blast_hits_string, organism, all_genes = all_genes_default ):\n hits = dict( [ ( x.split(':')[0], int( x.split(':')[1] ) ) for x in blast_hits_string.split(';') ] )\n top_score = max( hits.values() )\n # vj = hits.keys()[0][3]\n # if vj == 'V':\n # rep_map = cdr3s_human.all_loopseq_representative[ organism ]\n # else:\n # assert vj == 'J'\n # rep_map = cdr3s_human.all_jseq_representative[ organism ]\n return { all_genes[organism][x].rep for x, y in hits.items() if y >= top_score }\n\n\ndef reps_from_genes( genes, organism, mm1=False, trim_allele=False, all_genes = all_genes_default ):\n ## if genes is a set we can't index into it\n # vj = [ x[3] for x in genes ][0]\n\n # if vj == 'V':\n # if mm1:\n # rep_map = cdr3s_human.all_loopseq_representative_mm1[ organism ]\n # else:\n # rep_map = cdr3s_human.all_loopseq_representative[ organism ]\n # else:\n # assert vj == 'J'\n # rep_map = cdr3s_human.all_jseq_representative[ organism ]\n\n # reps = set( [ rep_map[x] for x in genes ] )\n reps = set( ( all_genes[organism][x].mm1_rep for x in genes ) ) if mm1 else \\\n set( ( all_genes[organism][x].rep for x in genes ) )\n if trim_allele:\n reps = set( ( x[:x.index('*')] for x in reps ) )\n return reps\n\n\ndef get_mm1_rep_ignoring_allele( gene, organism ): # helper fxn\n rep = get_mm1_rep( gene, organism )\n rep = rep[:rep.index('*')]\n return rep\n\ndef get_allele2mm1_rep_gene_for_counting(all_genes = all_genes_default):\n allele2mm1_rep_gene_for_counting = {}\n for organism in ['human', 'mouse']:\n allele2mm1_rep_gene_for_counting[ organism ] = {}\n\n for chain in 'AB':\n\n ## look at gene/allele maps\n vj_alleles = { 'V': [ id for (id, g) in all_genes[organism].items() if g.chain==chain and g.region=='V'],\n 'J': [ id for (id, g) in all_genes[organism].items() if g.chain==chain and g.region=='J'] }\n\n for vj, alleles in vj_alleles.items():\n gene2rep = {}\n gene2alleles = {}\n rep_gene2alleles = {}\n\n for allele in alleles:\n #assert allele[2] == chain\n gene = allele[:allele.index('*')]\n rep_gene = get_mm1_rep_ignoring_allele( allele, organism )\n if rep_gene not in rep_gene2alleles:\n rep_gene2alleles[ rep_gene ] = []\n 
rep_gene2alleles[ rep_gene ].append( allele )\n\n if gene not in gene2rep:\n gene2rep[gene] = set()\n gene2alleles[gene] = []\n gene2rep[ gene ].add( rep_gene )\n gene2alleles[gene].append( allele )\n\n merge_rep_genes = {}\n for gene, reps in gene2rep.items():\n if len(reps)>1:\n assert vj=='V'\n logger.debug('multireps: %s, %s, %s', organism, gene, reps)\n '''\n for allele in gene2alleles[gene]:\n logger.debug('%s %s %s %s' % (' '.join(all_genes[organism][allele].cdrs), allele, get_rep(allele,organism), get_mm1_rep(allele,organism)))\n '''\n\n ## we are going to merge these reps\n ## which one should we choose?\n l = sorted([ (len(rep_gene2alleles[rep]), rep ) for rep in reps ])\n l = l[::-1]\n assert l[0][0] > l[1][0]\n toprep = l[0][1]\n for (count, rep) in l:\n if rep in merge_rep_genes:\n assert rep == toprep and merge_rep_genes[rep] == rep\n merge_rep_genes[ rep ] = toprep\n\n\n for allele in alleles:\n count_rep = get_mm1_rep_ignoring_allele( allele, organism )\n if count_rep in merge_rep_genes:\n count_rep = merge_rep_genes[ count_rep ]\n allele2mm1_rep_gene_for_counting[ organism ][ allele] = count_rep\n logger.debug('allele2mm1_rep_gene_for_counting: %s, %s, %s', organism, allele, count_rep)\n return allele2mm1_rep_gene_for_counting\n\nallele2mm1_rep_gene_for_counting = get_allele2mm1_rep_gene_for_counting(all_genes = all_genes_default)\n\ndef get_mm1_rep_gene_for_counting( allele, organism ):\n return allele2mm1_rep_gene_for_counting[ organism ][ allele ]\n\ndef countreps_from_genes( genes, organism ):\n reps = set( ( allele2mm1_rep_gene_for_counting[ organism ][ x ] for x in genes ) )\n return reps\n\n\ndef assign_label_reps_and_colors_based_on_most_common_genes_in_repertoire( tcr_infos, organism ):\n ## assumes that each element of tcr_infos is a dictionary with fields that would have come from parse_tsv_line\n ## uses the *_countreps info that was filled in by read_pair_seqs.py\n ## the _label_rep* fields get over-written if they were present\n for segtype in basic.segtypes_lowercase:\n countreps_tag = segtype+'_countreps'\n rep_tag = segtype+'_label_rep'\n color_tag = segtype+'_label_rep_color' ## where we will store the rep info\n\n counts = {}\n for tcr_info in tcr_infos:\n try:\n reps = tcr_info[countreps_tag].split(';')\n for rep in reps:\n counts[rep] = counts.get(rep, 0)+1\n except KeyError:\n pass # KMB added 5/13/2020 - this to try to permit beta only analysis\n\n newcounts = {}\n for tcr_info in tcr_infos:\n try:\n reps = tcr_info[countreps_tag].split(';')\n toprep = max( [ ( counts[x], x) for x in reps ] )[1]\n tcr_info[rep_tag] = toprep ## doesnt have allele info anymore\n newcounts[toprep] = newcounts.get(toprep, 0)+1\n except KeyError:\n pass # KMB added 5/13/2020 - this to try to permit beta only analysis\n\n l = sorted([(y, x) for x, y in newcounts.items()])\n l.reverse()\n rep_colors = dict( list(zip( [x[1] for x in l], html_colors.get_rank_colors_no_lights(len(l)) )) )\n for tcr_info in tcr_infos:\n try:\n tcr_info[ color_tag ] = rep_colors[ tcr_info[ rep_tag ] ]\n except KeyError:\n pass # KMB added 5/13/2020 - this to try to permit beta only analysis\n\n return ## we modified the elements of the tcr_infos list in place\n","sub_path":"tcrregex/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"644855617","text":"import time\n\nfrom collections import defaultdict\nfrom redis import Redis\n\nfrom erigam.lib.messages import 
send_message\nfrom erigam.lib.request_methods import redis_pool\n\ndb = Redis(connection_pool=redis_pool)\n\nchat_history = []\n\nwhile True:\n chats = defaultdict(set)\n for chat_session in db.zrange(\"chats-alive\", 0, -1):\n chat, session = chat_session.split(\"/\")\n chats[chat].add(session)\n chat_history.append(chats)\n if len(chat_history) == 6:\n for chat, sessions in sorted(chat_history[-1].items()):\n # Ignore chats which have autosilence on.\n if db.hget(\"chat.%s.meta\" % chat, \"autosilence\") is not None:\n continue\n change = len(sessions)-len(chat_history[0][chat])\n if change != 0:\n print(chat, change)\n if change >= 8:\n # Silence all sessions which have entered recently.\n for session in sessions-chat_history[0][chat]:\n db.hset('session.%s.meta.%s' % (session, chat), 'group', 'silent')\n # Activate autosilence.\n db.hset(\"chat.%s.meta\" % chat, \"autosilence\", 1)\n # Send a message out.\n # XXX NEEDS TO BE META_CHANGE TOO.\n send_message(\n db,\n chat,\n -1,\n 'user_change',\n '----------------------------------------- SPAM DETECTED, SILENCING -----------------------------------------',\n '000000',\n '',\n )\n print('Spam detected in', chat)\n print()\n del chat_history[0]\n time.sleep(1)\n","sub_path":"erigam/extras/usertrack.py","file_name":"usertrack.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"473274147","text":"import pandas as pd\nimport numpy as np\nimport math\n\nmadrid_data = pd.read_csv(\"madrid_covid_aqi.csv\")\nmean_pm25 = madrid_data['pm25'].mean()\nmean_pm10 = madrid_data['pm10'].mean()\nmean_o3 = madrid_data['o3'].mean()\nmean_no2 = madrid_data['no2'].mean()\nmean_so2 = madrid_data['so2'].mean()\n\nmean_corona = madrid_data['Daily'].mean()\n\nxix_pm25 = []\nxix_pm10 = []\nxix_o3 = []\nxix_no2 = []\nxix_so2 = []\nyiy_corona = []\n\nfor i in range(0,len(madrid_data)):\n xix_pm25.append(madrid_data['pm25'].iloc[i] - mean_pm25)\n xix_pm10.append(madrid_data['pm10'].iloc[i] - mean_pm10) \n xix_o3.append(madrid_data['o3'].iloc[i] - mean_o3)\n xix_no2.append(madrid_data['no2'].iloc[i] - mean_no2)\n xix_so2.append(madrid_data['so2'].iloc[i] - mean_so2)\n yiy_corona.append(madrid_data['Daily'].iloc[i] - mean_corona)\n\ns_pm25 = 0\ns_pm10 = 0\ns_o3 = 0\ns_no2 = 0\ns_so2 = 0\ns_corona = 0\n\nfor j in range(0,len(xix_pm25)):\n s_pm25 = s_pm25 + (xix_pm25[j] * xix_pm25[j])\n s_pm10 = s_pm10 + (xix_pm10[j] * xix_pm10[j])\n s_o3 = s_o3 + (xix_o3[j] * xix_o3[j])\n s_no2 = s_no2 + (xix_no2[j] * xix_no2[j])\n s_so2 = s_so2 + (xix_so2[j] * xix_so2[j])\n s_corona = s_corona + (yiy_corona[j] * yiy_corona[j])\n\ns_pm25 = math.sqrt(s_pm25/(len(madrid_data) - 1)) \ns_pm10 = math.sqrt(s_pm10/(len(madrid_data) - 1)) \ns_o3 = math.sqrt(s_o3/(len(madrid_data) - 1)) \ns_no2 = math.sqrt(s_no2/(len(madrid_data) - 1)) \ns_so2 = math.sqrt(s_so2/(len(madrid_data) - 1)) \ns_corona = math.sqrt(s_corona/(len(madrid_data) - 1)) \n\nr_pm25 = 0\nr_pm10 = 0\nr_o3 = 0\nr_no2 = 0\nr_so2 = 0\n\nfor k in range(0,len(xix_pm25)):\n r_pm25 = r_pm25 + ((xix_pm25[k] * yiy_corona[k]) / (s_pm25 * s_corona))\n r_pm10 = r_pm10 + ((xix_pm10[k] * yiy_corona[k]) / (s_pm10 * s_corona))\n r_o3 = r_o3 + ((xix_o3[k] * yiy_corona[k]) / (s_o3 * s_corona))\n r_no2 = r_no2 + ((xix_no2[k] * yiy_corona[k]) / (s_no2 * s_corona))\n r_so2 = r_so2 + ((xix_so2[k] * yiy_corona[k]) / (s_so2 * s_corona))\n\nr_pm25 = r_pm25/(len(madrid_data) - 1)\nr_pm10 = r_pm10/(len(madrid_data) - 1)\nr_o3 = r_o3/(len(madrid_data) - 
1)\nr_no2 = r_no2/(len(madrid_data) - 1)\nr_so2 = r_so2/(len(madrid_data) - 1)\n\nprint('Using Basic Statistics:')\nprint('Correlation Coefficient (PM2.5 and Daily Corona Cases): ', r_pm25)\nprint('Correlation Coefficient (PM10 and Daily Corona Cases): ', r_pm10)\nprint('Correlation Coefficient (O3 and Daily Corona Cases): ', r_o3)\nprint('Correlation Coefficient (NO2 and Daily Corona Cases): ', r_no2)\nprint('Correlation Coefficient (SO2 and Daily Corona Cases): ', r_so2)\n\nprint()\nprint('Using Pearson Method: ')\nprint('Correlation Coefficient (PM2.5 and Daily Corona Cases): \\n', madrid_data[['pm25','Daily']].corr(method='pearson'))\nprint('Correlation Coefficient (PM10 and Daily Corona Cases): \\n', madrid_data[['pm10','Daily']].corr(method='pearson'))\nprint('Correlation Coefficient (O3 and Daily Corona Cases): \\n', madrid_data[['o3','Daily']].corr(method='pearson'))\nprint('Correlation Coefficient (NO2 and Daily Corona Cases): \\n', madrid_data[['no2','Daily']].corr(method='pearson'))\nprint('Correlation Coefficient (SO2 and Daily Corona Cases): \\n', madrid_data[['so2','Daily']].corr(method='pearson'))\n","sub_path":"madrid_data/Processed/correlation.py","file_name":"correlation.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"429683692","text":"# -*- coding:utf-8 -*-\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nfrom sklearn.linear_model import LinearRegression\n\nfont = FontProperties(fname=r\"c:\\windows\\fonts\\msyh.ttc\", size=10)\n\n\ndef runplt():\n    plt.figure()\n    plt.title('Pizza price vs. diameter', fontproperties=font)\n    plt.xlabel('Diameter (inches)', fontproperties=font)\n    plt.ylabel('Price (dollars)', fontproperties=font)\n    plt.axis([0, 25, 0, 25])\n    plt.grid(True)\n    return plt\n\n\nplt = runplt()\nx = [[6], [8], [10], [14], [18]]\ny = [[7], [9], [13], [17.5], [18]]\nx2 = [[0], [10], [14], [25]]\nmodel = LinearRegression()\nmodel.fit(x, y)\ny2 = model.predict(x2)\nplt.plot(x, y, 'k.')\nplt.plot(x2, y2, 'g-')\nplt.show()\n","sub_path":"ML.py","file_name":"ML.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"117423053","text":"import numpy as np\nimport lasagne\nfrom theano import tensor as T\nfrom lasagne.layers import *\nimport time\nfrom load_data import *\nimport theano\nfrom theano.compile.debugmode import DebugMode\nfrom theano.compile.nanguardmode import NanGuardMode\n\n# Prepare Theano variables for inputs and targets (all are 4d tensors)\ninput_var0 = T.tensor4('inputs0')\ninput_var1 = T.tensor4('inputs1')\ntarget = T.tensor4('target')\nclass_weights = T.tensor4('class_weights')\n\n#network config\nl_in0 = InputLayer(shape=(None, 1, None, None), input_var=input_var0)\nl_in1 = InputLayer(shape=(None, 1, None, None), input_var=input_var1)\n\n############### STAGES 1-8 ###############\n# All eight stages are identical weight-sharing siamese blocks, so one helper\n# builds them in a loop instead of eight copy-pasted sections.\ndef build_stage(prev1, prev2):\n    conv1 = Conv2DLayer(prev1, 64, 5, pad='same', untie_biases=False, W=lasagne.init.GlorotNormal(), b=lasagne.init.Constant(0.), nonlinearity=lasagne.nonlinearities.sigmoid, flip_filters=True)\n    conv1_bn = batch_norm(conv1)\n    # the second branch reuses the first branch's filters\n    conv2 = Conv2DLayer(prev2, 64, 5, pad='same', untie_biases=False, W=conv1.W, b=lasagne.init.Constant(0.), nonlinearity=lasagne.nonlinearities.sigmoid, flip_filters=True)\n    conv2_bn = batch_norm(conv2)\n\n    def biased_rectify(incoming):\n        # learnable bias shared over all axes, followed by a ReLU\n        biased = BiasLayer(incoming, b=lasagne.init.Constant(0.), shared_axes=(0,1,2,3))\n        return biased, lasagne.layers.NonlinearityLayer(biased, nonlinearity=lasagne.nonlinearities.rectify)\n\n    sub11_biased, sub11_rect = biased_rectify(lasagne.layers.ElemwiseMergeLayer([conv1_bn, conv2_bn], T.sub, cropping=None))\n    sub21_biased, sub21_rect = biased_rectify(lasagne.layers.ElemwiseMergeLayer([conv2_bn, conv1_bn], T.sub, cropping=None))\n    sub12_biased, sub12_rect = biased_rectify(lasagne.layers.ElemwiseMergeLayer([sub11_rect, conv1_bn], T.sub, cropping=None))\n    sub22_biased, sub22_rect = biased_rectify(lasagne.layers.ElemwiseMergeLayer([sub21_rect, conv2_bn], T.sub, cropping=None))\n    l_add = lasagne.layers.ElemwiseMergeLayer([sub12_rect, sub22_rect], T.add, cropping=None)\n    return conv1_bn, conv2_bn, l_add, (sub11_biased, sub12_biased, sub21_biased, sub22_biased)\n\nstage_adds = []\nstage_biases = []\nprev1, prev2 = l_in0, l_in1\nfor stage in range(8):\n    prev1, prev2, l_add, biases = build_stage(prev1, prev2)\n    stage_adds.append(l_add)\n    stage_biases.append(biases)\n\n# aliases for the stage-1 bias layers, monitored by the train function below\ns1_sub11_biased, s1_sub12_biased, s1_sub21_biased, s1_sub22_biased = stage_biases[0]\n\n###############\nl_concat = lasagne.layers.ConcatLayer(stage_adds, axis=1, cropping=None)\n###############\nl_conv3 = Conv2DLayer(l_concat, 256, 1, pad='same', untie_biases=False, W=lasagne.init.GlorotNormal(), b=lasagne.init.Constant(0.), nonlinearity=lasagne.nonlinearities.leaky_rectify, flip_filters=True)\nl_conv3_bn = batch_norm(l_conv3)\nl_conv4 = Conv2DLayer(l_conv3_bn, 128, 5, pad='same', untie_biases=False, W=lasagne.init.GlorotNormal(), b=lasagne.init.Constant(0.), nonlinearity=lasagne.nonlinearities.leaky_rectify, flip_filters=True)\nl_conv4_bn = batch_norm(l_conv4)\nl_conv5 = Conv2DLayer(l_conv4_bn, 64, 5, pad='same', untie_biases=False, W=lasagne.init.GlorotNormal(), b=lasagne.init.Constant(0.), nonlinearity=lasagne.nonlinearities.leaky_rectify, flip_filters=True)\nl_conv5_bn = batch_norm(l_conv5)\nl_conv6 = Conv2DLayer(l_conv5_bn, 1, 5, pad='same', untie_biases=False, W=lasagne.init.GlorotNormal(), b=lasagne.init.Constant(0.), nonlinearity=lasagne.nonlinearities.sigmoid, flip_filters=True)\n#l_conv6 = batch_norm(l_conv6)\n#l_conv6_mp = lasagne.layers.MaxPool2DLayer(l_conv6, pool_size=2)\n######################## Creating train function #######################################\nnw_output = lasagne.layers.get_output(l_conv6)\n\n# loss_edge = T.mean(T.square(T.abs_(nw_output[:,:,1:,1:]-nw_output[:,:,:-1,1:]) - T.abs_(target[:,:,1:,1:]-target[:,:,:-1,1:])) + T.square(T.abs_(nw_output[:,:,1:,1:]-nw_output[:,:,1:,:-1]) - T.abs_(target[:,:,1:,1:]-target[:,:,1:,:-1])))\n\n#lss1 = 
lasagne.objectives.squared_error(prediction, target_var)\n#lss = 100*ssim_loss_batch(prediction, target_var)\n#lss = l1_loss.l1_loss(prediction, target_var)\n#cost = lss.mean()\n#a1 = T.scalar('a1')\n#a1 = T.sum((T.isnan(prediction)).flatten())\n\n\n\nlss = lasagne.objectives.binary_crossentropy(nw_output, target)\nlss1 = lasagne.objectives.aggregate(lss,class_weights,'normalized_sum')\n\n# cost = lss1\nreg = lasagne.regularization.l1(nw_output)\ncost = (lss1 + 0.0001*reg)*0.75 #+ loss_edge*0.25\n# cost = lss1 + 0.0001*reg\n#print \"cost calculated\"\nparams = lasagne.layers.get_all_params(l_conv6, trainable=True)\n# print params\nlr = 0.00001\n## SGD updates \n#updates = lasagne.updates.sgd(cost, params, learning_rate=lr)\n\n## SGD updates with nesterov momentum\n#updates = lasagne.updates.sgd(cost, params, learning_rate=lr)\n#updates_nes_sgd = lasagne.updates.apply_nesterov_momentum(updates, params, momentum=0.9)\n\n## RMS prop\n# updates_rms = lasagne.updates.rmsprop(cost, params, learning_rate=lr)\n\n## ADAGRAD\n#updates_adagrad = lasagne.updates.adagrad(cost, params, learning_rate=lr)\n\n## ADAM\nupdates_adam = lasagne.updates.adam(cost, params,learning_rate=lr,beta1=0.9,beta2=0.999)\n\ntrain_fn = theano.function([input_var0,input_var1,target,class_weights], [cost,s1_sub11_biased.b,s1_sub12_biased.b,s1_sub21_biased.b,s1_sub22_biased.b], updates=updates_adam)\n# train_fn = theano.function([input_var0,input_var1,target], cost, updates=updates_adam)\n\n######################## Creating test function #########################################\nt_nw_output = lasagne.layers.get_output(l_conv6, deterministic=True)\n\n\n#target_var1 = T.tensor4('targets1') \n#t_lss1 = lasagne.objectives.squared_error(t_prediction, target_var)\n#t_lss = 100*ssim_loss_batch(t_prediction, target_var)\n#t_lss = l1_loss.l1_loss(t_prediction, target_var)\n#print(\"point 1\")\n\n\n# edge_loss_val = T.mean(T.square(T.abs_(t_nw_output[:,:,1:,1:]-t_nw_output[:,:,:-1,1:]) - T.abs_(target[:,:,1:,1:]-t_nw_output[:,:,:-1,1:])) + T.square(T.abs_(t_nw_output[:,:,1:,1:]-t_nw_output[:,:,1:,:-1]) - T.abs_(target[:,:,1:,1:]-target[:,:,1:,:-1])))\n\nt_lss = lasagne.objectives.binary_crossentropy(t_nw_output, target)\nt_cost = (t_lss.mean() + 0.0001*reg )*0.75 #+ edge_loss_val*0.25\nvalid_fn = theano.function([input_var0,input_var1,target], [t_cost, t_nw_output])\nvalid_fn_gen = theano.function([input_var0,input_var1], t_nw_output)\n\n# valid_fn_gen_vis = theano.function([input_var0,input_var1], [t_nw_output,get_output(l_s1_sub11),get_output(l_s1_sub21),get_output(l_s1_sub12),get_output(l_s1_sub22),get_output(s1_sub11_rect),get_output(s1_sub21_rect),get_output(s1_sub12_rect),get_output(s1_sub22_rect),s1_sub11_biased.b,s1_sub21_biased.b,s1_sub12_biased.b,s1_sub22_biased.b])\n\n# func_temp = theano.function([input_var0,input_var1],[l_conv6])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":28342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"230126761","text":"# + \n\n# Listing 13.7 CountEachLetter.py\n\ndef main():\n while True:\n try:\n filename = input(\"Enter a filename: \").strip()\n infile = open(filename, \"r\")\n break\n except IOError:\n print(\"File \" + filename + \" does not exist. 
Try again.\")\n\n counts = [0 for x in range(26)]\n for line in infile:\n for letter in line.lower():\n if 'a' <= letter <= 'z':\n counts[ord(letter) - ord('a')] += 1\n \n infile.close()\n\n for i in range(len(counts)):\n if counts[i] != 0:\n print(chr(i + ord('a')), \"appears\", counts[i], \"time\" if \n counts[i] ==1 else \"times\")\n\n\nmain()\n","sub_path":"Python/IntroductionBook/CountEachLetter.py","file_name":"CountEachLetter.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"483799899","text":"\"\"\"license: Apache License 2.0, see LICENSE for more details.\"\"\"\ndef normpath(path, trailing=False):\n \"\"\"Normalize path, eliminating double slashes, etc.\"\"\"\n comps = path.split('/')\n new_comps = []\n for comp in comps:\n if comp == '':\n continue\n if comp in ('.', '..'):\n raise ValueError('relative paths not allowed')\n new_comps.append(comp)\n new_path = '/'.join(new_comps)\n if trailing is True and path.endswith('/'):\n new_path += '/'\n if path.startswith('/'):\n return '/' + new_path\n return new_path\n\n\ndef join(a, *p):\n \"\"\"Join two or more pathname components, inserting '/' as needed.\n\n If any component is an absolute path, all previous path components\n will be discarded.\n\n \"\"\"\n path = a\n for b in p:\n if b.startswith('/'):\n path = b\n elif path == '' or path.endswith('/'):\n path += b\n else:\n path += '/' + b\n return path\n\n\ndef isabs(s):\n \"\"\"Test whether a path is absolute\"\"\"\n return s.startswith('/')\n\n\ndef basename(p):\n \"\"\"Returns the final component of a pathname\"\"\"\n i = p.rfind('/') + 1\n return p[i:]\n\n\ndef _prefix_root(root, path, trailing=False):\n \"\"\"Prepend a root to a path. \"\"\"\n return normpath(join(_norm_root(root), path.lstrip('/')), trailing=trailing)\n\n\ndef _norm_root(root):\n return normpath(join('/', root))\n","sub_path":"slider-agent/src/main/python/kazoo/protocol/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"644641939","text":"import numpy as np\r\nimport math\r\n\r\nclass curve:\r\n def __init__(self,X,Y,Z):\r\n self.X=X\r\n self.Y=Y\r\n self.Z=Z\r\n\r\n#combine 2 curve into 1 curve\r\ndef intersection(curve1,curve2,theta=45,delta=0.0001):\r\n X=[]\r\n Y=[]\r\n Z=[]\r\n v1=-math.tan(theta)\r\n v2=math.tan(theta)\r\n for i in range(len(curve1.X)):\r\n sub_list=[]\r\n y_list=[]\r\n z_list=[]\r\n for j in range(int((curve2.Y[i]/2-curve1.Y[i]/2)/delta)):\r\n y=curve1.Y[i]/2+ j * delta\r\n z1=v1*(y-curve1.Y[i])+curve1.Z[i]\r\n z2=v2*(y-curve2.Y[i])+curve2.Z[i]\r\n sub_list.append(abs(z1-z2))\r\n y_list.append(y)\r\n z_list.append(z1)\r\n\r\n #print(sub_list)\r\n index=sub_list.index(min(sub_list))\r\n X.append(curve1.X[i])\r\n Y.append(y_list[index])\r\n Z.append(z_list[index])\r\n \r\n\r\n X=np.array(X)\r\n Y=np.array(Y)\r\n Z=np.array(Z)\r\n return curve(X,Y,Z)\r\n\r\n#extend the curve down to the floor\r\ndef extend(surface_Z,curve):\r\n for j in range(len(surface_Z[0])):\r\n for i in range(len(surface_Z)):\r\n #print(surface_Z[i][j],curve3.Z[j])\r\n if (surface_Z[i][j]>curve.Z[j]):\r\n surface_Z[i][j]=curve.Z[j]\r\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"269005563","text":"import imageio,os\r\nimport time\r\ndef 
closest_to_mean(f):\r\n    # unused helper: returns the element of f closest to the mean of f\r\n    s=sum(f)/len(f)\r\n    def _temp_func(value):\r\n        return abs(value-s)\r\n    return list(sorted(f,key=_temp_func))[0]\r\ndef print_progress_bar(v,x=100):\r\n    pd=v*x\r\n    str1='['+('-'*int(pd))+'>'\r\n    str2='#'*(x-int(pd))\r\n    print(str1+str2+']')\r\n\r\nimg=imageio.imread(\"src.png\")\r\nW,H=size=(len(img),len(img[0]))\r\nprint(\"image:%dx%d\"%size)\r\ninput(\"press to start...\")\r\nprocessed=0\r\nstart_time=time.time()\r\nfor i in range(len(img)-1):\r\n    for j in range(len(img[i])-1):\r\n        print(\"image:%dx%d\"%size)\r\n        frac_done=processed/(W*H)\r\n        print(\"processed %d %.8f %%\"%(processed,frac_done*100))\r\n        print(\"processing %d,%d...\"%(i,j))\r\n        print_progress_bar(frac_done)\r\n        used_time=time.time()-start_time\r\n        # rough ETA: scale the elapsed time by the inverse of the fraction done\r\n        time_left=used_time/(frac_done+0.001)-used_time\r\n        print(\"used %.4f sec\\t%4.f sec left\"%(used_time,time_left))\r\n        # replace each pixel by the mean of its four neighbours (box blur)\r\n        for k in range(len(img[i][j])):\r\n            f=(img[i+1][j][k]\r\n              ,img[i-1][j][k]\r\n              ,img[i][j+1][k]\r\n              ,img[i][j-1][k])\r\n            img[i][j][k]=sum(f)/len(f)\r\n        processed+=1\r\n        os.system('cls')\r\n    imageio.imwrite('dst.jpg',img)\r\n","sub_path":"prject1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"41444527","text":"import discord\r\nfrom discord.ext import commands\r\nfrom discord.utils import get\r\nimport json\r\nimport atexit\r\nimport uuid\r\n\r\n\r\nreaction_roles_data = {}\r\n\r\ntry:\r\n\twith open(\"reaction_roles.json\") as file:\r\n\t\treaction_roles_data = json.load(file)\r\nexcept (FileNotFoundError, json.JSONDecodeError) as ex:\r\n\twith open(\"reaction_roles.json\", \"w\") as file:\r\n\t\tjson.dump({}, file)\r\n\r\n\r\n@atexit.register\r\ndef store_reaction_roles():\r\n\twith open(\"reaction_roles.json\", \"w\") as file:\r\n\t\tjson.dump(reaction_roles_data, file)\r\n\r\n\r\ndef getMember(username, guild, client):\r\n\ttry:\r\n\t\tuserid = int(username)\r\n\t\tmember = guild.get_member(userid)\r\n\t\treturn member\r\n\texcept ValueError:\r\n\t\ttry:\r\n\t\t\tuser = guild.get_member_named(username)\r\n\t\t\tassert user is not None\r\n\t\t\treturn user\r\n\t\texcept AssertionError:\r\n\t\t\tif username.startswith('<@') and username.endswith('>'):\r\n\t\t\t\tusername = username[2:-1]\r\n\t\t\t\tif username[0] == '!':\r\n\t\t\t\t\tusername = username[1:]\r\n\t\t\t\tuserid = int(username)\r\n\t\t\t\tmember = guild.get_member(userid)\r\n\t\t\t\treturn member\r\n\t\t\telse:\r\n\t\t\t\traise Exception(f'Invalid user {username}')\r\n\r\n\r\nclass Utilities(commands.Cog):\r\n\t\"\"\"Only for Admins\"\"\"\r\n\tdef __init__(self, client):\r\n\t\tself.guild = None\r\n\t\tself.author = None\r\n\t\tself.client = client\r\n\r\n\t@commands.Cog.listener()\r\n\tasync def on_ready(self):\r\n\t\tprint(\"Admin cog ready\")\r\n\t\tprint(\"ReactionRoles ready.\")\r\n\r\n\tasync def cog_check(self, ctx):\r\n\t\tadmin = get(ctx.guild.roles, name=\"Admin\")\r\n\t\treturn admin in ctx.author.roles\r\n\r\n\t@commands.Cog.listener()\r\n\tasync def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):\r\n\t\trole, user = self.parse_reaction_payload(payload)\r\n\t\tif role is not None and user is not None:\r\n\t\t\tawait user.add_roles(role, reason=\"ReactionRole\")\r\n\r\n\t@commands.Cog.listener()\r\n\tasync def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent):\r\n\t\trole, user = self.parse_reaction_payload(payload)\r\n\t\tif role is not None and user is not 
None:\r\n\t\t\tawait user.remove_roles(role, reason=\"ReactionRole\")\r\n\r\n\t@commands.command()\r\n\tasync def shutdown(self, ctx):\r\n\t\tawait ctx.bot.logout()\r\n\r\n\t@commands.has_permissions(manage_channels=True)\r\n\t@commands.command()\r\n\tasync def reaction(\r\n\t\t\tself,\r\n\t\t\tctx,\r\n\t\t\temote,\r\n\t\t\trole: discord.Role,\r\n\t\t\tchannel: discord.TextChannel,\r\n\t\t\ttitle,\r\n\t\t\tmessage,\r\n\t):\r\n\t\t\"\"\"Add a reaction\"\"\"\r\n\t\tembed = discord.Embed(title=title, description=message)\r\n\t\tmsg = await channel.send(embed=embed)\r\n\t\tawait msg.add_reaction(emote)\r\n\t\tself.add_reaction(ctx.guild.id, emote, role.id, channel.id, msg.id)\r\n\r\n\t@commands.has_permissions(manage_channels=True)\r\n\t@commands.command()\r\n\tasync def reaction_add(\r\n\t\t\tself, ctx, emote, role: discord.Role, channel: discord.TextChannel, message_id\r\n\t):\r\n\t\t\"\"\"Add reaction role\"\"\"\r\n\t\tself.add_reaction(ctx.guild.id, emote, role.id, channel.id, message_id)\r\n\r\n\t@commands.has_permissions(manage_channels=True)\r\n\t@commands.command()\r\n\tasync def reactions(self, ctx):\r\n\t\t\"\"\"List all reaction roles\"\"\"\r\n\t\tguild_id = ctx.guild.id\r\n\t\tdata = reaction_roles_data.get(str(guild_id), None)\r\n\t\tembed = discord.Embed(title=\"Reaction Roles\")\r\n\t\tif data is None:\r\n\t\t\tembed.description = \"There are no reaction roles set up right now.\"\r\n\t\telse:\r\n\t\t\tfor index, rr in enumerate(data):\r\n\t\t\t\temote = rr.get(\"emote\")\r\n\t\t\t\trole_id = rr.get(\"roleID\")\r\n\t\t\t\trole = ctx.guild.get_role(role_id)\r\n\t\t\t\tchannel_id = rr.get(\"channelID\")\r\n\t\t\t\tmessage_id = rr.get(\"messageID\")\r\n\t\t\t\tembed.add_field(\r\n\t\t\t\t\tname=index,\r\n\t\t\t\t\tvalue=f\"{emote} - @{role} - [message](https://www.discordapp.com/channels/{guild_id}/{channel_id}/{message_id})\",\r\n\t\t\t\t\tinline=False,\r\n\t\t\t\t)\r\n\t\tawait ctx.send(embed=embed)\r\n\r\n\t@commands.has_permissions(manage_channels=True)\r\n\t@commands.command()\r\n\tasync def reaction_remove(self, ctx, index: int):\r\n\t\t\"\"\"Remove reaction role\"\"\"\r\n\t\tguild_id = ctx.guild.id\r\n\t\tdata = reaction_roles_data.get(str(guild_id), None)\r\n\t\tembed = discord.Embed(title=f\"Remove Reaction Role {index}\")\r\n\t\trr = None\r\n\t\tif data is None:\r\n\t\t\tembed.description = \"Given Reaction Role was not found.\"\r\n\t\telse:\r\n\t\t\tembed.description = (\r\n\t\t\t\t\"Do you wish to remove the reaction role below? 
Please react with 🗑️.\"\r\n\t\t\t)\r\n\t\t\trr = data[index]\r\n\t\t\temote = rr.get(\"emote\")\r\n\t\t\trole_id = rr.get(\"roleID\")\r\n\t\t\trole = ctx.guild.get_role(role_id)\r\n\t\t\tchannel_id = rr.get(\"channelID\")\r\n\t\t\tmessage_id = rr.get(\"messageID\")\r\n\t\t\t_id = rr.get(\"id\")\r\n\t\t\tembed.set_footer(text=_id)\r\n\t\t\tembed.add_field(\r\n\t\t\t\tname=index,\r\n\t\t\t\tvalue=f\"{emote} - @{role} - [message](https://www.discordapp.com/channels/{guild_id}/{channel_id}/{message_id})\",\r\n\t\t\t\tinline=False,\r\n\t\t\t)\r\n\t\tmsg = await ctx.send(embed=embed)\r\n\t\tif rr is not None:\r\n\t\t\tawait msg.add_reaction(\"🗑️\")\r\n\r\n\t\t\tdef check(reaction, user):\r\n\t\t\t\treturn (\r\n\t\t\t\t\t\treaction.message.id == msg.id\r\n\t\t\t\t\t\tand user == ctx.message.author\r\n\t\t\t\t\t\tand str(reaction.emoji) == \"🗑️\"\r\n\t\t\t\t)\r\n\r\n\t\t\treaction, user = await self.client.wait_for(\"reaction_add\", check=check)\r\n\t\t\tdata.remove(rr)\r\n\t\t\treaction_roles_data[str(guild_id)] = data\r\n\t\t\tstore_reaction_roles()\r\n\r\n\tdef add_reaction(self, guild_id, emote, role_id, channel_id, message_id):\r\n\t\tif not str(guild_id) in reaction_roles_data:\r\n\t\t\treaction_roles_data[str(guild_id)] = []\r\n\t\treaction_roles_data[str(guild_id)].append(\r\n\t\t\t{\r\n\t\t\t\t\"id\": str(uuid.uuid4()),\r\n\t\t\t\t\"emote\": emote,\r\n\t\t\t\t\"roleID\": role_id,\r\n\t\t\t\t\"channelID\": channel_id,\r\n\t\t\t\t\"messageID\": message_id,\r\n\t\t\t}\r\n\t\t)\r\n\t\tstore_reaction_roles()\r\n\r\n\tdef parse_reaction_payload(self, payload: discord.RawReactionActionEvent):\r\n\t\tguild_id = payload.guild_id\r\n\t\tdata = reaction_roles_data.get(str(guild_id), None)\r\n\t\tif data is not None:\r\n\t\t\tfor rr in data:\r\n\t\t\t\temote = rr.get(\"emote\")\r\n\t\t\t\tif payload.message_id == rr.get(\"messageID\"):\r\n\t\t\t\t\tif payload.channel_id == rr.get(\"channelID\"):\r\n\t\t\t\t\t\tif str(payload.emoji) == emote:\r\n\t\t\t\t\t\t\tguild = self.client.get_guild(guild_id)\r\n\t\t\t\t\t\t\trole = guild.get_role(rr.get(\"roleID\"))\r\n\t\t\t\t\t\t\tuser = guild.get_member(payload.user_id)\r\n\t\t\t\t\t\t\treturn role, user\r\n\t\treturn None, None\r\n\r\n\t@commands.command(pass_context=True, aliases=[\"purge\"])\r\n\t@commands.has_permissions(administrator=True)\r\n\tasync def clear(self, ctx, limit: int):\r\n\t\t\"\"\"Clears some messages\"\"\"\r\n\t\tawait ctx.channel.purge(limit=limit)\r\n\r\n\t@commands.command()\r\n\t@commands.has_permissions(manage_channels=True)\r\n\tasync def lockdown(self, ctx, role: discord.Role):\r\n\t\t\"\"\"Locks a channel\"\"\"\r\n\t\tawait ctx.channel.set_permissions(role, send_messages=False, embed_links=False, attach_files=False)\r\n\t\tawait ctx.send(ctx.channel.mention + \" ***is now in lockdown.***\")\r\n\r\n\t@commands.command()\r\n\t@commands.has_permissions(manage_channels=True)\r\n\tasync def unlock(self, ctx, role: discord.Role):\r\n\t\t\"\"\"Unlocks a channel\"\"\"\r\n\t\tawait ctx.channel.set_permissions(role, send_messages=True, embed_links=True, attach_files=True)\r\n\t\tawait ctx.send(ctx.channel.mention + \" ***is now unlocked.***\")\r\n\r\n\t@commands.command(pass_context=True)\r\n\tasync def giverole(self, ctx, user: discord.Member, role: discord.Role):\r\n\t\t\"\"\"Gives a role to a user\"\"\"\r\n\t\tawait user.add_roles(role)\r\n\t\tawait ctx.send(f\"hey {ctx.author.name}, {user.name} has been giving a role called: 
{role.name}\")\r\n\r\n\t@commands.command(aliases=['make_role'])\r\n\t@commands.has_permissions(manage_roles=True)\r\n\tasync def create_role(self, ctx, *, name):\r\n\t\t\"\"\"Creates a role\"\"\"\r\n\t\tguild = ctx.guild\r\n\t\tawait guild.create_role(name=name)\r\n\t\tawait ctx.send(f'Role `{name}` has been created')\r\n\r\n\t@commands.command(name=\"slap\", aliases=[\"warn\"])\r\n\tasync def slap(self, ctx, members: commands.Greedy[discord.Member], *, reason='no reason'):\r\n\t\t\"\"\"Warns someone\"\"\"\r\n\t\tslapped = \", \".join(x.name for x in members)\r\n\t\tawait ctx.send('{} just got slapped for {}'.format(slapped, reason))\r\n\r\n\t@commands.command(name='create-channel')\r\n\tasync def create_channel(self, ctx, channel_name='new-channel'):\r\n\t\t\"\"\"Creates a channel\"\"\"\r\n\t\tguild = ctx.guild\r\n\t\texisting_channel = discord.utils.get(guild.channels, name=channel_name)\r\n\t\tif not existing_channel:\r\n\t\t\tprint(f'Creating a new channel: {channel_name}')\r\n\t\t\tawait guild.create_text_channel(channel_name)\r\n\r\n\t@commands.command(pass_context=True)\r\n\tasync def chnick(self, ctx, member: discord.Member, nick):\r\n\t\t\"\"\"Changes a Member`s nickname\"\"\"\r\n\t\tawait member.edit(nick=nick)\r\n\t\tawait ctx.send(f'Nickname was changed for {member.mention} ')\r\n\r\n\t@commands.command(name='embed', description='The embed command')\r\n\tasync def embed_command(self, ctx):\r\n\r\n\t\tdef check(ms):\r\n\t\t\treturn ms.channel == ctx.message.channel and ms.author == ctx.message.author\r\n\r\n\t\tawait ctx.send(content='What would you like the title to be?')\r\n\r\n\t\tmsg = await self.client.wait_for('message', check=check)\r\n\t\ttitle = msg.content\r\n\r\n\t\tawait ctx.send(content='What would you like the Description to be?')\r\n\t\tmsg = await self.client.wait_for('message', check=check)\r\n\t\tdesc = msg.content\r\n\r\n\t\tmsg = await ctx.send(content='Now generating the embed...')\r\n\r\n\t\tembed = discord.Embed(\r\n\t\t\ttitle=title,\r\n\t\t\tdescription=desc,\r\n\t\t\tcolor=discord.Color.blue()\r\n\t\t)\r\n\t\tembed.set_thumbnail(url=self.client.user.avatar_url)\r\n\r\n\t\tembed.set_author(\r\n\t\t\tname=ctx.message.author.name,\r\n\t\t\ticon_url=ctx.message.author.avatar_url\r\n\t\t)\r\n\r\n\t\tawait msg.edit(\r\n\t\t\tembed=embed,\r\n\t\t\tcontent=None\r\n\t\t)\r\n\r\n\t@commands.command()\r\n\t@commands.has_permissions(ban_members=True)\r\n\tasync def ban(self, ctx, member: discord.User = None, reason=None):\r\n\t\t\"\"\"Bans a member\"\"\"\r\n\t\ttry:\r\n\t\t\tembed = discord.Embed(colour=0xC0FF78)\r\n\t\t\tawait ctx.guild.ban(member, reason=reason)\r\n\t\t\tembed.add_field(name=\"Banned!\", value=f\"{member} is banned. Reason: \" + str(reason))\r\n\r\n\t\t\tawait ctx.channel.send(embed=embed)\r\n\t\texcept Exception as e:\r\n\t\t\tpass\r\n\t\t\tembed = discord.Embed(description=\"I don't have permission to ban them :(\", colour=discord.Colour.red())\r\n\r\n\t\t\tawait ctx.send(embed=embed)\r\n\r\n\t@commands.command()\r\n\t@commands.has_permissions(ban_members=True)\r\n\tasync def kick(self, ctx, member: discord.User = None, reason=None):\r\n\t\t\"\"\"Kicks a member\"\"\"\r\n\t\ttry:\r\n\r\n\t\t\tawait ctx.guild.kick(member, reason=reason)\r\n\t\t\tembed = discord.Embed(\r\n\t\t\t\tdescription=(f\"{member} is kicked. 
Reason: \" + str(reason)),\r\n\t\t\t\tcolour=discord.Colour.red())\r\n\t\t\tawait ctx.channel.send(embed=embed)\r\n\t\texcept Exception as e:\r\n\t\t\tpass\r\n\t\t\tembed = discord.Embed(description=\"I don't have permission to kick them :(\", colour=discord.Colour.blue())\r\n\r\n\t@commands.command()\r\n\tasync def unban(self, ctx, *, member):\r\n\t\t\"\"\"Unbans a member\"\"\"\r\n\t\tbanned_users = await ctx.guild.bans()\r\n\t\tmember_name, member_discriminator = member.split(\"#\")\r\n\r\n\t\tfor banned_entry in banned_users:\r\n\t\t\tuser = banned_entry.user\r\n\t\t\tif (user.name, user.discriminator) == (member_name, member_discriminator):\r\n\t\t\t\tawait ctx.guild.unban(user)\r\n\t\t\t\tawait ctx.send(f\"{user.mention} has been unbanned.:sunglasses:\")\r\n\t\t\t\treturn\r\n\r\n\t@commands.command()\r\n\tasync def mute(self, ctx, member: discord.Member):\r\n\t\t\"\"\"Mutes a member\"\"\"\r\n\t\trole = ctx.guild.get_role(784168505498533920)\r\n\t\tguild = ctx.guild\r\n\t\tif role not in guild.roles:\r\n\t\t\tperms = discord.Permissions(send_messages=False, speak=False)\r\n\t\t\tawait guild.create_role(name=\"Muted\", permissions=perms)\r\n\t\t\trole = discord.utils.get(ctx.guild.roles, name=\"Muted\")\r\n\t\t\tawait member.add_roles(role)\r\n\t\t\tembed = discord.Embed(\r\n\t\t\t\tdescription=f\"{member} was muted.\", colour=discord.Colour.red()\r\n\t\t\t)\r\n\t\t\tawait ctx.send(embed=embed)\r\n\t\telse:\r\n\t\t\tawait member.add_roles(role)\r\n\t\t\tembed = discord.Embed(\r\n\t\t\t\tdescription=f\"{member} was muted.\", colour=discord.Colour.red()\r\n\t\t\t)\r\n\t\t\tawait ctx.send(embed=embed)\r\n\r\n\t@mute.error\r\n\tasync def mute_error(self, ctx, error):\r\n\t\tif isinstance(error, commands.MissingRole):\r\n\t\t\tembed = discord.Embed(description=\"You don't have permission to do this\", colour=discord.Colour.blue())\r\n\t\t\tawait ctx.send(embed=embed)\r\n\t\telif isinstance(error, commands.BadArgument):\r\n\t\t\tembed = discord.Embed(description=\"That is not a valid member\", colour=discord.Colour.blue())\r\n\t\t\tawait ctx.send(embed=embed)\r\n\r\n\t@commands.command()\r\n\tasync def kick(self, msgdata):\r\n\t\tclient = msgdata['client']\r\n\t\tmessage = msgdata['message']\r\n\t\targs = msgdata['args']\r\n\t\tbot_owners = msgdata['bot_owners']\r\n\t\tperms = message.channel.permissions_for(message.author)\r\n\t\tif perms.kick_members or message.author.id in bot_owners:\r\n\t\t\tmember = getMember(args[0], message.guild, client)\r\n\t\t\tif not args[1:]:\r\n\t\t\t\treason = f'Kicked by {message.author}'\r\n\t\t\telse:\r\n\t\t\t\treason = ' '.join(args[1:])\r\n\t\t\tawait member.ban(reason=reason)\r\n\t\t\tawait message.channel.send(f'{member} ({member.id}) has been banned')\r\n\t\telse:\r\n\t\t\tawait message.channel.send('You don\\'t have permission to do that')\r\n\r\n\t@commands.command()\r\n\t@commands.has_permissions(manage_roles=True)\r\n\tasync def unmute(self, ctx, member: discord.Member = None, reason=None):\r\n\t\t\"\"\"Unmutes a member\"\"\"\r\n\t\trole = discord.utils.get(ctx.message.guild.roles, name=\"Muted\")\r\n\t\tawait member.remove_roles(role)\r\n\t\tembed = discord.Embed(colour=discord.Colour.green(), description=\"Unmuted! 
:slight_smile: This user is unmuted!\")\r\n\t\tawait ctx.send(embed=embed)\r\n\r\n\tdef send(self, param):\r\n\t\tpass\r\n\r\n\r\ndef setup(bot):\r\n\tbot.add_cog(Utilities(bot))\r\n","sub_path":"cog_admin.py","file_name":"cog_admin.py","file_ext":"py","file_size_in_byte":13128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"71907545","text":"from __future__ import unicode_literals\nfrom analytics.serializers import RecordSerializer\nfrom rest_framework.decorators import api_view, renderer_classes, authentication_classes\nfrom django.http import JsonResponse\nfrom django.contrib.auth.models import User\nimport logging\nimport os\nimport sys\nimport requests\nimport subprocess\nfrom subprocess import PIPE\nfrom subprocess import TimeoutExpired\nimport arcgis\nfrom arcgis import mapping\nfrom PIL import Image\nfrom io import BytesIO\n\nfrom reportlab.pdfgen import canvas\nfrom PyPDF2 import PdfFileWriter, PdfFileReader\nfrom rest_framework.permissions import AllowAny\nfrom django.views.decorators.csrf import ensure_csrf_cookie, csrf_protect\n\nfrom io import BytesIO\nfrom rest_framework.response import Response\nfrom django.http import HttpResponse\nfrom django.core.files import File\nfrom django.core import mail\nfrom datetime import datetime, date\nimport mimetypes\nimport json\nimport shlex\nimport threading\nimport traceback\nimport time\nfrom datetime import datetime\nfrom django.conf import settings\nMEDIA_ROOT = settings.MEDIA_ROOT\nSTATIC_ROOT = settings.STATIC_ROOT\nLDAP_URL = settings.LDAP_URL\nDEBUG = settings.DEBUG\n\nlogger = logging.getLogger(__name__)\n\n\ndef loggit(text):\n exc_type, exc_value, exc_traceback = sys.exc_info()\n logger.error(traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2))\n\n\ndef system_paths(environ):\n arcmap_path = {\n \"work\": r\"C:\\Python27\\ArcGIS10.4\\python.exe\",\n \"rtaa_testing\": r\"C:\\Python27\\ArcGIS10.5\\python.exe\"\n }\n arcmap_path = arcmap_path[environ]\n\n mxd_script = {\n \"work\": r\"C:\\GitHub\\rtaa_gis\\rtaa_gis\\printTool\\utils\\ConvertWebMaptoMXD.py\",\n \"rtaa_testing\": r\"C:\\GitHub\\rtaa_gis_django\\rtaa_gis\\printTool\\utils\\ConvertWebMaptoMXD.py\"\n }\n mxd_script = mxd_script[environ]\n\n media_dir = {\n \"work\": \"C:/GitHub/rtaa_gis/rtaa_gis/media\",\n \"staging\": r\"C:/GitHub/rtaa_gis_django/rtaa_gis/rtaa_gis/media\",\n \"production\": \"C:/inetpub/django_prod/rtaa_gis/rtaa_gis/media\",\n \"rtaa_testing\": r\"C:\\inetpub\\rtaa_gis_django_testing\\rtaa_gis\\media\"\n }\n media_dir = media_dir[environ]\n\n gdb_path = {\n \"work\": r\"C:\\ESRI_WORK_FOLDER\\rtaa\\MasterGDB\\MasterGDB_05_25_16\\MasterGDB_05_25_16.gdb\",\n \"staging\": r\"C:\\inetpub\\rtaa_gis_data\\MasterGDB_05_25_16\\MasterGDB_05_25_16.gdb\",\n \"production\": r\"C:\\inetpub\\rtaa_gis_data\\MasterGDB_05_25_16\\MasterGDB_05_25_16.gdb\",\n \"rtaa_testing\": r\"D:\\ConnectionFiles\\OSAuth@RTAA_MasterGDB.sde\"\n }\n gdb_path = gdb_path[environ]\n\n default_project = {\n \"work\": r\"C:\\Users\\rhughes\\Documents\\ArcGIS\\Projects\\RTAA_Printing_Publishing\\RTAA_Printing_Publishing.aprx\",\n \"staging\": r\"C:\\inetpub\\rtaa_gis_data\\RTAA_Printing_Publishing\\RTAA_Printing_Publishing.aprx\",\n \"production\": r\"C:\\inetpub\\rtaa_gis_data\\RTAA_Printing_Publishing\\RTAA_Printing_Publishing.aprx\",\n \"rtaa_testing\": r\"D:\\ArcPro\\RTAA_Publishing\\RTAA_Publishing.aprx\"\n }\n default_project = default_project[environ]\n\n layer_dir = {\n \"work\": 
r\"C:\\ESRI_WORK_FOLDER\\rtaa\\layers\",\n \"staging\": r\"C:\\inetpub\\rtaa_gis_data\\RTAA_Printing_Publishing\\FeatureLayers\",\n \"production\": r\"C:\\inetpub\\rtaa_gis_data\\RTAA_Printing_Publishing\\FeatureLayers\",\n \"rtaa_testing\": r\"D:\\ArcPro\\RTAA_Publishing\\FeatureLayers\"\n }\n layer_dir = layer_dir[environ]\n\n return {\n \"arcmap_path\": arcmap_path,\n \"mxd_script\": mxd_script,\n \"gdb_path\": gdb_path,\n \"layer_dir\": layer_dir,\n \"default_project\": default_project,\n \"media_dir\": media_dir\n }\n\n\ndef get_username(request):\n try:\n username = request.META['REMOTE_USER']\n except KeyError:\n username = request.user.username\n if not len(username):\n # This value is used for testing AJAX requests to the dev runserver\n username = \"siteadmin\"\n\n # create print directory if not exist\n local_name = username.split(\"\\\\\")[-1]\n user_dir = os.path.join(MEDIA_ROOT, \"users/{}\".format(local_name))\n if not os.path.exists(user_dir):\n os.mkdir(user_dir)\n print_dir = os.path.join(user_dir, \"prints\")\n if not os.path.exists(print_dir):\n os.mkdir(print_dir)\n\n return username, print_dir\n\n\ndef apply_watermark(watermark, target):\n try:\n logger.info(os.path.abspath(__file__))\n wmark_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), r'media\\printTool\\{}'.format(watermark))\n wmark = PdfFileReader(open(wmark_file, \"rb\"))\n output_file = PdfFileWriter()\n input_file = PdfFileReader(open(target, \"rb\"))\n combo_name = os.path.join(os.path.dirname(target), \"{}_temp.pdf\".format(os.path.basename(target).replace(\".pdf\", \"\")))\n new_file = canvas.Canvas(combo_name)\n new_file.save()\n\n page_count = input_file.getNumPages()\n\n for page_number in range(page_count):\n print(\"Watermarking page {} of {}\".format(page_number, page_count))\n input_page = input_file.getPage(page_number)\n input_page.mergePage(wmark.getPage(0))\n output_file.addPage(input_page)\n\n with open(combo_name, \"wb\") as outputStream:\n output_file.write(outputStream)\n\n # closing the streams allows the files to be renamed/removed\n wmark.stream.close()\n input_file.stream.close()\n\n os.remove(target)\n os.rename(combo_name, target)\n return target\n\n except Exception as e:\n loggit(e)\n\n\ndef name_file(out_folder, new_name, extension):\n full_name = \"{}.{}\".format(new_name, extension)\n\n if os.path.exists(os.path.join(out_folder, full_name)):\n v = 1\n full_name = \"{}_{}.{}\".format(new_name, v, extension)\n if os.path.exists(os.path.join(out_folder, full_name)):\n i = False\n while not i:\n v += 1\n full_name = \"{}_{}.{}\".format(new_name, v, extension)\n if not os.path.exists(os.path.join(out_folder, full_name)):\n i = True\n\n return os.path.join(out_folder, full_name)\n\n\n@api_view(['POST'])\n@ensure_csrf_cookie\ndef layout(request, format=None):\n try:\n username, print_dir = get_username(request)\n localname = username.split(\"\\\\\")[-1]\n data = request.POST\n url = data[\"url\"]\n title = data[\"title\"]\n layout_template = data['layout_template']\n\n # set the filename to be the Title of the map\n filename = name_file(print_dir, title, \"pdf\")\n\n # download the pdf map print from AGOL\n file = requests.get(url, auth=('data_owner', 'GIS@RTAA123!'))\n\n pdfOutputFile = open(filename, 'wb')\n pdfOutputFile.write(file.content)\n pdfOutputFile.close()\n\n # apply the watermark\n\n watermark = None\n if layout_template == \"Letter ANSI A Landscape\":\n watermark = \"Watermark_8_5_11_landscape.pdf\"\n elif layout_template == \"Letter ANSI A Portrait\":\n 
watermark = \"Watermark_8_5_11_portrait.pdf\"\n elif layout_template == \"Tabloid ANSI B Landscape\":\n watermark = \"Watermark_11_17_landscape.pdf\"\n elif layout_template == \"Tabloid ANSI B Portrait\":\n watermark = \"Watermark_11_17_portrait.pdf\"\n\n apply_watermark(watermark=watermark, target=filename)\n\n # rename map print and graphics file if it exists at temp.json\n graphics_file = os.path.join(print_dir, 'temp.json')\n if os.path.exists(graphics_file):\n try:\n os.remove(filename.replace(\".pdf\", \".json\"))\n except OSError:\n os.rename(graphics_file, filename.replace(\".pdf\", \".json\"))\n\n host = request.META[\"HTTP_HOST\"]\n media_url = settings.MEDIA_URL.lstrip(\"/\")\n media_url = media_url.rstrip(\"/\")\n\n if host == \"127.0.0.1:8080\":\n protocol = \"http\"\n else:\n protocol = \"https\"\n\n url = \"{}://{}/{}/users/{}/prints/{}\".format(protocol, host, media_url, localname, os.path.basename(filename))\n sec = os.path.getmtime(os.path.join(print_dir, filename))\n date = datetime.fromtimestamp(sec).date().isoformat()\n\n data = {\n \"method\": \"print\",\n \"app_name\": \"Print\"\n }\n serial = RecordSerializer(data=data, context={'request': request})\n if serial.is_valid():\n serial.save()\n else:\n logger.error(\"Unable to save count :: {}\".format(data))\n return JsonResponse({\"url\": url, \"date\": date})\n except Exception as e:\n loggit(e)\n\n\n@api_view(['POST'])\n@ensure_csrf_cookie\ndef parseGraphics(request, format=None):\n try:\n # the get_username will also check for and create the print directiories\n username, print_dir = get_username(request)\n\n web_map = request.data.get('web_map_json')\n map = json.loads(web_map)\n op_layers = map[\"operationalLayers\"]\n\n # create an initial temp graphics file to rename\n tempfile = os.path.join(print_dir, \"temp.json\")\n temp_file = open(tempfile, 'w')\n\n cont = []\n for x in op_layers:\n if \"draw_results\" in x[\"id\"].lower() or \"map_graphics\" in x[\"id\"].lower():\n cont.append(x)\n json_cont = json.dumps(cont).replace(\"False\", \"false\").replace(\"True\", \"true\").replace(\"None\", \"null\")\n temp_file.write(json_cont)\n temp_file.close()\n\n resp = Response()\n # read json file, if it is empty delete it from the server\n text = open(tempfile, 'r').read()\n if text == \"[]\":\n os.remove(tempfile)\n resp.data = {\"message\": \"Empty drawings graphics\"}\n else:\n resp.data = {\"message\": \"Graphics file saved\"}\n return resp\n\n except Exception as e:\n loggit(e)\n\n\n@api_view(['GET'])\n@ensure_csrf_cookie\ndef getPrintList(request, format=None):\n username, print_dir = get_username(request)\n localname = username.split(\"\\\\\")[-1]\n logger.info(localname)\n\n response = Response()\n response.data = list()\n if os.path.exists(print_dir):\n files = os.listdir(print_dir)\n # selection will hold the files with the specified extensions\n selection = []\n for x in [\".png\", \".pdf\", \".jpg\", \".gif\", \".eps\", \".svg\", \".svgz\"]:\n selection.extend([f for f in files if f.endswith(x)])\n\n response['Cache-Control'] = 'no-cache'\n host = request.META[\"HTTP_HOST\"]\n if host == \"127.0.0.1:8080\":\n protocol = \"http\"\n else:\n protocol = \"https\"\n media_url = settings.MEDIA_URL.lstrip(\"/\")\n media_url = media_url.rstrip(\"/\")\n\n for out_file in selection:\n url = \"{}://{}/{}/users/{}/prints/{}\".format(protocol, host, media_url, localname, out_file)\n sec = os.path.getmtime(os.path.join(print_dir, out_file))\n date = datetime.fromtimestamp(sec).date().isoformat()\n 
response.data.append({\"date\": date, \"url\": url})\n else:\n response.data.append(\"Error, print directory not found\")\n\n return response\n\n\n@api_view(['GET'])\n@ensure_csrf_cookie\ndef getMarkupList(request, format=None):\n username, print_dir = get_username(request)\n localname = username.split(\"\\\\\")[-1]\n response = Response()\n response.data = list()\n if os.path.exists(print_dir):\n files = os.listdir(print_dir)\n selection = [x for x in files if x.endswith(\".json\")]\n response['Cache-Control'] = 'no-cache'\n host = request.META[\"HTTP_HOST\"]\n if host == \"127.0.0.1:8080\":\n protocol = \"http\"\n else:\n protocol = \"https\"\n media_url = settings.MEDIA_URL.lstrip(\"/\")\n media_url = media_url.rstrip(\"/\")\n for out_file in selection:\n full_path = os.path.join(print_dir, out_file)\n # count the number of graphics\n obj = json.loads(open(full_path).read())\n feature_cnt = 0\n layers = obj[0][\"featureCollection\"][\"layers\"]\n for x in layers:\n feats = x[\"featureSet\"][\"features\"]\n feature_cnt += len(feats)\n\n sec = os.path.getmtime(full_path)\n date = datetime.fromtimestamp(sec).date().isoformat()\n url = \"{}://{}/{}/users/{}/prints/{}\".format(protocol, host, media_url, localname, out_file)\n response.data.append({\"date\": date, \"url\": url, \"feature_count\": feature_cnt})\n else:\n response.data.append(\"Error, print directory not found\")\n\n return response\n\n\n@api_view(['POST'])\n@ensure_csrf_cookie\ndef delete_file(request, format=None):\n username, print_dir = get_username(request)\n data = request.POST\n file_name = data[\"filename\"].replace(\"\\n\", \"\")\n\n response = Response()\n if os.path.exists(print_dir):\n old_dir = os.getcwd()\n os.chdir(print_dir)\n if os.path.exists(file_name):\n os.remove(file_name)\n data = \"File {} Deleted from Server\".format(file_name)\n else:\n data = \"File {} not found in user's print folder\".format(file_name)\n os.chdir(old_dir)\n else:\n data = \"Failed to located user's media folder\"\n response.data = data\n return response\n\n\n@api_view(['POST'])\n@ensure_csrf_cookie\ndef emailExhibit(request, format=None):\n username, print_dir = get_username(request)\n user_obj = User.objects.get(username=username)\n data = request.POST\n exhibit_url = data[\"exhibit_url\"].replace(\"\\n\", \"\")\n recipient = data[\"recipient\"].replace(\"\\n\", \"\")\n\n cc = data[\"cc\"]\n if type(cc) is str:\n cc = [cc]\n\n bcc = [\"rhughes@aroraengineers.com\"]\n from_email = user_obj.email\n # to allow for testing\n if settings.LDAP_URL == \"gisapps.aroraengineers.com\":\n recipient = \"richardh522@gmail.com\"\n from_email = \"rhughes@aroraengineers.com\"\n cc = [\"rhughes@aroraengineers.com\"]\n\n subject = data[\"subject\"].replace(\"\\n\", \"\")\n message = data[\"message\"].replace(\"\\n\", \"\")\n splits = exhibit_url.split(\"/\")\n start = splits.index(\"users\")\n server_file = os.path.join(MEDIA_ROOT, \"/\".join(splits[start:]))\n base_name = os.path.basename(server_file)\n content = open(server_file, 'rb').read()\n\n with mail.get_connection() as connection:\n mail.EmailMessage(\n subject=\"{}\".format(subject),\n body=\"From - {} \\n {}\".format(username, message),\n from_email=from_email,\n to=[recipient],\n cc=cc,\n bcc=bcc,\n attachments=[(base_name, content, 'application/pdf')],\n connection=connection\n ).send()\n\n response = Response(\"success\")\n return 
response","sub_path":"rtaa_gis/printTool/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"387651443","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom market3d import Market3D\nfrom model import Model,Model_dense,Model_dense2\nfrom model_efficient import ModelE_dense\nfrom model_efficient2 import ModelE_dense2\nfrom dgl.data.utils import download, get_download_dir\nimport torch.backends.cudnn as cudnn\nfrom functools import partial\nimport tqdm\nimport urllib\nimport os\nimport argparse\nimport scipy.io\nimport yaml\nimport numpy as np\nfrom ptflops import get_model_complexity_info\nfrom DGCNN import DGCNN\nfrom pointnet2_model import PointNet2SSG, PointNet2MSG\nimport swa_utils\nfrom utils import L2norm\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')\nparser.add_argument('--feature_dims',default=[64,128,256,512], type=list,help='gpu_ids: e.g. 0 0,1,2 0,2')\nparser.add_argument('--which-epoch', type=str, default='last')\nparser.add_argument('--dataset-path', type=str, default='./2DMarket/')\nparser.add_argument('--load-model-path', type=str, default='./snapshot/')\nparser.add_argument('--name', type=str, default='b24_lr2')\nparser.add_argument('--cluster', type=str, default='xyz')\nparser.add_argument('--conv', type=str, default='EdgeConv')\nparser.add_argument('--num-epochs', type=int, default=100)\nparser.add_argument('--num-workers', type=int, default=8)\nparser.add_argument('--num_conv', type=int, default=1)\nparser.add_argument('--init_points', type=int, default=512)\nparser.add_argument('--stride', type=int, default=2)\nparser.add_argument('--class-num', type=int, default=751)\nparser.add_argument('--k', type=int, default=20)\nparser.add_argument('--use_DGCNN', action='store_true', help='use DGCNN' )\nparser.add_argument('--use2', action='store_true', help='use model2' )\nparser.add_argument('--use_SSG', action='store_true', help='use SSG' )\nparser.add_argument('--use_MSG', action='store_true', help='use MSG' )\nparser.add_argument('--npart', type=int, default=1)\nparser.add_argument('--channel', type=int, default=6)\nparser.add_argument('--batch-size', type=int, default=48)\nparser.add_argument('--resume', action='store_true', help='resume training' )\nparser.add_argument('--flip', action='store_true', help='flip' )\nparser.add_argument('--id_skip', action='store_true', help='skip connection' )\nparser.add_argument('--use_dense', action='store_true', help='use dense' )\nparser.add_argument('--norm', action='store_true', help='use normalized input' )\nparser.add_argument('--bg', action='store_true', help='use background' )\nparser.add_argument('--light', action='store_true', help='use light model' )\nparser.add_argument('--no_se', action='store_true', help='use light model' )\nparser.add_argument('--final_bn', action='store_true', help='add bn' )\nparser.add_argument('--slim', type=float, default=0.3 )\nparser.add_argument('--layer_drop', type=float, default=0.0 )\nparser.add_argument('--norm_layer', type=str, default='bn')\nparser.add_argument('--rotate', type=int, default=0)\nparser.add_argument('--no_xyz', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--pre_act', action='store_true')\nparser.add_argument('--D2', 
action='store_true')\nparser.add_argument('--efficient', action='store_true')\nparser.add_argument('--wa', action='store_true')\nparser.add_argument('--update_bn', action='store_true')\nparser.add_argument('--res_scale', type=float, default=1.0 )\nopt = parser.parse_args()\n\nconfig_path = os.path.join('./snapshot',opt.name,'opts.yaml')\nwith open(config_path, 'r') as stream:\n config = yaml.safe_load(stream)\n#assert not ('MSMT' in opt.name)\nopt.slim = config['slim'] \nprint('slim: %.2f:'%opt.slim)\nopt.use_dense = config['use_dense']\nopt.k = config['k']\nopt.class_num = config['class_num']\nopt.channel = config['channel']\nopt.init_points= config['init_points'] \nopt.use_dense2 = config['use_dense2']\nopt.norm = config['norm']\nopt.npart = config['npart']\nopt.id_skip = config['id_skip']\nopt.feature_dims = config['feature_dims']\nopt.light = config['light']\n\nif 'use2' in config:\n opt.use2 = config['use2']\n\nif 'res_scale' in config:\n opt.res_scale = config['res_scale']\n\nif 'bg' in config:\n opt.bg = config['bg']\n\nif 'cluster' in config:\n opt.cluster = config['cluster']\n\nif 'use_DGCNN' in config:\n opt.use_DGCNN = config['use_DGCNN']\nif 'use_SSG' in config:\n opt.use_SSG = config['use_SSG']\n opt.use_MSG = config['use_MSG']\n\nif 'conv' in config:\n opt.conv = config['conv']\n\nif 'no_se' in config:\n opt.no_se = config['no_se']\n\nif 'no_xyz' in config:\n opt.no_xyz = config['no_xyz']\nif 'D2' in config:\n opt.D2 = config['D2']\n\nif 'pre_act' in config:\n opt.pre_act = config['pre_act']\n\nif 'norm_layer' in config:\n opt.norm_layer = config['norm_layer']\nelse:\n opt.norm_layer = 'bn'\n\nif 'stride' in config:\n opt.stride = config['stride']\nelse:\n opt.stride = 2\n\nif 'layer_drop' in config:\n opt.layer_drop = config['layer_drop']\nelse:\n opt.layer_drop = 0\n\nif 'num_conv' in config:\n opt.num_conv = config['num_conv']\nelse:\n opt.num_conv = 1\n\nif 'efficient' in config:\n opt.efficient = config['efficient']\n\nif 'final_bn' in config:\n opt.final_bn = config['final_bn']\n\nif 'shuffle' in config:\n opt.shuffle = config['shuffle']\n\n#if type(opt.feature_dims)==:\n# str_features = opt.feature_dims.split(',')\n# features = []\n# for feature in str_features:\n# feature = int(feature)\n# features.append(feature)\n# opt.feature_dims = features\n\nnum_workers = opt.num_workers\nbatch_size = opt.batch_size\n\nif not opt.resume:\n str_ids = opt.gpu_ids.split(',')\n gpu_ids = []\n for str_id in str_ids:\n gid = int(str_id)\n if gid >=0:\n gpu_ids.append(gid)\n opt.gpu_ids = gpu_ids\n\n# set gpu ids\nif len(opt.gpu_ids)>0:\n cudnn.enabled = True\n cudnn.benchmark = True\n\nCustomDataLoader = partial(\n DataLoader,\n num_workers=num_workers,\n batch_size=batch_size,\n shuffle=False,\n drop_last=False)\n\ndef extract_feature(model, test_loader, dev, rotate = 0):\n model.eval()\n\n total_correct = 0\n count = 0\n features = torch.FloatTensor()\n with tqdm.tqdm(test_loader, ascii=True) as tq:\n for data, label in tq: # n,6890,6\n num_examples = label.shape[0]\n n, c, l = data.size()\n ff = torch.FloatTensor(n, 512*opt.npart ).zero_().cuda()\n data, label = data.to(dev), label.to(dev).squeeze().long()\n xyz = data[:,:,0:3].contiguous()\n rgb = data[:,:,3:].contiguous()\n if rotate == 90:\n xyz_clone = xyz.clone()\n xyz[:,:,0] = xyz_clone[:,:,1]\n xyz[:,:,1] = xyz_clone[:,:,0]\n elif rotate == 180:\n xyz[:,:,1] *= -1\n output = model(xyz, rgb, istrain=False)\n if opt.npart>1:\n for i in range(opt.npart):\n start = 512*i\n end = 512*i + 512\n ff[:, start:end] += 
L2norm(output[i])\n else:\n ff += output\n #flip\n #xyz[:,:,0] *= -1\n #scale\n xyz *=1.1\n output = model(xyz, rgb, istrain=False)\n if opt.npart>1:\n for i in range(opt.npart):\n start = 512*i\n end = 512*i + 512\n ff[:, start:end] += L2norm(output[i])\n else:\n ff += output\n\n ff = L2norm(ff)\n #fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)\n #ff = ff.div(fnorm.expand_as(ff))\n features = torch.cat((features,ff.data.cpu()), 0)\n return features\n\ndef get_id(img_path):\n camera_id = []\n labels = []\n for path, v in img_path:\n #filename = path.split('/')[-1]\n filename = os.path.basename(path)\n label = filename[0:4]\n camera = filename.split('c')[1]\n if label[0:2]=='-1':\n labels.append(-1)\n else:\n labels.append(int(label))\n camera_id.append(int(camera[0]))\n return camera_id, labels\n\n\nmarket_data = Market3D(opt.dataset_path, flip=False, slim=opt.slim, norm =opt.norm, erase=0, channel = opt.channel, bg = opt.bg, D2 = opt.D2)\n\ntrain_loader = CustomDataLoader(market_data.train_all())\nquery_loader = CustomDataLoader(market_data.query())\ngallery_loader = CustomDataLoader(market_data.gallery())\n\ngallery_path = market_data.gallery().imgs\nquery_path = market_data.query().imgs\n\ngallery_cam,gallery_label = get_id(gallery_path)\nquery_cam,query_label = get_id(query_path)\n\ndev = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nif opt.use_dense and not opt.efficient:\n model = Model_dense(opt.k, opt.feature_dims, [512], output_classes=opt.class_num, init_points = opt.init_points, input_dims=3, npart = opt.npart, id_skip = opt.id_skip, res_scale = opt.res_scale, light=opt.light, cluster = opt.cluster, conv = opt.conv, use_xyz = not opt.no_xyz, use_se = not opt.no_se, pre_act = opt.pre_act, norm = opt.norm_layer, stride = opt.stride, layer_drop = opt.layer_drop)\nelif opt.use2:\n model = ModelE_dense2(opt.k, opt.feature_dims, [512], output_classes=opt.class_num, init_points = opt.init_points, input_dims=3, npart = opt.npart, id_skip = opt.id_skip, res_scale = opt.res_scale, light=opt.light, cluster = opt.cluster, conv = opt.conv, use_xyz = not opt.no_xyz, use_se = not opt.no_se, pre_act = opt.pre_act, norm = opt.norm_layer, stride = opt.stride, layer_drop = opt.layer_drop, num_conv = opt.num_conv, shuffle = opt.shuffle)\nelif opt.efficient:\n model = ModelE_dense(opt.k, opt.feature_dims, [512], output_classes=opt.class_num, init_points = opt.init_points, input_dims=3, npart = opt.npart, id_skip = opt.id_skip, res_scale = opt.res_scale, light=opt.light, cluster = opt.cluster, conv = opt.conv, use_xyz = not opt.no_xyz, use_se = not opt.no_se, pre_act = opt.pre_act, norm = opt.norm_layer, stride = opt.stride, layer_drop = opt.layer_drop, num_conv = opt.num_conv )\nelif opt.use_dense2:\n model = Model_dense2(opt.k, opt.feature_dims, [512], output_classes=opt.class_num, init_points = opt.init_points, input_dims=3, npart = opt.npart, id_skip = opt.id_skip, res_scale = opt.res_scale, light = opt.light, cluster = opt.cluster, conv=opt.conv, use_xyz = not opt.no_xyz, pre_act = opt.pre_act)\nelif opt.use_DGCNN:\n model = DGCNN( 20, [64,128,256,512], [512,512], output_classes=opt.class_num, input_dims=3)\nelif opt.use_SSG:\n model = PointNet2SSG(output_classes=opt.class_num, init_points = 512, input_dims=3, use_xyz = not opt.no_xyz)\nelif opt.use_MSG:\n model = PointNet2MSG(output_classes=opt.class_num, init_points = 512, input_dims=3, use_xyz = not opt.no_xyz)\nelse:\n model = Model(opt.k, opt.feature_dims, [512], output_classes=opt.class_num, init_points = 
opt.init_points, input_dims=3, npart = opt.npart, id_skip = opt.id_skip, res_scale = opt.res_scale, light = opt.light, cluster = opt.cluster, conv=opt.conv, use_xyz = not opt.no_xyz, pre_act = opt.pre_act, norm = opt.norm_layer, stride = opt.stride, layer_drop = opt.layer_drop)\n\nprint(model)\n#model = model.to(dev)\nmodel_path = opt.load_model_path+opt.name+'/model_%s.pth'%opt.which_epoch\n\ntry:\n model.load_state_dict(torch.load(model_path, map_location=dev))\n model.proj_output = nn.Sequential()\n model.classifier = nn.Sequential()\nexcept:\n model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids).cuda()\n model.load_state_dict(torch.load(model_path, map_location=dev))\n model.module.proj_output = nn.Sequential()\n model.module.classifier = nn.Sequential()\n if opt.npart>1:\n for i in range(opt.npart):\n model.module.proj_outputs[i] = nn.Sequential()\n\n\nprint(model_path)\n\nbatch0,label0 = next(iter(query_loader))\nbatch0 = batch0[0].unsqueeze(0)\nprint(batch0.shape)\nmacs, params = get_model_complexity_info(model, batch0, ((round(6890*opt.slim), 3) ), as_strings=True, print_per_layer_stat=False, verbose=True)\n#print(macs)\nprint('{:<30} {:<8}'.format('Computational complexity: ', macs))\nprint('{:<30} {:<8}'.format('Number of parameters: ', params))\n#model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n#params = sum([np.prod(p.size()) for p in model_parameters])\n#print('Number of parameters: %.2f M'% (params/1e6) )\n\nif not os.path.exists('./snapshot/'):\n os.mkdir('./snapshot/')\nsave_model_path = './snapshot/' + opt.name\nif not os.path.exists(save_model_path):\n os.mkdir(save_model_path)\n\nif opt.update_bn:\n with torch.no_grad():\n swa_utils.update_bn( train_loader, model, device = 'cuda')\n\n# Extract feature\nwith torch.no_grad():\n query_feature = extract_feature(model, query_loader, dev, rotate = opt.rotate)\n gallery_feature = extract_feature(model, gallery_loader, dev, rotate = opt.rotate)\n\n# Save to Matlab for check\nresult = {'gallery_f':gallery_feature.numpy(),'gallery_label':gallery_label,'gallery_cam':gallery_cam,'query_f':query_feature.numpy(),'query_label':query_label,'query_cam':query_cam}\nscipy.io.savemat('pytorch_result.mat',result)\n\nprint(opt.name)\nresult = './snapshot/%s/result.txt'%opt.name\nos.system('python evaluate_gpu.py | tee -a %s'%result)\n","sub_path":"test_M.py","file_name":"test_M.py","file_ext":"py","file_size_in_byte":13272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"169246921","text":"# 102. Binary Tree Level Order Traversal\n\n# Given a binary tree, return the level order traversal of its nodes' values. 
(ie, from left to right, level by level).\n\n# For example:\n# Given binary tree [3,9,20,null,null,15,7],\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n# return its level order traversal as:\n# [\n# [3],\n# [9,20],\n# [15,7]\n# ]\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nfrom typing import List\nfrom queue import Queue\n\n\nclass Solution:\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n results = []\n if not root:\n return results\n q1 = Queue()\n q2 = Queue()\n q1.put(root)\n while q1.qsize() > 0:\n tmp = []\n while q1.qsize() > 0:\n node = q1.get()\n tmp.append(node.val)\n if node.left:\n q2.put(node.left)\n if node.right:\n q2.put(node.right)\n results.append(tmp)\n q1, q2 = q2, q1\n return results\n","sub_path":"Python/102.py","file_name":"102.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"358703773","text":"from pygame.draw import aalines\nfrom libhex import *\nfrom constants import *\n\nfrom math import sin, cos, tau\n\ndef draw_hexagon(Surface, position, radius=RADIUS):\n points = [(sin(i / 6 * tau) * radius + position[0], cos(i / 6 * tau) * radius + position[1]) for i in range(0, 6)]\n # Filled\n #pygame.draw.polygon(Surface, BLUE, points)\n # Only lines\n return aalines(Surface,\n BLACK,\n True,\n points,\n )\n\ndef draw_grid(Surface):\n for y in range(-HEIGHT, HEIGHT):\n for x in range(-WIDTH, WIDTH):\n draw_hexagon(Surface, Hex(x, y).to_pixel())\n\n","sub_path":"src/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"329894956","text":"# This program computes the area A = g*h/2 of a triangle \n\n# read the height and the base from the user\nh = input('Enter the height of the triangle here: ')\ng = input('Enter the base of the triangle here: ')\n\n# convert the strings to floats\nh = float(h)\ng = float(g)\n\n\nA = g*h/2.0\n\nprint('The area of the triangle is: ', A)","sub_path":"programmer/grunnleggende/lese_inn_tall/areal_trekant.py","file_name":"areal_trekant.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"164763582","text":"# Importing required libraries\n# Using train_test_split for splitting the data randomly\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# Define the class of a perceptron\nclass Perceptron:\n \n # Define the init function\n def __init__(self, weights, bias=1, learning_rate=0.3):\n \n # Initialize all variables\n self.weights = np.array(weights)\n self.bias = bias\n self.learning_rate = learning_rate\n \n # Define the step function/ sign function \n @staticmethod\n def sign_function(x):\n \n # If x <= 0 then class 0 else class 1\n if x <= 0:\n return 0\n else:\n return 1\n \n # Define the call function \n def __call__(self, arr):\n \n # Classify a point\n bias_arr = [self.bias]\n arr = np.concatenate( (arr, bias_arr) )\n result = self.weights @ arr\n return Perceptron.sign_function(result)\n \n # Define function to update weights \n def update_weights(self, target_result, arr):\n \n # If input not numpy array then convert it to np array\n if type(arr) != np.ndarray:\n arr = np.array(arr) \n \n # classify the points\n calculated_result = 
self(arr)\n \n # If point misclassified then update the weights\n error = target_result - calculated_result\n if error != 0:\n bias_arr = [self.bias]\n arr = np.concatenate( (arr, bias_arr) )\n correction = error * arr * self.learning_rate\n self.weights += correction\n \n # Define function to test the model \n def test_model(self, data, labels):\n \n # Variable to store count of correct classifications \n count = 0\n \n # If point not misclassified count+=1\n for sample, label in zip(data, labels):\n result = self(sample) # predict\n if result == label:\n count += 1\n return count\n \n \n \n# Opening and reading the data files \nclass1 = pd.read_csv(\"linearly_separable_data/Class1.txt\", sep=\"\\t\", header=None)\nclass2 = pd.read_csv(\"linearly_separable_data/Class2.txt\", sep=\"\\t\", header=None)\n\n# Third column NAN eliminated\nclass1 = class1[[0,1]]\nclass2 = class2[[0,1]]\n\n# Splitting into test and train, test = 30% and train 70%\nclass1_train, class1_test = train_test_split(class1, test_size=0.3, random_state=42, shuffle=True)\nclass2_train, class2_test = train_test_split(class2, test_size=0.3, random_state=42, shuffle=True)\n\n# Scatter plot of given data\nprint(\"Scatter plot of given data :\")\nplt.scatter(class1[[0]], class1[[1]], label = \"Class1\", c = \"blue\")\nplt.scatter(class2[[0]], class2[[1]], label = \"Class2\", c = \"orange\")\nplt.xlabel(\"Attribute1\")\nplt.ylabel(\"Attribute2\")\nplt.title(\"Given Data\")\nplt.legend()\nplt.show()\nprint(80*\"*\")\n\n# Scatter plot of Training data\nprint(\"Scatter plot of Training data : \")\nplt.scatter(class1_train[[0]], class1_train[[1]], label=\"Class1\")\nplt.scatter(class2_train[[0]], class2_train[[1]], label=\"Class2\")\nplt.xlabel(\"Attribute1\")\nplt.ylabel(\"Attribute2\")\nplt.title(\"Training data\")\nplt.legend()\nplt.show()\nprint(80*\"*\")\n\n# Scatter plot of Test data\nprint(\"Scatter plot of Test data : \")\nplt.scatter(class1_test[[0]], class1_test[[1]], label=\"Class1\")\nplt.scatter(class2_test[[0]], class2_test[[1]], label=\"Class2\")\nplt.xlabel(\"Attribute1\")\nplt.ylabel(\"Attribute2\")\nplt.title(\"Test data\")\nplt.legend()\nplt.show()\nprint(80*\"*\")\n\n\n# Initialize the model\nmodel = Perceptron(weights=[0.5, 0.5, 0.5], learning_rate=0.8)\n\n# Prepare the training data \nlearn_data = pd.concat([class1_train, class2_train], axis = 0)\nlearn_data = learn_data.to_numpy()\n\n# Prepare the training labels\nl1 = np.zeros((700,), dtype=int)\nl2 = np.ones((700,), dtype=int)\nlearn_labels = np.concatenate((l1, l2), axis = 0)\n\n# Run over all samples and update the weights\nn_iter = 300\nfor i in range(n_iter):\n for sample, label in zip(learn_data, learn_labels):\n model.update_weights(label, sample)\n \n# Prepare the testing data\ntest_data = pd.concat([class1_test, class2_test], axis = 0)\ntest_data = test_data.to_numpy()\n\n# Prepare the testing labels \nt1 = np.zeros((300,), dtype=int)\nt2 = np.ones((300,), dtype=int)\ntest_labels = np.concatenate((t1, t2), axis = 0)\n\n\n# Test the model\nprint()\nprint(\"Testing the model: \")\nprint()\nevaluation = model.test_model(test_data, test_labels)\nprint(\"Correctly classified: \", evaluation)\nprint(\"Incorrectly classified: \", 600 - evaluation)\nprint(\"Accuracy: \", round(evaluation/600,4))\nprint()\nprint(80*\"*\")\n\n\n# Print details of the decision boundary\nprint()\nprint(\"Details of decision boundary: \")\nprint()\nX = np.arange(np.min(learn_data[:,0]), np.max(learn_data[:,0]))\nm = -model.weights[0] / model.weights[1]\nc = -model.weights[2] / model.weights[1]\nprint(\"Slope of 
decision boundary:\", m)\nprint(\"Y-Intercept of decision boundary:\", c)\nprint()\nprint(80*\"*\")\n\n\n# Plot the decision boundary and data\nprint(\"Plot of decision boundary: \")\nfig, ax = plt.subplots(figsize=(12,12))\ny = m*X + c\nax.plot(X, y, '-r')\nax.scatter(class1[[0]], class1[[1]], label=\"Class1\")\nax.scatter(class2[[0]], class2[[1]], label=\"Class2\")\nplt.show()","sub_path":"Assignment5/linearly_separable_perceptron.py","file_name":"linearly_separable_perceptron.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"121661777","text":"import numpy as np\nimport copy\n\n\nclass TransformableFrame:\n def __init__(self, pose):\n self.pose = pose\n\n @classmethod\n def fromDocument(cls, doc):\n return cls(doc['pose'])\n\n def getRotation(self):\n rotation = copy.deepcopy(self.pose)\n rotation[0] = 0\n rotation[1] = 0\n rotation[2] = 0\n return TransformableFrame(rotation)\n\n def getPosition(self):\n position = copy.deepcopy(self.pose)\n position[3] = 0\n position[4] = 0\n position[5] = 0\n return TransformableFrame(position)\n\n def inv(self):\n trans = self.pose\n theta = np.linalg.norm(np.asarray(trans[3:6]), 2)\n if theta == 0:\n ax = np.asarray([0, 0, 0])\n else:\n ax = np.asarray(trans[3:6]) / theta\n theta = -theta\n v = np.asarray(trans[0:3])\n vRot = np.cos(theta) * v + np.sin(theta) * np.cross(ax, v) + (1 - np.cos(theta)) * np.dot(ax, v) * ax\n transInv = np.ndarray.tolist(-vRot) + np.ndarray.tolist(ax * theta)\n return TransformableFrame(transInv)\n\n def __mul__(self, other):\n trans1 = self.pose\n trans2 = other.pose\n # Compute rotation\n theta1 = np.linalg.norm(np.asarray(trans1[3:6]), 2)\n theta2 = np.linalg.norm(np.asarray(trans2[3:6]), 2)\n if theta1 < 0.00001:\n ax1 = np.asarray([0, 0, 0])\n else:\n ax1 = np.asarray(trans1[3:6]) / theta1\n if theta2 < 0.00001:\n ax2 = np.asarray([0, 0, 0])\n else:\n ax2 = np.asarray(trans2[3:6]) / theta2\n val = np.cos(theta1 / 2) * np.cos(theta2 / 2) - np.sin(theta1 / 2) * np.sin(theta2 / 2) * np.dot(ax1, ax2)\n if val >= 1.0:\n val = 1.0\n elif val <= -1.0:\n val = -1.0\n theta3 = 2 * np.arccos(val)\n theta3 = np.mod(theta3, 2 * np.pi)\n if theta3 < 0.00001:\n v = np.asarray(trans2[0:3])\n # print(str(ax1))\n # print(str(v))\n vRot = np.cos(theta1) * v + np.sin(theta1) * np.cross(ax1, v) + (1 - np.cos(theta1)) * np.dot(ax1, v) * ax1\n trans3 = np.ndarray.tolist(trans1[0:3] + vRot) + [0, 0, 0]\n else:\n ax3Scaled = (np.sin(theta1 / 2) * np.cos(theta2 / 2) * ax1 + np.cos(theta1 / 2) * np.sin(\n theta2 / 2) * ax2 + np.sin(theta1 / 2) * np.sin(theta2 / 2) * np.cross(ax1, ax2))\n ax3 = 1 / np.sin(theta3 / 2) * ax3Scaled\n v = np.asarray(trans2[0:3])\n vRot = np.cos(theta1) * v + np.sin(theta1) * np.cross(ax1, v) + (1 - np.cos(theta1)) * np.dot(ax1, v) * ax1\n trans3 = np.ndarray.tolist(trans1[0:3] + vRot) + np.ndarray.tolist(ax3 * theta3)\n return TransformableFrame(trans3)\n\n def __str__(self):\n return str(self.pose)\n\n def __repr__(self):\n return str(self.pose)\n\n def __getitem__(self, key):\n return self.pose[key]\n\n def __setitem__(self, key,value):\n self.pose[key] = value\n\n @staticmethod\n def get_rotated_vector(vector, frame):\n rotated_position = frame.getRotation().inv() * vector\n rotated_orientation = frame.getRotation().inv() * TransformableFrame([vector[3], vector[4], vector[5], 0, 0, 0])\n rotated_vector = [rotated_position[0], rotated_position[1], rotated_position[2], rotated_orientation[0], 
rotated_orientation[1],\n rotated_orientation[2]]\n return rotated_vector\n","sub_path":"transformable_frame.py","file_name":"transformable_frame.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"309062476","text":"\"\"\"\nMIT License\n\nCopyright (c) 2019 Jonathan Barda\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport json\nimport re\nimport array\nimport pprint\n\nclass json2html:\n\tSELF_CLOSING_TAGS = ('meta', 'img', 'br', 'hr')\n\tDEFAULT_MARKER = '${}'\n\tMARKERS = ('{{}}', '${}')\n\tDEBUG = False\n\tDATA = None\n\tDISPLAY_PROPERTIES = False\n\n\titerations = 0\n\tchilds = 0\n\tbuffer = []\n\thtml = ''\n\tprops = []\n\n\t@staticmethod\n\tdef in_array(needle, haystack):\n\t\tif (needle in haystack):\n\t\t\treturn True\n\t\treturn False\n\t\n\t@staticmethod\n\tdef is_array(needle):\n\t\treturn isinstance(needle, (list, tuple))\n\n\t@staticmethod\n\tdef is_string(needle):\n\t\treturn isinstance(needle, str)\n\n\t@staticmethod\n\tdef contains(needle, string):\n\t\tif (string.find(needle) == -1):\n\t\t\treturn False\n\t\treturn True\n\t\n\t@classmethod\n\tdef _convert(cls, decoded_tags, debug):\n\t\t# Init internal counter\n\t\tcls.iterations += 1\n\t\tif (debug == True):\n\t\t\tprint('\\n* Iteration:', cls.iterations)\n\t\t\n\t\t# Parse given tags\n\t\tfor (key, value) in decoded_tags.items():\n\t\t\tif (key == 'tag' or key == '<>'):\n\t\t\t\tcls.buffer.append({'iteration': cls.iterations, 'tag': value})\n\t\t\t\tif (debug == True):\n\t\t\t\t\tprint('Converted [' + value + '] to: <' + value + '>')\n\t\t\t\t\tprint('is self closing tag:', 'true' if value in cls.SELF_CLOSING_TAGS else 'false')\n\n\t\t\telif (key == 'alt' or key == 'class' or key == 'id' or key == 'src' or key == 'href' or key == 'target' or key == 'name' or key == 'action' or key == 'method' or key == 'style'):\n\t\t\t\tcls.props.append({'iteration': cls.iterations, 'attribute': key + '=\"' + value + '\"'})\n\t\t\t\tif (debug == True):\n\t\t\t\t\tprint('Converted to: ' + key + ' = \"' + value + '\"')\n\t\t\t\n\t\t\telif (key == 'child' or key == 'children' or key == 'html'):\n\t\t\t\tif (cls.is_array(value)):\n\t\t\t\t\tfor html_tags in value:\n\t\t\t\t\t\tcls._convert(html_tags, debug)\n\t\t\t\telse:\n\t\t\t\t\tcls.props.append({'iteration': cls.iterations, 'content': value})\n\t\t\t\t\tif (debug == True):\n\t\t\t\t\t\tprint('Converted to: innerHTML=\"' + value + '\"')\n\t\t\t\n\t\t\telif (key == 
'text'):\n\t\t\t\tcls.props.append({'iteration': cls.iterations, 'content': value})\n\t\t\t\tif (debug == True):\n\t\t\t\t\tprint('Converted to: innerText=\"' + value + '\"')\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tprint('Unsupported tag given. Got: \"' + key + '\"\\n')\n\t\t\t\treturn False\n\t\n\t@classmethod\n\tdef _merge(cls, debug):\n\t\t# Loop on tags\n\t\ti = 0\n\t\twhile i <= len(cls.buffer)-1:\n\t\t\t# Debug pass\n\t\t\tif (debug == True):\n\t\t\t\tprint('Pass:', i)\n\n\t\t\t# Open tag\n\t\t\tcls.html += '<' + cls.buffer[i]['tag']\n\n\t\t\t# Reading props\n\t\t\tif (debug == True):\n\t\t\t\tprint('Props:', cls.props)\n\n\t\t\t# Adding attributes\n\t\t\tfor j in range(0, len(cls.props)):\n\t\t\t\tif (cls.props[j]['iteration'] == cls.props[i]['iteration']):\n\t\t\t\t\t# print('J:A:', cls.props[j]['iteration'], '==', 'I:A:', cls.props[i]['iteration'])\n\t\t\t\t\tcls.html += ' ' + cls.props[j]['attribute'] if 'attribute' in cls.props[j] else ''\n\t\t\t\t\t\n\t\t\t# Closing open tag\n\t\t\tcls.html += '>'\n\n\t\t\t# Adding content\n\t\t\tfor k in range(0, len(cls.props)):\n\t\t\t\tif (cls.props[k]['iteration'] == cls.props[i]['iteration']):\n\t\t\t\t\t# print('K:C:', cls.props[k]['iteration'], '==', 'I:C:', cls.props[i]['iteration'])\n\t\t\t\t\tcls.html += cls.props[k]['content'] if 'content' in cls.props[k] else ''\n\t\t\t\n\t\t\t# Remove consumed props[i] (spent around 3 hours on that crap!!)\n\t\t\tdel cls.props[i]\n\n\t\t\t# Increment tag open counter\n\t\t\ti += 1\n\n\t\t# Close tag\n\t\ti = len(cls.buffer)-1\n\t\twhile i >= 0:\n\t\t# for i in range(0, len(cls.buffer)):\n\t\t\tif (cls.in_array(cls.buffer[i]['tag'], cls.SELF_CLOSING_TAGS) == False):\n\t\t\t\tcls.html += '</' + cls.buffer[i]['tag'] + '>'\n\t\t\t\n\t\t\t# Decrement tag close counter\n\t\t\ti -= 1\n\n\t@classmethod\n\tdef _replace(cls, html, data, marker, debug):\n\t\t# Additional debug infos\n\t\tif (debug == True):\n\t\t\tprint('Data:', data)\n\t\t\tprint('Used marker:', marker)\n\n\t\t# Validate given marker\n\t\tif (cls.in_array(marker, cls.MARKERS)):\n\t\t\tif (marker == '{{}}'):\n\t\t\t\tregex = r\"({{)(\\w*)(}})(\\w*)\"\n\t\t\telif (marker == '${}'):\n\t\t\t\tregex = r\"(\\${)(\\w*)(})(\\w*)\"\n\t\telse:\n\t\t\tprint('Unsupported marker given.\\n')\n\t\t\treturn False\n\n\t\t# Apply changes\n\t\tmatches = re.finditer(regex, html, re.MULTILINE)\n\t\tif (debug == True):\n\t\t\tprint('Used regex:', regex)\n\t\t\tprint('Search in:', html)\n\t\t\tprint('Results:', matches)\n\n\t\tif (matches):\n\t\t\t## Loop on matches\n\t\t\tindex = 0\n\t\t\tfor matchNum, match in enumerate(matches, start=1):\n\t\t\t\tif (debug == True):\n\t\t\t\t\tprint (\"Match {matchNum} was found at {start}-{end}: {match}\".format(matchNum = matchNum, start = match.start(), end = match.end(), match = match.group()))\n\t\t\t\tcls.html = cls.html.replace(match.group(), data[index][match.group(2)])\n\n\t\t\t\t## Increment data counter\n\t\t\t\tindex += 1\n\t\t\t\n\t\t\t\t\"\"\" for groupNum in range(0, len(match.groups())):\n\t\t\t\t\tgroupNum = groupNum + 1\n\t\t\t\t\tprint (\"Group {groupNum} found at {start}-{end}: {group}\".format(groupNum = groupNum, start = match.start(groupNum), end = match.end(groupNum), group = match.group(groupNum))) \"\"\"\n\t\telse:\n\t\t\tprint('Failed to process data.\\n')\n\t\t\treturn False\n\t\n\t@classmethod\n\tdef transform(cls, tags, data = None, debug = None, marker = None):\n\t\t# Save state\n\t\tencoded_tags = tags\n\t\tencoded_data = data\n\n\t\t# Assign default props\n\t\tif (data == None):\n\t\t\tdata = cls.DATA\n\t\t\n\t\tif (debug == 
None):\n\t\t\tdebug = cls.DEBUG\n\t\t\n\t\tif (marker == None):\n\t\t\tmarker = cls.DEFAULT_MARKER\n\n\t\t# Check if we got tags to process\n\t\tif (encoded_tags != ''):\n\t\t\t# Check given tags type\n\t\t\tif (cls.is_string(encoded_tags)):\n\t\t\t\t# Decode tags\n\t\t\t\tdecoded_tags = json.loads(encoded_tags)\n\n\t\t\t\t# Decode data\n\t\t\t\tif (encoded_data != None):\n\t\t\t\t\tdecoded_data = json.loads(encoded_data)\n\t\t\t\telse:\n\t\t\t\t\tdecoded_data = encoded_data\n\t\t\t\t\n\t\t\t\t# Check decoding result\n\t\t\t\tif (decoded_tags):\n\t\t\t\t\tif (debug == True):\n\t\t\t\t\t\tif (cls.DISPLAY_PROPERTIES == True):\n\t\t\t\t\t\t\tprint('\\n* Decoded tags:\\n', decoded_tags, '\\n\\n ==> Properties:\\n', dir(decoded_tags))\n\t\t\t\t\t\t\tprint('\\n* Decoded data:\\n', decoded_data, '\\n\\n ==> Properties:\\n', dir(decoded_data))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint('\\n* Decoded tags:\\n', decoded_tags)\n\t\t\t\t\t\t\tprint('\\n* Decoded data:\\n', decoded_data)\n\t\t\t\t\t\tprint('\\nConverting...')\n\t\t\t\t\t\n\t\t\t\t\t# Convert tags\n\t\t\t\t\tcls._convert(decoded_tags, debug)\n\n\t\t\t\t\t# Display memory info\n\t\t\t\t\tif (debug == True):\n\t\t\t\t\t\tprint('\\n* Buffer:', len(cls.buffer), '\\n')\n\t\t\t\t\t\tpprint.pprint(cls.buffer)\n\t\t\t\t\t\tprint('\\n* Props:', len(cls.props), '\\n')\n\t\t\t\t\t\tpprint.pprint(cls.props)\n\t\t\t\t\t\tprint('\\nMerging...\\n')\n\t\t\t\t\t\n\t\t\t\t\t# Merge buffer and props\n\t\t\t\t\tcls._merge(debug)\n\n\t\t\t\t\t# Parse given data\n\t\t\t\t\tif (decoded_data != None):\n\t\t\t\t\t\tif (debug == True):\n\t\t\t\t\t\t\tprint('\\nAdding data...\\n')\n\t\t\t\t\t\tcls._replace(cls.html, decoded_data, marker, debug)\n\n\t\t\t\t\t# Output result\n\t\t\t\t\treturn cls.html\n\t\t\t\t\n\t\t\t\t# Return error\n\t\t\t\telse:\n\t\t\t\t\tprint('Failed to parse tags/data.')\n\t\t\t\t\tif (debug == True):\n\t\t\t\t\t\tprint(dir(decoded_tags))\n\t\t\t\t\t\tprint(dir(data))\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tprint('Tags must be a string. 
' + type(tags).__name__.capitalize() + ' given.')\n\t\t\t\treturn False\n\t\telse:\n\t\t\tprint('Empty tags given.')\n\t\t\treturn False","sub_path":"json2html.py","file_name":"json2html.py","file_ext":"py","file_size_in_byte":7918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"119399467","text":"import datetime\n\nfrom mock import patch\nfrom unittest2 import TestCase\n\nfrom ..builder import ListBuilder\nfrom ..responses import (\n response, make_identity, make_boolean, make_locator, make_timeline)\nfrom ..utils import AuthTestCase, fill_cache, Url\n\n\n\nclass BaseFieldTests(TestCase):\n @property\n def field_cls(self):\n from ccui.core.fields import Field\n return Field\n\n\n @property\n def cls(self):\n class Bag(object):\n def __init__(self, **kw):\n for k, v in kw.items():\n setattr(self, k, v)\n\n\n @classmethod\n def get(cls, *args, **kwargs):\n return cls(*args, **kwargs)\n\n return Bag\n\n\n def field_and_cls(self, *args, **kwargs):\n f = self.field_cls(*(self.prepend_args + args), **kwargs)\n cls = self.cls\n setattr(cls, \"attname\", f)\n f.install(\"attname\", cls)\n return f, cls\n\n\n def field(self, *args, **kwargs):\n return self.field_and_cls(*args, **kwargs)[0]\n\n\n @property\n def prepend_args(self):\n return ()\n\n\n def test_default_names(self):\n f = self.field()\n\n self.assertEqual(f.api_name, \"ns1.attname\")\n self.assertEqual(f.api_filter_name, \"attname\")\n self.assertEqual(f.api_submit_name, \"attname\")\n\n\n def test_set_api_name(self):\n f = self.field(api_name=\"apiname\")\n\n self.assertEqual(f.api_name, \"ns1.apiname\")\n self.assertEqual(f.api_filter_name, \"apiname\")\n self.assertEqual(f.api_submit_name, \"apiname\")\n\n\n def test_set_api_submit_name(self):\n f = self.field(api_submit_name=\"apisubmitname\")\n\n self.assertEqual(f.api_name, \"ns1.attname\")\n self.assertEqual(f.api_filter_name, \"attname\")\n self.assertEqual(f.api_submit_name, \"apisubmitname\")\n\n\n def test_set_api_name_and_api_submit_name(self):\n f = self.field(api_name=\"apiname\", api_submit_name=\"apisubmitname\")\n\n self.assertEqual(f.api_name, \"ns1.apiname\")\n self.assertEqual(f.api_filter_name, \"apiname\")\n self.assertEqual(f.api_submit_name, \"apisubmitname\")\n\n\n def test_decode_nil(self):\n f = self.field()\n self.assertEqual(f.decode({\"@xsi.nil\": \"true\"}), None)\n\n\n def test_decode(self):\n f = self.field()\n self.assertEqual(f.decode(\"blah\"), \"blah\")\n\n\n def test_encode(self):\n f = self.field()\n self.assertEqual(f.encode(\"blah\"), \"blah\")\n\n\n def test_no_submit_data(self):\n f, cls = self.field_and_cls(api_submit_name=False)\n self.assertEqual(f.submit_data(cls()), {})\n\n\n def test_submit_none(self):\n f, cls = self.field_and_cls()\n self.assertEqual(f.submit_data(cls(attname=None)), {})\n\n\n def test_submit(self):\n f, cls = self.field_and_cls()\n self.assertEqual(\n f.submit_data(cls(attname=\"blah\")), {\"attname\": \"blah\"})\n\n\n def test_non_filterable(self):\n class NonFilterableField(self.field_cls):\n api_filter_name = False\n\n f = NonFilterableField(*self.prepend_args)\n f.install(\"attname\", self.cls)\n\n self.assertEqual(f.api_filter_name, False)\n\n\n\nclass FieldTest(BaseFieldTests):\n def test_encode_returns_dict(self):\n class ComplexField(self.field_cls):\n def encode(self, value):\n return {\n \"%s_part1\" % self.api_submit_name: value[0],\n \"%s_part2\" % self.api_submit_name: value[1],\n }\n\n f = ComplexField()\n cls = self.cls\n f.install(\"attname\", 
cls)\n\n self.assertEqual(\n f.submit_data(cls(attname=[\"one\", \"two\"])),\n {\"attname_part1\": \"one\", \"attname_part2\": \"two\"})\n\n\n def test_submit_dict(self):\n f, cls = self.field_and_cls()\n self.assertEqual(\n f.submit_data(cls(attname={\"a\": 1})), {\"attname\": {\"a\": 1}})\n\n\n\nclass DateFieldTest(BaseFieldTests):\n @property\n def field_cls(self):\n from ccui.core.fields import Date\n return Date\n\n\n def test_decode(self):\n f = self.field_cls()\n\n self.assertEqual(\n f.decode(\"2011-04-28T00:46:00Z\"), datetime.date(2011, 4, 28))\n\n\n def test_encode(self):\n f = self.field()\n\n self.assertEqual(\n f.encode(datetime.date(2011, 4, 28)), \"2011/04/28\")\n\n\n def test_encode_string(self):\n f = self.field()\n\n self.assertEqual(f.encode(\"2011/04/28\"), \"2011/04/28\")\n\n\n def test_submit(self):\n f, cls = self.field_and_cls()\n\n self.assertEqual(\n f.submit_data(cls(attname=datetime.date(2011, 4, 28))),\n {\"attname\": \"2011/04/28\"})\n\n\n\nclass LocatorFieldTest(BaseFieldTests):\n @property\n def field_cls(self):\n from ccui.core.fields import Locator\n return Locator\n\n\n @property\n def target_cls(self):\n return self.cls\n\n\n def field_cls_and_target(self, *args, **kwargs):\n target = self.target_cls\n f = self.field_cls(target, *args, **kwargs)\n cls = self.cls\n setattr(cls, \"attname\", f)\n f.install(\"attname\", cls)\n return f, cls, target\n\n\n @property\n def prepend_args(self):\n return (self.target_cls,)\n\n\n def field_and_cls(self, *args, **kwargs):\n return self.field_cls_and_target(*args, **kwargs)[:2]\n\n\n def test_default_names(self):\n f = self.field()\n\n self.assertEqual(f.api_name, \"ns1.attnameLocator\")\n self.assertEqual(f.api_filter_name, \"attnameId\")\n self.assertEqual(f.api_submit_name, \"attnameId\")\n\n\n def test_set_api_name(self):\n f = self.field(api_name=\"apiname\")\n\n self.assertEqual(f.api_name, \"ns1.apiname\")\n self.assertEqual(f.api_filter_name, \"apinameId\")\n self.assertEqual(f.api_submit_name, \"apinameId\")\n\n\n def test_set_api_submit_name(self):\n f = self.field(api_submit_name=\"apisubmitname\")\n\n self.assertEqual(f.api_name, \"ns1.attnameLocator\")\n self.assertEqual(f.api_filter_name, \"attnameId\")\n self.assertEqual(f.api_submit_name, \"apisubmitname\")\n\n\n def test_set_api_name_and_api_submit_name(self):\n f = self.field(api_name=\"apiname\", api_submit_name=\"apisubmitname\")\n\n self.assertEqual(f.api_name, \"ns1.apiname\")\n self.assertEqual(f.api_filter_name, \"apinameId\")\n self.assertEqual(f.api_submit_name, \"apisubmitname\")\n\n\n def test_encode(self):\n f, cls, target_cls = self.field_cls_and_target()\n self.assertEqual(f.encode(target_cls(identity={\"@id\": 1})), 1)\n\n\n def test_encode_string(self):\n f, cls, target_cls = self.field_cls_and_target()\n self.assertEqual(f.encode(\"1\"), 1)\n\n\n def test_encode_int(self):\n f, cls, target_cls = self.field_cls_and_target()\n self.assertEqual(f.encode(1), 1)\n\n\n def test_submit(self):\n f, cls, target_cls = self.field_cls_and_target()\n self.assertEqual(\n f.submit_data(cls(attname=target_cls(identity={\"@id\": 1}))),\n {\"attnameId\": 1})\n\n\n def test_descriptor_class_access(self):\n f, cls = self.field_and_cls()\n self.assertIs(cls.attname, f)\n\n\n\n@patch(\"ccui.core.api.userAgent\")\nclass LocatorFunctionalTest(AuthTestCase):\n @property\n def subject_and_target(self):\n from ccui.core.api import RemoteObject\n from ccui.core.fields import Field, Locator\n\n class TheTarget(RemoteObject):\n nickname = Field()\n\n name_field 
= \"nickname\"\n\n class TheSubject(RemoteObject):\n target = Locator(TheTarget)\n\n return TheSubject, TheTarget\n\n\n @property\n def subjects(self):\n return ListBuilder(\n \"thesubject\",\n \"thesubjects\",\n \"Thesubject\",\n {\n \"targetLocator\": make_locator(\n id=1, url=\"thetargets/1\", name=\"The Target\")\n }\n )\n\n\n def test_descriptor_lookup(self, http):\n TheSubject, TheTarget = self.subject_and_target\n\n target_data = {\n \"@url\": \"http://some.base/thetargets/1\",\n \"@id\": \"1\",\n \"@name\": \"The Target\"}\n http.request.return_value = response(\n self.subjects.one(targetLocator=target_data))\n\n subj = TheSubject.get(\"thesubjects/1\", auth=self.auth)\n\n target = subj.target\n\n self.assertIsInstance(target, TheTarget)\n self.assertEqual(target.auth, self.auth)\n self.assertEqual(target.id, \"1\")\n self.assertEqual(target._location, \"http://some.base/thetargets/1\")\n self.assertEqual(target.nickname, \"The Target\")\n # getting target id, nickname, location didn't trigger delivery\n self.assertFalse(target._delivered)\n\n # accessing attribute a second time returns cached instance\n self.assertIs(target, subj.target)\n\n\n def test_descriptor_lookup_none(self, http):\n TheSubject, TheTarget = self.subject_and_target\n\n http.request.return_value = response(\n self.subjects.one(targetLocator={\"@xsi.nil\":\"true\"}))\n\n subj = TheSubject.get(\"thesubjects/1\")\n\n target = subj.target\n\n self.assertEqual(target, None)\n\n\n def test_descriptor_lookup_no_url(self, http):\n TheSubject, TheTarget = self.subject_and_target\n\n target_data = {\n \"@id\": \"1\"}\n http.request.return_value = response(\n self.subjects.one(targetLocator=target_data))\n\n subj = TheSubject.get(\"thesubjects/1\")\n\n self.assertEqual(subj.target, target_data)\n\n\n def test_descriptor_lookup_no_id(self, http):\n TheSubject, TheTarget = self.subject_and_target\n\n target_data = {\n \"@url\": \"http://some.base/thetargets/1\",\n }\n http.request.return_value = response(\n self.subjects.one(targetLocator=target_data))\n\n subj = TheSubject.get(\"thesubjects/1\")\n\n self.assertEqual(subj.target, None)\n\n\n\nclass ResourceIdentityFieldTest(BaseFieldTests):\n @property\n def field_cls(self):\n from ccui.core.fields import ResourceIdentity\n return ResourceIdentity\n\n\n def test_default_names(self):\n f = self.field()\n\n self.assertEqual(f.api_name, \"ns1.resourceIdentity\")\n self.assertEqual(f.api_filter_name, False)\n self.assertEqual(f.api_submit_name, \"resourceIdentity\")\n\n\n def test_set_api_name(self):\n with self.assertRaises(TypeError):\n self.field(api_name=\"apiname\")\n\n\n def test_set_api_submit_name(self):\n with self.assertRaises(TypeError):\n self.field(api_submit_name=\"apisubmitname\")\n\n\n def test_set_api_name_and_api_submit_name(self):\n with self.assertRaises(TypeError):\n self.field(api_name=\"apiname\", api_submit_name=\"apisubmitname\")\n\n\n def test_encode(self):\n f = self.field()\n self.assertEqual(\n f.encode({\"@version\": \"2\"}), {\"originalVersionId\": \"2\"})\n\n\n def test_encode_no_version(self):\n f = self.field()\n self.assertEqual(\n f.encode(\"blah\"), {})\n\n\n def test_no_submit_data(self):\n with self.assertRaises(TypeError):\n self.field_and_cls(api_submit_name=False)\n\n\n def test_submit(self):\n f, cls = self.field_and_cls()\n self.assertEqual(\n f.submit_data(\n cls(attname={\"@version\": \"3\"})), {\"originalVersionId\": \"3\"})\n\n\n\n@patch(\"ccui.core.api.userAgent\")\nclass UserIDFunctionalTest(AuthTestCase):\n @property\n def 
subject_and_user(self):\n from ccui.core.api import RemoteObject\n from ccui.core.fields import UserID\n\n class User(RemoteObject):\n pass\n\n class TheSubject(RemoteObject):\n user = UserID()\n\n return TheSubject, User\n\n\n @property\n def subjects(self):\n return ListBuilder(\n \"thesubject\",\n \"thesubjects\",\n \"Thesubject\",\n {\n \"user\": \"1\"\n }\n )\n\n\n def test_descriptor_lookup(self, http):\n from ccui.core.auth import admin\n TheSubject, User = self.subject_and_user\n\n http.request.return_value = response(\n self.subjects.one(user=\"4\"))\n\n subj = TheSubject.get(\"thesubjects/1\", auth=self.auth)\n\n user = subj.user\n\n self.assertIsInstance(user, User)\n\n # @@@ uses admin auth for now since user view perms don't work\n self.assertEqual(user.auth, admin)\n self.assertEqual(user._location, \"users/4\")\n\n # accessing attribute a second time returns cached instance\n self.assertIs(user, subj.user)\n\n\n def test_descriptor_lookup_invalid(self, http):\n TheSubject, User = self.subject_and_user\n\n http.request.return_value = response(\n self.subjects.one(user=\"blah\"))\n\n subj = TheSubject.get(\"thesubjects/1\", auth=self.auth)\n\n user = subj.user\n\n self.assertEqual(user, \"blah\")\n\n\n def test_descriptor_class_access(self, http):\n TheSubject, User = self.subject_and_user\n self.assertIs(TheSubject.user, TheSubject.__dict__[\"user\"])\n\n\n\nclass TimelineFieldTest(BaseFieldTests):\n @property\n def field_cls(self):\n from ccui.core.fields import TimelineField\n return TimelineField\n\n\n @property\n def result_cls(self):\n from ccui.core.fields import Timeline\n return Timeline\n\n\n @property\n def user_cls(self):\n from ccui.core.api import RemoteObject\n\n class User(RemoteObject):\n pass\n\n return User\n\n\n def test_default_names(self):\n f = self.field()\n\n self.assertEqual(f.api_name, \"ns1.timeline\")\n self.assertEqual(f.api_filter_name, False)\n self.assertEqual(f.api_submit_name, False)\n\n\n def test_set_api_name(self):\n with self.assertRaises(TypeError):\n self.field(api_name=\"apiname\")\n\n\n def test_set_api_submit_name(self):\n with self.assertRaises(TypeError):\n self.field(api_submit_name=\"apisubmitname\")\n\n\n def test_set_api_name_and_api_submit_name(self):\n with self.assertRaises(TypeError):\n self.field(api_name=\"apiname\", api_submit_name=\"apisubmitname\")\n\n\n def test_no_submit_data(self):\n with self.assertRaises(TypeError):\n self.field_and_cls(api_submit_name=False)\n\n\n def test_submit(self):\n f, cls = self.field_and_cls()\n self.assertEqual(\n f.submit_data(\n cls(attname=\"blah\")), {})\n\n\n def test_decode(self):\n User = self.user_cls\n f = self.field()\n t = f.decode(\n {\n \"@createDate\":\"2011-05-04T18:24:11Z\",\n \"@createdBy\":\"1\",\n \"@lastChangeDate\":\"2011-05-05T18:24:11Z\",\n \"@lastChangedBy\":\"2\",\n \"@xsi.type\":\"ns1:Timeline\"\n }\n )\n self.assertIsInstance(t, self.result_cls)\n self.assertEqual(t.createDate, datetime.date(2011, 5, 4))\n self.assertEqual(t.lastChangeDate, datetime.date(2011, 5, 5))\n self.assertIsInstance(t.createdBy, User)\n self.assertEqual(t.createdBy._location, \"users/1\")\n self.assertIsInstance(t.lastChangedBy, User)\n self.assertEqual(t.lastChangedBy._location, \"users/2\")\n\n\n def test_descriptor_class_access(self):\n f, cls = self.field_and_cls()\n self.assertIs(cls.attname, f)\n\n\n\n@patch(\"ccui.core.api.userAgent\")\nclass TimelineFieldFunctionalTest(AuthTestCase):\n @property\n def subject_timeline_and_user(self):\n from ccui.core.api import RemoteObject\n 
from ccui.core.fields import TimelineField, Timeline\n\n class User(RemoteObject):\n pass\n\n class TheSubject(RemoteObject):\n timeline = TimelineField()\n\n return TheSubject, Timeline, User\n\n\n @property\n def subjects(self):\n return ListBuilder(\n \"thesubject\",\n \"thesubjects\",\n \"Thesubject\",\n {\n \"timeline\": make_timeline()\n }\n )\n\n\n def test_descriptor_lookup(self, http):\n TheSubject, Timeline, User = self.subject_timeline_and_user\n\n http.request.return_value = response(\n self.subjects.one(\n timeline={\n \"@createDate\":\"2011-05-04T18:24:11Z\",\n \"@createdBy\":\"1\",\n \"@lastChangeDate\":\"2011-05-05T18:24:11Z\",\n \"@lastChangedBy\":\"2\",\n \"@xsi.type\":\"ns1:Timeline\"\n }\n )\n )\n\n subj = TheSubject.get(\"thesubjects/1\", auth=self.auth)\n\n timeline = subj.timeline\n\n self.assertIsInstance(timeline, Timeline)\n self.assertEqual(timeline.auth, self.auth)\n\n self.assertEqual(timeline.createDate, datetime.date(2011, 5, 4))\n self.assertEqual(timeline.lastChangeDate, datetime.date(2011, 5, 5))\n self.assertIsInstance(timeline.createdBy, User)\n self.assertEqual(timeline.createdBy._location, \"users/1\")\n self.assertIsInstance(timeline.lastChangedBy, User)\n self.assertEqual(timeline.lastChangedBy._location, \"users/2\")\n\n # accessing attribute a second time returns cached instance\n self.assertIs(timeline, subj.timeline)\n\n\n\n@patch(\"ccui.core.api.userAgent\")\nclass LinkFunctionalTest(AuthTestCase):\n def subject_and_target(self, cache=None):\n from ccui.core.api import RemoteObject, ListObject\n from ccui.core.fields import Link, List, Object\n\n class TheTarget(RemoteObject):\n pass\n\n class TargetList(ListObject):\n entryclass = TheTarget\n\n entries = List(Object(TheTarget))\n\n kwargs = {}\n if cache is not None:\n kwargs[\"cache\"] = cache\n\n class TheSubject(RemoteObject):\n targets = Link(TargetList, **kwargs)\n\n return TheSubject, TargetList\n\n\n @property\n def targets(self):\n return ListBuilder(\n \"thetarget\",\n \"thetargets\",\n \"Thetarget\",\n )\n\n\n @property\n def subjects(self):\n return ListBuilder(\n \"thesubject\",\n \"thesubjects\",\n \"Thesubject\",\n {\n \"targets\": self.targets.array({})\n }\n )\n\n\n def test_descriptor_lookup(self, http):\n TheSubject, TargetList = self.subject_and_target()\n\n http.request.return_value = response(self.subjects.one())\n\n subj = TheSubject.get(\"some/url\", auth=self.auth)\n\n targets = subj.targets\n\n self.assertIsInstance(targets, TargetList)\n self.assertEqual(targets.auth, self.auth)\n self.assertIs(targets.linked_from, subj)\n self.assertEqual(\n targets._location, \"some/url/targets\")\n # getting target location didn't trigger delivery\n self.assertFalse(targets._delivered)\n\n\n def test_descriptor_lookup_cache_bucket_specified(self, http):\n TheSubject, TargetList = self.subject_and_target(cache=\"OtherBucket\")\n\n http.request.return_value = response(self.subjects.one())\n\n subj = TheSubject.get(\"some/url\", auth=self.auth)\n\n http.request.return_value = response(self.targets.array({}))\n\n with patch(\"ccui.core.cache.cache\") as cache:\n fill_cache(cache, {})\n list(subj.targets)\n\n cache.get.assert_called_with(\n \"OtherBucket-0-http://fake.base/rest/some/url/targets?_type=json\")\n\n\n def test_descriptor_lookup_no_location(self, http):\n TheSubject, TargetList = self.subject_and_target()\n\n subj = TheSubject()\n\n with self.assertRaises(AttributeError):\n subj.targets\n\n\n def test_descriptor_set(self, http):\n TheSubject, TargetList = 
self.subject_and_target()\n TheTarget = TargetList.entryclass\n\n subj = TheSubject.get(\"some/url\", auth=self.auth)\n\n http.request.return_value = response(make_boolean(True))\n\n subj.targets = [\n TheTarget(identity=make_identity(id=1)),\n TheTarget(identity=make_identity(id=2))]\n\n req = http.request.call_args[1]\n self.assertEqual(req[\"method\"], \"PUT\")\n self.assertEqual(req[\"body\"], \"theTargetIds=1&theTargetIds=2\")\n self.assertEqual(\n Url(req[\"uri\"]),\n Url(\"http://fake.base/rest/some/url/targets?_type=json\"))\n\n\n def test_descriptor_set_with_list(self, http):\n TheSubject, TargetList = self.subject_and_target()\n TheTarget = TargetList.entryclass\n\n subj = TheSubject.get(\"some/url\", auth=self.auth)\n\n http.request.return_value = response(make_boolean(True))\n\n subj.targets = TargetList(\n entries=[\n TheTarget(identity=make_identity(id=1)),\n TheTarget(identity=make_identity(id=2))])\n\n req = http.request.call_args[1]\n self.assertEqual(req[\"method\"], \"PUT\")\n self.assertEqual(req[\"body\"], \"theTargetIds=1&theTargetIds=2\")\n self.assertEqual(\n Url(req[\"uri\"]),\n Url(\"http://fake.base/rest/some/url/targets?_type=json\"))\n","sub_path":"tests/core/test_fields.py","file_name":"test_fields.py","file_ext":"py","file_size_in_byte":21161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"496835939","text":"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# -*- encoding: utf-8 -*-\nimport argparse\nimport time\n\nimport cv2\nimport numpy as np\n\nfrom rapidocr_openvino.utils import OpenVINOInferSession, read_yaml\n\nfrom .utils import DBPostProcess, create_operators, transform\n\n\nclass TextDetector():\n def __init__(self, config):\n self.preprocess_op = create_operators(config['pre_process'])\n self.postprocess_op = DBPostProcess(**config['post_process'])\n\n self.infer = OpenVINOInferSession(config)\n\n def __call__(self, img):\n ori_im = img.copy()\n data = {'image': img}\n data = transform(data, self.preprocess_op)\n img, shape_list = data\n if img is None:\n return None, 0\n\n img = np.expand_dims(img, axis=0).astype(np.float32)\n shape_list = np.expand_dims(shape_list, axis=0)\n\n starttime = time.time()\n preds = self.infer(img)\n post_result = self.postprocess_op(preds, shape_list)\n dt_boxes = post_result[0]['points']\n dt_boxes = self.filter_tag_det_res(dt_boxes, ori_im.shape)\n elapse = time.time() - starttime\n return dt_boxes, elapse\n\n def order_points_clockwise(self, pts):\n \"\"\"\n reference from: https://github.com/jrosebr1/imutils/blob/master/imutils/perspective.py\n # sort the points based on their x-coordinates\n \"\"\"\n xSorted = pts[np.argsort(pts[:, 0]), :]\n\n # grab the left-most and right-most points from the sorted\n # x-roodinate points\n leftMost = xSorted[:2, :]\n rightMost = xSorted[2:, :]\n\n # now, sort the left-most coordinates according to their\n # y-coordinates so we can grab the top-left and 
bottom-left\n # points, respectively\n leftMost = leftMost[np.argsort(leftMost[:, 1]), :]\n (tl, bl) = leftMost\n\n rightMost = rightMost[np.argsort(rightMost[:, 1]), :]\n (tr, br) = rightMost\n\n rect = np.array([tl, tr, br, bl], dtype=\"float32\")\n return rect\n\n def clip_det_res(self, points, img_height, img_width):\n for pno in range(points.shape[0]):\n points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1))\n points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1))\n return points\n\n def filter_tag_det_res(self, dt_boxes, image_shape):\n img_height, img_width = image_shape[:2]\n dt_boxes_new = []\n for box in dt_boxes:\n box = self.order_points_clockwise(box)\n box = self.clip_det_res(box, img_height, img_width)\n rect_width = int(np.linalg.norm(box[0] - box[1]))\n rect_height = int(np.linalg.norm(box[0] - box[3]))\n if rect_width <= 3 or rect_height <= 3:\n continue\n dt_boxes_new.append(box)\n dt_boxes = np.array(dt_boxes_new)\n return dt_boxes\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--config_path', type=str, default='config.yaml')\n parser.add_argument('--image_path', type=str, default=None)\n args = parser.parse_args()\n\n config = read_yaml(args.config_path)\n\n text_detector = TextDetector(config)\n\n img = cv2.imread(args.image_path)\n dt_boxes, elapse = text_detector(img)\n\n from utils import draw_text_det_res\n src_im = draw_text_det_res(dt_boxes, args.image_path)\n cv2.imwrite('det_results.jpg', src_im)\n print('The det_results.jpg has been saved in the current directory.')\n","sub_path":"python/rapidocr_openvino/ch_ppocr_v3_det/text_detect.py","file_name":"text_detect.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"499845614","text":"# Load pickled data\nimport pickle\n\nimport tensorflow as tf\n\n# TODO: Fill this in based on where you saved the training and testing data\n\ntraining_file = 'traffic-signs-data/train.p'\nvalidation_file = 'traffic-signs-data/valid.p'\ntesting_file = 'traffic-signs-data/test.p'\n\nwith open(training_file, mode='rb') as f:\n train = pickle.load(f)\nwith open(validation_file, mode='rb') as f:\n valid = pickle.load(f)\nwith open(testing_file, mode='rb') as f:\n test = pickle.load(f)\n\nX_train, y_train = train['features'], train['labels']\nX_valid, y_valid = valid['features'], valid['labels']\nX_test, y_test = test['features'], test['labels']\n\n### Replace each question mark with the appropriate value.\n### Use python, pandas or numpy methods rather than hard coding the results\n\n# TODO: Number of training examples\nn_train = len(X_train)\n\n# TODO: Number of testing examples.\nn_test = len(X_test)\n\n# TODO: What's the shape of an traffic sign image?\nimage_shape = X_train[0].shape\n\n# TODO: How many unique classes/labels there are in the dataset.\nn_classes = len(set(y_train))\n\nprint(\"Number of training examples =\", n_train)\nprint(\"Number of testing examples =\", n_test)\nprint(\"Image data shape =\", image_shape)\nprint(\"Number of classes =\", n_classes)\n\n### Data exploration visualization code goes here.\n### Feel free to use as many code cells as needed.\n#%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# grab indices of all 43 labels (first image is ok for visualization)\nplt.rcParams.update({'figure.max_open_warning': 100})\nu, indices = np.unique(y_train, return_index=True)\n\nfor i in indices:\n plt.figure(figsize=(6, 3))\n 
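# np.unique with return_index=True yields the index of the first occurrence of\n    # each label, so this loop displays one sample image per traffic-sign class\n    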
plt.title('label ' + str(y_train[i]))\n plt.imshow(X_train[i].squeeze())\n\n### Preprocess the data here. Preprocessing steps could include normalization, converting to grayscale, etc.\n### Feel free to use as many code cells as needed.\n\ndef grayscale(X):\n # we simply add up the colors - they will be normalized away anyway later on\n return np.sum(X, axis=3, keepdims=True)\n\n\ndef feature_scaled(X, min, max):\n return (X - min) / (max - min)\n\n\nprint('applying grayscale')\nX_train = grayscale(X_train)\nX_valid = grayscale(X_valid)\nX_test = grayscale(X_test)\n\nprint('applying feature scaling')\nmin = np.min([np.min(X_train), np.min(X_valid), np.min(X_test)])\nmax = np.max([np.max(X_train), np.max(X_valid), np.max(X_test)])\nX_train = feature_scaled(X_train, min, max)\nX_valid = feature_scaled(X_valid, min, max)\nX_test = feature_scaled(X_test, min, max)\n\n### Define your architecture here.\n### Feel free to use as many code cells as needed.\nfrom tensorflow.contrib.layers import flatten\nfrom sklearn.utils import shuffle\n\n\ndef LeNet(x):\n # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer\n mu = 0\n sigma = 0.1\n\n # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.\n out1 = 6 * net_multiplier\n w1 = tf.Variable(tf.truncated_normal([5, 5, 1, out1], mu, sigma))\n b1 = tf.Variable(tf.zeros(out1))\n conv1 = tf.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding='VALID') + b1\n\n # Activation.\n conv1 = tf.nn.relu(conv1)\n\n # Pooling. Input = 28x28x6. Output = 14x14x6.\n conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Layer 2: Convolutional. Output = 10x10x16.\n out2 = 16 * net_multiplier\n w2 = tf.Variable(tf.truncated_normal([5, 5, out1, out2], mu, sigma))\n b2 = tf.Variable(tf.zeros(out2))\n conv2 = tf.nn.conv2d(conv1, w2, strides=[1, 1, 1, 1], padding='VALID') + b2\n\n # Activation.\n conv2 = tf.nn.relu(conv2)\n\n # Pooling. Input = 10x10x16. Output = 5x5x16.\n conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n # Flatten. Input = 5x5x16. Output = 400.\n flat_out = 5 * 5 * out2\n fc0 = flatten(conv2)\n\n # Layer 3: Fully Connected. Input = 400. Output = 120.\n out3 = 120 * net_multiplier\n w3 = tf.Variable(tf.truncated_normal([flat_out, out3], mu, sigma))\n b3 = tf.Variable(tf.zeros(out3))\n fc1 = tf.matmul(fc0, w3) + b3\n\n # Activation.\n fc1 = tf.nn.relu(fc1)\n\n # DROPOUT\n h_fc1_drop = tf.nn.dropout(fc1, keep_prob)\n\n # Layer 4: Fully Connected. Input = 120. Output = 84.\n out4 = 84 * net_multiplier\n w4 = tf.Variable(tf.truncated_normal([out3, out4], mu, sigma))\n b4 = tf.Variable(tf.zeros(out4))\n fc2 = tf.matmul(h_fc1_drop, w4) + b4\n\n # Activation.\n fc2 = tf.nn.relu(fc2)\n\n # Layer 5: Fully Connected. Input = 84. 
Output = 43 (n_classes).\n    w5 = tf.Variable(tf.truncated_normal([out4, n_classes], mu, sigma))\n    b5 = tf.Variable(tf.zeros(n_classes))\n    logits = tf.matmul(fc2, w5) + b5\n\n    return logits\n\n\n### Train your model here.\n### Calculate and report the accuracy on the training and validation set.\n### Once a final model architecture is selected,\n### the accuracy on the test set should be calculated and reported as well.\n### Feel free to use as many code cells as needed.\nx = tf.placeholder(tf.float32, (None, 32, 32, 1))\ny = tf.placeholder(tf.int32, (None))\nkeep_prob = tf.placeholder(tf.float32)\none_hot_y = tf.one_hot(y, n_classes)\n\nlearning_rate = 0.001\nbatch_size = 128\nepochs = 10\ndropout = 0.5\nnet_multiplier = 5\nskip_training = True\nsave_path = './model'\n\nlogits = LeNet(x)\n# labels and logits must be passed as keyword arguments in TF 1.x\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)\nloss_operation = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntraining_operation = optimizer.minimize(loss_operation)\n\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\n\ndef evaluate(X_data, y_data):\n    num_examples = len(X_data)\n    total_accuracy = 0\n    sess = tf.get_default_session()\n    for offset in range(0, num_examples, batch_size):\n        batch_x, batch_y = X_data[offset:offset + batch_size], y_data[offset:offset + batch_size]\n        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})\n        total_accuracy += (accuracy * len(batch_x))\n    return total_accuracy / num_examples\n\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    num_examples = len(X_train)\n\n    if not skip_training:\n        print(\"Training...\")\n        print()\n        for i in range(epochs):\n            X_train, y_train = shuffle(X_train, y_train)\n            for offset in range(0, num_examples, batch_size):\n                end = offset + batch_size\n                batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n                sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})\n\n            print(\"Epoch {} ...\".format(i + 1))\n            print(\"Train Accuracy = {:.3f}\".format(evaluate(X_train, y_train)))\n            print(\"Validation Accuracy = {:.3f}\".format(evaluate(X_valid, y_valid)))\n            print()\n\n        saver.save(sess, save_path)\n        print(\"Model saved\")\n\n    else:\n        saver.restore(sess, save_path)\n        print(\"Model loaded\")\n\n    print(\"Test Accuracy = {:.3f}\".format(evaluate(X_test, y_test)))\n\n### Load the images and plot them here.\n### Feel free to use as many code cells as needed.\nimport cv2\n\nX_germansigns_files = [\n    '1_stop_14.png',\n    '2_noentry_17.png',\n    '3_stop_14.png',\n    '4_yield_13.png',\n    '5_rightofway_nextintersection_11.png'\n]\n\n\n# yay, matplotlib and cv2 have blue and red flipped - thanks to http://stackoverflow.com/a/15074748/1134940 we can\n# easily flip those again :)\ndef flip_blue_red(img):\n    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\nX_germansigns_orig = np.array([flip_blue_red(cv2.imread('german_signs/' + file)) for file in X_germansigns_files])\ny_germansigns = np.array([14, 17, 14, 13, 11])\n\n### Run the predictions here and use the model to output the prediction for each image.\n### Make sure to pre-process the images with the same pre-processing pipeline used earlier.\n### Feel free to use as many code cells as needed.\nX_germansigns = grayscale(X_germansigns_orig)\nX_germansigns = feature_scaled(X_germansigns, min, max)\n\nfor i, img in 
enumerate(X_germansigns):\n plt.figure(figsize=(6, 3))\n plt.subplot(1, 2, 1)\n plt.imshow(X_germansigns_orig[i].squeeze())\n plt.title('original')\n\n plt.subplot(1, 2, 2)\n plt.imshow(img.squeeze(), cmap='gray')\n plt.title('grayscaled + normalized')\n\nwith tf.Session() as sess:\n saver.restore(sess, save_path)\n # this will output all 43 predictions for each of the 5 images, shape: 5x43\n prediction = sess.run(logits, feed_dict={x: X_germansigns, y: y_germansigns, keep_prob: 1.0})\n # now just take the index with the highest possibility\n predicted_labels = np.argmax(prediction, axis=1)\n print('predicted labels: ' + np.array_str(predicted_labels))\n print('correct labels: ' + np.array_str(y_germansigns))\n\n### Calculate the accuracy for these 5 new images.\n### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.\naccuracy = np.sum(predicted_labels == y_germansigns) / len(y_germansigns)\nprint(\"Accuracy for German signs = {:.3f}\".format(accuracy))\n\n\n\n### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.\n### Feel free to use as many code cells as needed.\nwith tf.Session() as sess:\n print(sess.run(tf.nn.top_k(tf.constant(prediction), k=5)))\n\n\n### Visualize your network's feature maps here.\n### Feel free to use as many code cells as needed.\n\n# image_input: the test image being fed into the network to produce the feature maps\n# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer\n# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output\n# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry\n\n# def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1, plt_num=1):\n# # Here make sure to preprocess your image_input in a way your network expects\n# # with size, normalization, ect if needed\n# # image_input =\n# # Note: x should be the same name as your network's tensorflow data placeholder variable\n# # If you get an error tf_activation is not defined it maybe having trouble accessing the variable from inside a function\n# activation = tf_activation.eval(session=sess, feed_dict={x: image_input})\n# featuremaps = activation.shape[3]\n# plt.figure(plt_num, figsize=(15, 15))\n# for featuremap in range(featuremaps):\n# plt.subplot(6, 8, featuremap + 1) # sets the number of feature maps to show on each row and column\n# plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number\n# if activation_min != -1 & activation_max != -1:\n# plt.imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", vmin=activation_min,\n# vmax=activation_max, cmap=\"gray\")\n# elif activation_max != -1:\n# plt.imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", vmax=activation_max, cmap=\"gray\")\n# elif activation_min != -1:\n# plt.imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", vmin=activation_min, cmap=\"gray\")\n# else:\n# plt.imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", cmap=\"gray\")\n","sub_path":"traffic_signs.py","file_name":"traffic_signs.py","file_ext":"py","file_size_in_byte":11678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"158680469","text":"import pygame\n\n\nclass Ship():\n \"\"\" Classe de definições da Espaçonave \"\"\"\n\n def __init__(self, screen: pygame.SurfaceType):\n \"\"\" Inicializa a espaçonave e define sua posição inicial \"\"\"\n self.screen = screen\n\n # Carrega a imagem da espaçonave e obtém seu rect\n self.image = pygame.image.load('images/ship70x70.bmp')\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n\n # Inicializa cada nova espaçonave na parte inferior central da tela\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom\n\n def blitme(self):\n \"\"\" Desenha a espaçonave em sua posição atual \"\"\"\n self.screen.blit(self.image, self.rect)\n","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"284584664","text":"import os\nimport sys\nimport fileinput\nimport re\nimport string\nimport unicodedata\nfrom unicodedata import category\n\n\n\t\t\ndef strip_date_time(line):\t\n\tif(line[:1] == '2'):\n\t\tlineCut = line[20:]\n\t\treturn lineCut\n\telse:\n\t\treturn line\n\t\t\n# define punctuation to be removed - not removing @ ' or #?\npunctuations = '''!()-[]{};':\"\\,<>./?$%^&*_~🔜'''\nhashTagsAndRefs = '''@#''' \n\ndef removePunctuation(line):\n\tno_punct = \"\"\n\tfor char in line:\n\t if char not in punctuations:\n\t\t no_punct = no_punct + char\n\treturn no_punct\n\t\n\t\ndef remove_hashTagsAndRefs(word):\n\tif( word[:1] in hashTagsAndRefs ):\n\t\treturn ''\n\telse:\n\t\treturn word\n\t\ndef remove_https_from(word):\n\tif(word[:5] =='https'):\n\t\treturn ''\n\telse:\n\t\treturn word\n\t\t\ndef remove_empty_lines(line):\n\tif(line[:5] =='\\\\n\\\\r'):\n\t\treturn ''\n\t\t\n\n\t\t\ndef remove_control_characters(word):\n return \"\".join(ch for ch in word if unicodedata.category(ch)[0]!=\"C\")\n\t\ndef remove_non_prinatble(word):\n\tprintable = set(string.printable) # 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\n\tno_punct = \"\"\n\tfor char in word:\n\t if char in printable:\n\t\t no_punct = no_punct + char\n\treturn no_punct\n\t\t\n\t\t\ndef getCounts(file):\n\tglobal hashtagCount\n\tglobal httpsCount\n\tglobal retweetCount\n\tglobal tweets\n\ttweets = 0\n\twith open(file, encoding = \"utf8\") as f:\n\t\tcontents = f.read()\n\t\thashtagCount = contents.count(\"#\")\t\n\t\thttpsCount = contents.count('https')\n\t\tretweetCount = contents.count('RT')\n\tfor line in open(file, encoding = \"utf8\"):\n\t\tif(line[:1] == '2'):\n\t\t\ttweets += 1\n\t\t\n\n\t\ndef replacement(name):\n\tdirPath = os.path.dirname(os.path.realpath(__file__))\n\tfilePath = dirPath + \"\\\\CollectedTweets\\\\dateSpecificTweetsMen\\\\\" + name + \".txt\"\n\tchangedFile = dirPath + \"\\\\CleanedTweets\\\\\" + name + \"Cleaned.txt\"\n\t\n\tgetCounts(filePath)\n\t\n\to = open(changedFile, \"w\", encoding = \"utf8\")#file containing the changed text .\n\t\t\t# a opens the file in 'append' mode so you don't delete all the information.\n\t\t\t# w opens the file in write mode which clears the contents of the file each time.\n\to.write('Hashtags: ' + str(hashtagCount) + '\\n')\n\to.write('https: ' + str(httpsCount) + '\\n')\n\to.write('Tweets: ' + str(tweets) + '\\n')\n\to.write('Retweets: ' + str(retweetCount) + '\\n\\n')\n\t\n\tfor line in open(filePath, encoding = \"utf8\"):#this is the original file.\n\t\tline = strip_date_time(line)\n\t\tline = 
\t\tline = removePunctuation(line)\n\t\tline = line.strip('\\\\n\\\\r')\n\t\tsplit_line = line.split() #split the line into individual words.\n\t\tnew_split_line1 = [remove_https_from(word) for word in split_line]\n\t\tnew_split_line2 = [remove_control_characters(word) for word in new_split_line1]\n\t\tnew_split_line3 = [remove_non_prinatble(word) for word in new_split_line2]\n\t\tnew_split_line4 = [remove_hashTagsAndRefs(word) for word in new_split_line3]\n\t\tnew_line = ' '.join(new_split_line4)\n\t\to.write(new_line + '\\n')\n\to.close()\n\n\nif __name__ == '__main__':\n\tfileToBeCleaned = 'allMensTweets'\n\treplacement(fileToBeCleaned)","sub_path":"cleanData.py","file_name":"cleanData.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"491299371","text":"__author__ = 'joagy323'\nimport time\n\nlife = list(range(100))\nnothing = 0\ndef the_end(ever):\n    if ever % 20 == nothing:\n        return \"\\n\"\n    if ever % 3 == 0:\n        return ' '\n    return ' '\n\nfor ever in life:\n    life.append(ever)\n    print(\"bää\", end=the_end(ever))\n    time.sleep(0.005)","sub_path":"TDP003/Flask_Portfolio/static/images/joagy323_for_loop.py","file_name":"joagy323_for_loop.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"624636395","text":"#! /usr/bin/python3\n# Program: ch_02_yourName2.py\tDate: 2019-06-19\n# Author: Wesley Isaacs wjicoding@gmail.com\n# Description: Must type in 'your name' to exit loop, includes a break\n# statement\n\nwhile True:\t\t\t\t\t\t\t\t\t\t# infinite loop\n\tname = input('Please type your name: ')\n\tif name == 'your name':\n\t\tbreak\nprint('Thank you!')\n","sub_path":"ABS/part_1/ch_02_04_yourName2.py","file_name":"ch_02_04_yourName2.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"202166088","text":"from random import randrange, randint\nimport random\n\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse, resolve\nfrom rest_framework import status\n\nfrom posts.apis import (\n    QuestionListCreateView,\n    QuestionMainFeedListView,\n    QuestionRetrieveUpdateDestroyView,\n    QuestionFilterListView,\n)\nfrom posts.models import Question\nfrom posts.tests.test_api.question.base import QuestionBaseTest\n\nUser = get_user_model()\n\n\nclass QuestionListCreateViewTest(QuestionBaseTest):\n    VIEW_CLASS = QuestionListCreateView\n\n    # Check that the URL built by reversing the URL name matches the expected URL\n    def test_question_create_url_name_reverse(self):\n\n        url = reverse(self.URL_API_QUESTION_LIST_CREATE_NAME)\n        print(f'reverse test : {url}')\n        self.assertEqual(url, self.URL_API_QUESTION_LIST_CREATE)\n\n    # Check that the URL resolves back to the expected URL name\n    def test_question_create_url_name_resolve(self):\n        resolve_match = resolve(self.URL_API_QUESTION_LIST_CREATE)\n        print(f'resolve test(url name) : {resolve_match.namespace + \":\" + resolve_match.url_name}')\n        self.assertEqual(resolve_match.namespace + \":\" + resolve_match.url_name, self.URL_API_QUESTION_LIST_CREATE_NAME)\n\n    # Check that the resolved view class is the expected one\n    # .func is a temporary function and .as_view() is also a function; they point to\n    # different addresses, so compare via .func.view_class\n    # self.VIEW_CLASS == self.VIEW_CLASS.as_view().view_class : True\n    def test_question_create_url_resolve_view_class(self):\n        \"\"\"\n        For the posts.apis.question.QuestionListCreateView view,\n        check that URL reverse, resolve and the view function in use all match.\n        :return:\n        \"\"\"\n        resolve_match = resolve(self.URL_API_QUESTION_LIST_CREATE)\n        print(f'view class test : {resolve_match.func.view_class}')\n        self.assertEqual(resolve_match.func.view_class,\n                         self.VIEW_CLASS.as_view().view_class)\n\n    # Create question objects with arbitrary users and verify them\n    def test_get_question_list(self):\n        \"\"\"\n        Test for GET requests to QuestionList (the post list).\n        Create an arbitrary number of Questions and check that the response\n        returns that many.\n        :return:\n        \"\"\"\n        # Create users\n        self.create_random_users()\n        print(f'====User.objects.all()====\\n : {User.objects.all()}')\n        # Create questions\n        self.create_random_questions()\n        print(f'====Question.objects.all()====\\n : {Question.objects.all()}')\n\n        url = reverse(self.URL_API_QUESTION_LIST_CREATE_NAME)\n        # page\n        page = 1\n        url += f'?page={page}'\n        print(f'url : {url}')\n        response = self.client.get(url)\n        # Check that the status code is 200\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n        # Check that the returned response data has the count, next, previous and results keys\n        cur_data = response.data\n        self.assertIn('count', cur_data)\n        self.assertIn('next', cur_data)\n        self.assertIn('previous', cur_data)\n        self.assertIn('results', cur_data)\n\n        # Vary the request URL per page and check each response.data\n        result_index = 0\n        for index, i in enumerate(range(self.num_of_questions)):\n            if result_index == 5:\n                url = response.data.get('next')\n                response = self.client.get(url)\n                print(url)\n                cur_data = response.data\n                result_index = 0\n            print(f'index : {index}')\n\n            # Check that results has the question and topics keys\n            cur_results_data = cur_data.get('results')[result_index]\n            self.assertIn('question', cur_results_data)\n            self.assertIn('topics', cur_results_data)\n            # Check that question has the keys below\n            cur_question_data = cur_results_data.get('question')\n            # pk = cur_question_data.get('pk')\n            # print(f'pk : {pk}')\n            self.assertIn('pk', cur_question_data)\n            self.assertIn('url', cur_question_data)\n            self.assertIn('user', cur_question_data)\n            self.assertIn('content', cur_question_data)\n            self.assertIn('bookmark_count', cur_question_data)\n            self.assertIn('follow_count', cur_question_data)\n            self.assertIn('comment_count', cur_question_data)\n            self.assertIn('created_at', cur_question_data)\n            self.assertIn('modified_at', cur_question_data)\n\n            print(f'result_index : {result_index}')\n            result_index += 1\n\n    # Check that Questions are excluded when user is None\n    # # cannot be tested at the moment because creating None objects is blocked at the user level\n    def test_get_question_list_exclude_user_is_none(self):\n        \"\"\"\n        Test that Questions whose user is None are excluded from QuestionList GET requests.\n        :return:\n        \"\"\"\n        user = self.create_user()\n        # user = self.create_user(is_none=True)\n\n        num_user_none_questions = randint(1, 10)\n        num_questions = randint(11, 20)\n        # the default user is None\n        for i in range(num_user_none_questions):\n            self.create_question()\n        for i in range(num_questions):\n            self.create_question(user=user)\n\n        response = self.client.get(self.URL_API_QUESTION_LIST_CREATE)\n        counted_question = response.data.get('count')\n        # Check that Question objects without a user are not included in the response\n        self.assertEqual(counted_question, num_questions)\n\n    # Check query parameter filtering\n    def test_get_question_list_filter_is_working(self):\n        \"\"\"\n        Check that filtering via query_params works correctly.\n\n        ?page=1 : count 0~4\n        ?page=2 : count 5~9\n        ?page=3 : count 10~14\n        :return:\n        \"\"\"\n        temp_user = self.create_user()\n        # print(f'temp_user : {temp_user.pk}')\n        self.create_question(user=temp_user)\n        temp_topic = self.create_topic(creator=temp_user)\n        url = reverse(self.URL_API_QUESTION_LIST_CREATE_NAME)\n        response = self.client.get(url)\n        num_of_questions = response.data.get('count')\n        max_page = int((num_of_questions / 5)) + 1\n\n        # Test with a single query parameter\n        for query_param in self.query_params:\n            if query_param == 'topic':\n                url += f'?{query_param}={temp_topic.pk}'\n            elif query_param == 'page':\n                url += f'?{query_param}={max_page}'\n            else:\n                url += f'?{query_param}={temp_user.pk}'\n            response = self.client.get(url)\n            # Check that the status code is 200\n            print(f'url of query_param : {url}')\n            self.assertEqual(response.status_code, status.HTTP_200_OK)\n            print(f'response : {response}')\n            url = reverse(self.URL_API_QUESTION_LIST_CREATE_NAME)\n\n        # Test with consecutive query parameters\n        num_of_query_params = randint(1, len(self.query_params))\n        # Iterate over random query_params values a random number of times between 1 and len(query_params)\n        for i in range(num_of_query_params):\n            random_query_of_query_params = random.choice(self.query_params)\n            print(random_query_of_query_params)\n            if random_query_of_query_params == 'topic':\n                url += f'?{random_query_of_query_params}={temp_topic.pk}'\n            elif random_query_of_query_params == 'page':\n                url += f'?{random_query_of_query_params}={max_page}'\n            else:\n                url += f'?{random_query_of_query_params}={temp_user.pk}'\n            print(f'url : {url}')\n            response = self.client.get(url)\n            self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\nclass QuestionMainFeedListViewTest(QuestionBaseTest):\n    VIEW_CLASS = QuestionMainFeedListView\n\n    # Check that the URL built by reversing the URL name matches the expected URL\n    def test_question_main_feed_create_url_name_reverse(self):\n        url = reverse(self.URL_API_QUESTION_MAIN_FEED_LIST_NAME)\n        print(f'reverse test : {url}')\n        self.assertEqual(url, self.URL_API_QUESTION_MAIN_FEED_LIST)\n\n    # Check that the URL resolves back to the expected URL name\n    def test_question_main_feed_create_url_name_resolve(self):\n        resolve_match = resolve(self.URL_API_QUESTION_MAIN_FEED_LIST)\n        print(f'resolve test(url name) : '\n              f'{resolve_match.namespace + \":\" + resolve_match.url_name}')\n        self.assertEqual(resolve_match.namespace + \":\" + resolve_match.url_name,\n                         self.URL_API_QUESTION_MAIN_FEED_LIST_NAME)\n\n    # Check that the resolved view class is the expected one\n    # .func is a temporary function and .as_view() is also a function; they point to\n    # different addresses, so compare via .func.view_class\n    # self.VIEW_CLASS == self.VIEW_CLASS.as_view().view_class : True\n    def test_question_main_feed_create_url_resolve_view_class(self):\n        \"\"\"\n        For the posts.apis.question.QuestionMainFeedListView view,\n        check that URL reverse, resolve and the view function in use all match.\n        :return:\n        \"\"\"\n        resolve_match = resolve(self.URL_API_QUESTION_MAIN_FEED_LIST)\n        print(f'view class test : {resolve_match.func.view_class}')\n        self.assertEqual(resolve_match.func.view_class,\n                         self.VIEW_CLASS.as_view().view_class)\n\n    # main-feed\n    def test_get_question_main_feed_list(self):\n        pass\n\n\nclass QuestionRetrieveUpdateDestroyViewTest(QuestionBaseTest):\n    VIEW_CLASS = QuestionRetrieveUpdateDestroyView\n\n    def test_question_create_and_retrieve_object(self):\n        temp_user = self.create_user()\n        temp_question = self.create_question(user=temp_user)\n        print(f'temp_question : {temp_question.pk}')\n\n        response = self.client.get(f'http://testserver/post/question/{temp_question.pk}/')\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\nclass QuestionFilterListViewTest(QuestionBaseTest):\n    VIEW_CLASS = QuestionFilterListView\n\n    # Check that the URL built by reversing the URL name matches the expected URL\n    def test_question_filter_create_url_name_reverse(self):\n        url = reverse(self.URL_API_QUESTION_FILTER_LIST_NAME)\n        print(f'reverse test : {url}')\n        self.assertEqual(url, self.URL_API_QUESTION_FILTER_LIST)\n\n    # Check that the URL resolves back to the expected URL name\n    def test_question_filter_create_url_name_resolve(self):\n        resolve_match = resolve(self.URL_API_QUESTION_FILTER_LIST)\n        print(f'resolve test(url name) : '\n              f'{resolve_match.namespace + \":\" + resolve_match.url_name}')\n        self.assertEqual(resolve_match.namespace + \":\" + resolve_match.url_name,\n                         self.URL_API_QUESTION_FILTER_LIST_NAME)\n","sub_path":"nanum/posts/tests/test_api/question/test_question.py","file_name":"test_question.py","file_ext":"py","file_size_in_byte":11020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"415732200","text":"'''\r\nThe first two consecutive numbers to have two distinct prime factors are:\r\n\r\n14 = 2 × 7\r\n15 = 3 × 5\r\n\r\nThe first three consecutive numbers to have three distinct prime factors are:\r\n\r\n644 = 2² × 7 × 23\r\n645 = 3 × 5 × 43\r\n646 = 2 × 17 × 19.\r\n\r\nFind the first four consecutive integers to have four distinct prime factors each. What is the first of these numbers?\r\n\r\n\r\nAnswer:\r\n134043\r\n'''\r\n\r\n# 30mins\r\n\r\nimport time\r\nfrom math import sqrt\r\n\r\nfactors = {}\r\n\r\ndef num_of_factors(num):\r\n    factors[num] = set()\r\n    x = 2\r\n    tmp = num\r\n    while tmp != 1:\r\n        while not tmp % x:\r\n            factors[num].add(x)\r\n            tmp /= x\r\n        if num != tmp and tmp in factors.keys():\r\n            factors[num] = factors[num] | factors[tmp]\r\n            break\r\n        x += 1\r\n    return len(factors[num])\r\n\r\nstart = time.time()\r\n\r\nnum = 2\r\nans = []\r\nwhile len(ans) < 4:\r\n    if num_of_factors(num) == 4:\r\n        ans.append(num)\r\n    else:\r\n        ans = []\r\n    num += 1\r\n\r\nprint(ans[0])\r\nprint(time.time() - start)\r\n","sub_path":"Project Euler/047._Distinct_primes_factors.py","file_name":"047._Distinct_primes_factors.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"597374380","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. mod:: cerbere.mapper.navolandmaskncfile\n\nMapper class for NAVO Land mask in netCDF\n\n:copyright: Copyright 2013 Ifremer / Cersat.\n:license: Released under GPL v3 license, see :ref:`license`.\n\n.. sectionauthor:: Jeff Piolle \n
.. codeauthor:: Jeff Piolle \n\"\"\"\n\nimport collections\n\nimport netCDF4\nimport numpy\nfrom datetime import datetime\nimport logging\nfrom .ncfile import NCFile\nfrom ..datamodel.field import Field\nfrom ..datamodel.variable import Variable\n\n\nclass NAVOLandmaskNCFile(NCFile):\n    \"\"\"Mapper class for the NAVO Land mask in netCDF\n\n    The NAVO mask is a vectorized 2-dimensional array.\n    In the netCDF file, sea is -1 and land is 0.\n    \"\"\"\n\n    def get_geolocation_field(self, fieldname):\n        return fieldname\n\n#    def get_matching_dimname(self, geodimname):\n#        return geodimname\n    def get_matching_dimname(self, geodimname):\n        if geodimname == 'y':\n            res = 'lat'\n        elif geodimname == 'x':\n            res = 'lon'\n        else:\n            res = geodimname\n        return res\n\n    def get_standard_dimname(self, geodimname):\n        return geodimname\n\n    def get_dimsize(self, dimname):\n        if dimname == 'time':\n            return 1\n        else:\n            return super(NAVOLandmaskNCFile, self).get_dimsize(dimname)\n\n    def get_fieldnames(self):\n        return ['lon', 'lat', 'landmask', 'time']\n\n    def get_dimensions(self, fieldname):\n        if fieldname == 'landmask':\n            native = 'dst'\n        else:\n            native = fieldname\n        return super(NAVOLandmaskNCFile, self).get_dimensions(native)\n\n    def read_times(self, slices=None):\n        \"\"\"Read time values of a file\"\"\"\n        # the land mask is static; return the same placeholder epoch as get_start_time()\n        times = datetime(1, 1, 1, 0, 0, 0)\n        return numpy.ma.array([times])\n\n    def read_field_attributes(self, fieldname):\n        \"\"\"\n        \"\"\"\n        if fieldname == 'time':\n            return {}\n        elif fieldname == 'landmask':\n            return super(NAVOLandmaskNCFile, self).read_field_attributes('dst')\n        else:\n            return super(NAVOLandmaskNCFile, self).read_field_attributes(fieldname)\n\n    def read_field(self, fieldname):\n        \"\"\"\n        Return the field, without its values.\n\n        Actual values can be retrieved with read_values() method.\n        \"\"\"\n        # special implementation case for the time field which is not\n        # available as a variable in a land mask\n        if fieldname == 'time':\n            # create a field for time\n            variable = Variable(shortname=fieldname,\n                                description='time',\n                                authority=self.get_naming_authority(),\n                                standardname='time'\n                                )\n            field = Field(variable,\n                          dimensions=collections.OrderedDict([('time', 1)]),\n                          datatype=numpy.dtype(numpy.int64),\n                          units='seconds since 0001-01-01 00:00:00'\n                          )\n            field.attach_storage(self.get_field_handler(fieldname))\n            field.dimensions = collections.OrderedDict([('time', 1)])\n\n        elif fieldname == 'landmask':\n            variable = Variable(shortname=fieldname,\n                                description='landmask',\n                                authority=self.get_naming_authority(),\n                                standardname='landmask'\n                                )\n            dimensions_landmask = self.get_full_dimensions('dst')\n            field = Field(variable,\n                          dimensions=dimensions_landmask,\n                          datatype=numpy.dtype(numpy.bool),\n                          units=''\n                          )\n            field.attach_storage(self.get_field_handler(fieldname))\n            field.dimensions = dimensions_landmask\n        else:\n            return super(NAVOLandmaskNCFile, self).read_field(fieldname)\n        return field\n\n    def read_values(self, fieldname, slices=None):\n        \"\"\"\n        \"\"\"\n        if fieldname == 'time':\n            return numpy.ma.array([0])\n        elif fieldname == 'landmask':\n            tmp = super(NAVOLandmaskNCFile, self).read_values('dst', slices=slices)\n            return tmp\n        else:\n            return super(NAVOLandmaskNCFile, self).read_values(\n                fieldname,\n                slices=slices\n            )\n\n
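    # Usage sketch (the constructor keyword and file name below are assumptions\n    # about the NCFile base class, not taken from this module):\n    #   mask = NAVOLandmaskNCFile(url='navo_landmask.nc')\n    #   values = mask.read_values('landmask')  # raw 'dst' values: sea is -1, land is 0\n\n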
    def get_start_time(self):\n        \"\"\"Return start of temporal coverage\"\"\"\n        return datetime(1, 1, 1, 0, 0, 0)\n\n    def get_end_time(self):\n        \"\"\"Return end of temporal coverage\"\"\"\n        return datetime(1, 1, 1, 0, 0, 0)\n\n    def get_bbox(self):\n        \"\"\"Return the bounding box of the feature, as a tuple\n        (lonmin, latmin, lonmax, latmax)\n        \"\"\"\n        lon = self.read_values('lon')\n        lat = self.read_values('lat')\n        lonmax = numpy.amax(lon)\n        lonmin = numpy.amin(lon)\n        latmax = numpy.amax(lat)\n        latmin = numpy.amin(lat)\n        return (lonmin, latmin, lonmax, latmax)\n#        return (self.get_handler().west_longitude,\n#                self.get_handler().south_latitude,\n#                self.get_handler().east_longitude,\n#                self.get_handler().north_latitude\n#                )\n","sub_path":"cerbere_old/cerbere/mapper/navolandmaskncfile.py","file_name":"navolandmaskncfile.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"276492797","text":"import os\nimport unittest\nfrom unittest import mock\n\nfrom flask.config import Config\n\nfrom v1.repo.AthenaStorage import AthenaStorage\n\n\nclass TestAthenaStorage(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        # Read the same configuration used by flask\n        flask_dir = os.path.dirname(os.path.realpath(__file__))\n        config_reader = Config(flask_dir)\n        cls.config = config_reader\n        config_reader.from_object('config')\n\n        # Create an instance of Athena\n        cls.athena = AthenaStorage()\n\n        # Init Athena DB\n        cls.athena.init_db(db_name=config_reader['ATHENA_DATABASE_NAME'],\n                           connection_string=config_reader['ATHENA_CONNECTION_STRING'],\n                           s3_source=config_reader['ATHENA_S3_SOURCE'],\n                           s3_output=config_reader['ATHENA_S3_OUTPUT']\n                           )\n\n\n    @mock.patch('v1.repo.AthenaDataBase.AthenaDataBaseAPI.run_query', return_value={'abc': 'lol'})\n    def test_retrieve_malware_info(self, run_query_mock):\n        malicious_url = '/home/steal-money'\n        malicious_host = '192.45.168.2'\n        malicious_port = '80'\n        resp = self.athena.is_malicious(malicious_host, malicious_port, malicious_url)\n        run_query_mock.assert_called()\n        self.assertIsNotNone(resp)\n\n    def test_build_malicious_build_query(self):\n        malicious_url = '/home/steal-money'\n        malicious_host = '192.45.168.2'\n        malicious_port = '80'\n        malicious_query_params = 'param1=abc&parm2=def'\n        sql = self.athena._build_check_malware_SQL(malicious_host, malicious_port, malicious_url,\n                                                   malicious_query_params)\n        exp_sql = \"SELECT COUNT(*) as Count FROM malware WHERE hostname ='192.45.168.2' \" \\\n                  \"AND port ='80' AND url='/home/steal-money' AND query_param='param1=abc&parm2=def';\"\n        self.assertEqual(exp_sql, sql)\n        sql2 = self.athena._build_check_malware_SQL(malicious_host, malicious_port, malicious_url)\n        exp_sql2 = \"SELECT COUNT(*) as Count FROM malware WHERE hostname ='192.45.168.2' \" \\\n                   \"AND port ='80' AND url='/home/steal-money';\"\n        self.assertEqual(exp_sql2, sql2)\n\n    def test_database_type(self):\n        self.assertEqual(AthenaStorage, self.athena.get_database_type())\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/unit/repo/test_athena.py","file_name":"test_athena.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"185343709","text":"import re\n\nids = [i - 1 for i in [1, 5, 6, 7, 8, 9, 15, 16, 19]]\ntext = 'Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.'
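\n\n# The ids are 1-based positions (shifted to 0-based above) of the words whose\n# element symbol is a single letter (H, B, C, N, O, F, P, S, K); the remaining\n# words contribute their first two letters.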
\nwords = re.findall(re.compile('[A-Za-z]+'), text)\n\nans = dict()\n\nfor i, word in enumerate(words):\n    if i in ids:\n        key = word[0]\n    else:\n        key = word[:2]\n    ans[key] = i\n\nprint(ans)\n","sub_path":"04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"580249436","text":"#!/usr/bin/python3\n\n\nimport os, sys, getopt, datetime\nimport warnings\nwarnings.simplefilter(\"ignore\", category=DeprecationWarning)\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport mglearn \nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport glob\n\n\n#set seed\nnp.random.seed(0)\n\ndef main():\n\tif len(sys.argv) < 4:\n\t\tprint(\"Not enough arguments specified\\n Usage: rank_features.py <in_dir> <searchterm> <out_name>\")\n\t\tsys.exit(1)\n\telse:\n\t\t# print command line arguments\n\t\tfor arg in sys.argv[0:]:\n\t\t\tprint(arg)\n\n\t#assign names to arguments\n\tin_dir = sys.argv[1] #input directory\n\tsearchterm = sys.argv[2] #name of the y target variable\n\tout_name = sys.argv[3] #name of the model\n\n\tfile_search = searchterm+'*.csv'\n\n\t#just print the file names\n\tfor filename in glob.iglob(in_dir + file_search, recursive=False):\n\t\tprint(filename)\n\n\t#now get the files\n\tall_files = glob.glob(os.path.join(in_dir + file_search))\n\tli = []\n\tfor filename in all_files:\n\t\td = pd.read_csv(filename, index_col=None, header=0)\n\t\td = d.rename(columns={d.columns[0]: \"Feature\", d.columns[1]: os.path.basename(filename)})\n\t\tli.append(d)\n\n\tdf = pd.concat(li, axis=1, ignore_index=False)\n\n\tdf = df.set_index(df.iloc[:,0]) #set index\n\t#df = df.drop(['Features'], axis=1) #dropping the duplicated feature names\n\tdf = df.drop(['Feature'], axis=1) #I'm dumb and named the permuted features 'feature' without the s\n\n\t#get feature names\n\tfeatures = df.index\n\n\t##RANK FEATURES##\n\t#adapted from https://www.kaggle.com/arthurtok/feature-ranking-rfe-random-forest-linear-models\n\t# Define dictionary to store our rankings\n\tranks = {}\n\t# Create our function which stores the feature rankings to the ranks dictionary\n\tdef ranking(ranks, names, order=1):\n\t\tminmax = MinMaxScaler()\n\t\tranks = minmax.fit_transform(order*np.array([ranks]).T).T[0]\n\t\tranks = map(lambda x: round(x,2), ranks)\n\t\treturn dict(zip(names, ranks))\n\n\t#now add the ranks to the dictionary\n\tfor column in df:\n\t\t# Select column contents by column name using [] operator\n\t\tnew_key = df[column].name\n\t\tvals = ranking(np.abs(df[column]), features)\n\t\tranks[new_key] = vals\n\t#make it easy to read\n\tr = {}\n\tfor name in features:\n\t\tr[name] = round(np.mean([ranks[method][name] \n\t\t\tfor method in ranks.keys()]), 2)\n\n\tmethods = sorted(ranks.keys())\n\tranks[\"Mean\"] = r\n\tmethods.append(\"Mean\")\n\n\t#turn it into a data frame\n\tranks_df = pd.DataFrame.from_dict(ranks)\n\n\tranks_file = out_name+\".csv\"\n\tranks_df.to_csv(in_dir + ranks_file, index = True)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"lib/ceres_lib/rank_features.py","file_name":"rank_features.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"609982475","text":"# glob module for collecting the file list\nimport glob\nimport os\nimport json\nfrom bs4 import BeautifulSoup\n
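\n# Parses the saved ranking HTML pages for each (sex, category) pair and writes\n# one JSON file of item records (brand, name, prices, sale info).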
\n\n\ndef parseDataToJSON(sex, categoryName):\n    htmlList=glob.glob(os.getcwd()+\"/htmlFiles/\"+sex+\"_\"+categoryName+\"*.html\")\n    dataList=[]\n    for htmlFileName in htmlList:\n        with open(htmlFileName, \"r\") as h:\n            soup=BeautifulSoup(h.read(), \"html.parser\")\n        \n        # Parse one HTML page\n        items=soup.select(\".itemEntry__rankingBox\")\n        for item in items:\n            # itemObj holds the data for a single item\n            itemObj={\n                \"category\":[],\n                \"brandName\":\"\",\n                \"itemName\":\"\",\n                \"itemOnSale\":False,\n                \"price\":0,\n                \"priceSaled\":0,\n                \"discountRate\":0\n            }\n            itemObj[\"category\"].append(soup.select(\".filter__headText\")[0].text.strip().lower()) # category as an arg\n            itemObj[\"itemName\"]=item.select(\"p.itemEntry__title\")[0].text\n            \n            # If the item is on sale\n            if(item.select(\"div.itemEntry__prevPrice\")) :\n                itemObj[\"itemOnSale\"]=True\n                itemObj[\"price\"]=int(item.select(\"div.itemEntry__prevPrice\")[0].text.replace(\",\", \"\").replace(\"원\", \"\"))\n                priceText=item.select(\"div.itemEntry__price\")[0].text\n                itemObj[\"priceSaled\"]=int(priceText[:priceText.find(\"원\")].replace(\",\", \"\")) \n            \n            # If the item is not on sale\n            else:\n                priceText=item.select(\"div.itemEntry__price\")[0].text\n                itemObj[\"price\"]=int(priceText[:priceText.find(\"원\")].replace(\",\", \"\"))\n            \n            # note: stored as priceSaled/price, so it is 0 for items not on sale\n            itemObj[\"discountRate\"]=itemObj[\"priceSaled\"]/itemObj[\"price\"]\n            itemObj[\"brandName\"]=item.select(\"p.itemEntry__brandName\")[0].text\n            dataList.append(itemObj)\n    \n    jsonFileName=htmlFileName.split(\"htmlFiles/\")[1][:-7]+\".json\"\n    with open(os.getcwd()+\"/itemsData/\"+jsonFileName, \"w\") as outfile:\n        json.dump(dataList, outfile,ensure_ascii=False)\n    print(jsonFileName+\" has been written\")\n\nparseDataToJSON(\"men\", \"outer\")\nparseDataToJSON(\"men\", \"top\")\nparseDataToJSON(\"men\", \"bottom\")\nparseDataToJSON(\"women\", \"outer\")\nparseDataToJSON(\"women\", \"top\")\nparseDataToJSON(\"women\", \"bottom\")\n","sub_path":"parseDataToJSON.py","file_name":"parseDataToJSON.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"318389250","text":"\"\"\"Run DeepLab-ResNet on a given image.\n\nThis script computes a segmentation mask for a given image.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nfrom datetime import datetime\nimport os\nimport sys\nimport time\nimport cv2\nimport scipy.io\n\nfrom PIL import Image\nfrom os import listdir\nfrom os.path import isfile, join\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom wasr_models import wasr_IMU_FU2, ImageReader, decode_labels, prepare_label\n\n# PASCAL VOC COLOR MEANS OF IMAGES\n\n# COLOR MEANS OF IMAGES FROM MODDv1 DATASET\nIMG_MEAN = np.array((148.8430, 171.0260, 162.4082), dtype=np.float32)\n\nSEQ_NUM = 28\nNUM_CLASSES = 3 #2\nSEQ_TXT = '/opt/workspace/host_storage_hdd/boat/inference_images_modd2_384_raw/seq%02d/seq%02d_inference.txt' #% (SEQ_NUM, SEQ_NUM)\nSAVE_DIR = '/opt/workspace/host_storage_hdd/boat/inference_images_modd2_384_raw/seq%02d/masks_wasr_fu2_imu_reprod/' #% SEQ_NUM\nDATASET_PATH = '/opt/workspace/host_storage_hdd/boat/inference_images_modd2_384_raw/'\n\nMODEL_WEIGHTS = 'example_weights/wasr_fu2_imu.ckpt-80000'\n\nIMG_SIZE = [384, 512]\n\n\ndef get_arguments():\n    \"\"\"Parse all the arguments provided from the CLI.\n\n    Returns:\n      A list of parsed arguments.\n    \"\"\"\n    parser = argparse.ArgumentParser(description=\"DeepLabLFOV Network Inference.\")\n    parser.add_argument(\"--dataset-path\", type=str, default=DATASET_PATH,\n                        help=\"Path 
to dataset files on which inference is performed.\")\n parser.add_argument(\"--model-weights\", type=str, default=MODEL_WEIGHTS,\n help=\"Path to the file with model weights.\")\n parser.add_argument(\"--num-classes\", type=int, default=NUM_CLASSES,\n help=\"Number of classes to predict (including background).\")\n parser.add_argument(\"--save-dir\", type=str, default=SAVE_DIR,\n help=\"Where to save predicted mask.\")\n parser.add_argument(\"--seq\", type=int, default = SEQ_NUM,\n help=\"Sequence number to evaluate.\")\n parser.add_argument(\"--seq-txt\", type=str, default=SEQ_TXT,\n help=\"Text sprintf to sequeunce txt file\")\n return parser.parse_args()\n\ndef load(saver, sess, ckpt_path):\n '''Load trained weights.\n Args:\n saver: TensorFlow saver object.\n sess: TensorFlow session.\n ckpt_path: path to checkpoint file with parameters.\n '''\n saver.restore(sess, ckpt_path)\n print(\"Restored model parameters from {}\".format(ckpt_path))\n\ndef main():\n os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n \"\"\"Create the model and start the evaluation process.\"\"\"\n args = get_arguments()\n\n args.seq_txt = args.seq_txt % (args.seq, args.seq)\n args.save_dir = args.save_dir % (args.seq)\n\n # Create network\n img_input = tf.placeholder(dtype=tf.uint8, shape=(IMG_SIZE[0], IMG_SIZE[1], 3))\n imu_input = tf.placeholder(dtype=tf.uint8, shape=(IMG_SIZE[0], IMG_SIZE[1], 3))\n imu_input_2 = imu_input[:, :, 0]\n\n # Convert from opencv BGR to tensorflow's RGB format\n img_b, img_g, img_r = tf.split(axis=2, num_or_size_splits=3, value=img_input)\n\n # Join and subtract means\n img = tf.cast(tf.concat(axis=2, values=[img_r, img_g, img_b]), dtype=tf.float32)\n imu = tf.cast(imu_input_2, dtype=tf.float32)\n\n img -= IMG_MEAN\n\n # Expand first dimension\n img = tf.expand_dims(img, dim=0)\n imu = tf.expand_dims(tf.expand_dims(imu, dim=0), dim=3)\n\n with tf.variable_scope('', reuse=False):\n net = wasr_IMU_FU2({'data': img, 'imu_data': imu}, is_training=False, num_classes=args.num_classes)\n\n # Which variables to load...\n restore_var = tf.global_variables()\n\n # Predictions\n raw_output = net.layers['fc1_voc12']\n\n # Features at the end of the second block of convolutions...\n #middle_features = net.layers['res2c'] # net.layers['res3b3']\n\n raw_output = tf.image.resize_bilinear(raw_output, tf.shape(img)[1:3, ])\n raw_output = tf.argmax(raw_output, dimension=3)\n pred = tf.expand_dims(raw_output, dim=3)\n\n # Set up TF session and initialize variables.\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n\n sess.run(init)\n\n # Load weights\n loader = tf.train.Saver(var_list=restore_var)\n load(loader, sess, args.model_weights)\n\n # create output folder/s if they dont exist yet\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n\n # Get number of lines (images) in text file\n num_imgs = sum(1 for line in open(args.seq_txt))\n # Perform inferences on dataset\n f_id = open(args.seq_txt, 'r')\n\n alpha_param = 0.5\n counter = 1\n sum_times = 0\n\n # Perform inferences of MODD2 dataset\n for line in f_id:\n\n image_name, imu_name = line.strip('\\r\\n').split(' ')\n\n # read image\n img_in = cv2.imread(join(args.dataset_path, image_name))\n imu_in = cv2.imread(join(args.dataset_path, imu_name))\n\n start_time = time.time()\n preds = sess.run(pred, feed_dict={img_input: img_in, imu_input: imu_in})\n elapsed_time = time.time() - start_time\n\n sum_times += elapsed_time\n print('Elapsed time: 
%.04f for image num %03d' % (elapsed_time, counter))\n\n # Extract prediction mask\n msk = decode_labels(preds, num_classes=args.num_classes)\n # Save generated mask\n cv2.imwrite(args.save_dir + 'mask_%03d.png' % counter, msk[0])\n\n counter = counter + 1\n\n print('Average time per image: %.05f' % (sum_times / num_imgs))\n\nif __name__ == '__main__':\n main()\n","sub_path":"wasr_inference_imu.py","file_name":"wasr_inference_imu.py","file_ext":"py","file_size_in_byte":5589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"161944551","text":"import argparse\nimport logging\nfrom pathlib import Path\nimport os\nimport RaceRandom as random\nimport urllib.request\nimport urllib.parse\nimport yaml\n\nfrom DungeonRandomizer import parse_cli\nfrom Main import main as DRMain\nfrom source.classes.BabelFish import BabelFish\nfrom yaml.constructor import SafeConstructor\n\ndef add_bool(self, node):\n return self.construct_scalar(node)\n\nSafeConstructor.add_constructor(u'tag:yaml.org,2002:bool', add_bool)\n\ndef main():\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('--multi', default=1, type=lambda value: min(max(int(value), 1), 255))\n multiargs, _ = parser.parse_known_args()\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--weights', help='Path to the weights file to use for rolling game settings, urls are also valid')\n parser.add_argument('--samesettings', help='Rolls settings per weights file rather than per player', action='store_true')\n parser.add_argument('--seed', help='Define seed number to generate.', type=int)\n parser.add_argument('--multi', default=1, type=lambda value: min(max(int(value), 1), 255))\n parser.add_argument('--names', default='')\n parser.add_argument('--teams', default=1, type=lambda value: max(int(value), 1))\n parser.add_argument('--create_spoiler', action='store_true')\n parser.add_argument('--suppress_rom', action='store_true')\n parser.add_argument('--suppress_meta', action='store_true')\n parser.add_argument('--bps', action='store_true')\n parser.add_argument('--rom')\n parser.add_argument('--enemizercli')\n parser.add_argument('--outputpath')\n parser.add_argument('--loglevel', default='info', choices=['debug', 'info', 'warning', 'error', 'critical'])\n for player in range(1, multiargs.multi + 1):\n parser.add_argument(f'--p{player}', help=argparse.SUPPRESS)\n args = parser.parse_args()\n\n if args.seed is None:\n random.seed(None)\n seed = random.randint(0, 999999999)\n else:\n seed = args.seed\n random.seed(seed)\n\n seedname = f'M{random.randint(0, 999999999)}'\n print(f\"Generating mystery for {args.multi} player{'s' if args.multi > 1 else ''}, {seedname} Seed {seed}\")\n\n weights_cache = {}\n if args.weights:\n weights_cache[args.weights] = get_weights(args.weights)\n print(f\"Weights: {args.weights} >> {weights_cache[args.weights]['description']}\")\n for player in range(1, args.multi + 1):\n path = getattr(args, f'p{player}')\n if path:\n if path not in weights_cache:\n weights_cache[path] = get_weights(path)\n print(f\"P{player} Weights: {path} >> {weights_cache[path]['description']}\")\n\n erargs = parse_cli(['--multi', str(args.multi)])\n erargs.seed = seed\n erargs.names = args.names\n erargs.create_spoiler = args.create_spoiler\n erargs.suppress_rom = args.suppress_rom\n erargs.suppress_meta = args.suppress_meta\n erargs.bps = args.bps\n erargs.race = True\n erargs.outputname = seedname\n erargs.outputpath = args.outputpath\n erargs.loglevel = 
args.loglevel\n erargs.mystery = True\n\n if args.rom:\n erargs.rom = args.rom\n if args.enemizercli:\n erargs.enemizercli = args.enemizercli\n\n mw_settings = {'algorithm': False}\n\n settings_cache = {k: (roll_settings(v) if args.samesettings else None) for k, v in weights_cache.items()}\n\n for player in range(1, args.multi + 1):\n path = getattr(args, f'p{player}') if getattr(args, f'p{player}') else args.weights\n if path:\n settings = settings_cache[path] if settings_cache[path] else roll_settings(weights_cache[path])\n for k, v in vars(settings).items():\n if v is not None:\n if k == 'algorithm': # multiworld wide parameters\n if not mw_settings[k]: # only use the first roll\n setattr(erargs, k, v)\n mw_settings[k] = True\n else:\n getattr(erargs, k)[player] = v\n else:\n raise RuntimeError(f'No weights specified for player {player}')\n\n # set up logger\n loglevel = {'error': logging.ERROR, 'info': logging.INFO, 'warning': logging.WARNING, 'debug': logging.DEBUG}[erargs.loglevel]\n logging.basicConfig(format='%(message)s', level=loglevel)\n\n DRMain(erargs, seed, BabelFish())\n\ndef get_weights(path):\n if os.path.exists(Path(path)):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n return yaml.load(f, Loader=yaml.SafeLoader)\n elif urllib.parse.urlparse(path).scheme in ['http', 'https']:\n return yaml.load(urllib.request.urlopen(path), Loader=yaml.FullLoader)\n\ndef roll_settings(weights):\n def get_choice(option, root=None):\n root = weights if root is None else root\n if option not in root:\n return None\n if type(root[option]) is not dict:\n return root[option]\n if not root[option]:\n return None\n return random.choices(list(root[option].keys()), weights=list(map(int,root[option].values())))[0]\n\n def get_choice_default(option, root=weights, default=None):\n choice = get_choice(option, root)\n if choice is None and default is not None:\n return default\n return choice\n\n while True:\n subweights = weights.get('subweights', {})\n if len(subweights) == 0:\n break\n chances = ({k: int(v['chance']) for (k, v) in subweights.items()})\n subweight_name = random.choices(list(chances.keys()), weights=list(chances.values()))[0]\n subweights = weights.get('subweights', {}).get(subweight_name, {}).get('weights', {})\n subweights['subweights'] = subweights.get('subweights', {})\n weights = {**weights, **subweights}\n\n ret = argparse.Namespace()\n\n ret.algorithm = get_choice('algorithm')\n\n glitch_map = {'none': 'noglitches', 'no_logic': 'nologic', 'owglitches': 'owglitches',\n 'owg': 'owglitches', 'minorglitches': 'minorglitches'}\n glitches_required = get_choice('glitches_required')\n if glitches_required is not None:\n if glitches_required not in glitch_map.keys():\n print(f'Logic did not match one of: {\", \".join(glitch_map.keys())}')\n glitches_required = 'none'\n ret.logic = glitch_map[glitches_required]\n\n item_placement = get_choice('item_placement')\n # not supported in ER\n\n dungeon_items = get_choice('dungeon_items')\n ret.mapshuffle = get_choice('map_shuffle') == 'on' if 'map_shuffle' in weights else dungeon_items in ['mc', 'mcs', 'full']\n ret.compassshuffle = get_choice('compass_shuffle') == 'on' if 'compass_shuffle' in weights else dungeon_items in ['mc', 'mcs', 'full']\n ret.keyshuffle = get_choice('smallkey_shuffle') == 'on' if 'smallkey_shuffle' in weights else dungeon_items in ['mcs', 'full']\n ret.bigkeyshuffle = get_choice('bigkey_shuffle') == 'on' if 'bigkey_shuffle' in weights else dungeon_items in ['full']\n\n ret.accessibility = 
get_choice('accessibility')\n ret.restrict_boss_items = get_choice('restrict_boss_items')\n\n entrance_shuffle = get_choice('entrance_shuffle')\n ret.shuffle = entrance_shuffle if entrance_shuffle != 'none' else 'vanilla'\n overworld_map = get_choice('overworld_map')\n ret.overworld_map = overworld_map if overworld_map != 'default' else 'default'\n door_shuffle = get_choice('door_shuffle')\n ret.door_shuffle = door_shuffle if door_shuffle != 'none' else 'vanilla'\n ret.intensity = get_choice('intensity')\n ret.experimental = get_choice('experimental') == 'on'\n ret.collection_rate = get_choice('collection_rate') == 'on'\n\n ret.dungeon_counters = get_choice('dungeon_counters') if 'dungeon_counters' in weights else 'default'\n if ret.dungeon_counters == 'default':\n ret.dungeon_counters = 'pickup' if ret.door_shuffle != 'vanilla' or ret.compassshuffle == 'on' else 'off'\n\n ret.shufflelinks = get_choice('shufflelinks') == 'on'\n ret.pseudoboots = get_choice('pseudoboots') == 'on'\n ret.shopsanity = get_choice('shopsanity') == 'on'\n ret.dropshuffle = get_choice('dropshuffle') == 'on'\n ret.pottery = get_choice('pottery') if 'pottery' in weights else 'none'\n ret.colorizepots = get_choice('colorizepots') == 'on'\n ret.shufflepots = get_choice('pot_shuffle') == 'on'\n ret.mixed_travel = get_choice('mixed_travel') if 'mixed_travel' in weights else 'prevent'\n ret.standardize_palettes = get_choice('standardize_palettes') if 'standardize_palettes' in weights else 'standardize'\n\n goal = get_choice('goals')\n if goal is not None:\n ret.goal = {'ganon': 'ganon',\n 'fast_ganon': 'crystals',\n 'dungeons': 'dungeons',\n 'pedestal': 'pedestal',\n 'triforce-hunt': 'triforcehunt',\n 'trinity': 'trinity'\n }[goal]\n ret.openpyramid = goal in ['fast_ganon', 'trinity'] if ret.shuffle in ['vanilla', 'dungeonsfull', 'dungeonssimple'] else False\n\n ret.crystals_gt = get_choice('tower_open')\n\n ret.crystals_ganon = get_choice('ganon_open')\n\n from ItemList import set_default_triforce\n default_tf_goal, default_tf_pool = set_default_triforce(ret.goal, 0, 0)\n goal_min = get_choice_default('triforce_goal_min', default=default_tf_goal)\n goal_max = get_choice_default('triforce_goal_max', default=default_tf_goal)\n pool_min = get_choice_default('triforce_pool_min', default=default_tf_pool)\n pool_max = get_choice_default('triforce_pool_max', default=default_tf_pool)\n ret.triforce_goal = random.randint(int(goal_min), int(goal_max))\n min_diff = get_choice_default('triforce_min_difference', default=default_tf_pool-default_tf_goal)\n ret.triforce_pool = random.randint(max(int(pool_min), ret.triforce_goal + int(min_diff)), int(pool_max))\n\n ret.mode = get_choice('world_state')\n if ret.mode == 'retro':\n ret.mode = 'open'\n ret.retro = True\n ret.retro = get_choice('retro') == 'on' # this overrides world_state if used\n\n ret.bombbag = get_choice('bombbag') == 'on'\n\n ret.hints = get_choice('hints') == 'on'\n\n swords = get_choice('weapons')\n if swords is not None:\n ret.swords = {'randomized': 'random',\n 'assured': 'assured',\n 'vanilla': 'vanilla',\n 'swordless': 'swordless'\n }[swords]\n\n ret.difficulty = get_choice('item_pool')\n\n ret.item_functionality = get_choice('item_functionality')\n\n old_style_bosses = {'basic': 'simple',\n 'normal': 'full',\n 'chaos': 'random'}\n boss_choice = get_choice('boss_shuffle')\n if boss_choice in old_style_bosses.keys():\n boss_choice = old_style_bosses[boss_choice]\n ret.shufflebosses = boss_choice\n\n enemy_choice = get_choice('enemy_shuffle')\n if enemy_choice == 
'chaos':\n enemy_choice = 'random'\n ret.shuffleenemies = enemy_choice\n\n old_style_damage = {'none': 'default',\n 'chaos': 'random'}\n damage_choice = get_choice('enemy_damage')\n if damage_choice in old_style_damage:\n damage_choice = old_style_damage[damage_choice]\n ret.enemy_damage = damage_choice\n\n ret.enemy_health = get_choice('enemy_health')\n\n ret.beemizer = get_choice('beemizer') if 'beemizer' in weights else '0'\n\n inventoryweights = weights.get('startinventory', {})\n startitems = []\n for item in inventoryweights.keys():\n if get_choice(item, inventoryweights) == 'on':\n startitems.append(item)\n ret.startinventory = ','.join(startitems)\n if len(startitems) > 0:\n ret.usestartinventory = True\n\n if 'rom' in weights:\n romweights = weights['rom']\n ret.sprite = get_choice('sprite', romweights)\n ret.disablemusic = get_choice('disablemusic', romweights) == 'on'\n ret.quickswap = get_choice('quickswap', romweights) == 'on'\n ret.reduce_flashing = get_choice('reduce_flashing', romweights) == 'on'\n ret.msu_resume = get_choice('msu_resume', romweights) == 'on'\n ret.fastmenu = get_choice('menuspeed', romweights)\n ret.heartcolor = get_choice('heartcolor', romweights)\n ret.heartbeep = get_choice('heartbeep', romweights)\n ret.ow_palettes = get_choice('ow_palettes', romweights)\n ret.uw_palettes = get_choice('uw_palettes', romweights)\n ret.shuffle_sfx = get_choice('shuffle_sfx', romweights) == 'on'\n\n return ret\n\nif __name__ == '__main__':\n main()\n","sub_path":"Mystery.py","file_name":"Mystery.py","file_ext":"py","file_size_in_byte":12590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"242152126","text":"import pytest\nimport stk\nfrom rdkit.Chem import AllChem as rdkit\n\nfrom ....case_data import CaseData\n\n\natom = rdkit.MolFromSmiles('[Fe+2]')\natom.AddConformer(rdkit.Conformer(atom.GetNumAtoms()))\n\n_iron_atom = stk.BuildingBlock.init_from_rdkit_mol(atom)\natom_0, = _iron_atom.get_atoms(0)\n_iron_atom = _iron_atom.with_functional_groups(\n (stk.SingleAtom(atom_0) for i in range(6))\n)\n\n_iron_bi_1 = stk.BuildingBlock(\n smiles='BrN=Cc1ccccn1',\n functional_groups=[\n stk.SmartsFunctionalGroupFactory(\n smarts='[#6]~[#7X2]~[#35]',\n bonders=(1, ),\n deleters=(),\n ),\n stk.SmartsFunctionalGroupFactory(\n smarts='[#6]~[#7X2]~[#6]',\n bonders=(1, ),\n deleters=(),\n ),\n ]\n)\n\n\n@pytest.fixture(\n params=(\n CaseData(\n molecule=stk.ConstructedMolecule(\n stk.metal_complex.OctahedralLambda(\n metals={_iron_atom: 0},\n ligands={_iron_bi_1: (0, 1, 2)},\n reaction_factory=stk.DativeReactionFactory(\n stk.GenericReactionFactory(\n bond_orders={\n frozenset({\n stk.GenericFunctionalGroup,\n stk.SingleAtom\n }): 9\n }\n )\n )\n )\n ),\n smiles=(\n '[H]C1=C([H])C([H])=N2->[Fe+2]34(<-N(Br)=C([H])C2=C1[H'\n '])(<-N(Br)=C([H])C1=C([H])C([H])=C([H])C([H])=N->31)'\n '<-N(Br)=C([H])C1=C([H])C([H])=C([H])C([H])=N->41'\n ),\n ),\n CaseData(\n molecule=stk.ConstructedMolecule(\n stk.metal_complex.OctahedralLambda(\n metals=_iron_atom,\n ligands=_iron_bi_1,\n reaction_factory=stk.DativeReactionFactory(\n stk.GenericReactionFactory(\n bond_orders={\n frozenset({\n stk.GenericFunctionalGroup,\n stk.SingleAtom\n }): 9\n }\n )\n )\n )\n ),\n smiles=(\n '[H]C1=C([H])C([H])=N2->[Fe+2]34(<-N(Br)=C([H])C2=C1[H'\n '])(<-N(Br)=C([H])C1=C([H])C([H])=C([H])C([H])=N->31)'\n '<-N(Br)=C([H])C1=C([H])C([H])=C([H])C([H])=N->41'\n ),\n ),\n ),\n)\ndef metal_complex_octahedral_lambda(request):\n return 
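The `Mystery.py` record above rolls every game setting through `get_choice`, which is a thin wrapper over weighted random selection on a `{option: {value: weight}}` mapping. A minimal standalone sketch of that pattern, using the stdlib `random` module in place of `RaceRandom` (the weights dict below is a made-up example, not one of the real weight files):

```python
import random

def get_choice(option, root):
    """Pick one key of root[option], weighted by its integer value."""
    if option not in root or not root[option]:
        return None
    if not isinstance(root[option], dict):
        return root[option]  # scalar settings pass through unchanged
    keys = list(root[option].keys())
    weights = [int(w) for w in root[option].values()]
    return random.choices(keys, weights=weights)[0]

weights = {"world_state": {"open": 60, "standard": 30, "retro": 10}}
print(get_choice("world_state", weights))  # "open" about 60% of the time
```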
request.param\n","sub_path":"tests/molecular/molecules/molecule/fixtures/metal_complex/octahedral/octahedral_lambda.py","file_name":"octahedral_lambda.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"242312134","text":"import os\nfrom typing import List, Dict, Any, Tuple\n\nfrom checkov.cloudformation.graph_builder.graph_components.block_types import CloudformationTemplateSections, BlockType\nfrom checkov.cloudformation.graph_builder.graph_components.blocks import CloudformationBlock\nfrom checkov.cloudformation.parser.node import dict_node\n\n\ndef convert_graph_vertices_to_definitions(\n vertices: List[CloudformationBlock], root_folder: str\n) -> Tuple[Dict[str, Dict[str, Any]], Dict[str, Dict[str, Any]]]:\n definitions: Dict[str, Dict[str, Any]] = {}\n breadcrumbs: Dict[str, Dict[str, Any]] = {}\n for vertex in vertices:\n if vertex.block_type != BlockType.RESOURCE:\n continue\n block_path = vertex.path\n block_type = CloudformationTemplateSections.RESOURCES.value if vertex.block_type == 'resource' else vertex.block_type\n block_name = vertex.name.split('.')[-1] # vertex.name is \"type.name\" so type.name -> [type, name]\n\n definition = {\n 'Type': vertex.attributes['resource_type'],\n 'Properties': vertex.config\n }\n definitions.setdefault(block_path, {}).setdefault(block_type, {}).setdefault(block_name, definition)\n\n relative_block_path = f\"/{os.path.relpath(block_path, root_folder)}\"\n add_breadcrumbs(vertex, breadcrumbs, relative_block_path)\n return definitions, breadcrumbs\n\n\ndef add_breadcrumbs(vertex: CloudformationBlock, breadcrumbs: Dict[str, Dict[str, Any]], relative_block_path: str) -> None:\n vertex_breadcrumbs = vertex.breadcrumbs\n if vertex_breadcrumbs:\n breadcrumbs.setdefault(relative_block_path, {})[vertex.name] = vertex_breadcrumbs\n","sub_path":"checkov/cloudformation/graph_builder/graph_to_definitions.py","file_name":"graph_to_definitions.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"489566378","text":"import sys\nfrom pathlib import Path\nsys.path.append(str(Path.cwd()))\n\nfrom datetime import datetime, timedelta\nfrom time import sleep\nfrom Digiccy1.huobi_gateway_local import HuobiGateway\nfrom myUtility import load_json\nfrom myEvent import (\n Event, \n EventEngine,\n EVENT_TICK,\n EVENT_ORDER,\n EVENT_TRADE,\n EVENT_POSITION,\n EVENT_ACCOUNT,\n EVENT_CONTRACT,\n EVENT_LOG,\n) \n\nfrom myObject import (\n TickData,\n OrderData,\n TradeData,\n AccountData,\n ContractData,\n OrderRequest,\n CancelRequest,\n SubscribeRequest,\n HistoryRequest\n)\nfrom myConstant import (\n Direction,\n Exchange,\n Product,\n Status,\n OrderType,\n Interval\n)\n\nsetting = load_json(\"connect_huobi.json\")\n\ndef process_event(event:Event):\n print('event type:%s' % event.type)\n print(\"event data:%s\" % event.data)\n\nevent_engine = EventEngine()\nevent_engine.register(EVENT_TICK, process_event)\n# event_engine.register(EVENT_CONTRACT, process_event)\nevent_engine.register(EVENT_POSITION, process_event)\n# event_engine.register(EVENT_ACCOUNT, process_event)\nevent_engine.register(EVENT_LOG, process_event)\nevent_engine.start()\n\ngateway = HuobiGateway(event_engine)\n\ngateway.connect(setting)\n\nreq = SubscribeRequest(\"ethusdt\", Exchange.HUOBI)\n\n# gateway.subscribe(req)\n# endtime = datetime.now()\n# starttime = endtime - timedelta(days=1)\n# historyReq = 
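`convert_graph_vertices_to_definitions` above builds its nested output with chained `setdefault` calls. A small self-contained illustration of that idiom (the path, section, and resource names below are hypothetical):

```python
definitions = {}

def add_definition(definitions, path, section, name, body):
    # Each setdefault creates the missing level and returns it, so one
    # expression walks/creates the whole {path: {section: {name: body}}} nest.
    definitions.setdefault(path, {}).setdefault(section, {}).setdefault(name, body)

add_definition(definitions, "template.yaml", "Resources", "MyBucket",
               {"Type": "AWS::S3::Bucket", "Properties": {}})
add_definition(definitions, "template.yaml", "Resources", "MyQueue",
               {"Type": "AWS::SQS::Queue", "Properties": {}})
print(sorted(definitions["template.yaml"]["Resources"]))  # ['MyBucket', 'MyQueue']
```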
HistoryRequest('ethusdt', Exchange.HUOBI, starttime, endtime, Interval.MINUTE)\n# df = gateway.query_history(historyReq)\n# print(df)\n\nwhile True:\n sleep(5)","sub_path":"gateway/huobi_gateway_local/test_huobi_gateway.py","file_name":"test_huobi_gateway.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"372048867","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nScript to parse a python script with tags and run it, batch mode.\n\"\"\"\nfrom __future__ import print_function\nimport re\nimport sys\nimport subprocess\n\nreg = re.compile(\"#+\\ *<{3}\\ *(.*?)#+\\ *>{3}\", re.DOTALL)\n\nif len(sys.argv) != 2:\n print(\"\"\"\n Usage: {script} [python script]\n \"\"\".format(script=sys.argv[0]),\n file=sys.stderr)\n\nwith open(sys.argv[1]) as source:\n src = reg.findall(source.read())\n\nif len(src) == 0:\n print(\"No tags found! Add #<<< #>>> to the file.\", file=sys.stderr)\n\npcode = ['print \"\"\"{c}\"\"\"'.format(c=c) for c in src]\ncode = [None] * (2 * len(pcode))\ncode[::2] = pcode\ncode[1::2] = src\ncode.insert(0, 'print \">>>>>>>>>> START <<<<<<<<<<\"')\ncode.append('print \">>>>>>>>>> END <<<<<<<<<<\"')\nwith open('.pythonbatch.out', 'w') as out:\n p1 = subprocess.Popen(['python', '-c', \"\\n\".join(code)], stdout=out)\n","sub_path":"python/pythonbatch.py","file_name":"pythonbatch.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"413707384","text":"\"\"\"\nAzdevman Consts\n\nThis module contains constant variables that will not change\n\"\"\"\n\n# Environment Variables\nAZDEVMAN_ENV_PREFIX = \"AZDEVMAN_\"\n\n# Azure Devops\nAZ_BASE_URL = \"https://dev.azure.com/\"\nAZ_DEFAULT_ORG = \"ORGANIZATION\"\nAZ_DEFAULT_PAT = \"UEFUCg==\"\nAZ_DEFAULT_PROJECT = \"PROJECT\"\n\n# Config file\nCONFIG_DIR = \".azdevman\"\nCONFIG_FILE_NAME = \"config.json\"\nCONFIG_FILE_DEFAULT_PROFILE = \"default\"\nCONFIG_FILE_DEFAULT_CONTENT = {\n \"CurrentContext\": CONFIG_FILE_DEFAULT_PROFILE,\n \"Profiles\": {\n \"default\": {\n \"Azure DevOps Organization\": AZ_DEFAULT_ORG,\n \"Personal Access Token\": AZ_DEFAULT_PAT,\n \"Project\": AZ_DEFAULT_PROJECT\n }\n }\n}\n\n# Azure DevOps build definition\nAZ_DEFAULT_BUILD_DEF_PROCESS = {\n \"phases\": [\n {\n \"condition\": \"succeeded()\",\n \"jobAuthorizationScope\": \"projectCollection\",\n \"name\": \"Agent job 1\",\n \"refName\": \"Job_1\",\n \"target\": {\n \"allowScriptsAuthAccessOption\": False,\n \"executionOptions\": {\n \"type\": 0\n },\n \"type\": 1\n }\n }\n ],\n \"type\": 1\n}\nAZ_DEFAULT_BUILD_DEF_QUEUE = {\n \"id\": 12,\n \"name\": \"Hosted VS2017\",\n \"pool\": {\n \"id\": 3,\n \"is_hosted\": True,\n \"name\": \"Hosted VS2017\"\n }\n}\nAZ_DEFAULT_BRANCH = \"refs/heads/master\"\n","sub_path":"azdevman/utils/_const.py","file_name":"_const.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"23375917","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2011 OpenStack LLC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
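`pythonbatch.py` above extracts code between `#<<<` and `#>>>` markers with a DOTALL regex. A quick standalone check of that exact pattern (the sample source is made up):

```python
import re

reg = re.compile(r"#+\ *<{3}\ *(.*?)#+\ *>{3}", re.DOTALL)

source = """
x = 1  # not captured
#<<<
print("hello")
print("world")
#>>>
y = 2  # not captured
"""
# DOTALL lets the lazy (.*?) span newlines, so each tagged block is captured whole.
for block in reg.findall(source):
    print(repr(block))
```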
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"Routines for configuring Reddwarf.\"\"\"\n\nimport re\n\nfrom reddwarf.openstack.common import config as openstack_config\n\n\nparse_options = openstack_config.parse_options\nadd_log_options = openstack_config.add_log_options\nadd_common_options = openstack_config.add_common_options\nsetup_logging = openstack_config.setup_logging\n\n\ndef _to_list(value):\n items = value.split(',')\n trimmed_list = [item.strip() for item in items]\n return trimmed_list\n\n\ndef get_option(options, option, **kwargs):\n if option in options and kwargs.get('type', 'str') == 'list':\n value = options[option]\n return _to_list(value)\n else:\n return openstack_config.get_option(options, option, **kwargs)\n\n\nclass Config(object):\n\n instance = {}\n\n @classmethod\n def load_paste_app(cls, *args, **kwargs):\n conf, app = openstack_config.load_paste_app(*args, **kwargs)\n cls.instance.update(conf)\n return conf, app\n\n @classmethod\n def load_paste_config(cls, *args, **kwargs):\n conf_file, conf = openstack_config.load_paste_config(*args, **kwargs)\n cls.instance.update(conf)\n return conf\n\n @classmethod\n def append_to_config_values(cls, *args):\n config_file = openstack_config.find_config_file(*args)\n if not config_file:\n raise RuntimeError(\"Unable to locate any configuration file. \"\n \"Cannot load application %s\" % app_name)\n # Now take the conf file values and append them to the current conf\n with open(config_file, 'r') as conf:\n for line in conf.readlines():\n m = re.match(\"\\s*([^#]\\S+)\\s*=\\s*(\\S+)\\s*\", line)\n if m:\n cls.instance[m.group(1)] = m.group(2)\n\n @classmethod\n def write_config_values(cls, *args, **kwargs):\n # Pass in empty kwargs so it doesnt mess up the config find\n config_file = openstack_config.find_config_file(*args)\n if not config_file:\n raise RuntimeError(\"Unable to locate any configuration file. \"\n \"Cannot load application %s\" % app_name)\n with open(config_file, 'a') as conf:\n for k, v in kwargs.items():\n # Start with newline to be sure its on a new line\n conf.write(\"\\n%s=%s\" % (k, v))\n # Now append them to the cls instance\n cls.append_to_config_values(*args)\n\n @classmethod\n def get(cls, key, default=None, **kwargs):\n # We always use a default, even if its None.\n kwargs['default'] = default\n return get_option(cls.instance, key, **kwargs)\n\n\ndef create_type_func(type):\n @classmethod\n def get(cls, key, default=None, **kwargs):\n kwargs['type'] = type\n return cls.get(key, default, **kwargs)\n return get\n\nConfig.get_bool = create_type_func('bool')\nConfig.get_float = create_type_func('float')\nConfig.get_int = create_type_func('int')\nConfig.get_list = create_type_func('list')\nConfig.get_str = create_type_func('str')\ndel create_type_func\n\n\nclass ConfigFacade(object):\n \"\"\"This class presents an interface usable by OpenStack Common modules.\n\n OpenStack common uses a new config interface where the values are\n accessed as attributes directly. 
This presents the same interface\n so we can interface with OS common modules while we change our config\n stuff.\n\n \"\"\"\n\n value_info = {}\n\n def __init__(self, conf):\n self.conf = conf\n\n def __getattr__(self, name):\n if name == \"register_opts\":\n def f(*args, **kwargs):\n pass\n return f\n if name in self.value_info:\n v = self.value_info[name]\n return self.conf.get(name, **v)\n return self.conf.get(name)\n\n\nclass OsCommonModule(object):\n \"\"\"Emulates the OpenStack Common cfg module.\"\"\"\n\n @property\n def CONF(self):\n return ConfigFacade(Config())\n\n\ndef create_type_func(type):\n @classmethod\n def func(cls, name, default, help):\n ConfigFacade.value_info[name] = {'default': default, 'type': type}\n return func\n\nOsCommonModule.BoolOpt = create_type_func('bool')\nOsCommonModule.IntOpt = create_type_func('int')\nOsCommonModule.ListOpt = create_type_func('list')\nOsCommonModule.StrOpt = create_type_func('str')\ndel create_type_func\n","sub_path":"reddwarf/common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"159697152","text":"import argparse\nimport os.path\nimport torch\n\n\ndef parseArgs():\n\n parser = argparse.ArgumentParser(description='Relation Networks')\n\n parser.add_argument('--model', type=str, default='RN', choices=['RN', 'CNN-MLP'],\n help='the model to use')\n parser.add_argument('--batchsize', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--epochs', type=int, default=20, metavar='N',\n help='number of epochs to train (default: 20)')\n parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',\n help='learning rate (default: 0.0001)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log_interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--log_dir', type=str, default='./logs/')\n parser.add_argument('--exp_id', type=str, default='experiment')\n parser.add_argument('--checkpoint', type=str,\n help='resume from model stored')\n\n args = parser.parse_args()\n \n args.log_dir = os.path.join(args.log_dir, args.exp_id)\n args.checkpoint_dir = os.path.join(args.log_dir, 'checkpoints')\n if not os.path.exists(args.log_dir):\n os.mkdir(args.log_dir)\n if not os.path.exists(args.checkpoint_dir):\n os.mkdir(args.checkpoint_dir)\n \n\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed) \n\n return args\n","sub_path":"parseArgs.py","file_name":"parseArgs.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"578201123","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 9 10:52:13 2021\r\n\r\n@author: pyliu\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport math\r\nimport scipy as sp\r\nimport time\r\nimport yaml\r\n\r\nfrom underscore_prefix import *\r\nfrom underscore_suffix import *\r\n\r\ndef get_data_yaml(filename = \"blenheim_random_success.yaml\"):\r\n \"\"\"\r\n Extract data from YAML file to pandas DataFrame\r\n\r\n Parameters\r\n ----------\r\n filename : STR\r\n name of yaml file containing data\r\n The default is \"blenheim_random_success.yaml\".\r\n\r\n Returns\r\n -------\r\n df : pandas df\r\n columns=[\"origin\", \"target\", \"edge_id\", 
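The docstring above describes `ConfigFacade` as exposing dict-backed configuration through attribute access; the core trick is `__getattr__`, which Python calls only when normal attribute lookup fails. A minimal sketch of the idea (the class and keys are illustrative, not Reddwarf's):

```python
class DictFacade(object):
    def __init__(self, values):
        self._values = values  # found by normal lookup, so no recursion below

    def __getattr__(self, name):
        # Called only for attributes not found the normal way.
        try:
            return self._values[name]
        except KeyError:
            raise AttributeError(name)

conf = DictFacade({"bind_host": "0.0.0.0", "bind_port": 8779})
print(conf.bind_host, conf.bind_port)  # attribute syntax, dict storage
```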
\"operation_time\", \"n_robots\"].\r\n\r\n \"\"\"\r\n tic = time.time()\r\n \r\n #1) Get data from yaml\r\n with open(filename) as file:\r\n documents = yaml.full_load(file)\r\n \r\n #2) create empty df\r\n total_length = 0\r\n for edge in documents:\r\n for n in range(len( documents[edge] )):\r\n total_length += len( documents[edge][n] )\r\n df = pd.DataFrame(index = np.arange(total_length), columns=[\"origin\", \"target\", \"edge_id\", \"operation_time\", \"n_robots\"])\r\n \r\n #3) add data to df\r\n ind = 0\r\n for edge in documents:\r\n origin = underscore_prefix(edge)\r\n target = underscore_suffix(edge)\r\n for n in range(len( documents[edge] )):\r\n for i in range(len( documents[edge][n] )):\r\n t = documents[edge][n][i]\r\n df[\"origin\"][ind] = origin\r\n df[\"target\"][ind] = target\r\n df[\"edge_id\"][ind] = edge\r\n df[\"operation_time\"][ind] = t\r\n df[\"n_robots\"][ind] = n+1\r\n ind += 1\r\n toc = time.time()\r\n print(\"Time taken:\", toc-tic, \"secs\")\r\n return df\r\n ","sub_path":"Wk8/get_data_yaml.py","file_name":"get_data_yaml.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"17487918","text":"import unittest\nimport requests\nfrom json import dumps\n\n\nfrom test import (\n TEST_DB_PATH,\n TEST_USERNAME,\n TEST_PASSWORD,\n)\n\nfrom test.test_request_utils import implode_cookies\n\nfrom app import FILE_PATH\n\nfrom app.objects.profile import (\n Message,\n Reader,\n Profile\n)\n\nfrom app.request_utils import (\n auth_request\n)\n\nfrom app.objects.headers import Headers\n\nfrom app.database_utils import (\n get_database_connection,\n create_record,\n get_records,\n delete_records\n)\n\nconn = get_database_connection(TEST_DB_PATH)\nc = conn.cursor()\n\nclass TestMessage(unittest.TestCase):\n\n def setUp(self):\n self.message = Message(\n text = 'test_message'\n )\n\n def test_json(self):\n predicted_json = dumps(\n {\n 'text': 'test_message',\n 'giphyId': None,\n 'override': None\n }\n )\n\n self.assertEqual(self.message.json, predicted_json)\n\nclass TestReader(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n try:\n cls.f_id = create_record(\n c, 'files',\n name = 'bee_movie_script.txt', current_position = 0\n )\n\n except:\n cls.f_id = 1\n pass\n\n conn.commit()\n\n \n @classmethod\n def tearDownClass(cls):\n delete_records(\n c, 'files',\n name = 'bee_movie_script.txt'\n )\n \n\n def setUp(self):\n self.record = get_records(\n c, 'files',\n id = self.f_id\n ).fetchone()\n\n self.fileReader = Reader(conn, self.f_id)\n\n def test_get_message(self):\n f = open(FILE_PATH + self.record[1], 'r')\n next_line = f.readlines()[self.record[2]]\n f.close()\n\n self.assertEqual(next_line, self.fileReader.get_line())\n\n def test_increment(self):\n self.fileReader.increment()\n\n new_record = get_records(\n c, 'files',\n id = self.f_id\n ).fetchone()\n\n self.assertTrue(self.record[2] + 1 == new_record[2])\n\n\nclass TestProfile(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.f_id = create_record(\n c, 'files',\n name = 'bee_movie_script.txt', \n )\n\n conn.commit()\n\n try:\n cls.t_id = create_record(\n c, 'targets',\n name = TEST_USERNAME, file_id = cls.f_id\n )\n \n except:\n delete_records(\n c, 'targets',\n name = TEST_USERNAME\n )\n\n cls.t_id = create_record(\n c, 'targets',\n name = TEST_USERNAME, file_id = cls.f_id\n )\n \n conn.commit()\n\n try:\n cls.a_id = create_record(\n c, 'accounts',\n name = TEST_USERNAME, password = 
TEST_PASSWORD\n )\n except:\n delete_records(\n c, 'targets',\n name = TEST_USERNAME\n )\n\n cls.a_id = create_record(\n c, 'accounts',\n name = TEST_USERNAME, password = TEST_PASSWORD\n )\n\n conn.commit()\n\n cls.profile = Profile(conn, cls.t_id)\n\n @classmethod\n def tearDownClass(cls):\n delete_records(\n c,\n 'files',\n id = cls.f_id,\n )\n\n conn.commit()\n\n delete_records(\n c,\n 'targets',\n id = cls.t_id\n )\n\n conn.commit()\n\n delete_records(\n c, 'accounts',\n id = cls.a_id\n )\n\n conn.commit()\n\n def test_get_message(self):\n msg = self.profile.get_message()\n self.assertIsInstance(msg, Message)\n\n def test_cookies(self):\n self.assertIn(\n 'say-at-key',\n self.profile.cookies\n )\n\n def test_sayat_id(self):\n self.assertIsInstance(\n self.profile.sayat_id,\n int\n )\n\n @unittest.skipIf(TEST_USERNAME is None,\n \"TEST_USERNAME environment variable not defined\")\n @unittest.skipIf(TEST_PASSWORD is None,\n \"TEST_PASSWORD environment variable not defined\")\n def test_auth_cookies(self):\n self.profile.auth(self.a_id)\n\n result = requests.get(\n 'https://sayat.me/api/v1/profile',\n headers = {\n 'accept': 'application/json',\n 'cookie': implode_cookies(self.profile.cookies)\n }\n ).json()\n\n self.assertIn(\n TEST_USERNAME,\n result['username']\n )\n\n\n\n\n\n\n\n ","sub_path":"test/test_profile.py","file_name":"test_profile.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"465181893","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport asyncio\nimport logging\nimport time\n\nimport feedparser\nfrom aiogram import Bot, Dispatcher, executor, types\nimport aiohttp\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\n\nimport config\nfrom db import *\n\nAPI_TOKEN = config.TOKEN\n\nrep = {\"\\\\\": r\"\\\\\", \"`\": \"\\\\`\", \"*\": \"\\\\*\", \"_\": \"\\\\_\", \"#\": \"\\\\#\", \"+\": \"\\\\+\", \"-\": \"\\\\-\", \".\": \"\\\\.\", \"!\": \"\\\\!\"}\n\n# Configure logging\nlogging.basicConfig(\n filename='main.log',\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.WARNING)\n\nheaders = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 '\n 'Safari/537.36',\n}\n\n# Initialize bot and dispatcher\nbot = Bot(token=API_TOKEN)\ndp = Dispatcher(bot)\n\n\nasync def get_list(entries, last_link):\n \"\"\"Get a list of article links for feed updates\"\"\"\n link_list = []\n for entry in entries:\n link = entry.link\n if link == last_link:\n return link_list\n link_list.append(link)\n return link_list\n\n\nasync def get_refresh():\n \"\"\"Update subscription\"\"\"\n rows = db_all()\n async with aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(ssl=False), headers=headers) as session:\n tasks = [\n asyncio.create_task(refresh(session, row))\n for row in rows]\n await asyncio.wait(tasks)\n\n\nasync def refresh(session, row):\n \"\"\"Refresh subscription\"\"\"\n try:\n async with session.get(row[0]) as response:\n rss_content = await response.text()\n status_code = response.status\n except aiohttp.client_exceptions.ClientConnectorError:\n logging.warning(f\"{row[1]}[{row[0]}]\\t更新失败:链接错误\")\n except asyncio.exceptions.TimeoutError:\n logging.warning(f\"{row[1]}[{row[0]}]\\t更新失败:连接超时\")\n else:\n rss_parse = feedparser.parse(rss_content)\n if len(rss_parse.entries) < 1:\n logging.warning(f\"{row[1]}[{row[0]}]\\t更新失败:链接失效(status:{status_code})\")\n else:\n # Sort by 
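`TestMessage.test_json` above compares two serialized strings, which silently depends on dictionary key order staying stable across serializations; comparing parsed objects is order-independent. A small self-contained variant (class name and payload are illustrative):

```python
import json
import unittest

class TestJsonPayload(unittest.TestCase):
    def test_payload(self):
        produced = json.dumps(
            {"text": "test_message", "giphyId": None, "override": None}
        )
        expected = {"text": "test_message", "giphyId": None, "override": None}
        # Parsing back before comparing makes the assertion order-insensitive.
        self.assertEqual(json.loads(produced), expected)

if __name__ == "__main__":
    unittest.main()
```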
publish time\n sort_list = [ent for ent in rss_parse.entries]\n sort_list.sort(key=lambda ent: time.mktime(ent.published_parsed), reverse=True)\n\n link_list = await get_list(sort_list, row[-1])\n if link_list:\n # Get subscribers\n usrlist = db_rssusr(row[0])\n if usrlist:\n for usr in usrlist:\n uid = usr[0]\n for link in link_list:\n await bot.send_message(uid, f\"{row[1]}\\n{link}\", parse_mode=\"HTML\")\n try:\n db_update(row[0], link_list[0])\n except Exception as e:\n logging.warning(str(e))\n\n\n@dp.message_handler(commands=['start'])\nasync def cmd_start(message: types.Message):\n await message.reply(f\"这是一个 RSS 订阅 Bot,更新频���为{config.INTERVAL}分钟\\n\"\n f\"使用 /help 获取帮助\")\n\n\n@dp.message_handler(commands=['help'])\nasync def cmd_help(message: types.Message):\n await message.reply(\n \"命令列表:\\n\" +\n \"/rss 显示当前订阅列表\\n\" +\n \"/sub 订阅一个RSS `/sub http://example.com/feed`\\n\" +\n \"/unsub 退订一个RSS `/unsub http://example.com/feed`\\n\" +\n \"/help 显示帮助信息\", parse_mode=\"MarkdownV2\")\n\n\n@dp.message_handler(commands=['rss'])\nasync def cmd_rss(message: types.Message):\n \"\"\"Send to users their subscription list\"\"\"\n reword = \"订阅列表:\"\n rss_list = db_chatid(message.chat.id)\n if rss_list:\n for r in rss_list:\n title = str(r[1])\n for k in rep:\n title = title.replace(k, rep[k])\n reword += f\"\\n[{title}]({r[2]}) `{r[0]}`\"\n await message.reply(reword, parse_mode=\"MarkdownV2\", disable_web_page_preview=True)\n else:\n await message.reply(\"还未添加任何订阅,使用 /help 来获取帮助\")\n\n\n@dp.message_handler(commands=['sub'])\nasync def cmd_sub(message: types.Message):\n \"\"\"Add subscription\"\"\"\n # Check if the format is correct\n try:\n rss = message.text.split()[1]\n except IndexError:\n await message.reply(\"使用方法: `/sub http://example.com/feed`\", parse_mode=\"MarkdownV2\")\n else:\n # Check if RSS is subscribed\n if db_chatid_rss(message.chat.id, rss):\n await message.reply(\"订阅过的 RSS\")\n else:\n # Check if this RSS link exists in the rss table\n db_rss_list = db_rss(rss)\n if db_rss_list:\n db_write_usr(message.chat.id, rss)\n title = db_rss_list[0][1]\n for k in rep:\n title = title.replace(k, rep[k])\n await message.reply(f\"[{title}]({db_rss_list[0][2]}) 订阅成功\", parse_mode=\"MarkdownV2\",\n disable_web_page_preview=True)\n else:\n # Check if the RSS link is valid\n async with aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(ssl=False), headers=headers) as session:\n try:\n async with session.get(rss) as response:\n rss_content = await response.text()\n status_code = response.status\n except aiohttp.client_exceptions.ClientConnectorError as e:\n await message.reply(f\"订阅失败:链接错误({e})\")\n except asyncio.exceptions.TimeoutError as e:\n await message.reply(f\"订阅失败:连接超时({e})\")\n else:\n rss_parse = feedparser.parse(rss_content)\n if len(rss_parse.entries) < 1:\n await message.reply(f\"订阅失败:链接无效(status:{status_code})\")\n else:\n # Sort by publish time\n sort_list = [ent for ent in rss_parse.entries]\n sort_list.sort(key=lambda ent: time.mktime(ent.published_parsed), reverse=True)\n\n db_write_rss(rss, rss_parse.feed.title, rss_parse.feed.link, sort_list[0].link)\n db_write_usr(message.chat.id, rss)\n title = rss_parse.feed.title\n for k in rep:\n title = title.replace(k, rep[k])\n await message.reply(f\"[{title}]({rss_parse.feed.link}) 订阅成功\", parse_mode=\"MarkdownV2\",\n disable_web_page_preview=True)\n\n\n@dp.message_handler(commands=['unsub'])\nasync def cmd_unsub(message: types.Message):\n \"\"\"Remove subscription\"\"\"\n # Check if the format is correct\n try:\n rss = 
message.text.split()[1]\n except IndexError:\n await message.reply(\"使用方法: `/unsub http://example.com/feed`\", parse_mode=\"MarkdownV2\")\n else:\n # Check if RSS is subscribed\n row = db_chatid_rss(message.chat.id, rss)\n if len(row) > 0:\n # Check for other subscribers\n usr = db_rssusr(rss)\n if len(usr) > 1:\n result = db_remove_usr_rss(message.chat.id, rss)\n if not result:\n title = row[0][1]\n for k in rep:\n title = title.replace(k, rep[k])\n await message.reply(f\"[{title}]({row[0][2]}) 退订成功\", parse_mode=\"MarkdownV2\",\n disable_web_page_preview=True)\n else:\n await message.reply(f\"移除失败:{result}\")\n else:\n result = db_remove_rss(rss) + db_remove_usr_rss(message.chat.id, rss)\n if not result:\n title = row[0][1]\n for k in rep:\n title = title.replace(k, rep[k])\n await message.reply(f\"[{title}]({row[0][2]}) 退订成功\", parse_mode=\"MarkdownV2\",\n disable_web_page_preview=True)\n else:\n await message.reply(f\"移除失败:{result}\")\n else:\n await message.reply(\"未订阅过的 RSS\")\n\n\n@dp.message_handler(commands=['refresh'])\nasync def cmd_refresh(message: types.Message):\n \"\"\"Update subscription manually\"\"\"\n await get_refresh()\n\n\n# Timed task\nscheduler = AsyncIOScheduler()\nscheduler.add_job(get_refresh, 'interval', minutes=config.INTERVAL)\nscheduler.start()\n\nif __name__ == '__main__':\n # Init database\n try:\n db_init()\n except sqlite3.OperationalError:\n pass\n\n # Start bot\n executor.start_polling(dp, skip_updates=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"218724324","text":"# 猴子吃桃问题:\n# 猴子第一天吃了若干个桃子,当即吃了一半,还不解馋,又多吃了一个; 第二天,吃剩下的桃子的一半,还不过瘾,又多吃了一个;以后每天都吃前一天剩下的一半多一个,到第10天想再吃时,只剩下一个桃子了。问第一天共吃了多少个桃子?\n# x=1\n#\n# for peach in range(9):\n# x=(x+1)*2\n# print(x)\n# 1234四个数字,能组成那些3位数\n# numbers=(1,2,3,4)\n# for i in numbers:\n# for j in numbers:\n# for k in numbers:\n# print(i,j,k)\n# 1234四个数字,能组成那些个位相同3位数\n# numbers=(1,2,3,4)\n# for i in numbers:\n# for j in numbers:\n# for k in numbers:\n# if(i!=j) and (i!=k) and(j!=k):\n# print(i,j,k)\n# \"水仙花数\"是一个 3 位数,它的每个位上的数字的 3次方之和等于它本身,是这个三位数本身。\n# for i in range(1,10):\n# for j in range(0,10):\n# for k in range(0,10):\n# if i*100+j*10+k== i**3 + j**3 +k ** 3:\n# print(i,j,k)\n# \"玫瑰花数\"是一种四位数,它每一位上的四个数,每一个数的四次方之和,是这一四位数本身,找出所有的玫瑰花数\n# for i in range(1,10):\n# for j in range(0,10):\n# for k in range(0,10):\n# for n in range(0,10):\n# if i*1000+j*100+k*10+n == i**4+j**4+k**4+n**4:\n# print(i,j,k,n)\n# 两个乒乓球队,各出三人,每人只比一场。甲队有abc三人,乙队有xyz三人。抽签决定比赛名单。a不和x比,c不和x和z比\n# for i in range(0,3):\n# for j in range (0,3):\n# for k in range (0,3):\n# if(i!=j)and(i!=k)and(j!=k):\n# if(i!=0)and(k!=0)and(k!=2):\n# print(i,j,k)\n# 有n个人围成一圈,从第一个人开始报数,从一到三,只要报到3的人就出去,问留下来的是第几号位\n\nimport tkinter\ntop=tkinter . 
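Both `refresh` and `cmd_sub` above sort feed entries by `time.mktime(ent.published_parsed)`; `published_parsed` is a `time.struct_time` that feedparser derives from each item's `pubDate`. A standalone check with an inline feed (the XML is a made-up two-item feed):

```python
import time
import feedparser

rss_content = """<?xml version="1.0"?>
<rss version="2.0"><channel><title>demo</title>
<item><link>http://example.com/old</link>
<pubDate>Mon, 01 Jan 2018 00:00:00 GMT</pubDate></item>
<item><link>http://example.com/new</link>
<pubDate>Tue, 01 Jan 2019 00:00:00 GMT</pubDate></item>
</channel></rss>"""

parsed = feedparser.parse(rss_content)
entries = sorted(parsed.entries,
                 key=lambda e: time.mktime(e.published_parsed), reverse=True)
print([e.link for e in entries])  # newest first: .../new, .../old
```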
Tk()\n\nlable = tkinter.Label(top,text='Kuga',width=90,height=5,bg=\"red\",font=(\"Arial\",12))#ed\nlable .pack()\n\nlable = tkinter.Label(top,text='Agito',width=90,height=5,bg=\"orange\",font=(\"Arial\",12))\nlable .pack()\n\nlable = tkinter.Label(top,text='Ryuki',width=90,height=5,bg=\"yellow\",font=(\"Arial\",12))\nlable .pack()\n ","sub_path":"1206.py","file_name":"1206.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"368983562","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.utils.text import slugify\n\n\ndef assign_slug_field(apps, schema_editor):\n Tag = apps.get_model(\"cmsplugin_articles_ai\", \"Tag\")\n for tag in Tag.objects.all():\n tag.slug = slugify(tag.name)\n tag.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cmsplugin_articles_ai', '0003_add_non_unique_slug_to_tags'),\n ]\n\n operations = [\n migrations.RunPython(assign_slug_field),\n ]\n","sub_path":"cmsplugin_articles_ai/migrations/0004_data_migration_for_slug_field.py","file_name":"0004_data_migration_for_slug_field.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"500984461","text":"\nimport numpy as np\n\nclass NeuralNet:\n\n def __init__(self, Y, X, layers=[1], iterations=10000, normalize=False, bias=0, activation='sigmoid'):\n \"\"\"\n initialize this neural net object with training set, dependent variables and other optional parameters\n @param Y dependent variables\n @param X training set\n @param layers list of layers, which each integer describing how many nodes in that layer (don't include input layer)\n @param iterations, how many loops to 'learn'\n @param bias introduce a bias unit to the neuron, default is 0\n @param activation set the activation function, currently supports 'sigmoid' or 'htan'\n \"\"\"\n self._X = X if not normalize else self.normalize(X)\n self._Y = Y\n self._layers = layers\n self._iterations = iterations\n self._synapses = None\n self._error = None\n self._normalize = normalize\n self._bias = bias\n\n activations = {\n 'sigmoid': (self.sigmoid, self.sigmoid_derivitive),\n 'htan': (self.htan, self.htan_derivitive)\n }\n\n if activation in activations:\n self._activation = activations[activation][0]\n self._activation_prime = activations[activation][1]\n else:\n self._activation = self.sigmoid\n self._activation_prime = self.sigmoid_derivitive\n\n def sigmoid(self, t):\n return 1 / (1 + np.exp(-t))\n\n def sigmoid_derivitive(self, t):\n return t * (1-t)\n\n def htan(self, t):\n return np.tanh(t)\n\n def htan_derivitive(self, t):\n return 1.0 - np.tanh(t)**2\n\n def forward(self, X, synapses):\n layers = []\n layers.append(self._activation(X.dot(synapses[0]) + self._bias))\n for i in range(1, len(synapses)):\n layers.append(self._activation(layers[i-1].dot(synapses[i]) + self._bias))\n return layers\n\n def predict(self, X):\n \"\"\"given a new feature set, determine its learned dependent value\"\"\"\n Z = self.normalize(X) if self._normalize else X\n return {'yhat': self.forward(Z, self._synapses)[-1],\n 'mse': np.mean(map(lambda x: x**2, self._error)),\n 'normalize_X': True,\n 'bias': self._bias,\n 'layers': self._synapses}\n\n def normalize(self, M):\n return (M - M.mean()) / (M.max() - M.min())\n\n def learn(self):\n synapses = []\n synapses.append(2 * 
np.random.rand(self._X.shape[1], self._layers[0]) - 1)\n for i in range(1,len(self._layers)):\n synapses.append(2 * np.random.rand(self._layers[i-1], self._layers[i]) - 1)\n\n layers = []\n reverse = len(synapses) - 1\n for i in range(self._iterations):\n\n layers = self.forward(self._X, synapses)\n\n deltas = []\n for i in range(len(synapses)):\n if i == 0:\n self._error = (self._Y - layers[reverse - i])\n deltas.append( self._error * self._activation_prime(layers[reverse - i]) )\n else:\n deltas.append( deltas[i-1].dot(synapses[reverse - i + 1].T) * self._activation_prime(layers[reverse - i]) )\n\n for i in range(len(synapses)):\n if i == len(synapses) - 1:\n synapses[reverse - i] = synapses[reverse - i] + self._X.T.dot(deltas[i])\n else:\n synapses[reverse - i] = synapses[reverse - i] + layers[reverse - i - 1].T.dot(deltas[i])\n\n self._synapses = synapses\n","sub_path":"kneuralnet/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"240021185","text":"from socket import AF_INET, SOCK_DGRAM, SOCK_STREAM\nfrom math import ceil\nimport socket\nimport os\nimport time\n\n\nclass Client:\n BUFFSIZE = 2048\n\n def __init__(self, ip, port):\n\n self.ip = ip\n self.port = port\n self.server_addr = (\"191.52.64.73\", self.port)\n self.client_socketUDP = socket.socket(AF_INET, SOCK_DGRAM)\n self.client_socketUDP.bind((self.ip, 33001))\n\n self.client_socketTCP = socket.socket(AF_INET, SOCK_STREAM)\n self.client_socketTCP.connect(self.server_addr)\n\n def recvfrom_server(self, buffsize):\n return self.client_socketUDP.recvfrom(buffsize)\n\n def talkei(self):\n while True:\n file_name = input(\n \"Digite ls para listar os arquivos [Digite /quit para sair]\\n\"\n \"Digite o nome do arquivo para baixar\\n\"\n \"$ \")\n\n if file_name == \"/quit\":\n self.client_socketTCP.send(bytes(file_name, \"utf8\"))\n self.client_socketTCP.close()\n break\n\n print(file_name)\n self.client_socketTCP.send(bytes(file_name, \"utf8\"))\n\n data = self.client_socketTCP.recv(self.BUFFSIZE).decode(\"utf8\")\n if file_name == \"ls\":\n print(data + \"\\n\")\n else:\n if data == \"found\":\n f_size = self.client_socketTCP.recv(self.BUFFSIZE)\n f_size = int(f_size.decode(\"utf8\"))\n print(\"Iniciando protocolo UDP\" + 3 * '\\n')\n self.client_socketTCP.close()\n self.recv_file(file_name, f_size)\n break\n\n def recv_file(self, file_name, f_size):\n start = time.time()\n print(\"Recebendo arquivo\")\n client_ack = 0\n number_packages = int(f_size / self.BUFFSIZE)\n with open(file_name, \"wb\") as f:\n while True:\n data, addr = self.recvfrom_server(self.BUFFSIZE)\n if client_ack % 4 == 0:\n server_ack, addr = self.recvfrom_server(self.BUFFSIZE)\n # server_ack = server_ack.decode(\"utf8\")\n # server_ack = int(server_ack)\n # print(\"Server ack {0} Client ack {1} {2}\".format(\n print(\"Sending\")\n # server_ack, client_ack, number_packages))\n\n f.write(data)\n if client_ack >= number_packages:\n break\n client_ack += 1\n\n end = time.time() - start\n print(\"Tempo transcorrido %.2f\" % end)\n print(\"Taxa \")\n print(\"Arquivo recebido\", (float(f_size)/1000)/end)\n\n\nclient = Client(\"191.52.64.73\", 33000)\nclient.talkei()\n","sub_path":"client/client_udp.py","file_name":"client_udp.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"518035689","text":"\"\"\"\nThis script show how to build first 
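One detail worth flagging in the `NeuralNet` record above: `predict` computes `np.mean(map(lambda x: x**2, self._error))`, which works on Python 2 but raises a `TypeError` on Python 3, where `map` returns an iterator that numpy wraps as a 0-d object array. An equivalent that works on both:

```python
import numpy as np

error = np.array([[0.1], [-0.2], [0.05]])
mse = np.mean(np.square(error))  # same quantity, no map() involved
print(round(float(mse), 6))      # 0.0175
```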
gui.\n\"\"\"\nimport sys\nfrom PyQt4 import QtGui\n\ndef window():\n \"\"\"main function\"\"\"\n app = QtGui.QApplication(sys.argv)\n win = QtGui.QWidget()\n win.setGeometry(200, 200, 200, 50)\n win.setWindowTitle(\"PyQtttt\")\n\n label = QtGui.QLabel(win)\n label .setText(\"Hello World!!!\")\n label .move(50, 20)\n\n win.show()\n sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n window()\n","sub_path":"1-first_GUI/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"102708452","text":"\n\nimport os\nfrom flask import Flask, request, redirect\nimport twilio.twiml\n\n\napp = Flask(__name__)\n\n#Current issues\n#Need to know when call in starts and what type of day it is \"weekend v weekday\"\n#Need to know who to call if no one calls.\n\n#should be a db connection to cloundant\ncallers = {\n \"1234\": {\"name\" : \"John Behnke\", \"building\" : \"Cary Hall\", \"room\" : 129}\n}\n\nonDuty = {\"Cary Hall\": \"\", \"Barton Hall\": \"\"}\n\n@app.route('/promptPIN', methods=['GET', 'POST'])\ndef promptPIN():\n resp = twilio.twiml.Response()\n with resp.gather(numDigits=4, action='/confirm') as gather:\n gather.say('Enter your PIN')\n\n resp.redirect('/promptPIN')\n return str(resp)\n\n@app.route('/confirm', methods=['GET', 'POST'])\ndef confirm():\n resp = twilio.twiml.Response()\n \n if 'Digits' in request.values:\n pin = str(request.values['Digits'])\n \n\n if pin in callers:\n\n staffName = callers[pin][\"name\"]\n building = callers[pin][\"building\"]\n resp.say(\"Thank you for calling in, %s. You are now on duty for %s\" %(staffName, building))\n onDuty[building] = staffName\n else:\n resp.say(\"Unrecognized number. Please try again from your designated room phone.\")\n return str(resp)\n\n\n\nport = os.getenv('PORT', '5000')\nif __name__ == \"__main__\":\n\n\tapp.run(host='0.0.0.0', port=int(port), debug=True)\n\n#Flask==0.10.","sub_path":"welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"186844569","text":"################### IMPORTS #####################\r\nfrom math import sqrt, pow\r\n\r\n################ PROCEDIMENTOS ##################\r\ndef somar():\r\n num = float(input(\"Digite um número: \"))\r\n num2 = float(input(\"Digite outro número para ser somado ao primeiro: \"))\r\n print(\"A soma de\", num, \"com\", num2, \"é igual a:\", num + num2)\r\n\r\ndef multiplicar():\r\n num = float(input(\"Digite um número: \"))\r\n num2 = float(input(\"Digite outro número para ser multiplicado pelo primeiro: \"))\r\n print(\"A multiplicação de\", num, \"por\", num2, \"é igual a:\", num * num2)\r\n\r\ndef calcularRaiz():\r\n num = float(input(\"Digite um número: \"))\r\n print(\"A raiz quadrada de\", num, \"é igual a:\", sqrt(num))\r\n\r\ndef calcularPotencia():\r\n num = float(input(\"Digite um número: \"))\r\n num2 = float(input(\"Digite outro número: \"))\r\n print(\"A potencia de\", num, \"elevado a:\", num2, \"é igual a:\", pow(num, num2))\r\n\r\ndef calcularTabuada():\r\n num = float(input(\"Digite um número para que sua tabuada do 1 ao 10 seja calculada: \"))\r\n for tab in range(0, 11):\r\n tabuada = num * tab\r\n print(str(num) + \" x \" + str(tab) + \" = \" + str(tabuada))\r\n\r\n################# CÓDIGO CORRIDO #################\r\n\r\nprint(\"\"\"========== < MENU > ==========\r\n 1. Somar\r\n 2. 
Multiplicar\r\n 3. Calcular Raiz\r\n 4. Calcular Potência\r\n 5. Calcular Tabuada (1 ao 10)\r\n 6. Sair\"\"\")\r\nprint(\"-\"*30)\r\nopcao = int(input(\" Opção escolhida: \"))\r\nprint(\"-\"*30)\r\n\r\nif (opcao == 1):\r\n somar()\r\n\r\nelif (opcao == 2):\r\n multiplicar()\r\n\r\nelif (opcao == 3):\r\n calcularRaiz()\r\n\r\nelif (opcao == 4):\r\n calcularPotencia()\r\n\r\nelif (opcao == 5):\r\n calcularTabuada()\r\n\r\nelif (opcao == 6):\r\n sair = True\r\n\r\nelse:\r\n print(\"Opção inválida\")\r\n\r\nprint(\"-\"*30)\r\n","sub_path":"procedimentos_sem_p.py","file_name":"procedimentos_sem_p.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"438853762","text":"\"\"\"\nAuthor: Hades\nVersion: 0.1.1\n\"\"\"\nfrom bs4 import BeautifulSoup\nimport requests\n\n# links = \"http://translate.google.com.cn/translate_t?sl=auto&tl=zh-CN&text=Welcome+to+a+tour+of+Go\"\n\n\nheaders = {\n \"user-agent\":\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 Safari/605.1.15\"\n}\n\nlinks = input()\n\nres = requests.get(url = links, headers = headers)\n\nsoup = BeautifulSoup(res.text, \"lxml\")\n\nres.close()\n\nresult_text = soup.select(\"#result_box\")[-1].text\n\n# with open(\"test.txt\", \"w+\", encoding = \"utf-8\") as f:\n# f.write(result_text)\n\n# algorithm 1 \n# results = []\n# index = current = 0\n# for c in result_text:\n# current += 1\n# if c in ',。?!:':\n# results.append(result_text[index:current])\n# index = current\n# if not index == current:\n# results.append(result_text[index:current]) \n# # print(results)\n\n# algorithm 2\nsplit_len = 30\nsplit_num = len(result_text) // split_len\nnow = 0\nresults = []\nfor i in range(1,split_num+1):\n results.append(result_text[now:i*split_len])\n now = i*split_len\n \nif now < len(result_text):\n results.append(result_text[now:-1])\n\n\n\nimport wx\n\nwindow_height = 100\nwindow_width = 600\nif split_num*30 > window_height:\n window_height = split_num*30\n if window_height > 1000: window_height = 1000\n\n\nclass StaticTextFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None, -1, '牛逼翻译', size=(window_width, window_height))\n panel = wx.Panel(self, -1)\n num = 0\n text = '\\n'.join(results)\n st = wx.StaticText(panel, -1, text)\n font = st.GetFont()\n font.PointSize += 5\n st.SetFont(font)\n \n \n \n\n# Next, create an application object.\napp = wx.App()\nframe = StaticTextFrame()\nframe.Show()\napp.MainLoop()\n\n","sub_path":"ht.py","file_name":"ht.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"8812007","text":"import pytest\nimport re\nimport shakedown\n\nimport sdk_cmd as cmd\nimport sdk_install as install\nimport sdk_marathon as marathon\nimport sdk_spin as spin\n\nfrom tests.config import (\n PACKAGE_NAME,\n DEFAULT_TASK_COUNT\n)\n\n\ndef setup_module(module):\n install.uninstall(PACKAGE_NAME)\n\n\ndef teardown_module(module):\n install.uninstall(PACKAGE_NAME)\n\n\n@pytest.mark.sanity\n@pytest.mark.upgrade\ndef test_upgrade_downgrade():\n # Ensure both Universe and the test repo exist.\n # In particular, the Framework Test Suite only runs packages from Universe;\n # it doesn't add a test repo like the PR jobs.\n if len(shakedown.get_package_repos()['repositories']) != 2:\n print('No test repo found. 
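In the `ht.py` record above, "algorithm 2" splits the translated text into 30-character pieces but appends the tail as `result_text[now:-1]`, which drops the final character. A corrected sketch of the same fixed-width split:

```python
def split_fixed(text, width=30):
    # Slicing to the end (no -1) keeps the last character of the tail.
    return [text[i:i + width] for i in range(0, len(text), width)]

print(split_fixed("abcdefgh", 3))  # ['abc', 'def', 'gh'] - tail intact
```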
Skipping test_upgrade_downgrade')\n return\n\n test_repo_name, test_repo_url = get_test_repo_info()\n test_version = get_pkg_version()\n print('Found test version: {}'.format(test_version))\n remove_repo(test_repo_name, test_version)\n master_version = get_pkg_version()\n print('Found master version: {}'.format(master_version))\n\n print('Installing master version')\n install.install(PACKAGE_NAME, DEFAULT_TASK_COUNT, package_version=master_version)\n\n print('Upgrading to test version')\n marathon.destroy_app(PACKAGE_NAME)\n add_repo(test_repo_name, test_repo_url, prev_version=master_version)\n install.install(PACKAGE_NAME, DEFAULT_TASK_COUNT, package_version=test_version)\n\n print('Downgrading to master version')\n marathon.destroy_app(PACKAGE_NAME)\n install.install(PACKAGE_NAME, DEFAULT_TASK_COUNT, package_version=master_version)\n\n\ndef get_test_repo_info():\n repos = shakedown.get_package_repos()\n test_repo = repos['repositories'][0]\n return test_repo['name'], test_repo['uri']\n\n\ndef get_pkg_version():\n pkg_description = cmd.run_cli('package describe {}'.format(PACKAGE_NAME))\n regex = r'\"version\": \"(\\S+)\"'\n match = re.search(regex, pkg_description)\n return match.group(1)\n\n\ndef add_repo(repo_name, repo_url, prev_version):\n assert shakedown.add_package_repo(\n repo_name,\n repo_url,\n 0)\n # Make sure the new repo packages are available\n new_default_version_available(prev_version)\n\n\ndef new_default_version_available(prev_version):\n spin.time_wait_noisy(lambda: get_pkg_version() != prev_version)\n\n\ndef remove_repo(repo_name, prev_version):\n assert shakedown.remove_package_repo(repo_name)\n new_default_version_available(prev_version)\n","sub_path":"frameworks/helloworld/tests/test_upgrade.py","file_name":"test_upgrade.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"53902215","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 15 17:35:01 2020\n\n@author: abc73_000\n\"\"\"\nfrom pathlib import Path\nimport random\nimport pandas as pd \n\ndef extractbinder(dataset):\n binderData = dataset[dataset['Status']==1].iloc[:,3:12].values.tolist()\n return binderData\n\ndef createRandomPeptide(BinderData,NumberData, withBinder ):\n aa_list = ['A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V']\n # aa_list_new= np.array(aa_list)\n total_list = []\n if withBinder:\n total_list = BinderData\n total_list =[random.sample(aa_list,9) for i in range(NumberData) if random.sample(aa_list,9) not in total_list]\n else:\n \n total_list =[random.sample(aa_list,9) for i in range(NumberData) if random.sample(aa_list,9) not in total_list]\n\n return total_list\n\nallele = \"DLA88-50801\"\noutput = open(Path(\"G:\\\\MAC_Research_Data\\\\MHC_ANN\\\\Datasource\\\\50801\\\\DLA-88-50801_peptidesforModel.csv\"),'a')\n\ndataset = pd.read_csv('G:/MAC_Research_Data/MHC_ANN/Datasource/50801/DLA88-50801_peptides.csv')\nbinderData = extractbinder(dataset)\nrandomPeptide=createRandomPeptide(binderData,NumberData=300, withBinder= True)\n\noutput.write(\"Allele,Peptide,Status,Position1,Position2,Position3,Position4,Position5,Position6,Position7,Position8,Position9\\n\")\n\nline =0\nfor i in range(len(binderData)):\n totaleachAA=\"\"\n line+=1 \n for j in range(len(binderData[i])):\n eachAA = binderData[i][j]\n totaleachAA+=eachAA\n totoalFinalAA = \",\".join(totaleachAA)\n output.write(allele+\",\"+\"Binder\"+str(line)+\",\"+\"1,\"+totoalFinalAA+\"\\n\") \n 
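`get_pkg_version` above pulls the version out of `package describe` output with a regex. A quick standalone check of that pattern (the JSON snippet is a stand-in for real CLI output):

```python
import re

pkg_description = '{ "name": "hello-world", "version": "2.0.1-1.8.0" }'
match = re.search(r'"version": "(\S+)"', pkg_description)
print(match.group(1))  # 2.0.1-1.8.0
```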
\n\n#output.write(\"Allele,Peptide,Status,Position1,Position2,Position3,Position4,Position5,Position6,Position7,Position8,Position9\\n\")\nfor i in range(len(randomPeptide)):\n totaleachAA=\"\"\n line+=1\n for j in range(len(randomPeptide[i])):\n eachAA = randomPeptide[i][j]\n totaleachAA+=eachAA\n totoalFinalAA = \",\".join(totaleachAA)\n output.write(allele+\",\"+\"NonBinder\"+str(line)+\",\"+\"0,\"+totoalFinalAA+\"\\n\") \n\noutput.close()\n\nworkdir = Path(\"G:/MAC_Research_Data/MHC_ANN/Datasource/simulateData/\")\ncreateRandomPeptide(workdir/\"Random100TwithoutBinder.txt\", \\\n workdir/\"100T_DLA88-03401_randomPeptideforFlurry.txt\", \n Allele )","sub_path":"createInputForMyModelwithOrigianlformat.py","file_name":"createInputForMyModelwithOrigianlformat.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"103427004","text":"import PySimpleGUI as sg\nfrom tablero1 import Tablero1\nfrom tablero2 import Tablero2\nfrom tablero3 import Tablero3\nclass Partida():\n def CrearPartida(self):\n interfaz=[\n [sg.Text('Apodo/Nombre')],\n [sg.InputText(size=(15,1))],\n [sg.Button('Tablero 1')],\n [sg.Button('Tablero 2')],\n [sg.Button('Tablero 3')],\n [sg.Text('Casilla verde: Aumenta un 50% el puntaje de la ficha')],\n [sg.Text('Casilla roja: Disminuye un 50% el puntaje de la ficha')],\n [sg.Text('Casilla blanca: neutra')],\n [sg.Button('Volver')]\n ]\n window=sg.Window('Partida',interfaz)\n while True:\n event,values=window.read()\n if event=='Tablero 1':\n window.close()\n tab=Tablero1()\n tab.crearTablero()\n if event=='Tablero 2':\n window.close()\n tab=Tablero2()\n tab.crearTablero()\n if event=='Tablero 3':\n window.close()\n tab=Tablero3()\n tab.crearTablero()\n","sub_path":"juego/partida.py","file_name":"partida.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"572372774","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nimport pymysql.cursors\nfrom scrapy.selector import Selector\nfrom scrapy.http import HtmlResponse\ni = 0\nj = 0\nlimit = 6000\n#numbers = open(\"D:\\\\Python\\tutorial\\number.txt\",'r')\n#i = numbers.read\n#numbers.close\n#numbers = open(\"D:\\\\Python\\tutorial\\chapter.txt\",'r')\n#j = numbers.read\n#numbers.close\n\nclass AliceSpider(scrapy.Spider):\n \n name = \"alice\"\n def start_requests(self):\n urls = [\n 'https://vnexpress.net/tin-tuc/thoi-su',\n 'https://vnexpress.net/tin-tuc/the-gioi',\n 'https://kinhdoanh.vnexpress.net/',\n 'https://giaitri.vnexpress.net/',\n 'https://thethao.vnexpress.net',\n 'https://vnexpress.net/tin-tuc/phap-luat',\n 'https://vnexpress.net/tin-tuc/giao-duc',\n 'https://suckhoe.vnexpress.net',\n 'https://doisong.vnexpress.net/',\n 'https://dulich.vnexpress.net',\n 'https://vnexpress.net/tin-tuc/khoa-hoc',\n 'https://sohoa.vnexpress.net/',\n 'https://vnexpress.net/tin-tuc/oto-xe-may',\n 'https://vnexpress.net/tin-tuc/cong-dong',\n 'https://vnexpress.net/tin-tuc/tam-su',\n 'https://vnexpress.net/tin-tuc/cuoi',\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\t\n global limit\n if i < limit:\n savefile = \"savelinks.txt\"\n updatefile = 'update.txt'\n listofVB = response.css('h3.title_news a.icon_commend::attr(href)').extract()\n opensavefile = open(savefile,'w+',encoding = 'utf-8',errors ='replace')\n for link in listofVB:\n yield response.follow(link, callback = self.parse_VB)\n 
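`createRandomPeptide` above calls `random.sample` twice per comprehension step, so the `not in total_list` test inspects a different peptide than the one it appends, and nothing prevents duplicates among the generated peptides themselves. A corrected sketch with a single draw per iteration (the function name and interface are mine, not the script's):

```python
import random

AA = list("ARNDCQEGHILKMFPSTWYV")  # the 20 standard amino acids

def random_peptides(n, length=9, exclude=()):
    seen = {tuple(p) for p in exclude}
    out = []
    while len(out) < n:
        pep = random.sample(AA, length)  # one draw, tested and kept together
        if tuple(pep) not in seen:
            seen.add(tuple(pep))
            out.append(pep)
    return out

print(len(random_peptides(5)))  # 5 unique 9-mers
```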
opensavefile.write(link)\n if(i == 1):\n openupdatefile = open(updatefile,'w',encoding = 'utf-8', errors = 'replace')\n openupdatefile.write(link)\n openupdatefile.close\n global j\n Pages = response.css('div[id*=pagination] a::attr(href)')\n if j==0:\n nextPage = Pages[1].extract()\n \n if j==1:\n nextPage = Pages[3].extract()\n \n if j>1:\n nextPage = Pages[4].extract()\n\n j = j+1\n if \"http\" not in nextPage:\n nextPage = \"https://vnexpress.net\"+nextPage\n yield response.follow(nextPage,callback = self.parse)\n \n def parse_VB(self, response):\n global i\n global limit\n if i < limit:\n #VB_container = response.css('div.cldivContentDocVN').extract()\n #filee = response.css(\"title::text\").extract_first().strip()\n #filename = str(filee)+\".txt\"\n # a = str(i)\n VB_container = response.css(\"article.content_detail\")\n if bool(VB_container)==False:\n VB_container = response.css(\"div.fck_detail\")\n title = response.css(\"h1.title_news_detail::text\").extract_first()\n cleanr = re.compile('<.*?>',flags=re.DOTALL)\n title = re.sub(cleanr,'',title)\n valid_file_name_character = re.compile('[\\\\~#%&*{}/:<>?|\\\"-]')\n title = re.sub(valid_file_name_character,'',title)\n title = title.replace('\\n','')\n title = title.strip()\n filename ='news/'+title+\".txt\"\n f = open(filename,'w',encoding='utf-8')\n VB_saver = str(VB_container.extract_first())\n VB_saver = VB_saver.replace('\\r','')\n VB_saver = VB_saver.replace('\\xa0','')\n VB_saver = VB_saver.replace(' ','')\n VB_saver = VB_saver.strip()\n print(\"saving document\")\n Cat = re.sub(cleanr,'',VB_saver)\n Cat = Cat.strip()\n # n = len(Cat)\n f.write(Cat)\n i = i +1\n f.close\n\n connection = pymysql.connect(host='localhost',\n user='root',\n password='12345678',\n db='Alice',\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n sentences = Cat.split('.')\n numberOfSentence = int(len(sentences))\n\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT `title` FROM `newspaper` WHERE `title`=%s\"\n cursor.execute(sql, (title))\n result = cursor.fetchone()\n if bool(result)==False:\n # Create a new record\n sql = \"INSERT INTO `newspaper` (`title`, `content`,`sentences`) VALUES (%s, %s, %s)\"\n cursor.execute(sql, (title,Cat,numberOfSentence))\n\n # connection is not autocommit by default. 
So you must commit to save\n # your changes.\n connection.commit()\n\n # with connection.cursor() as cursor:\n # # Read a single record\n \n \n # result = cursor.fetchone()\n # print(result)\n finally:\n connection.close()\n # numbers = open(\"number.txt\",'w')\n # numbers.write(i)\n # numbers.close\n # numbers = open(\"chapter.txt\",'w')\n # numbers.write(j)\n # numbers.close \n \n\n ","sub_path":"tutorial/spiders/Alice.py","file_name":"Alice.py","file_ext":"py","file_size_in_byte":5549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"377926877","text":"import airsim # pip install airsim\nimport numpy as np\nimport time\nfrom datetime import datetime\nfrom AlgorithmsSensors.AlgorithmSensor import AlgorithmSensor\nfrom Detect.DetectionData import *\nfrom os import listdir,mkdir\n\nclass InertialSensor(AlgorithmSensor):\n name = \"InertialSensors Base\"\n\n def __init__(self,detectRoot):\n AlgorithmSensor.__init__(self, detectRoot)\n self.path_file = None\n\n def detect(self):\n data = self.getData()\n self.detectData.updateData(myPosition=tuple(data['position'].values()))\n\n def getData(self):\n dict_data = {}\n gps_location = self.client.simGetGroundTruthEnvironment().geo_point#self.client.getGpsLocation()\n position = self.client.simGetGroundTruthKinematics().position\n orientation = self.client.simGetGroundTruthKinematics().orientation\n velocity = self.client.simGetGroundTruthKinematics().linear_velocity\n\n dict_data['gps'] = vars(gps_location)\n dict_data['position'] = vars(position)\n dict_data['orientation'] = vars(orientation)\n dict_data['velocity'] = vars(velocity)\n dict_data['time'] = str(datetime.now().timestamp())\n return dict_data\n\n def getStatus(self):\n return\n\n def getDetectData(self):\n return self.detectData\n\nclass InertialSensorPrint(InertialSensor):\n def detect(self):\n data = self.getData()\n self.detectData.updateData(myPosition=tuple(data['position'].values()))\n self.saveData(str(data), file_name='log_drone')\n\n def saveData(self, data, path='../data/logs_voos/', file_name='log_drone'):\n # get data from lidar\n if not self.path_file:\n now_str = datetime.now().strftime('%Y-%m-%d_%H_%M')\n self.path_file = f'{path}{file_name}_{now_str}.txt'\n # save data\n file = open(self.path_file, 'a')\n str_data = str(data).replace(\"'\", '\"') + \",\\n\"\n file.write(str_data)\n file.close()\n","sub_path":"AlgorithmsSensors/IMU/InertialSensors.py","file_name":"InertialSensors.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"538683128","text":"# Sample Python3 App for BH1750FVI I2C Digital Light Intensity Sensor\n# Sites I used...\n# http://www.raspberrypi-spy.co.uk/2015/03/bh1750fvi-i2c-digital-light-intensity-sensor/\n# http://www.raspberrypi-spy.co.uk/2014/11/enabling-the-i2c-interface-on-the-raspberry-pi/\n#\n# +----- +---------------+------+\n# |Module| Desc\tGPIO | |\n# |PCB | Header |Pins |\n# +----- +---------------+------+\n# |GND \t | Ground\t |P1-06 |\n# |ADD\t | Address select|P1-06 |\n# |SDA\t | I2C SDA\t |P1-03 |\n# |SCL\t | I2C SCL\t |P1-05 |\n# |VCC\t | 3.3V\t |P1-01 |\n# +----- +---------------+------+\n#\n## Kolla vilka i2c enheter som är inkopplade just nu \n## $ sudo i2cdetect -y 1\n## 0 1 2 3 4 5 6 7 8 9 a b c d e f\n##00: -- -- -- -- -- -- -- -- -- -- -- -- -- \n##10: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- \n##20: -- -- -- 23 -- -- -- -- -- -- -- -- -- -- -- -- \n##30: -- -- -- -- 
-- -- -- -- -- -- -- -- -- -- -- -- \n##40: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- \n##50: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- \n##60: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- \n##70: -- -- -- -- -- -- -- -- \n\nimport smbus\nimport time\n \n# Define some constants from the datasheet\n\n# If the ADDR pin is tied to ground, the address used by the device is 0x23.\n# If the ADDR pin is tied to 3.3V the address is 0x5C. \nDEVICE = 0x23 # Default device I2C address\n \nPOWER_DOWN = 0x00 # No active state\nPOWER_ON = 0x01 # Power on\nRESET = 0x07 # Reset data register value\n\n \n# Start measurement at 4lx resolution. Time typically 16ms.\nCONTINUOUS_LOW_RES_MODE = 0x13\n# Start measurement at 1lx resolution. Time typically 120ms\nCONTINUOUS_HIGH_RES_MODE_1 = 0x10\n# Start measurement at 0.5lx resolution. Time typically 120ms\nCONTINUOUS_HIGH_RES_MODE_2 = 0x11\n# Start measurement at 1lx resolution. Time typically 120ms\n# Device is automatically set to Power Down after measurement.\nONE_TIME_HIGH_RES_MODE_1 = 0x20\n# Start measurement at 0.5lx resolution. Time typically 120ms\n# Device is automatically set to Power Down after measurement.\nONE_TIME_HIGH_RES_MODE_2 = 0x21\n# Start measurement at 1lx resolution. Time typically 120ms\n# Device is automatically set to Power Down after measurement.\nONE_TIME_LOW_RES_MODE = 0x23\n \n#bus = smbus.SMBus(0) # Rev 1 Pi uses 0\nbus = smbus.SMBus(1) # Rev 2 Pi uses 1\n \ndef convertToNumber(data):\n    # Simple function to convert 2 bytes of data\n    # into a decimal number\n    return ((data[1] + (256 * data[0])) / 1.2)\n \ndef readLight(addr=DEVICE):\n    data = bus.read_i2c_block_data(addr,ONE_TIME_HIGH_RES_MODE_1)\n    return convertToNumber(data)\n \ndef main():\n \n    # The while loop keeps taking readings every 200ms until you press CTRL-C\n    while True:\n        print (\"Light Level : \" + str(readLight()) + \" lx\")\n        time.sleep(0.5)\n \nif __name__==\"__main__\":\n    main()\n","sub_path":"diagnos/RPi_LightSensor(BH-1750)/RPi_LightSensor(BH-1750).py","file_name":"RPi_LightSensor(BH-1750).py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"53363288","text":"num = input()\r\ns = list(map(int, input().split()))\r\n# The maximum value is 10^9, so initialize cnt to infinity\r\ncnt = float(\"inf\")\r\n\r\n# Keep dividing each element of s by 2 and count the divisions\r\nfor x in s:\r\n    temp = 0\r\n    while x % 2 == 0:\r\n        temp += 1\r\n        x /= 2\r\n\r\n    # Compare the shift count with cnt and keep the smaller one\r\n    if temp < cnt:\r\n        cnt = temp\r\n\r\nprint(cnt)\r\n","sub_path":"ABS/ABC081B_Shiftonly.py","file_name":"ABC081B_Shiftonly.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"114305260","text":"# This Python file uses the following encoding: utf-8\nimport os, sys\na = int(input('Primeiro Bimestre: '))\nwhile a > 10:\n    a = int(input('Você digitou errado! Primeiro bimestre: '))\nb = int(input('Segundo bimestre: '))\nwhile b > 10:\n    b = int(input('Você digitou errado! Segundo bimestre: '))\nc = int(input('Terceiro bimestre: '))\nwhile c > 10:\n    c = int(input('Você digitou errado! terceiro bimestre: '))\nd = int(input('Quarto bimestre: '))\nwhile d > 10:\n    d = int(input('Você digitou errado! Quarto bimestre: '))\nmedia = (a + b + c + d) / 4\nif media >= 7.0:\n    print('Você foi aprovado. Sua média é: {}' . format(media))\nelse:\n    print('Você foi reprovado. Sua média é: {}' . 
format(media))\n# if a <= 10 and b <= 10 and c <= 10 and d <= 10:\n\n\n # print('Foi informado algum valor errado')\n\n# a = int(input('Entre com um valor: '))\n# b = int(input('Entre com um valor: '))\n# resto_a = a % 2\n# resto_b = b % 2\n# if resto_a == 0 or not resto_b > 0:\n# print('Foi digitado um número é par!')\n# else:\n# print('nenhum Número par foi digitado! !')\n\n\n# a = int(input('Primeiro valor: '))\n# b = int(input('Segundo Valor: '))\n# c = int(input('Terceiro Valor: '))\n# if a > b and a > c:\n# print('O maior número é:{}' .format(a))\n# elif b > a and b > c:\n# print('O maior número é: {}' .format(b))\n# else:\n# print('O maior número é: {}'. format(c))\n# print('Final do Programa')\n","sub_path":"python_introdution/Aula4.py","file_name":"Aula4.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"93834886","text":"from nodeconductor.core import tasks as core_tasks\n\nfrom .. import models\n\n\nclass TenantCreateErrorTask(core_tasks.ErrorStateTransitionTask):\n\n def execute(self, tenant):\n super(TenantCreateErrorTask, self).execute(tenant)\n # Delete network and subnet if they were not created on backend,\n # mark as erred if they were created\n network = tenant.networks.first()\n subnet = network.subnets.first()\n if subnet.state == models.SubNet.States.CREATION_SCHEDULED:\n subnet.delete()\n else:\n super(TenantCreateErrorTask, self).execute(subnet)\n if network.state == models.Network.States.CREATION_SCHEDULED:\n network.delete()\n else:\n super(TenantCreateErrorTask, self).execute(network)\n\n\nclass TenantCreateSuccessTask(core_tasks.StateTransitionTask):\n\n def execute(self, tenant):\n network = tenant.networks.first()\n subnet = network.subnets.first()\n self.state_transition(network, 'set_ok')\n self.state_transition(subnet, 'set_ok')\n self.state_transition(tenant, 'set_ok')\n return super(TenantCreateSuccessTask, self).execute(tenant)\n\n\nclass PollBackendCheckTask(core_tasks.Task):\n max_retries = 60\n default_retry_delay = 5\n\n @classmethod\n def get_description(cls, instance, backend_check_method, *args, **kwargs):\n return 'Check instance \"%s\" with method \"%s\"' % (instance, backend_check_method)\n\n def get_backend(self, instance):\n return instance.get_backend()\n\n def execute(self, instance, backend_check_method):\n # backend_check_method should return True if object does not exist at backend\n backend = self.get_backend(instance)\n if not getattr(backend, backend_check_method)(instance):\n self.retry()\n return instance\n","sub_path":"src/nodeconductor_openstack/openstack/tasks/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"205446988","text":"import calcbench as cb\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom bs4 import BeautifulSoup\nfrom scipy.spatial.distance import cosine\nfrom IPython.core.display import display, HTML\nimport sklearn\nimport itertools\nfrom tqdm import tqdm_notebook\nfrom matplotlib import colors\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pdb\n\nclass NumberNormalizingVectorizer(TfidfVectorizer):\n def build_tokenizer(self):\n tokenize = super(NumberNormalizingVectorizer, self).build_tokenizer()\n return lambda doc: list(number_normalizer(tokenize(doc)))\n\nclass NumberNormalizingCountVectorizer(CountVectorizer):\n def 
build_tokenizer(self):\n tokenize = super(NumberNormalizingCountVectorizer, self).build_tokenizer()\n return lambda doc: list(number_normalizer(tokenize(doc)))\n\ndef number_normalizer(tokens):\n \"\"\" Map all numeric tokens to a placeholder.\n\n For many applications, tokens that begin with a number are not directly\n useful, but the fact that such a token exists can be relevant. By applying\n this form of dimensionality reduction, some methods may perform better.\n \"\"\"\n\n return (\"#NUMBER\" if token[0].isdigit() else token for token in tokens)\n\nget_period = lambda d: (d['fiscal_year'])\n\ndef period_diffs(document_section, tickers):\n period_column_name = 'fiscal_period'\n get_period = lambda search_result : '{fiscal_year}-{fiscal_period}'.format(**search_result)\n return _diffs(document_section, tickers, period_column_name, get_period)\n\ndef _diffs(document_section, tickers, period_column_name, get_period):\n all_counts = pd.DataFrame()\n for ticker in tqdm_notebook(tickers):\n docs = cb.document_search(company_identifiers=[ticker], document_name=document_section, all_history=True)\n docs = sorted(docs, key=lambda doc : doc.date_reported)\n docs = [{period_column_name : get_period(d), 'contents' : d.get_contents_text(), 'ticker' : ticker} for d in docs]\n docs = pd.DataFrame(data=docs).set_index(['ticker', period_column_name])\n docs = docs.drop_duplicates()\n docs = pd.concat([docs, docs.shift(1).add_prefix('previous_period_')], axis=1)\n docs = docs.assign(distance=docs[1:].apply(lambda row: document_distance(row.contents, row.previous_period_contents), axis=1))\n docs = docs.assign(word_count=count_vectorizer.fit_transform(docs.contents).sum(axis=1))\n docs = docs.drop(['contents', 'previous_period_contents'], axis=1)\n all_counts = pd.concat([all_counts, docs])\n return all_counts\n\n\nvectorizer = NumberNormalizingVectorizer(stop_words='english')\ncount_vectorizer = NumberNormalizingCountVectorizer(stop_words='english')\ndef timestamp_diffs(document_section, tickers):\n return _diffs(document_section, tickers, 'timestamp', lambda search_result: search_result.date_reported)\n\n\ndef document_distance(docA, docB): \n X = vectorizer.fit_transform([docA, docB])\n return cosine(X[0].todense(), X[1].todense())\n\ndef background_gradient(s, m, M, cmap='PuBu', low=0, high=0):\n # from https://stackoverflow.com/questions/38931566/pandas-style-background-gradient-both-rows-and-columns\n rng = M - m\n norm = colors.Normalize(m - (rng * low),\n M + (rng * high))\n normed = norm(s.values)\n normed = [np.nan_to_num(n) for n in normed]\n c = [colors.rgb2hex(x) for x in plt.cm.get_cmap(cmap)(normed)]\n return ['background-color: %s' % color for color in c]\n\ndef highlight_largest_diffs(diffs):\n filled_df = diffs.loc[diffs.sum(axis=1).sort_values(ascending=False).index].round(3)\n return filled_df.style.apply(background_gradient, \n cmap='Reds',\n m=filled_df.min().min(),\n M=filled_df.max().max(), \n low=0,\n high=2.5)\n\nif __name__ == \"__main__\":\n tickers = cb.tickers(index='DJIA') \n document_section = \"Business Description\"\n d = diffs(document_section, tickers)\n highlight_largest_diffs(d)","sub_path":"tf-idf/cb_tf_idf.py","file_name":"cb_tf_idf.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"231510678","text":"N = int(input())\nA = list(map(int ,input().split()))\nif sum(A)%10:\n exit(print(\"No\"))\n\nans = sum(A)//10\nA += A\nst, en = 0, 0\ncurr = 0\nwhile st < 2 * N:\n if 
curr == ans:\n exit(print(\"Yes\"))\n if st == en:\n curr += A[en]\n en += 1\n elif en == 2*N:\n curr -= A[st]\n st += 1\n elif curr < ans:\n curr += A[en]\n en += 1\n elif curr > ans:\n curr -= A[st]\n st += 1\nprint(\"No\")\n","sub_path":"pysol/076.py","file_name":"076.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"468779496","text":"from collections import deque\nn, k=map(int, input().split())\nvisit=[]\nd=[]\nfor i in range(k):\n d.append([])\nfor i in range(n):\n visit.append(list(map(int, input().split())))\n for j in range(n):\n if visit[i][j]!=0:\n d[visit[i][j]-1].append([i, j])\nq=deque()\ns, x, y=map(int, input().split())\nfor i in range(k):\n for j in d[i]:\n q.append([j[0], j[1], 0])\nwhile q:\n q_pop=q.popleft()\n i=q_pop[0]\n j=q_pop[1]\n cost=q_pop[2]\n if cost==s:\n break\n if i-1>=0 and visit[i-1][j]==0:\n q.append([i-1, j, cost+1])\n visit[i-1][j]=visit[i][j]\n if j-1>=0 and visit[i][j-1]==0:\n q.append([i, j-1, cost+1])\n visit[i][j-1]=visit[i][j]\n if i+1 0:\n ftype = \".\" + sp.pop()\n\n r = requests.post(url, files = {\"file\": (\"upl\" + ftype, file)})\n\n servfile = r.json()[\"file\"]\n\n res = api.docs.save(access_token=settings.token, file = servfile, title = file.name)\n\n res = res[0]\n\n\n return \"doc\" + str(res[\"owner_id\"]) + \"_\" + str(res[\"id\"])","sub_path":"vkapi.py","file_name":"vkapi.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"117613226","text":"from django.urls import include, path\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.conf.urls import url\n\nfrom .views import (\n #GuardianCreateView,\n #GuardianDeleteView,\n GuardianDownloadView,\n GuardianVerifyView,\n GuardianUploadView,\n ContestCreateView,\n ContestUpdateView,\n ContestVoteView,\n ContestVoteSuccessView,\n ContestPubkeyView,\n ContestDecryptView,\n ContestOpenView,\n ContestCloseView,\n ContestListView,\n ContestManifestView,\n ContestCandidateListView,\n ContestCandidateCreateView,\n ContestCandidateUpdateView,\n ContestCandidateDeleteView,\n ContestVotersUpdateView,\n ContestVotersDetailView,\n ContestDetailView,\n ContestResultView,\n ContestPublishView,\n EmailVotersView,\n)\n\n\nurlpatterns = [\n #GuardianCreateView.as_url(),\n #GuardianDeleteView.as_url(),\n GuardianDownloadView.as_url(),\n GuardianVerifyView.as_url(),\n GuardianUploadView.as_url(),\n ContestCreateView.as_url(),\n ContestUpdateView.as_url(),\n ContestVoteView.as_url(),\n ContestVoteSuccessView.as_url(),\n ContestPubkeyView.as_url(),\n ContestDecryptView.as_url(),\n ContestOpenView.as_url(),\n ContestCloseView.as_url(),\n ContestListView.as_url(),\n ContestManifestView.as_url(),\n ContestCandidateListView.as_url(),\n ContestCandidateCreateView.as_url(),\n ContestCandidateUpdateView.as_url(),\n ContestCandidateDeleteView.as_url(),\n ContestVotersUpdateView.as_url(),\n ContestVotersDetailView.as_url(),\n ContestDetailView.as_url(),\n ContestResultView.as_url(),\n ContestPublishView.as_url(),\n EmailVotersView.as_url(),\n]\n","sub_path":"djelectionguard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"193098734","text":"from selenium import webdriver\r\nfrom time import sleep\r\nfrom bs4 import BeautifulSoup as bs\r\nimport pandas as pd\r\nimport numpy as np\r\nimport 
datetime\r\nimport warnings\r\nfrom maks_lib import output_path\r\n\r\nwarnings.simplefilter(action='ignore')\r\n\r\nnow = datetime.datetime.now()\r\n\r\n\r\nclass App:\r\n\r\n def __init__(self, url='https://www.pnc.com/en/personal-banking/borrowing/home-lending/mortgages/VA-Loan.html',\r\n loan_amount=500000, zipcode='10004'):\r\n self.zipcode = zipcode\r\n self.loan_amount = loan_amount\r\n self.driver = webdriver.Firefox()\r\n self.driver.get(url)\r\n sleep(3)\r\n # write log in function\r\n\r\n def log_in(self):\r\n purchase = self.driver.find_element_by_xpath('//*[@id=\"purchaseType\"]/option[1]')\r\n purchase.click()\r\n loan_amount = self.driver.find_element_by_xpath('//*[@id=\"purchaseAmount\"]')\r\n loan_amount.send_keys(self.loan_amount)\r\n zip_code = self.driver.find_element_by_xpath('// *[ @ id = \"zipCode\"]')\r\n zip_code.send_keys(self.zipcode)\r\n getrate_button = self.driver.find_element_by_xpath('//*[@id=\"ratesGet\"]')\r\n getrate_button.click()\r\n return self.loan_amount, self.zipcode\r\n\r\n def data_page(self):\r\n html = self.driver.execute_script(\"return document.documentElement.outerHTML\")\r\n soup = bs(html, 'html.parser')\r\n pro_name = soup.find_all('div', attrs={'div', 'columnHeader grid-19 tablet-grid-20 mobile-grid-19'})\r\n Int_rate = soup.find_all('div', attrs={'div', 'rowItem grid-19 tablet-grid-20 mobile-grid-19 bigPrint'})\r\n product = []\r\n int_rate = []\r\n Term = []\r\n for pro in pro_name:\r\n if pro.getText() is not None:\r\n # print(pro.getText())\r\n product.append(pro.getText().rstrip(\"\\xa0\"))\r\n Term = [\"30\", \"30\", \"30\", \"\"]\r\n # for pro in pro_name:\r\n # if pro.getText() is not None:\r\n # # print(pro.getText())\r\n # Term.append(pro.getText().rstrip(\" Fixed \\xa0 \"))\r\n for int_r in Int_rate:\r\n int_rate.append(int_r.getText())\r\n\r\n return product, int_rate, Term\r\n\r\n def browser_close(self):\r\n self.driver.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n app = App()\r\n price, zipcode = app.log_in()\r\n sleep(5)\r\n product, int_rate, Term = app.data_page()\r\n Int_rate = int_rate[0:4]\r\n Apy_rate = int_rate[4:8]\r\n app.browser_close()\r\n\r\n data = [(now.strftime(\"%m/%d/%Y\"), \"PNC\", product, Int_rate, Apy_rate)]\r\n df = pd.DataFrame(\r\n {'Date': now.strftime(\"%m/%d/%Y\"), \"Bank Name\": 'PNC', 'Product Name': product, 'Interest Rate': Int_rate,\r\n \"APY Rate\": Apy_rate,\r\n \"Loan Amount\": price, \"Zipcode\": zipcode, \"Term\": Term})\r\n df = df.reindex(\r\n columns=[\"Date\", \"Bank Name\", \"Product Name\", \"Loan Amount\", \"Zipcode\", \"Term\", \"\" \"Interest Rate\", \"APY Rate\"])\r\n df = df.iloc[0:2,:]\r\n df.to_csv(output_path + \"PNC_Data_Mortgage_5lakh_VHR.csv\".format(now.strftime(\"%m_%d_%Y\")), index=False)\r\n\r\n df1 = pd.read_csv(output_path + \"PNC_Data_Mortgage_1LAKH_complete.csv\")\r\n df2 = pd.read_csv(output_path + \"PNC_Data_Mortgage_3LAKH_complete.csv\")\r\n df3 = pd.read_csv(output_path+\"PNC_Data_Mortgage_5lakh_FixedR.csv\")\r\n df4 = pd.read_csv(output_path + \"PNC_Data_Mortgage_5lakh_AdjustR.csv\")\r\n df5 = pd.read_csv(output_path + \"PNC_Data_Mortgage_5lakh_FHAR.csv\")\r\n df6 = pd.read_csv(output_path + \"PNC_Data_Mortgage_5lakh_VHR.csv\")\r\n df2 = pd.concat([df1, df2, df3, df4, df5, df6])\r\n\r\n#####################################################################################################################\r\n df2[\"Date\"] = now.strftime(\"%m-%d-%Y\")\r\n df2[\"Bank_Name\"]= \"PNC FINANCIAL SERVICES GROUP INC\"\r\n df2[\"Bank_Product\"]= \"Mortgages\"\r\n 
df2[\"Bank_Product_Type\"] = \"Mortgages\"\r\n df2[\"Bank_Offer_Feature\"] = \"Offline\"\r\n df2[\"Bank_Product_Name\"] = df2[\"Product Name\"]\r\n df2[\"Product_Term\"] = df2[\"Term\"]\r\n df2[\"Balance\"] = np.NAN\r\n df2[\"Product_Interest\"] = df2[\"Interest Rate\"]\r\n df2[\"Product_Apy\"] = np.NAN\r\n df2[\"Mortgage_Down_Payment\"] = \"20%\"\r\n df2[\"Mortgage_Loan\"] = df2[\"Loan Amount\"]\r\n df2[\"Min_Credit_Score_Mortagage\"] = \"720+\"\r\n df2[\"Mortgage_Apr\"] = df2[\"APY Rate\"]\r\n df2 = df2.reindex(columns=[\"Date\", \"Bank_Name\",\"Bank_Product\", \"Bank_Product_Type\", \"Bank_Offer_Feature\", \"Bank_Product_Name\", \"Product_Term\", \"Balance\",\"Product_Interest\",\"Product_Apy\",\"Mortgage_Down_Payment\",\"Mortgage_Loan\",\"Min_Credit_Score_Mortagage\", \"Mortgage_Apr\"])\r\n for i in range(len(df2.index)):\r\n df2[\"Bank_Product_Name\"].iloc[i]=str(df2[\"Bank_Product_Name\"].iloc[i].replace(\"Loan Details\", \"\"))\r\n df2.to_csv(output_path + \"Consolidate_PNC_Data_Mortgage_{}.csv\".format(now.strftime(\"%m_%d_%Y\")), index=False)\r\n\r\n","sub_path":"scripts/pncmortgage_5lakh_VHR.py","file_name":"pncmortgage_5lakh_VHR.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"568316953","text":"\n'''\nThe LeagueTable class tracks the score of each player in a league. After each game, the player records their score with the record_result function. \n\nThe player's rank in the league is calculated using the following logic:\n\nThe player with the highest score is ranked first (rank 1). The player with the lowest score is ranked last.\nIf two players are tied on score, then the player who has played the fewest games is ranked higher.\nIf two players are tied on score and number of games played, then the player who was first in the list of players is ranked higher.\nImplement the player_rank function that returns the player at the given rank.\n\nFor example\n\n\t\ttable = LeagueTable(['Mike', 'Chris', 'Arnold'])\n\t\ttable.record_result('Mike', 2)\n\t\ttable.record_result('Mike', 3)\n\t\ttable.record_result('Arnold', 5)\n\t\ttable.record_result('Chris', 5)\n\t\tprint(table.player_rank(1))\n\nAll players have the same score. However, Arnold and Chris have played fewer games than Mike, and as Chris is before Arnold in the list of players, he is ranked first. 
Therefore, the code above should display \"Chris\".\n\n'''\n\nfrom collections import Counter\nfrom collections import OrderedDict\n\nclass LeagueTable:\n def __init__(self, players):\n self.standings = OrderedDict([(player, Counter()) for player in players])\n \n def record_result(self, player, score):\n self.standings[player]['games_played'] += 1\n self.standings[player]['score'] += score\n \n def player_rank(self, rank):\n \ta = list(map(dict,self.standings.values()))\n \t# print(sorted(a,key = lambda x: (-x['score'],x['games_played'])))\n \t# print(sorted(a,key = lambda x:x['games_played']))\n \t# print(self.standings.keys())\n \t# print(self.standings.values())\n \tkey1 = self.standings.keys()\n \tvalue1 = self.standings.values()\n \tdic1 = self.standings\n \t# c = list(key1).index('Mike')\n \t# print(c)\n \tb = sorted(key1, key = lambda x: (-dic1[x]['score'],dic1[x]['games_played'],list(key1).index(x)) )\n \t# print('b',b)\n \treturn b[rank - 1]\n\n\n \t# sorted(self.standings,key=lambda x:x[0])\n\n\n \ntable = LeagueTable(['Mike', 'Chris', 'Arnold'])\ntable.record_result('Mike', 2)\ntable.record_result('Mike', 3)\ntable.record_result('Arnold', 5)\ntable.record_result('Chris', 5)\nprint(table.player_rank(1))","sub_path":"LeagueTable.py","file_name":"LeagueTable.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"345734681","text":"#!/usr/bin/python\n# coding: utf-8\n\"\"\"\nA diamond shaped graph example.\n\n A\n / \\\nB C\n \\ /\n D\n\"\"\"\n\nfrom puliclient import *\n\nif __name__ == '__main__':\n \n args = { \"cmd\":\"sleep 10\", \"start\":1, \"end\":2, \"packetSize\":1 }\n tags = { \"prod\":\"test\", \"shot\":\"test\" }\n decomposer = \"puliclient.contrib.generic.GenericDecomposer\"\n\n # When creating a graph without a root task or taskgroup, a default taskgroup is created with the name of the graph\n graph = Graph('my job', tags=tags)\n\n #\n # Several ways to create and add nodes\n #\n\n # Create nodes detached from the graph\n task1 = Task(name=\"task1\", arguments=args, tags=tags, decomposer=decomposer)\n task2 = Task(name=\"task2\", arguments=args, tags=tags, decomposer=decomposer)\n task3 = Task(name=\"task3\", arguments=args, tags=tags, decomposer=decomposer)\n\n # and add them in a list\n graph.addList( [task1, task2] )\n\n # Or elem by elem\n graph.add( task3 )\n\n # Or add elem directly in the graph\n anotherTask = graph.addNewTask( name=\"another task\",\n arguments=args,\n tags=tags,\n decomposer=decomposer )\n\n # Create complex dependencies like a diamond shaped graph \n # NB: default end status is [DONE]\n graph.addEdges( [\n (task1, task2),\n (task1, task3),\n (task2, anotherTask),\n (task3, anotherTask)\n ] )\n\n # graph.submit(\"pulitest\", 8004)\n graph.execute()\n\n# PREVIOUS METHOD (still valid)\n\t# task1 = Task(name=\"task1\", arguments=args, decomposer=decomposer)\n\t# task2 = Task(name=\"task2\", arguments=args, decomposer=decomposer)\n\t# task3 = Task(name=\"task3\", arguments=args, decomposer=decomposer)\n\t# anotherTask = Task(name=\"anotherTask\", arguments=args, decomposer=decomposer)\n\n\t# mainTG = TaskGroup( name=\"my job\" )\n\t# mainTG.addTask(task1)\n\t# mainTG.addTask(task2)\n\t# mainTG.addTask(task3)\n\t# mainTG.addTask(anotherTask)\n\n\t# task2.dependsOn( task1, [DONE] )\n\t# task3.dependsOn( task1, [DONE] )\n\t# anotherTask.dependsOn( task2, [DONE] )\n\t# anotherTask.dependsOn( task3, [DONE] )\n\n\t# graph = Graph('toto', mainTG) # Did you 
know the name of the graph was never used ? :)\n\n\t# graph.submit(\"pulitest\", 8004, )\n\n# SIMPLIFIED GRAPH DUMP\n# {\n# \"tasks\": [\n# {\n# \"tasks\": [\n# 1, \n# 2, \n# 3, \n# 4\n# ], \n# \"name\": \"my job\", \n# \"type\": \"TaskGroup\"\n# }, \n# {\n# \"name\": \"task1\", \n# \"dependencies\": [], \n# \"type\": \"Task\", \n# }, \n# {\n# \"name\": \"task2\", \n# \"dependencies\": [\n# [ 1, [3] ]\n# ], \n# \"type\": \"Task\", \n# }, \n# {\n# \"name\": \"task3\", \n# \"dependencies\": [\n# [ 1, [3] ]\n# ], \n# \"type\": \"Task\", \n# }, \n# {\n# \"name\": \"another task\", \n# \"dependencies\": [\n# [ 2, [3]], \n# [ 3, [3]]\n# ], \n# \"type\": \"Task\", \n# }\n# ], \n# \"name\": \"my job\", \n# \"user\": \"jsa\", \n# \"root\": 0\n# }\n","sub_path":"scripts/examples/postgraphexample3.py","file_name":"postgraphexample3.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"278603610","text":"#Shirong Zheng CSC11300\r\n#Project 1\r\n\r\nimport math\r\nimport turtle\r\nfrom Tkinter import *\r\n\r\ndef entry_fields():\r\n print(\"N-sides:%s\\nR-RadialLength:%s\" %(L1.get(),L2.get()))\r\nmaster=Tk()\r\nLabel(master,text=\"N-sides\").grid(row=0)\r\nLabel(master,text=\"R-RadialLength\").grid(row=1)\r\nL1=Entry(master).grid(row=0,column=1)\r\nL2=Entry(master).grid(row=1,column=1)\r\nButton(master,text='Plot',command=master.quit).grid(row=3,column=0,sticky=W,pady=4)\r\nmaster.mainloop()\r\n\r\ndef draw_equilpie(t,n,r):\r\n polypie(t,n,r)\r\n t.pu()\r\n t.fd(r*2+10)\r\n t.pd()\r\n t.color(\"purple\")\r\n \r\ndef polypie(t,n,r):\r\n angle=360.0/n\r\n for i in range(n):\r\n\t isostri(t,r,angle/2)\r\n\t t.lt(angle)\r\n\t \t \t \r\ndef isostri(t,r,angle):\r\n y=r*math.sin(angle*math.pi/180)\r\n t.rt(angle)\r\n t.fd(r)\r\n t.lt(90+angle) \r\n t.fd(2*y)\r\n t.lt(90+angle)\r\n t.fd(r)\r\n t.lt(180-angle)\r\n t.fillcolor('green')\r\n\r\nbob=turtle.Turtle()\r\nbob.pu()\r\nbob.bk(130)\r\nbob.pd()\r\nbob.color(\"red\")\r\nsize=70\r\ndraw_equilpie(bob,5,size)\r\nsize=80\r\ndraw_equilpie(bob,6,size)\r\n\r\nbob.hideturtle()\r\nturtle.mainloop()\r\n\r\n\r\n\r\n","sub_path":"Project1.py","file_name":"Project1.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"393238844","text":"import imageio\nimport numpy as np \nimport os\nimport csv\nimport time\nimport argparse\nfrom utils.crop import *\nfrom utils.eval_utils import *\nimport tensorflow as tf\nfrom models.face_models import *\nimport Config\n\n\ndef write_csv(writer,\n params,\n source,\n target,\n image_names,\n margins,\n amplifications,\n labels,\n distances,\n labels_cos,\n cosines):\n \"\"\"\n Description\n\n Keyword arguments:\n \"\"\"\n for key, val in labels.items():\n # print('Image: {}'.format(key))\n out_dict = {}\n out_dict['model_name'] = params['model_name']\n out_dict['target_model_name'] = params['target_model_name']\n out_dict['attack_name'] = params['attack_name']\n out_dict['attack_loss'] = params['attack_loss']\n out_dict['source'] = source\n out_dict['target'] = target\n out_dict['match_source'] = source == labels[key][0]\n out_dict['match_target'] = target == labels[key][0]\n out_dict['cos_source'] = source == labels_cos[key][0]\n out_dict['cos_target'] = target == labels_cos[key][0]\n out_dict['image_name'] = image_names[key]\n out_dict['margin'] = margins[key]\n out_dict['amplification'] = amplifications[key]\n for i, v in 
enumerate(labels[key]):\n # print('Top {}: {} = {}'.format(i + 1, labels[key][i], distances[key][i]))\n out_dict['top{}'.format(i + 1)] = labels[key][i]\n out_dict['distance{}'.format(i + 1)] = distances[key][i]\n for i, v in enumerate(labels_cos[key]):\n out_dict['topcos{}'.format(i + 1)] = labels_cos[key][i]\n out_dict['cosine{}'.format(i + 1)] = cosines[key][i]\n\n writer.writerow(out_dict)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', type=str, default=\"0\", help='GPU(s) to run the code on')\n parser.add_argument('--target-model-type', type=str, default=\"small\", help='type of model', choices=['small','large'])\n parser.add_argument('--target-loss-type', type=str, default=\"center\", help='loss function used to train the model',choices=['center','triplet'])\n parser.add_argument('--target-dataset-type', type=str, default='vgg', help='dataset used in training model', choices=['vgg', 'vggsmall', 'casia'])\n parser.add_argument('--model-type', type=str, default=\"small\", help='type of model', choices=['small','large'])\n parser.add_argument('--loss-type', type=str, default=\"center\", help='loss function used to train the model',choices=['center','triplet'])\n parser.add_argument('--dataset-type', type=str, default='vgg', help='dataset used in training model', choices=['vgg', 'vggsmall', 'casia'])\n parser.add_argument('--attack', type=str, default='CW', help='attack type',choices=['PGD', 'CW'])\n parser.add_argument('--norm', type=str, default='2', help='p-norm', choices=['inf', '2'])\n parser.add_argument('--targeted-flag', type=str, default='true', help='targeted (true) or untargeted (false)', choices=['true', 'false'])\n parser.add_argument('--tv-flag', type=str, default='false', help='do not use tv_loss term (false) or use it (true)', choices=['true', 'false'])\n parser.add_argument('--hinge-flag', type=str, default='true', help='hinge loss (true) or target loss (false)', choices=['true', 'false'])\n parser.add_argument('--cos-flag', type=str, default='false', help='use cosine similarity instead of l2 for loss', choices=['true', 'false'])\n parser.add_argument('--margin', type=float, default=6.0, help='needed for determining goodness of transferability')\n parser.add_argument('--amplification', type=float, default=8.0, help='needed for amplifying adversarial examples')\n parser.add_argument('--granularity', type=str, default='normal', help='add more or less margin and amplification intervals', choices=['fine', 'normal', 'coarse', 'coarser', 'coarsest', 'single', 'fine-tuned'])\n parser.add_argument('--mean-loss', type=str, default='embeddingmean', help='old:(embedding) new formulation:(embeddingmean) WIP formulation:(distancemean)', choices=['embeddingmean', 'embedding', 'distancemean'])\n parser.add_argument('--topn', type=int, default=5, help='do top-n evaluation of closest faces')\n parser.add_argument('--batch-size', type=int, default=9, help='batch size for evaluation')\n parser.add_argument('--pair-flag', type=str, default='false', help='optimal source target pairs')\n args = parser.parse_args()\n\n tf_config = Config.set_gpu(args.gpu)\n params = Config.set_parameters(targeted_flag=args.targeted_flag,\n tv_flag=args.tv_flag,\n hinge_flag=args.hinge_flag,\n cos_flag=args.cos_flag,\n model_type=args.model_type,\n loss_type=args.loss_type,\n dataset_type=args.dataset_type,\n target_model=args.target_model_type,\n target_loss=args.target_loss_type,\n target_dataset=args.target_dataset_type,\n attack=args.attack,\n 
norm=args.norm,\n mean_loss=args.mean_loss,\n margin=args.margin,\n amplification=args.amplification,\n granularity=args.granularity,\n batch_size=args.batch_size,\n whitebox_target=True,\n pair_flag=args.pair_flag)\n faces, people = load_images(folder=params['align_dir'],\n params=params)\n\n means, _ = compute_embeddings(faces=faces,\n people=people,\n tf_config=tf_config,\n params=params)\n csvfile = open(os.path.join(Config.ROOT, 'fileio', 'whitebox_eval_{}-{}-{}.csv'.format(params['model_name'],\n params['target_model_name'], params['attack_name'])), 'w', newline='')\n fieldnames = ['model_name', 'target_model_name', 'attack_name', 'attack_loss', 'source', 'match_source',\n 'match_target', 'target', 'cos_source', 'cos_target', 'image_name', 'margin', 'amplification']\n for i in range(1, args.topn + 1):\n fieldnames.append('top{}'.format(i))\n fieldnames.append('distance{}'.format(i))\n fieldnames.append('topcos{}'.format(i))\n fieldnames.append('cosine{}'.format(i))\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n faces, image_names, margins, amplifications, people_db = load_adv_images(params=params)\n _, embeddings = compute_embeddings(faces=faces,\n people=people_db,\n tf_config=tf_config,\n params=params)\n for person in people_db:\n labels, distances, labels_cos, cosines = whitebox_eval(embedding_means=means,\n embeddings=embeddings[person],\n params=params,\n topn=args.topn)\n split = person.split(':')\n source = split[0]\n target = split[1]\n write_csv(writer=writer,\n params=params,\n source=source,\n target=target,\n image_names=image_names,\n margins=margins,\n amplifications=amplifications,\n labels=labels,\n distances=distances,\n labels_cos=labels_cos,\n cosines=cosines)\n","sub_path":"src/whitebox_eval.py","file_name":"whitebox_eval.py","file_ext":"py","file_size_in_byte":8040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"318593886","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 26 20:26:34 2020\n\n@author: viktor\n\"\"\"\n\na=int(input(\" \"))\nb=int(input(\" \"))\nc=int(input(\" \"))\nd=min(a,b,c)\ne=max(a,b,c)\nf=(a+b+c)-d-e\nprint(\" \", d,f,e)","sub_path":"sesion_2/python_ej12.py","file_name":"python_ej12.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"83191917","text":"import configparser\nimport discord\nfrom discord.ext import commands\nimport json\nimport os.path\nimport logging\n\nparser = configparser.ConfigParser()\nparser.read('userconfig.ini')\n\ndescription = \"\"\"A bot written by Kuenaimaku in to play music in the server.\"\"\"\nclient = discord.Client()\n\ndiscord_logger = logging.getLogger('discord')\ndiscord_logger.setLevel(logging.CRITICAL)\nlog = logging.getLogger()\nlog.setLevel(logging.INFO)\nhandler = logging.FileHandler(filename='djSona.log', encoding='utf-8', mode='w')\nlog.addHandler(handler)\n\nbot = commands.Bot(command_prefix=[\"?\",\"$\"], description=description)\n\nfilepath = 'config/serverlist.json'\nif not os.path.exists(os.path.dirname(filepath)):\n os.makedirs(os.path.dirname(filepath))\n temp = {\"shards\": [{\"id\":\"0\",\"name\":\"0\",\"role\":\"0\",\"playlistId\":\"0\"}]}\n with open(filepath, 'w') as data_file:\n json.dump(temp, data_file)\n data_file.close()\n\nwith open('config/serverlist.json', 'r') as data_file:\n serverlist = json.load(data_file)\ndata_file.close()\n\n\n@bot.event\nasync def 
on_ready():\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n print('------')\n await bot.change_presence(game=discord.Game(name='beautiful music'))\n\ninitial_extensions = [\n 'cogs.playlist',\n 'cogs.cleverbot'\n]\n\nif __name__ == '__main__':\n for extension in initial_extensions:\n try:\n bot.load_extension(extension)\n except Exception as e:\n print('Failed to load extension {}\\n{}: {}'.format(extension, type(e).__name__, e))\n bot.run(parser.get('dj_sona', 'token'))\n","sub_path":"djSona.py","file_name":"djSona.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"259730288","text":"from database import Database\nimport json\nimport time\n\n\n#importing the test data\nbuild_file = open('data/graph_build.json')\nbuild = json.load(build_file)\nedit_file = open('data/graph_edits.json')\nedits = json.load(edit_file)\nimg_extract_file = open('data/img_extract.json')\nextract = json.load(img_extract_file)\nexpected_file = open('data/expected_status.json')\nexpected = json.load(expected_file)\n\n#Run the example\n\nstart = time.time()\nstatus = {}\nif len(build) > 0:\n # Build graph\n db = Database(build[0][0])\n if len(build) > 1:\n \tdb.add_nodes(build[1:])\n # Add extract\n db.add_extract(extract)\n # Graph edits\n db.add_nodes(edits)\n # Update status\n status = db.get_extract_status()\nend= time.time()\nprint('Graph updated in %f s'%(end-start))\nprint(status)\nprint(\"Is the result correct\",expected==status)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"115942945","text":"from PIL import Image, ImageDraw\nimport cv2\nfrom math import trunc\nimport argparse\n\noutput_x = 2000\noutput_y = 200\n\ndef parse_video(file):\n\n # OpenCV2 video object.\n video = cv2.VideoCapture(file)\n\n # Open CV FPS + Frame Count codes.\n CV_CAP_PROP_FPS = 5\n CV_CAP_PROP_FRAME_COUNT = 7\n\n frame_rate = round(video.get(CV_CAP_PROP_FPS))\n total_frames = round(video.get(CV_CAP_PROP_FRAME_COUNT))\n running_time = round(total_frames / frame_rate)\n\n # Format running time into HH:MM:SS\n m, s = divmod(running_time, 60)\n h, m = divmod(m, 60)\n\n frames_per_pixel = int(total_frames / output_x)\n total_snaps = int(total_frames / frames_per_pixel)\n\n print('Running time: {:d}:{:d}:{:d}'.format(trunc(h), trunc(m), trunc(s)))\n print('Framerate: {:d}'.format(frame_rate))\n print('Total Frames: {:d}'.format(total_frames))\n print('-----------------------------------')\n print('Taking a snapshot every {:d} frames for a total of {:d} snapshots'.format(frames_per_pixel, total_snaps))\n\n image_list = []\n\n # Create list of all frames that need to be grabbed.\n frame_list = create_frame_list(total_frames, frames_per_pixel)\n last_frame = frame_list[-1]\n\n while video.isOpened():\n for frame in frame_list:\n # Set frame to next in frame list and break if not valid frame.\n video.set(1, frame)\n ret, frame = video.read()\n if ret is False:\n break\n\n # Create 150 x 150 thumbnail of frame so we don't use all the memory.\n cv2_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n pil_image = Image.fromarray(cv2_image)\n pil_image = pil_image.convert('RGB')\n size = 150, 150\n pil_image.thumbnail(size, Image.ANTIALIAS)\n\n image_list.append(pil_image)\n\n video.release()\n\n print('Total of {:d} snapshots taken'.format(len(image_list)))\n return image_list\n\n\ndef 
create_frame_list(total_frames, frames_per_pixel):\n # Save all numbers that have no remainder from when divided by frames_per_pixel.\n frame_list = []\n for x in range(total_frames):\n if x % frames_per_pixel == 0:\n frame_list.append(x)\n return frame_list\n\n\ndef get_average_colour(img):\n\n width, height = img.size\n r_average = 0\n g_average = 0\n b_average = 0\n\n for x in range(0, width):\n for y in range(0, height):\n r, g, b = img.getpixel((x, y))\n r_average = (r + r_average) / 2\n g_average = (g + g_average) / 2\n b_average = (b + b_average) / 2\n\n return (int(r_average), int(g_average), int(b_average))\n\n\ndef draw_line(img, colour, x):\n draw = ImageDraw.Draw(img)\n draw.line((x, 500, x, 0), colour)\n\n\ndef main(args):\n input_video = args.video\n split_list = input_video.split('\\\\')[-1]\n\n print('Starting on {}'.format(split_list))\n\n image_list = parse_video(input_video)\n colour_list = []\n\n im = Image.new('RGB', (output_x, output_y), (255, 255, 255, 0))\n\n print('Processing images')\n for image in image_list:\n colour_list.append(get_average_colour(image))\n\n print('Painting lines')\n image_x = 0\n for colour in colour_list:\n draw_line(im, colour, image_x)\n image_x += 1\n\n print('Saving image')\n print('-----------------------------------')\n im.save(split_list + \".png\", \"PNG\")\n\n\nif __name__ == \"__main__\":\n\n parse = argparse.ArgumentParser()\n parse.add_argument('-v', '--video', help='Video file path.')\n args = parse.parse_args()\n\n main(args)\n","sub_path":"barcoder.py","file_name":"barcoder.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"489281426","text":"from flask import Flask\r\n\r\nimport views\r\nfrom database import Database\r\nfrom movie import Movie\r\n\r\n\r\ndef create_app():\r\n app = Flask(__name__)\r\n app.config.from_object(\"settings\")\r\n\r\n app.add_url_rule(\"/\", view_func=views.home_page)\r\n app.add_url_rule(\"/movies\", view_func=views.movies_page, methods=[\"GET\", \"POST\"])\r\n app.add_url_rule(\"/movies/\", view_func=views.movie_page)\r\n app.add_url_rule(\"/movies//edit\",view_func=views.movie_edit_page,methods=[\"GET\", \"POST\"],)\r\n app.add_url_rule(\"/new-movie\", view_func=views.movie_add_page, methods=[\"GET\", \"POST\"])\r\n\r\n db = Database()\r\n app.config[\"db\"] = db\r\n\r\n return app\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = create_app()\r\n port = app.config.get(\"PORT\", 5000)\r\n app.run(host=\"0.0.0.0\", port=port)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"88199169","text":"import sys\nimport subprocess\n\nhelp_text = \"\"\"shortlinks: developer script.\n=============================\nUsage: python [OPTION]\nOptions:\n init - project initialize\n clean - remove distribution files\n sdist - make source distribution\n upload - upload distribution on PyPi\n dist - make full distribution and upload on PyPi\n help - show this help\n \"\"\"\n\n\ndef show_help():\n print(help_text)\n\n\ndef init():\n subprocess.call('pip install -r requirements.txt', shell=True)\n\n\ndef clean():\n subprocess.call('rm -rf dist', shell=True)\n subprocess.call('rm -rf *.egg-info', shell=True)\n\n\ndef sdist():\n subprocess.call('python setup.py sdist', shell=True)\n\n\ndef upload():\n subprocess.call('python setup.py upload', shell=True)\n\n\ndef dist():\n sdist()\n 
upload()\n\n\ndef command(argv):\n parametr = {\n 'init': init,\n 'clean': clean,\n 'upload': upload,\n 'sdist': sdist,\n 'dist': dist,\n 'help': show_help,\n }\n\n if str(argv) not in parametr:\n print('manage.py: Unrecognized arguments \"%s\"' % argv)\n print('Try \"%s\" for more information.' % \"python manage.py help\")\n else:\n parametr[argv]()\n\n\nif __name__ == \"__main__\":\n if sys.argv.__len__() <= 1:\n command('help')\n elif sys.argv.__len__() > 2:\n print(\"manage.py: Too many arguments\")\n print('Try \"%s\" for more information.' % \"python manage.py help\")\n else:\n command(sys.argv[1])\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"128805975","text":"# GEO1000 - Assignment 3\r\n# Authors: Simon Pena Pereira & Pratyush Kumar\r\n# Studentnumbers: 5391210 & 5359252\r\n\r\ndef read_grid(filenm):\r\n f = open(filenm, \"r\")\r\n data = [f.read()] \r\n coord = []\r\n \r\n for i in data:\r\n for x in i:\r\n if str.isdigit(x):\r\n coord.append(x)\r\n tuple_coord = [(int(coord[i]), int(coord[i+1])) for i in range(0,len(coord),2)] \r\n datastr = [[tuple_coord[i],tuple_coord[i+1],tuple_coord[i+2],tuple_coord[i+3],tuple_coord[i+4]] for i in range(0,len(tuple_coord),5)]\r\n return datastr\r\n\r\n\r\ndef visit(table, steps_allowed, path):\r\n treasure = (4,1)\r\n start = (0,0)\r\n path.append(start)\r\n steps_allowed -= 1 \r\n \r\n while steps_allowed > 0:\r\n i = path[-1]\r\n row = i[0]\r\n col = i[1]\r\n x,z = tuple(x for x in (table[row])[col])\r\n path.append((x,z))\r\n steps_allowed -= 1 \r\n\r\n if (x,z) == treasure:\r\n return True\r\n return False\r\n \r\n\r\ndef hunt(filenm, max_steps):\r\n table = read_grid(filenm)\r\n steps_allowed = max_steps\r\n path = []\r\n \r\n result = visit(table, steps_allowed, path)\r\n \r\n N = len(path)\r\n X,Y = (path[-1])\r\n\r\n if result == True:\r\n return f\"The treasure was found at row: {X}, column: {Y}; it took {N} steps to find the treasure.\"\r\n if result == False:\r\n return f\"Could not find a treasure (in {N} steps).\"\r\n \r\n \r\nif __name__ == \"__main__\":\r\n print(hunt('finite.txt', 20))\r\n","sub_path":"hunt.py","file_name":"hunt.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"466859574","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/15 2:34 PM\n# @Author : zhongch4g\n# @Site : \n# @File : 661. Convert BST to Greater Tree.py\n# @Software: IntelliJ IDEA\n\n\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\nclass Solution:\n # @param {TreeNode} root the root of binary tree\n # @return {TreeNode} the new root\n def convertBST(self, root):\n # Write your code here\n self.sum = 0\n self.helper(root)\n return root\n\n def helper(self, root):\n if root is None:\n return\n if root.right:\n self.helper(root.right)\n\n self.sum += root.val\n root.val = self.sum\n if root.left:\n self.helper(root.left)","sub_path":"Lintcode/661. Convert BST to Greater Tree.py","file_name":"661. 
Convert BST to Greater Tree.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"480826863","text":"'''Main window class that the other windows inherit from.\nHere the title, the size and the centering of the window on the screen are set.\nThe createframes function is empty; an inheriting class must override\ncreateframes and set up its widgets there.'''\n\nimport tkinter as tk\nfrom tkinter import messagebox  # popup message\nimport sys\nfrom klient import klient\nfrom konsultant import konsultant\nfrom mailsender import mailsender\nimport smtplib\nfrom server import server\nfrom config import ikona, font10b, font12, mail_regex\nimport re\n\n\nclass Window():\n    def __init__(self, title):\n        '''Set the window title and size.'''\n        self.title = title\n        self.root = tk.Tk()\n        self.root.title(self.title)\n        self.root.option_add('*Dialog.msg.font', 'Arial 12')\n        self.make_menu()\n        self.widgets()\n        self.center(self.root)\n\n        self.root.mainloop()  # main loop\n\n    def przycisk_x(self):\n        warning_window = tk.Toplevel()\n        warning_window.grab_set()\n        warning_window.title(\"Uwaga\")\n        warning_label = tk.Label(warning_window,\n                                 text=(\n                                     \"Czy napewno chcesz wyłączyć program?\"),\n                                 font=font10b)\n        warning_label.pack()\n        ok_butt = tk.Button(warning_window, text='Tak',\n                            width=12, padx=10, font=font12,\n                            command=lambda: sys.exit())\n        ok_butt.pack(side=tk.LEFT, padx=(40, 5), pady=(10, 0))\n        cancel_butt = tk.Button(warning_window,\n                                text='Anuluj', width=12, padx=10,\n                                font=font12,\n                                command=lambda: warning_window.destroy())\n        cancel_butt.pack(side=tk.RIGHT, padx=(5, 40), pady=(10, 0))\n        self.center(warning_window, False)\n\n    def center(self, window, isroot=True):\n        '''Center the window on the screen and set its icon.'''\n        if isroot:\n            window.protocol('WM_DELETE_WINDOW', lambda: self.przycisk_x())\n        # else:\n        #     window.protocol('WM_DELETE_WINDOW', lambda: window.destroy())\n        window.tk.call('wm', 'iconphoto', window._w,\n                       tk.PhotoImage(file=ikona))\n        window.update_idletasks()\n        width = window.winfo_width()\n        height = window.winfo_height()\n        x = (window.winfo_screenwidth() // 2) - (width // 2)\n        y = (window.winfo_screenheight() // 2) - (height // 2)\n        window.geometry('{}x{}+{}+{}'.format(width, height, x, y))\n\n    def make_menu(self):\n        self.the_menu = tk.Menu(self.root, tearoff=0)\n        self.the_menu.add_command(label=\"Wytnij\")\n        self.the_menu.add_command(label=\"Kopiuj\")\n        self.the_menu.add_command(label=\"Wklej\")\n\n    def show_menu(self, e):\n        w = e.widget\n        self.the_menu.entryconfigure(\"Wytnij\",\n                                     command=lambda: w.event_generate(\n                                         \"<<Cut>>\"))\n        self.the_menu.entryconfigure(\"Kopiuj\",\n                                     command=lambda: w.event_generate(\n                                         \"<<Copy>>\"))\n        self.the_menu.entryconfigure(\"Wklej\",\n                                     command=lambda: w.event_generate(\n                                         \"<<Paste>>\"))\n        self.the_menu.tk.call(\"tk_popup\", self.the_menu, e.x_root, e.y_root)\n\n    def ukryj(self, entry, var):\n        '''Enable or disable the address entry field.'''\n        if var.get():\n            entry.config(state='disabled')\n        else:\n            entry.config(state='normal')\n\n    def menu_butt(self):\n        '''Menu button.'''\n        warning_window = tk.Toplevel()\n        warning_window.grab_set()\n        warning_window.title(\"Uwaga\")\n        warning_label = tk.Label(warning_window,\n                                 text=(\n                                     \"Uwaga!\\nPowrót do menu usunie \"\n                                     \"wszystkie zapisane dane.\\n\"\n                                     \"Czy napewno chcesz wrócić do menu?\"),\n                                 fg='red', font=font10b)\n        warning_label.pack()\n        ok_butt = tk.Button(warning_window, text='Tak',\n                            width=12, padx=10, font=font12,\n                            command=lambda: self.root.destroy())\n        
ok_butt.pack(side=tk.LEFT, padx=(40, 5), pady=(10, 0))\n cancel_butt = tk.Button(warning_window, text='Anuluj', width=15,\n font=font12,\n command=lambda: warning_window.destroy())\n cancel_butt.pack(side=tk.RIGHT, padx=(5, 40), pady=(10, 0))\n self.center(warning_window, False)\n\n def wyslij_butt(self):\n klient.stworz_klienta(self.imie_entry.get().strip(),\n self.tel_entry.get().strip(),\n self.sprz_entry.get().strip(),\n self.dost_entry.get().strip(),\n self.cena_entry.get().strip(),\n self.mail_entry.get().strip(),\n self.branza_entry.get().strip(),\n self.pytania_entry.get().strip(),\n self.dodatkowe_entry.get(\"1.0\", tk.END).strip(),\n self.rej_var.get(),\n self.kor_var.get(),\n self.dost_var.get(),\n self.adr_1_entry.get().strip(),\n self.adr_2_entry.get().strip(),\n self.adr_3_entry.get().strip(),\n self.spr_nierozw_var.get()\n )\n error = False\n if klient.imnaz == '':\n messagebox.showinfo('Error', 'Brak imienia i nazwiska')\n error = True\n if klient.tel == '':\n messagebox.showinfo('Error', 'Brak numeru telefonu')\n error = True\n if klient.datasprz == '':\n messagebox.showinfo('Error', 'Brak daty sprzedaży')\n error = True\n if klient.datawys == '':\n messagebox.showinfo('Error', 'Brak daty doręczenia')\n error = True\n if klient.cena_dl == '':\n messagebox.showinfo('Error', 'Brak ceny/dł. zobowiązania')\n error = True\n if klient.mail == '':\n messagebox.showinfo('Error', 'Brak adresu mailowego')\n error = True\n if klient.branza == '':\n messagebox.showinfo('Error', 'Brak branży')\n error = True\n if klient.pytania == '':\n messagebox.showinfo('Error', 'Brak pytań do prawnika')\n error = True\n if klient.dodatkowe == '':\n messagebox.showinfo('Error', 'Brak dodatkowych informacji')\n error = True\n if klient.mail.lower() != 'brak' and\\\n re.match(klient.mail, mail_regex):\n messagebox.showinfo('Error', 'Błędny adres mailowy')\n error = True\n if not error:\n try:\n server.zaloguj()\n if mailsender.wyslij_sprzedazowy():\n if klient.nierozw == 1 or\\\n klient.mail.lower() == 'brak':\n messagebox.showinfo(\n 'Wysłano',\n 'Wysłano maila sprzedażowego.')\n else:\n messagebox.showinfo(\n 'Wysłano',\n 'Wysłano maila sprzedażowego oraz maila z RODO.')\n if konsultant.wybor == 1 and\\\n klient.mail.lower() != 'brak' and\\\n klient.nierozw != 1:\n mailsender.wyslij_rodo()\n server.rozlacz()\n self.root.destroy()\n except smtplib.SMTPRecipientsRefused:\n messagebox.showinfo('Error',\n 'Niepoprawny adres mailowy.')\n","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":8101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"27488241","text":"import pandas as pd\nimport numpy as np\nimport statsmodels.stats.multitest as smm\nimport time\nfrom scipy import stats\nfrom JT_test import JT_test_pd\n\ndef estimate_lfc(count_df,annot_df,pseudocount,ctrl_grp='ND'):\n # Compute approximate lfc after adding pseudocount\n df = count_df + pseudocount\n norm_df = df / df.sum(axis=0)\n\n #Remove sites without features\n norm_df = norm_df[~annot_df['uid'].isna()]\n annot_df = annot_df[~annot_df['uid'].isna()]\n\n #Remove sites which contain more than one feature.\n func = lambda x: x.find(';') == -1\n norm_df = norm_df[annot_df['uid'].map(func)]\n annot_df = annot_df[annot_df['uid'].map(func)]\n\n #Compute Effect sizes (LFC)\n mnorm_df = norm_df.groupby(level=0,axis=1,sort=False).mean()\n lg2_mfc_df = mnorm_df.div(mnorm_df[ctrl_grp],axis=0).apply(np.log2)#Mean log fold change\n\n #Take the median of the log fold 
change across TA sites in the same gene\n    lfc_df = lg2_mfc_df.groupby(by=annot_df['uid'],sort=False).median()\n    \n    return lfc_df\n\ndef norm_data_for_JT(count_df,annot_df):\n    # Normalize and clean data in preparation to pass to JT\n    norm_df = count_df / count_df.sum(axis=0)\n\n    #Remove sites without features\n    norm_df = norm_df[~annot_df['uid'].isna()]\n    annot_df = annot_df[~annot_df['uid'].isna()]\n\n    #Remove sites which contain more than one feature.\n    func = lambda x: x.find(';') == -1\n    norm_df = norm_df[annot_df['uid'].map(func)]\n    annot_df = annot_df[annot_df['uid'].map(func)]\n\n    norm_df = annot_df[['uid']].merge(norm_df,how='inner',left_index=True,right_index=True)\n    norm_df = norm_df.set_index('uid')\n\n    return norm_df\n\ndef summary_data(norm_12h_rm0_df,mmfc_12h_df,treatment):\n    #Runs JT testing, multiple hypothesis correction, and returns summarized data\n    t0 = time.time()\n    df = norm_12h_rm0_df[treatment]\n    df_index = df.index\n    df = df.reset_index(drop=True)\n\n    df['pval'] = JT_test_pd(df,treatment,direction='lessthan',random_smooth=True)\n    df.index = df_index\n\n    p_ls = []\n    name_ls = []\n    for name,g in df.groupby(['uid'],sort=False):\n        name_ls.append(name)\n        Zscore,_ = stats.combine_pvalues(g['pval'],method='stouffer')\n        p_pooled = stats.norm.sf(abs(Zscore))*2\n        p_ls.append(p_pooled)\n    _,p_adj_ls,_,_ = smm.multipletests(p_ls, alpha=0.05, method='fdr_bh')\n    pval = pd.DataFrame({'uid':name_ls,'pval':p_ls,'pval-adj (BH)':p_adj_ls})\n\n    merged_df = mmfc_12h_df[treatment].merge(pval,how='left',left_on='uid',right_on='uid')\n    merged_df.index = merged_df['uid']\n    merged_df = merged_df.drop('uid',axis=1)\n    print('JT test run time:', time.time()-t0)\n    return merged_df\n","sub_path":"scripts/hypersus_helper.py","file_name":"hypersus_helper.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"533154486","text":"\"\"\"\nAuthor: Bruno Luca\nDate: 23-10-2020\nExercise: create a TCP multithreaded server\n\"\"\"\n\nimport socket\nimport threading\n\nip = \"127.0.0.1\"\nport = 7000\n\nconnection_table = {}  # store connected end points\nactive_thread = []  # store running threads\n\nclass ClientThread(threading.Thread):\n    \"\"\"\n    Thread that echoes messages received from a single connected client.\n    \"\"\"\n    def __init__(self,ip,port,connection):  # constructor\n        threading.Thread.__init__(self)\n\n        self.ip_address = ip\n        self.port = port\n        self.connection = connection\n        self.again = 1\n\n\n    def run(self):  # code executed by the thread\n        while self.again:\n            msg = self.connection.recv(4096)\n            msg = msg.decode()\n            print(f\"{self.ip_address},{self.port}>> {msg}\")\n            self.connection.sendall(msg.encode())\n\n            if msg == \"close\":\n                print(f\"{self.connection}>> wants to exit.\")\n                self.again = 0\n\ndef close_thread(t):\n    if t in active_thread:\n        active_thread[active_thread.index(t)].join()\n        active_thread.pop(active_thread.index(t))\n    \n\ndef server():\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n    s.bind((ip,port))\n\n    while True:\n        # new connection\n        s.listen()\n        print(\"\\n\\tSERVER IS LISTENING...\\n\")\n        conn, add = s.accept()\n        print(\"\\n\\tNEW USER CONNECTED!!\\n\")\n        t = ClientThread(add[0],add[1],conn)\n        t.start()\n\n        # updating tables\n        active_thread.append(t)\n        connection_table[conn] = add\n\nif __name__ == \"__main__\":\n    server()\n","sub_path":"tpsit_V/python/es003_tcp_multithread/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"81573736","text":"from random import randint\n\nprotein = ['Yogurt(1 cup)','Cooked meat(85g)','Cooked fish(100g)','1 whole egg + 4 egg whites','Tofu(125g)']\nfruit = ['Berries(80g)','Apple','Orange','Banana','Dried Fruit(Handfull)','Fruit Juice(125ml)']\nvegetable = ['Any vegetable(80g)','Leafy greens(Any Amount)']\ngrains = ['Cooked Grain(150g)','Whole Grain Bread(1 slice)','Half Large Potato(75g)','Oats(250g)','2 corn tortillas']\nprotein_snack = ['Soy nuts(30g)','Low fat milk(250ml)','Hummus(4 Tbsp)','Cottage cheese (125g)','Flavored yogurt(125g)']\ntaste_enhancer = ['2 TSP (10 ml) olive oil','2 TBSP (30g) reduced-calorie salad dressin','1/4 medium avocado','Small handful of nuts','1/2 ounce grated Parmesan cheese','1 TBSP (20g) jam, jelly, honey, syrup, sugar']\n\n\ndef calc_tdee(name,weight,height,age,gender,phys_act):\n\tif gender=='Female':\n\t\tbmr = 655 + (9.6 * weight) + (1.8 * height ) - (4.7 * age)\n\telse:\n\t\tbmr = 66 + (13.7 * weight) + (5 * height ) - (6.8 * age)\n\n\tif phys_act == 'value1':\n\t\ttdee= bmr*1.2\n\telif phys_act == 'value2':\n\t\ttdee= bmr*1.375\n\telif phys_act == 'value3':\n\t\ttdee= bmr*1.55\n\telif phys_act == 'value4':\n\t\ttdee= bmr*1.735\n\telse:\n\t\ttdee=bmr*1.9 \n\treturn tdee\n\n\ndef bfcalc(tdee):\n\tbreakfast = protein[randint(0,len(protein)-1)]+\", \"\n\tbreakfast += fruit[randint(0,len(fruit)-1)]\n\n\tif tdee>=2200:\n\t\tbreakfast+=\", \"+grains[randint(0,len(grains)-1)]\n\n\treturn breakfast\n\n\ndef s1calc(tdee):\n\tsnack1=\"\"\n\tif tdee>=1800:\n\t\tsnack1 = protein_snack[randint(0,len(protein_snack)-1)]\n\n\treturn snack1\n\ndef lcalc(tdee):\n\tlunch=\"\"\n\tlunch+=protein[randint(0,len(protein)-1)]+\", \"\n\tlunch+=vegetable[randint(0,len(vegetable)-1)]+\", \"\n\tlunch+=\"Leafy greens, \"\n\tlunch+=taste_enhancer[randint(0,len(taste_enhancer)-1)]+\", \"\n\tlunch+=grains[randint(0,len(grains)-1)]\n\n\tif(tdee>=1500):\n\t\tlunch+=\", \" + fruit[randint(0,len(fruit)-1)]\n\n\tif(tdee>=1800):\n\t\tlunch+=\", \" + protein[randint(0,len(protein)-1)] + \", \"\n\t\tlunch+=vegetable[randint(0,len(vegetable)-1)]\n\treturn lunch\n\ndef s2calc(tdee):\n\tsnack2=protein_snack[randint(0,len(protein_snack)-1)]+\", \"\n\tsnack2+=vegetable[randint(0,len(vegetable)-1)]\n\treturn snack2 \n\ndef dcalc(tdee):\n\tdinner=\"\"\n\tdinner+=protein[randint(0,len(protein)-1)]+\", \"\n\tdinner+=\"2 vegetables 80g, \"\n\tdinner+=\"Leafy Greens, \"\n\tdinner+=grains[randint(0,len(grains)-1)]+\", \"\n\tdinner+=taste_enhancer[randint(0,len(taste_enhancer)-1)]\n\tif tdee>=1500:\n\t\tdinner+=\", \" + protein[randint(0,len(protein)-1)]\n\tif tdee>=2200:\n\t\tdinner+=\", \" + grains[randint(0,len(grains)-1)]+\", \"\n\t\tdinner+=taste_enhancer[randint(0,len(taste_enhancer)-1)]\n\treturn dinner\n\ndef s3calc(tdee):\n\tsnack3=fruit[randint(0,len(fruit)-1)]\n\treturn snack3\n","sub_path":"algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"96965357","text":"from typing import Optional\n\nfrom hummingbot.client.config.config_var import ConfigVar\nfrom hummingbot.client.config.config_methods import using_exchange\nfrom hummingbot.core.event.events import OrderType, TradeType\nfrom hummingbot.core.utils.tracking_nonce import get_tracking_nonce\n\n\nCENTRALIZED = False\n\nUSE_ETHEREUM_WALLET = False\n\nEXAMPLE_PAIR = \"IDEX-ETH\"\n\nDEFAULT_FEES = [0.1, 0.2]\n\nETH_GAS_LIMIT = 170000 # estimation of upper limit of gas idex uses to move its smart 
contact for each fill\nMATIC_GAS_LIMIT = 60000 # estimate from real taker orders\n\nUSE_ETH_GAS_LOOKUP = False # false even if idex do have gas fees, otherwise estimate_fee() would fail\n\nHUMMINGBOT_GAS_LOOKUP = False # set to False if getting gas from idex is better than from Hummingbot\n\nHBOT_BROKER_ID = \"HBOT-\"\n\nEXCHANGE_NAME = \"idex\"\n\nIDEX_BLOCKCHAINS = ('ETH', 'MATIC')\n\n\ndef validate_idex_contract_blockchain(value: str) -> Optional[str]:\n if value not in IDEX_BLOCKCHAINS:\n return f'Value {value} must be one of: {IDEX_BLOCKCHAINS}'\n\n\n# Example: HBOT-B-DIL-ETH-64106538-8b61-11eb-b2bb-1e29c0300f46\ndef get_new_client_order_id(is_buy: bool, trading_pair: str) -> str:\n side = \"B\" if is_buy else \"S\"\n return f\"{HBOT_BROKER_ID}{side}-{trading_pair}-{get_tracking_nonce()}\"\n\n\nHB_ORDER_TYPE_TO_IDEX_PARAM_MAP = {\n OrderType.MARKET: \"market\",\n OrderType.LIMIT: \"limit\",\n OrderType.LIMIT_MAKER: \"limitMaker\",\n}\n\n\ndef hb_order_type_to_idex_param(order_type: OrderType):\n return HB_ORDER_TYPE_TO_IDEX_PARAM_MAP[order_type]\n\n\nHB_TRADE_TYPE_TO_IDEX_PARAM_MAP = {\n TradeType.BUY: \"buy\",\n TradeType.SELL: \"sell\",\n}\n\n\ndef hb_trade_type_to_idex_param(trade_type: TradeType):\n return HB_TRADE_TYPE_TO_IDEX_PARAM_MAP[trade_type]\n\n\nIDEX_PARAM_TO_HB_ORDER_TYPE_MAP = {\n \"market\": OrderType.MARKET,\n \"limit\": OrderType.LIMIT,\n \"limitMaker\": OrderType.LIMIT_MAKER,\n}\n\n\ndef idex_param_to_hb_order_type(order_type: str):\n return IDEX_PARAM_TO_HB_ORDER_TYPE_MAP[order_type]\n\n\nIDEX_PARAM_TO_HB_TRADE_TYPE_MAP = {\n \"buy\": TradeType.BUY,\n \"sell\": TradeType.SELL,\n}\n\n\ndef idex_param_to_hb_trade_type(side: str):\n return IDEX_PARAM_TO_HB_TRADE_TYPE_MAP[side]\n\n\nKEYS = {\n \"idex_api_key\":\n ConfigVar(key=\"idex_api_key\",\n prompt=\"Enter your IDEX API key (smart contract blockchain: ETH) >>> \",\n required_if=using_exchange(EXCHANGE_NAME),\n is_secure=True,\n is_connect_key=True),\n \"idex_api_secret_key\":\n ConfigVar(key=\"idex_api_secret_key\",\n prompt=\"Enter your IDEX API secret key>>> \",\n required_if=using_exchange(EXCHANGE_NAME),\n is_secure=True,\n is_connect_key=True),\n \"idex_wallet_private_key\":\n ConfigVar(key=\"idex_wallet_private_key\",\n prompt=\"Enter your wallet private key>>> \",\n required_if=using_exchange(EXCHANGE_NAME),\n is_secure=True,\n is_connect_key=True),\n}\n\n\nOTHER_DOMAINS = [\"idex_matic\", \"idex_sandbox_eth\", \"idex_sandbox_matic\"]\nOTHER_DOMAINS_PARAMETER = { # will be passed as argument \"domain\" to the exchange class\n \"idex_matic\": \"matic\",\n \"idex_sandbox_eth\": \"sandbox_eth\",\n \"idex_sandbox_matic\": \"sandbox_matic\",\n}\nOTHER_DOMAINS_EXAMPLE_PAIR = {\"idex_matic\": \"IDEX-ETH\", \"idex_sandbox_eth\": \"DIL-ETH\", \"idex_sandbox_matic\": \"DIL-ETH\"}\nOTHER_DOMAINS_DEFAULT_FEES = {\"idex_matic\": [0.1, 0.2], \"idex_sandbox_eth\": [0.1, 0.2], \"idex_sandbox_matic\": [0.1, 0.2]}\nOTHER_DOMAINS_KEYS = {\n \"idex_matic\": {\n \"idex_matic_api_key\":\n ConfigVar(key=\"idex_matic_api_key\",\n prompt=\"Enter your IDEX API key (smart contract blockchain: MATIC) >>> \",\n required_if=using_exchange(\"idex_matic\"),\n is_secure=True,\n is_connect_key=True),\n \"idex_matic_api_secret_key\":\n ConfigVar(key=\"idex_matic_api_secret_key\",\n prompt=\"Enter your IDEX API secret key>>> \",\n required_if=using_exchange(\"idex_matic\"),\n is_secure=True,\n is_connect_key=True),\n \"idex_matic_wallet_private_key\":\n ConfigVar(key=\"idex_matic_wallet_private_key\",\n prompt=\"Enter your wallet private 
key>>> \",\n required_if=using_exchange(\"idex_matic\"),\n is_secure=True,\n is_connect_key=True),\n },\n \"idex_sandbox_eth\": {\n \"idex_sandbox_eth_api_key\":\n ConfigVar(key=\"idex_sandbox_eth_api_key\",\n prompt=\"Enter your IDEX API key ([sandbox] smart contract blockchain: ETH) >>> \",\n required_if=using_exchange(\"idex_sandbox_eth\"),\n is_secure=True,\n is_connect_key=True),\n \"idex_sandbox_eth_api_secret_key\":\n ConfigVar(key=\"idex_sandbox_eth_api_secret_key\",\n prompt=\"Enter your IDEX API secret key>>> \",\n required_if=using_exchange(\"idex_sandbox_eth\"),\n is_secure=True,\n is_connect_key=True),\n \"idex_sandbox_eth_wallet_private_key\":\n ConfigVar(key=\"idex_sandbox_eth_wallet_private_key\",\n prompt=\"Enter your wallet private key>>> \",\n required_if=using_exchange(\"idex_sandbox_eth\"),\n is_secure=True,\n is_connect_key=True),\n },\n \"idex_sandbox_matic\": {\n \"idex_sandbox_matic_api_key\":\n ConfigVar(key=\"idex_sandbox_matic_api_key\",\n prompt=\"Enter your IDEX API key ([sandbox] smart contract blockchain: MATIC) >>> \",\n required_if=using_exchange(\"idex_sandbox_matic\"),\n is_secure=True,\n is_connect_key=True),\n \"idex_sandbox_matic_api_secret_key\":\n ConfigVar(key=\"idex_sandbox_matic_api_secret_key\",\n prompt=\"Enter your IDEX API secret key>>> \",\n required_if=using_exchange(\"idex_sandbox_matic\"),\n is_secure=True,\n is_connect_key=True),\n \"idex_sandbox_matic_wallet_private_key\":\n ConfigVar(key=\"idex_sandbox_matic_wallet_private_key\",\n prompt=\"Enter your wallet private key>>> \",\n required_if=using_exchange(\"idex_sandbox_matic\"),\n is_secure=True,\n is_connect_key=True),\n },\n}\n\n\nDEBUG = False\n","sub_path":"hummingbot/connector/exchange/idex/idex_utils.py","file_name":"idex_utils.py","file_ext":"py","file_size_in_byte":6623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"363878192","text":"route=[]\ndef routes_txt(route_short_name):\n infile = open('routes.txt')\n data = infile.readline() #for reading first line of the file \n #print(\"Here are all the route ids ...\")\n while data : #loop for reading each data in the file \n data = infile.readline() \n l1=list(data.split(\",\")) #for storing the string file to list for single line of data \n if(len(l1)==1) : #for ignoring the last line of file \n break\n if route_short_name in l1[1] :#comparing if the give shot name is in the given data \n print('========================================================')\n print(l1[1])\n trip_txt(l1[3])\n #route.append(l1[3]) #storing the route_id for further traversal \n # print(l1[1],l1[4],'Route_id =',l1[3]) # this line is for printing bus no. and route id for the given bus no. 
\n\ndef trip_txt(route_id):\n infile = open('trips.txt')\n data = infile.readline()\n count =0\n while data and count<1:\n data = infile.readline()\n l2=list(data.split(\",\"))\n if(len(l2)==1):\n break \n if route_id in l2[0]:\n print('trip_id is ',l2[2])\n #print('**************************************8')\n count +=1\n stop_times(l2[2])\n\n\ndef stop_times(trip_id):\n infile = open('stop_times.txt')\n data = infile.readline()\n while data :\n data = infile.readline()\n l3=list(data.split(\",\"))\n if(len(l3)==1):\n break \n if trip_id == l3[0]:\n # print(l3[3])\n stop_txt(l3[3])\n\n\ndef stop_txt(stop_id):\n infile = open('stops.txt')\n data = infile.readline()\n while data :\n data = infile.readline()\n l4=list(data.split(\",\"))\n if(len(l4)==1):\n break \n if stop_id == l4[0]:\n print(l4[1],l4[2])\n\n\nroute_short_name = input(\"Enter route short name\\n\")\nroutes_txt(route_short_name)","sub_path":"routeid_to_standid.py","file_name":"routeid_to_standid.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"177920219","text":"# -*- coding: iso8859-1 -*-\n#\n# Copyright (C) 2003-2004 Edgewall Software\n# Copyright (C) 2003-2004 Jonas Borgström \n# All rights reserved.\n#\n# This software is licensed as described in the file COPYING, which\n# you should have received as part of this distribution. The terms\n# are also available at http://trac.edgewall.com/license.html.\n#\n# This software consists of voluntary contributions made by many\n# individuals. For exact contribution history, see the revision\n# history and logs, available at http://projects.edgewall.com/trac/.\n#\n# Author: Jonas Borgström \n\nfrom __future__ import generators\nimport re\nimport time\n\nfrom trac.core import *\nfrom trac.perm import IPermissionRequestor\nfrom trac.util import TracError, escape, format_datetime\nfrom trac.web import IRequestHandler\nfrom trac.web.chrome import add_link, add_stylesheet, INavigationContributor\nfrom trac.wiki import IWikiSyntaxProvider\n\n\nclass ISearchSource(Interface):\n \"\"\"\n Extension point interface for adding search sources to the Trac\n Search system.\n \"\"\"\n\n def get_search_filters(self, req):\n \"\"\"\n Return a list of filters that this search source supports. Each\n filter must be a (name, label) tuple, where `name` is the internal\n name, and `label` is a human-readable name for display.\n \"\"\"\n\n def get_search_results(self, req, query, filters):\n \"\"\"\n Return a list of search results matching `query`. 
The `filters`\n parameters is a list of the enabled\n filters, each item being the name of the tuples returned by\n `get_search_events`.\n\n The events returned by this function must be tuples of the form\n (href, title, date, author, excerpt).\n \"\"\"\n\n\ndef query_to_sql(db, q, name):\n if q[0] == q[-1] == \"'\" or q[0] == q[-1] == '\"':\n sql_q = \"%s %s '%%%s%%'\" % (name, db.like(),\n q[1:-1].replace(\"'''\", \"''\"))\n else:\n q = q.replace('\\'', '\\'\\'')\n keywords = q.split(' ')\n x = map(lambda x, name=name: name + ' ' + db.like() +\n '\\'%' + x + '%\\'', keywords)\n sql_q = ' AND '.join(x)\n return sql_q\n\ndef shorten_result(text='', keywords=[], maxlen=240, fuzz=60):\n if not text: text = ''\n text_low = text.lower()\n beg = -1\n for k in keywords:\n i = text_low.find(k.lower())\n if (i > -1 and i < beg) or beg == -1:\n beg = i\n excerpt_beg = 0\n if beg > fuzz:\n for sep in ('.', ':', ';', '='):\n eb = text.find(sep, beg - fuzz, beg - 1)\n if eb > -1:\n eb += 1\n break\n else:\n eb = beg - fuzz\n excerpt_beg = eb\n if excerpt_beg < 0: excerpt_beg = 0\n msg = text[excerpt_beg:beg+maxlen]\n if beg > fuzz:\n msg = '... ' + msg\n if beg < len(text)-maxlen:\n msg = msg + ' ...'\n return msg\n \n\nclass SearchModule(Component):\n\n implements(INavigationContributor, IPermissionRequestor, IRequestHandler,\n IWikiSyntaxProvider)\n\n search_sources = ExtensionPoint(ISearchSource)\n \n RESULTS_PER_PAGE = 10\n\n # INavigationContributor methods\n\n def get_active_navigation_item(self, req):\n return 'search'\n\n def get_navigation_items(self, req):\n if not req.perm.has_permission('SEARCH_VIEW'):\n return\n yield 'mainnav', 'search', 'Search' \\\n % (self.env.href.search())\n\n # IPermissionRequestor methods\n\n def get_permission_actions(self):\n return ['SEARCH_VIEW']\n\n # IRequestHandler methods\n\n def match_request(self, req):\n return re.match(r'/search/?', req.path_info) is not None\n\n def process_request(self, req):\n req.perm.assert_permission('SEARCH_VIEW')\n\n available_filters = []\n for source in self.search_sources:\n available_filters += source.get_search_filters(req)\n \n filters = [f[0] for f in available_filters if req.args.has_key(f[0])]\n if not filters:\n filters = [f[0] for f in available_filters]\n \n req.hdf['search.filters'] = [\n { 'name': filter[0],\n 'label': filter[1],\n 'active': filter[0] in filters\n } for filter in available_filters]\n \n req.hdf['title'] = 'Search'\n\n query = req.args.get('q')\n if query:\n page = int(req.args.get('page', '1'))\n redir = self.quickjump(query)\n if redir:\n req.redirect(redir)\n elif query.startswith('!'):\n query = query[1:]\n # Refuse queries that obviously would result in a huge result set\n if len(query) < 3 and len(query.split()) == 1:\n raise TracError('Search query too short. 
'\n 'Query must be at least 3 characters long.',\n 'Search Error')\n results = []\n for source in self.search_sources:\n results += list(source.get_search_results(req, query, filters))\n results.sort(lambda x,y: cmp(y[2], x[2]))\n page_size = self.RESULTS_PER_PAGE\n n = len(results)\n n_pages = n / page_size + 1\n results = results[(page-1) * page_size: page * page_size]\n\n req.hdf['title'] = 'Search Results'\n req.hdf['search.q'] = req.args.get('q').replace('\"', \""\")\n req.hdf['search.page'] = page\n req.hdf['search.n_hits'] = n\n req.hdf['search.n_pages'] = n_pages\n req.hdf['search.page_size'] = page_size\n if page < n_pages:\n next_href = self.env.href.search(zip(filters,\n ['on'] * len(filters)),\n q=query, page=page + 1)\n add_link(req, 'next', next_href, 'Next Page')\n if page > 1:\n prev_href = self.env.href.search(zip(filters,\n ['on'] * len(filters)),\n q=query, page=page - 1)\n add_link(req, 'prev', prev_href, 'Previous Page')\n req.hdf['search.page_href'] = escape(\n self.env.href.search(zip(filters, ['on'] * len(filters)),\n q=query))\n req.hdf['search.result'] = [\n { 'href': escape(result[0]),\n 'title': result[1],\n 'date': format_datetime(result[2]),\n 'author': escape(result[3]),\n 'excerpt': result[4]\n } for result in results]\n\n add_stylesheet(req, 'common/css/search.css')\n return 'search.cs', None\n\n def quickjump(self, kwd):\n if len(kwd.split()) != 1:\n return None\n # Ticket quickjump\n if kwd[0] == '#' and kwd[1:].isdigit():\n return self.env.href.ticket(kwd[1:])\n elif kwd[0:len('ticket:')] == 'ticket:' and kwd[len('ticket:'):].isdigit():\n return self.env.href.ticket(kwd[len('ticket:'):])\n elif kwd[0:len('bug:')] == 'bug:' and kwd[len('bug:'):].isdigit():\n return self.env.href.ticket(kwd[len('bug:'):])\n # Changeset quickjump\n elif kwd[0] == '[' and kwd[-1] == ']' and kwd[1:-1].isdigit():\n return self.env.href.changeset(kwd[1:-1])\n elif kwd[0:len('changeset:')] == 'changeset:' and kwd[len('changeset:'):].isdigit():\n return self.env.href.changeset(kwd[len('changeset:'):])\n # Report quickjump\n elif kwd[0] == '{' and kwd[-1] == '}' and kwd[1:-1].isdigit():\n return self.env.href.report(kwd[1:-1])\n elif kwd[0:len('report:')] == 'report:' and kwd[len('report:'):].isdigit():\n return self.env.href.report(kwd[len('report:'):])\n # Milestone quickjump\n elif kwd[0:len('milestone:')] == 'milestone:':\n return self.env.href.milestone(kwd[len('milestone:'):])\n # Source quickjump\n elif kwd[0:len('source:')] == 'source:':\n return self.env.href.browser(kwd[len('source:'):])\n # Wiki quickjump\n elif kwd[0:len('wiki:')] == 'wiki:':\n r = \"((^|(?<=[^A-Za-z]))[!]?[A-Z][a-z/]+(?:[A-Z][a-z/]+)+)\"\n if re.match (r, kwd[len('wiki:'):]):\n return self.env.href.wiki(kwd[len('wiki:'):])\n elif kwd[0].isupper() and kwd[1].islower():\n r = \"((^|(?<=[^A-Za-z]))[!]?[A-Z][a-z/]+(?:[A-Z][a-z/]+)+)\"\n if re.match (r, kwd):\n return self.env.href.wiki(kwd)\n\n # IWikiSyntaxProvider methods\n \n def get_wiki_syntax(self):\n return []\n \n def get_link_resolvers(self):\n yield ('search', self._format_link)\n\n def _format_link(self, formatter, ns, query, label):\n if query and query[0] == '?':\n href = formatter.href.search() + \\\n query.replace('&', '&').replace(' ', '+')\n else:\n href = formatter.href.search(q=query)\n return '%s' % (escape(href), label)\n\n","sub_path":"0.9-0beta2+r2418-1/trac/Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":9263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"472895134","text":"\nfrom epann.core.tools.utils.structs import Structs\n\nfrom epann.core.tools.constants.cppn import *\nfrom epann.core.tools.constants.hyperNEAT import *\n\nimport numpy as np\n\n\n\nclass Mutations:\n\n def __init__(self):\n\n self.modify = Structs()\n\n # ----- Structural Mutations\n\n def add_connection(self, genome, prob, innovation):\n connections = genome.connections\n nodes = genome.nodes\n\n # Mutations applied ONCE over the entire CPPN genome wrt prob\n if not connectionwise_mutations and np.random.rand() < prob:\n\n current_out_nodes = [ connections[connection]['out_node'] for connection in connections.keys() ]\n current_in_nodes = [ connections[connection]['in_node'] for connection in connections.keys() ]\n\n existing_connections = zip( current_out_nodes, current_in_nodes )\n # print '\\nExisting connections:', existing_connections\n\n # Select the connection to add\n inputs = range(num_inputs)\n outputs = range(num_inputs, num_inputs + num_outputs)\n\n hiddens = list( set(nodes.keys()) - set( inputs + outputs ) )\n\n possible_starts = inputs + hiddens\n possible_ends = hiddens + outputs\n\n chosen_start = possible_starts[np.random.randint(len(possible_starts))]\n chosen_end = possible_ends[np.random.randint(len(possible_ends))]\n\n chosen_connection = (chosen_start, chosen_end)\n # print ' - Chosen connection:', chosen_connection, chosen_connection in existing_connections\n\n # print chosen_connection, existing_connections, chosen_connection in existing_connections\n\n # Check to make sure if the connection already exists - still getting duplicates\n if not (chosen_connection in existing_connections): # and connection's addition would not create a cycle\n\n # Check to make sure that the connection is not a self connection\n\n if allow_self_connections: # Self-connections allowed\n\n # Make the connection\n connections[innovation] = self.modify.generate_connection(chosen_connection)\n\n else: # Self-connections not allowed\n\n if not (chosen_start == chosen_end):\n\n # Make the connection\n connections[innovation] = self.modify.generate_connection(chosen_connection)\n\n innovation += 1\n\n elif connectionwise_mutations:\n pass\n\n\n return connections, nodes, innovation\n\n def add_node(self, genome, prob, innovation):\n\n connections = genome.connections\n nodes = genome.nodes\n\n # Mutations applied ONCE over the entire CPPN genome wrt prob\n if not connectionwise_mutations and np.random.rand() < prob:\n\n # Choose the existing connection the new node will bisect - select this first, and only mutate if it hasnt already been DISABLED\n new_node_bisection = np.random.randint(len(connections))\n possible_nodes = connections.keys()\n new_node_bisection = possible_nodes[new_node_bisection]\n\n if connections[new_node_bisection]['enable_bit']: # checks to see if the connection is still ENABLED\n\n # Add the node to the node genome\n new_node_ID = len(nodes)\n nodes[ new_node_ID ] = self.modify.generate_node('hidden')\n\n # # Choose the existing connection the new node will bisect - select this first, and only mutate if it hasnt already been DISABLED\n # new_node_bisection = np.random.randint(len(connections))\n # possible_nodes = connections.keys()\n # new_node_bisection = possible_nodes[new_node_bisection]\n\n # Disable that previous connection\n connections[new_node_bisection]['enable_bit'] = 0\n\n # Instantiate the two new connections coming into and out of the newly introduced node\n previous_out_node = connections[new_node_bisection]['out_node']\n 
previous_in_node = connections[new_node_bisection]['in_node']\n\n # Outgoing connection\n connections[innovation] = self.modify.generate_connection([previous_out_node, new_node_ID])\n\n innovation += 1\n\n # Incoming connection\n connections[innovation] = self.modify.generate_connection([new_node_ID, previous_in_node])\n\n innovation += 1\n\n elif connectionwise_mutations:\n pass\n\n return connections, nodes, innovation\n\n\n\n # ----- Functional Mutations -----\n\n def perturb_weight(self, genome, prob):\n connections = genome.connections\n nodes = genome.nodes\n\n # Mutations applied ONCE over the entire CPPN genome wrt prob\n if not connectionwise_mutations and np.random.rand() < prob:\n\n chosen_connection = np.random.randint(len(connections.keys()))\n chosen_connection = connections.keys()[chosen_connection]\n\n # Update the weight - SHOULD THIS BE REPLACED BY A CAUCHY CHANGE AS IN THE DPPNs?\n\n # --- Either adjust the weight by a small amount\n if np.random.rand() < prob_replace_weight:\n current_weight = connections[chosen_connection]['weight']\n connections[chosen_connection]['weight'] = np.random.normal(current_weight, 0.1)\n\n # --- Or replace it entirely with a new weight value\n else:\n connections[chosen_connection]['weight'] = np.random.randn()\n\n elif connectionwise_mutations:\n pass\n\n # Update\n genome.connections = connections\n genome.nodes = nodes\n\n return genome\n\n\n def delete_connection(self, genome, prob):\n connections = genome.connections\n nodes = genome.nodes\n\n return connections, nodes\n\n def flip_enable_bit(self, genome, prob):\n connections = genome.connections\n nodes = genome.nodes\n\n return connections, nodes\n\n def mutate_activation_func(self, genome, prob):\n connections = genome.connections\n nodes = genome.nodes\n\n return connections, nodes\n\n\n","sub_path":"epann/core/tools/utils/mutations.py","file_name":"mutations.py","file_ext":"py","file_size_in_byte":6253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"472335722","text":"\"\"\"Spells for optimizing nif files.\"\"\"\n\n# --------------------------------------------------------------------------\n# ***** BEGIN LICENSE BLOCK *****\n#\n# Copyright (c) 2007-2009, NIF File Format Library and Tools.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# * Neither the name of the NIF File Format Library and Tools\n# project nor the names of its contributors may be used to endorse\n# or promote products derived from this software without specific\n# prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# ***** END LICENSE BLOCK *****\n# --------------------------------------------------------------------------\n\nfrom itertools import izip\n\nfrom pyffi.formats.nif import NifFormat\nimport pyffi.utils.tristrip\nimport pyffi.spells\nimport pyffi.spells.nif\nimport pyffi.spells.nif.fix\n\n# set flag to overwrite files\n__readonly__ = False\n\n# example usage\n__examples__ = \"\"\"* Standard usage:\n\n python niftoaster.py optimize /path/to/copy/of/my/nifs\n\n* Optimize, but do not merge NiMaterialProperty blocks:\n\n python niftoaster.py optimize --exclude=NiMaterialProperty /path/to/copy/of/my/nifs\n\"\"\"\n\nclass SpellCleanRefLists(pyffi.spells.nif.NifSpell):\n \"\"\"Remove empty and duplicate entries in reference lists.\"\"\"\n\n SPELLNAME = \"opt_cleanreflists\"\n READONLY = False\n\n def datainspect(self):\n # see MadCat221's metstaff.nif:\n # merging data on PSysMeshEmitter affects particle system\n # so do not merge child links on this nif (probably we could still\n # merge other things: this is just a quick hack to make sure the\n # optimizer won't do anything wrong)\n try:\n if self.data.header.hasBlockType(NifFormat.NiPSysMeshEmitter):\n return False\n except ValueError:\n # when in doubt, assume it does not have this block\n pass\n # so far, only reference lists in NiObjectNET blocks, NiAVObject\n # blocks, and NiNode blocks are checked\n return self.inspectblocktype(NifFormat.NiObjectNET)\n\n def branchinspect(self, branch):\n # only inspect the NiObjectNET branch\n return isinstance(branch, NifFormat.NiObjectNET)\n\n def cleanreflist(self, reflist, category):\n \"\"\"Return a cleaned copy of the given list of references.\"\"\"\n # delete empty and duplicate references\n cleanlist = []\n for ref in reflist:\n if ref is None:\n self.toaster.msg(\"removing empty %s reference\" % category)\n elif ref in cleanlist:\n self.toaster.msg(\"removing duplicate %s reference\" % category)\n else:\n cleanlist.append(ref)\n # done\n return cleanlist\n\n def branchentry(self, branch):\n if isinstance(branch, NifFormat.NiObjectNET):\n # clean extra data\n branch.setExtraDatas(\n self.cleanreflist(branch.getExtraDatas(), \"extra\"))\n if isinstance(branch, NifFormat.NiAVObject):\n # clean properties\n branch.setProperties(\n self.cleanreflist(branch.getProperties(), \"property\"))\n if isinstance(branch, NifFormat.NiNode):\n # clean children\n branch.setChildren(\n self.cleanreflist(branch.getChildren(), \"child\"))\n # clean effects\n branch.setEffects(\n self.cleanreflist(branch.getEffects(), \"effect\"))\n # always recurse further\n return True\n\nclass SpellMergeDuplicates(pyffi.spells.nif.NifSpell):\n \"\"\"Remove duplicate branches.\"\"\"\n\n SPELLNAME = \"opt_mergeduplicates\"\n READONLY = False\n\n def __init__(self, *args, **kwargs):\n pyffi.spells.nif.NifSpell.__init__(self, *args, **kwargs)\n # list of all branches visited so far\n self.branches = []\n\n def datainspect(self):\n # see MadCat221's metstaff.nif:\n # merging data on PSysMeshEmitter 
affects particle system\n # so do not merge shapes on this nif (probably we could still\n # merge other things: this is just a quick hack to make sure the\n # optimizer won't do anything wrong)\n try:\n return not self.data.header.hasBlockType(NifFormat.NiPSysMeshEmitter)\n except ValueError:\n # when in doubt, do the spell\n return True\n\n def branchinspect(self, branch):\n # only inspect the NiObjectNET branch (merging havok can mess up things)\n return isinstance(branch, (NifFormat.NiObjectNET,\n NifFormat.NiGeometryData))\n\n def branchentry(self, branch):\n for otherbranch in self.branches:\n if (branch is not otherbranch and\n branch.isInterchangeable(otherbranch)):\n # skip properties that have controllers (the\n # controller data cannot always be reliably checked,\n # see also issue #2106668)\n if (isinstance(branch, NifFormat.NiProperty)\n and branch.controller):\n continue\n # interchangeable branch found!\n self.toaster.msg(\"removing duplicate branch\")\n self.data.replaceGlobalNode(branch, otherbranch)\n # branch has been replaced, so no need to recurse further\n return False\n else:\n # no duplicate found, add to list of visited branches\n self.branches.append(branch)\n # continue recursion\n return True\n\nclass SpellOptimizeGeometry(pyffi.spells.nif.NifSpell):\n \"\"\"Optimize all geometries:\n - remove duplicate vertices\n - stripify if strips are long enough\n - recalculate skin partition\n - recalculate tangent space \n \"\"\"\n\n SPELLNAME = \"opt_geometry\"\n READONLY = False\n\n # spell parameters\n STRIPLENCUTOFF = 10\n STITCH = True\n\n def __init__(self, *args, **kwargs):\n pyffi.spells.nif.NifSpell.__init__(self, *args, **kwargs)\n # list of all optimized geometries so far\n # (to avoid optimizing the same geometry twice)\n self.optimized = []\n\n def datainspect(self):\n # so far, only reference lists in NiObjectNET blocks, NiAVObject\n # blocks, and NiNode blocks are checked\n return self.inspectblocktype(NifFormat.NiTriBasedGeom)\n\n def branchinspect(self, branch):\n # only inspect the NiAVObject branch\n return isinstance(branch, NifFormat.NiAVObject)\n\n def branchentry(self, branch):\n \"\"\"Optimize a NiTriStrips or NiTriShape block:\n - remove duplicate vertices\n - stripify if strips are long enough\n - recalculate skin partition\n - recalculate tangent space \n\n @todo: Limit the length of strips (see operation optimization mod for\n Oblivion!)\n \"\"\"\n if not isinstance(branch, NifFormat.NiTriBasedGeom):\n # keep recursing\n return True\n\n if branch in self.optimized:\n # already optimized\n return False\n \n # we found a geometry to optimize\n\n # cover degenerate case\n if branch.data.numVertices < 3:\n self.toaster.msg(\"less than 3 vertices: removing branch\")\n self.data.replaceGlobalNode(branch, None)\n return False\n\n # shortcut\n data = branch.data\n\n self.toaster.msg(\"removing duplicate vertices\")\n v_map = [0 for i in xrange(data.numVertices)] # maps old index to new index\n v_map_inverse = [] # inverse: map new index to old index\n k_map = {} # maps hash to new vertex index\n index = 0 # new vertex index for next vertex\n for i, vhash in enumerate(data.getVertexHashGenerator()):\n try:\n k = k_map[vhash]\n except KeyError:\n # vertex is new\n k_map[vhash] = index\n v_map[i] = index\n v_map_inverse.append(i)\n index += 1\n else:\n # vertex already exists\n v_map[i] = k\n del k_map\n\n new_numvertices = index\n self.toaster.msg(\"(num vertices was %i and is now %i)\"\n % (len(v_map), new_numvertices))\n # copy old data\n oldverts = 
[[v.x, v.y, v.z] for v in data.vertices]\n oldnorms = [[n.x, n.y, n.z] for n in data.normals]\n olduvs = [[[uv.u, uv.v] for uv in uvset] for uvset in data.uvSets]\n oldvcols = [[c.r, c.g, c.b, c.a] for c in data.vertexColors]\n if branch.skinInstance: # for later\n oldweights = branch.getVertexWeights()\n # set new data\n data.numVertices = new_numvertices\n if data.hasVertices:\n data.vertices.updateSize()\n if data.hasNormals:\n data.normals.updateSize()\n data.uvSets.updateSize()\n if data.hasVertexColors:\n data.vertexColors.updateSize()\n for i, v in enumerate(data.vertices):\n old_i = v_map_inverse[i]\n v.x = oldverts[old_i][0]\n v.y = oldverts[old_i][1]\n v.z = oldverts[old_i][2]\n for i, n in enumerate(data.normals):\n old_i = v_map_inverse[i]\n n.x = oldnorms[old_i][0]\n n.y = oldnorms[old_i][1]\n n.z = oldnorms[old_i][2]\n for j, uvset in enumerate(data.uvSets):\n for i, uv in enumerate(uvset):\n old_i = v_map_inverse[i]\n uv.u = olduvs[j][old_i][0]\n uv.v = olduvs[j][old_i][1]\n for i, c in enumerate(data.vertexColors):\n old_i = v_map_inverse[i]\n c.r = oldvcols[old_i][0]\n c.g = oldvcols[old_i][1]\n c.b = oldvcols[old_i][2]\n c.a = oldvcols[old_i][3]\n del oldverts\n del oldnorms\n del olduvs\n del oldvcols\n\n # update vertex indices in strips/triangles\n if isinstance(data, NifFormat.NiTriStripsData):\n for strip in data.points:\n for i in xrange(len(strip)):\n try:\n strip[i] = v_map[strip[i]]\n except IndexError:\n self.toaster.logger.warn(\n \"Corrupt nif: bad vertex index in strip (%i); \"\n \"replacing by valid index which might \"\n \"modify your geometry!\" % strip[i])\n if i > 0:\n strip[i] = strip[i-1]\n else:\n strip[i] = strip[i+1]\n elif isinstance(data, NifFormat.NiTriShapeData):\n for tri in data.triangles:\n tri.v1 = v_map[tri.v1]\n tri.v2 = v_map[tri.v2]\n tri.v3 = v_map[tri.v3]\n\n # stripify trishape/tristrip\n if data.numTriangles > 32000:\n self.toaster.logger.warn(\n \"Found an insane amount of %i triangles in geometry: \"\n \"consider simplifying the mesh \"\n \"or breaking it up in smaller parts.\"\n % data.numTriangles)\n else:\n if isinstance(data, NifFormat.NiTriStripsData):\n self.toaster.msg(\"recalculating strips\")\n origlen = sum(i for i in data.stripLengths)\n data.setTriangles(data.getTriangles())\n newlen = sum(i for i in data.stripLengths)\n self.toaster.msg(\"(strip length was %i and is now %i)\"\n % (origlen, newlen))\n elif isinstance(data, NifFormat.NiTriShapeData):\n self.toaster.msg(\"stripifying\")\n newbranch = branch.getInterchangeableTriStrips()\n self.data.replaceGlobalNode(branch, newbranch)\n branch = newbranch\n data = newbranch.data\n # average, weighed towards large strips\n if isinstance(data, NifFormat.NiTriStripsData):\n # note: the max(1, ...) 
is to avoid ZeroDivisionError\n avgstriplen = float(sum(i * i for i in data.stripLengths)) \\\n / max(1, sum(i for i in data.stripLengths))\n self.toaster.msg(\"(average strip length is %f)\" % avgstriplen)\n if avgstriplen < self.STRIPLENCUTOFF:\n self.toaster.msg(\"average strip length < %f so triangulating\"\n % self.STRIPLENCUTOFF)\n newbranch = branch.getInterchangeableTriShape()\n self.data.replaceGlobalNode(branch, newbranch)\n branch = newbranch\n data = newbranch.data\n elif self.STITCH:\n self.toaster.msg(\"stitching strips (using %i stitches)\"\n % len(data.getStrips()))\n data.setStrips([pyffi.utils.tristrip.stitchStrips(data.getStrips())])\n\n # update skin data\n if branch.skinInstance:\n self.toaster.msg(\"update skin data vertex mapping\")\n skindata = branch.skinInstance.data\n newweights = []\n for i in xrange(new_numvertices):\n newweights.append(oldweights[v_map_inverse[i]])\n for bonenum, bonedata in enumerate(skindata.boneList):\n w = []\n for i, weightlist in enumerate(newweights):\n for bonenum_i, weight_i in weightlist:\n if bonenum == bonenum_i:\n w.append((i, weight_i))\n bonedata.numVertices = len(w)\n bonedata.vertexWeights.updateSize()\n for j, (i, weight_i) in enumerate(w):\n bonedata.vertexWeights[j].index = i\n bonedata.vertexWeights[j].weight = weight_i\n\n # update skin partition (only if branch already exists)\n branch._validateSkin()\n skininst = branch.skinInstance\n skinpart = skininst.skinPartition\n if not skinpart:\n skinpart = skininst.data.skinPartition\n\n if skinpart:\n self.toaster.msg(\"updating skin partition\")\n # use Oblivion settings\n branch.updateSkinPartition(\n maxbonesperpartition = 18, maxbonespervertex = 4,\n stripify = True, verbose = 0)\n\n # update morph data\n for morphctrl in branch.getControllers():\n if isinstance(morphctrl, NifFormat.NiGeomMorpherController):\n morphdata = morphctrl.data\n # skip empty morph data\n if not morphdata:\n continue\n # convert morphs\n self.toaster.msg(\"updating morphs\")\n for morph in morphdata.morphs:\n # store a copy of the old vectors\n oldmorphvectors = [(vec.x, vec.y, vec.z)\n for vec in morph.vectors]\n for old_i, vec in izip(v_map_inverse, morph.vectors):\n vec.x = oldmorphvectors[old_i][0]\n vec.y = oldmorphvectors[old_i][1]\n vec.z = oldmorphvectors[old_i][2]\n del oldmorphvectors\n # resize matrices\n morphdata.numVertices = new_numvertices\n for morph in morphdata.morphs:\n morph.arg = morphdata.numVertices # manual argument passing\n morph.vectors.updateSize()\n\n # recalculate tangent space (only if the branch already exists)\n if branch.find(block_name='Tangent space (binormal & tangent vectors)',\n block_type=NifFormat.NiBinaryExtraData):\n self.toaster.msg(\"recalculating tangent space\")\n branch.updateTangentSpace()\n\n # stop recursion\n return False\n\n# XXX todo\nclass SpellSplitGeometry(pyffi.spells.nif.NifSpell):\n \"\"\"Optimize geometry by splitting large models into pieces.\n (This spell is not yet fully implemented!)\n \"\"\"\n SPELLNAME = \"opt_split\"\n READONLY = False\n THRESHOLD_RADIUS = 100 #: Threshold where to split geometry.\n\n # XXX todo\n @staticmethod\n def addVertex(sourceindex, v_map, sourcedata, destdata):\n \"\"\"Add a vertex from source to destination. Returns index in\n destdata of the vertex.\"\"\"\n # v_map maps source indices that have already been added to the\n # index they already have in the destdata\n\n # hasNormals, numUvSets, etc. 
of destdata must already match\n # the sourcedata\n try:\n return v_map[sourceindex]\n except KeyError:\n v_map[sourceindex] = destdata.numVertices\n destdata.numVertices += 1\n destdata.vertices.updateSize()\n destdata.vertices[-1].x = sourcedata.vertices[sourceindex].x\n destdata.vertices[-1].y = sourcedata.vertices[sourceindex].y\n destdata.vertices[-1].z = sourcedata.vertices[sourceindex].z\n if sourcedata.hasNormals:\n destdata.normals.updateSize()\n destdata.normals[-1].x = sourcedata.normals[sourceindex].x\n destdata.normals[-1].y = sourcedata.normals[sourceindex].y\n destdata.normals[-1].z = sourcedata.normals[sourceindex].z\n if sourcedata.hasVertexColors:\n destdata.vertexColors.updateSize()\n destdata.vertexColors[-1].r = sourcedata.vertexColors[sourceindex].r\n destdata.vertexColors[-1].g = sourcedata.vertexColors[sourceindex].g\n destdata.vertexColors[-1].b = sourcedata.vertexColors[sourceindex].b\n destdata.vertexColors[-1].a = sourcedata.vertexColors[sourceindex].a\n if sourcedata.hasUv:\n for sourceuvset, destuvset in izip(sourcedata.uvSets, destdata.uvSets):\n destuvset.updateSize()\n destuvset[-1].u = sourceuvset[sourceindex].u\n destuvset[-1].v = sourceuvset[sourceindex].v\n return destdata.numVertices\n\n # XXX todo\n @staticmethod\n def addTriangle(sourcetriangle, v_map, sourcedata, destdata):\n \"\"\"Add a triangle from source to destination.\"\"\"\n desttriangle = [\n destdata.addVertex(sourceindex)\n for sourceindex in sourcetriangle]\n destdata.numTriangles += 1\n destdata.triangles.updateSize()\n destdata.triangles[-1].v1 = desttriangle[0]\n destdata.triangles[-1].v2 = desttriangle[0]\n destdata.triangles[-1].v3 = desttriangle[0]\n\n # XXX todo\n @staticmethod\n def getSize(vertices, triangle):\n \"\"\"Calculate size of geometry data + given triangle.\"\"\"\n def helper(oper, coord):\n return oper((getattr(vert, coord) for vert in triangle),\n oper(getattr(vert, coord) for vert in vertices))\n minx = helper(min, \"x\")\n miny = helper(min, \"y\")\n minz = helper(min, \"z\")\n maxx = helper(max, \"x\")\n maxy = helper(max, \"y\")\n maxz = helper(max, \"z\")\n return max((maxx - minx, maxy - miny, maxz - minz))\n\n # XXX todo: merge into branchentry spell\n @staticmethod\n def split(geom, threshold_radius = THRESHOLD_RADIUS):\n \"\"\"Takes a NiGeometry block and splits the geometries. Returns a NiNode\n which contains the splitted geometry. 
Note that everything is triangulated\n in the process.\"\"\"\n # make list of triangles\n # this will be used as the list of triangles still to add\n triangles = geom.data.getTriangles()\n node = NifFormat.NiNode().deepcopy(\n NifFormat.NiAVObject.deepcopy(geom))\n geomsplit = None\n # while there are still triangles to add...\n while triangles:\n if geomsplit is None:\n # split new geometry\n geomsplit = NifFormat.NiTriShape()\n node.addChild(geomsplit)\n geomsplit.data = NifFormat.NiTriShapeData()\n v_map = {}\n # copy relevant data\n geomsplit.name = \"%s:%i\" % (geom.name, node.numChildren - 1)\n geomsplit.data.hasVertices = geom.data.hasVertices\n geomsplit.data.hasNormals = geom.data.hasNormals\n geomsplit.data.hasVertexColors = geom.data.hasVertexColors\n geomsplit.data.numUvSets = geom.data.numUvSets\n geomsplit.data.hasUv = geom.data.hasUv\n geomsplit.data.uvSets.updateSize()\n # assign it a random triangle\n triangle = triangles.pop(0)\n addTriangle(triangle, v_map, geom.data, geomsplit.data)\n # find face that is close to current geometry\n for triangle in triangles:\n if getSize(geomsplit.data,\n tuple(geom.data.vertices[index]\n for index in triangle)) < threshold_radius:\n addTriangle(triangle, v_map, geom.data, geomsplit.data)\n break\n else:\n # if exceeded, start new geometry\n # first finish some things in geomsplit data\n geomsplit.data.updateCenterRadius()\n # setting geomsplit to None flags this for\n # the next iteration\n geomsplit = None\n # return grouping node\n return node\n\n def __init__(self, *args, **kwargs):\n pyffi.spells.nif.NifSpell.__init__(self, *args, **kwargs)\n # list of all optimized geometries so far\n # (to avoid optimizing the same geometry twice)\n self.optimized = []\n\n def datainspect(self):\n return self.inspectblocktype(NifFormat.NiTriBasedGeom)\n\n def branchinspect(self, branch):\n return isinstance(branch, NifFormat.NiAVObject)\n\n def branchentry(self, branch):\n if not isinstance(branch, NifFormat.NiTriBasedGeom):\n # keep recursing\n return True\n\n if branch in self.optimized:\n # already optimized\n return False\n \n # we found a geometry to optimize\n # XXX todo\n # get geometry data\n geomdata = block.data\n if not geomdata:\n self.optimized.append(block)\n return False\n # check radius\n if geomdata.radius < self.THRESHOLD_RADIUS:\n optimized_geometries.append(block)\n return False\n # radius is over the threshold, so re-organize the geometry\n newblock = split(block, threshold_radius = THRESHOLD_RADIUS)\n # replace block with newblock everywhere\n data.replaceGlobalNode(block, newblock)\n\n self.optimized.append(block)\n\n # stop recursing\n return False\n\nclass SpellOptimize(\n pyffi.spells.SpellGroupSeries(\n pyffi.spells.SpellGroupParallel(\n SpellCleanRefLists,\n pyffi.spells.nif.fix.SpellDetachHavokTriStripsData,\n pyffi.spells.nif.fix.SpellFixTexturePath,\n pyffi.spells.nif.fix.SpellClampMaterialAlpha),\n SpellMergeDuplicates,\n SpellOptimizeGeometry)):\n \"\"\"Global fixer and optimizer spell.\"\"\"\n SPELLNAME = \"optimize\"\n\n","sub_path":"pyffi/spells/nif/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":24551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"103047766","text":"import random\r\n\r\nplayerMoney = 1000\r\nhouseMoney = 1000\r\nnumberOfRoundsPlayed = 1\r\n\r\n#cards\r\nlistOfCards = [1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,6,6,6,6,7,7,7,7,8,8,8,8,\r\n9,9,9,9,10,10,10,10,11,11,11,11]\r\n\r\n#main game loop\r\nplayGame = 
True\r\nwhile playGame:\r\n #house cards\r\n houseCard_1 = random.choice(listOfCards)\r\n listOfCards.remove(houseCard_1)\r\n houseCard_2 = random.choice(listOfCards)\r\n listOfCards.remove(houseCard_2)\r\n houseCard_1 = str(houseCard_1)\r\n houseCard_2 = str(houseCard_2)\r\n\r\n #player cards\r\n playerCard_1 = random.choice(listOfCards)\r\n listOfCards.remove(playerCard_1)\r\n playerCard_2 = random.choice(listOfCards)\r\n listOfCards.remove(playerCard_2)\r\n playerCard_1 = str(playerCard_1)\r\n playerCard_2 = str(playerCard_2)\r\n\r\n print(\"The house has a \" + houseCard_2)\r\n print(\"You have a \" + playerCard_1 + \" and a \" + playerCard_2)\r\n\r\n #cost to play (rounds 5-7 inclusive, so no round falls through the brackets)\r\n if numberOfRoundsPlayed < 5:\r\n playerMoney -= 25\r\n houseMoney -= 25\r\n elif 5 <= numberOfRoundsPlayed <= 7:\r\n playerMoney -= 50\r\n houseMoney -= 50\r\n elif numberOfRoundsPlayed > 7:\r\n playerMoney -= 100\r\n houseMoney -= 100\r\n\r\n #hit or stand?\r\n print(\"Would you like to hit or stand? 1)hit 2)stand\")\r\n hitOrStand = False\r\n while not hitOrStand:\r\n hit = int(input())\r\n if hit == 1:\r\n playerCard_3 = random.choice(listOfCards)\r\n listOfCards.remove(playerCard_3)\r\n playerCard_3 = str(playerCard_3)\r\n print(\"You have a \" + playerCard_1 + \" and a \" + playerCard_2 + \" and a \"\r\n + playerCard_3)\r\n playerCard_3 = int(playerCard_3)\r\n hitOrStand = True\r\n elif hit == 2:\r\n hitOrStand = True\r\n else:\r\n print(\"1)hit 2)stand\")\r\n\r\n print(\"The house has a \" + houseCard_1 + \" and a \" + houseCard_2)\r\n\r\n houseCard_1 = int(houseCard_1)\r\n houseCard_2 = int(houseCard_2)\r\n playerCard_1 = int(playerCard_1)\r\n playerCard_2 = int(playerCard_2)\r\n\r\n #who wins?\r\n if hit == 1:\r\n playerTotal = playerCard_1 + playerCard_2 + playerCard_3\r\n else:\r\n playerTotal = playerCard_1 + playerCard_2\r\n\r\n houseTotal = houseCard_1 + houseCard_2\r\n\r\n if playerTotal <= 21 and playerTotal > houseTotal:\r\n print(\"You win!\")\r\n if numberOfRoundsPlayed < 5:\r\n playerMoney += 50\r\n elif 5 <= numberOfRoundsPlayed <= 7:\r\n playerMoney += 100\r\n elif numberOfRoundsPlayed > 7:\r\n playerMoney += 200\r\n elif houseTotal < 21 and houseTotal >= playerTotal:\r\n print(\"The house wins\")\r\n if numberOfRoundsPlayed < 5:\r\n houseMoney += 50\r\n elif 5 <= numberOfRoundsPlayed <= 7:\r\n houseMoney += 100\r\n elif numberOfRoundsPlayed > 7:\r\n houseMoney += 200\r\n elif playerTotal > 21 and houseTotal <= 21:\r\n print(\"The house wins\")\r\n if numberOfRoundsPlayed < 5:\r\n houseMoney += 50\r\n elif 5 <= numberOfRoundsPlayed <= 7:\r\n houseMoney += 100\r\n elif numberOfRoundsPlayed > 7:\r\n houseMoney += 200\r\n elif houseTotal > 21 and playerTotal <= 21:\r\n print(\"You win!\")\r\n if numberOfRoundsPlayed < 5:\r\n playerMoney += 50\r\n elif 5 <= numberOfRoundsPlayed <= 7:\r\n playerMoney += 100\r\n elif numberOfRoundsPlayed > 7:\r\n playerMoney += 200\r\n elif houseTotal > 21 and playerTotal > 21:\r\n print(\"It's a draw\")\r\n if numberOfRoundsPlayed < 5:\r\n playerMoney += 25\r\n houseMoney += 25\r\n elif 5 <= numberOfRoundsPlayed <= 7:\r\n playerMoney += 50\r\n houseMoney += 50\r\n elif numberOfRoundsPlayed > 7:\r\n playerMoney += 100\r\n houseMoney += 100\r\n elif houseTotal == playerTotal:\r\n print(\"It's a draw\")\r\n if numberOfRoundsPlayed < 5:\r\n playerMoney += 25\r\n houseMoney += 25\r\n elif 5 <= numberOfRoundsPlayed <= 7:\r\n playerMoney += 50\r\n houseMoney += 50\r\n elif numberOfRoundsPlayed > 7:\r\n playerMoney += 100\r\n houseMoney += 100\r\n\r\n #who runs out of money first?\r\n if 
playerMoney <= 0:\r\n print(\"The house wins\")\r\n break\r\n if houseMoney <= 0:\r\n print(\"You win!\")\r\n break\r\n\r\n #play again?\r\n print(\"Would you like to play again? 1)yes 2)no\")\r\n playAgainQuestion = False\r\n while not playAgainQuestion:\r\n playAgain = int(input())\r\n if playAgain == 1:\r\n playAgainQuestion = True\r\n numberOfRoundsPlayed += 1\r\n playerMoney = str(playerMoney)\r\n houseMoney = str(houseMoney)\r\n print(\"You have $\" + playerMoney + \" and the house has $\" + houseMoney)\r\n print(\"\")\r\n playerMoney = int(playerMoney)\r\n houseMoney = int(houseMoney)\r\n elif playAgain == 2:\r\n playGame = False\r\n playAgainQuestion = True\r\n else:\r\n print(\"1)yes 2)no\")\r\n\r\nprint(\"\")\r\nprint(\"Thank you for playing! You played \" + str(numberOfRoundsPlayed) + \" rounds\")\r\nclose = input(\"press enter to close\")\r\n","sub_path":"Black Jack.py","file_name":"Black Jack.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"139765504","text":"from no_flex_houses import flexible_houses\n\n#Add manual thermostat control\nfile = 'IEEE_123_homes_1min_nothermostatcontrol.glm'\nnew_file = 'IEEE_123_homes_1min.glm'\nglm = open(file,'r') \nnew_glm = open(new_file,'w') \n\n#Simply copy nothermostatcontrol file\nfor line in glm:\n new_glm.write(line)\n\nglm.close()\nnew_glm.close()","sub_path":"manual_thermostat_control_nomarket.py","file_name":"manual_thermostat_control_nomarket.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"71397696","text":"from parser import Parser\nfrom CodeWriter import CodeWriter\nimport os\nimport sys\n\n\n\ndef VMTranslator():\n\n \n if(os.path.isdir(sys.argv[1])):\n\n Directory_Name = sys.argv[1].split(\"/\")[-1]\n if(Directory_Name == \"\"):\n Directory_Name = sys.argv[-1].split(\"/\")[-2]\n \n path_name = sys.argv[1]\n all_files = os.listdir(path_name)\n vm_files = []\n asm_files = []\n\n for file in all_files:\n if file.endswith(\".vm\"):\n vm_files.append(file)\n\n\n for vmfile in vm_files:\n\n parser = Parser(os.path.join(sys.argv[1],vmfile))\n code = CodeWriter(os.path.join(sys.argv[1],vmfile.split(\".vm\")[0]+\".asm\"))\n \n while parser.hasMoreCommands():\n parser.advance()\n code.file.write(\"\\n\\n//\"+parser.curr_command+\"\\n\\n\")\n \n if(parser.commandType()==\"C_PUSH\" or parser.commandType()==\"C_POP\"):\n code.WritePushPop(parser.commandType(),parser.arg1(),parser.arg2())\n\n elif(parser.commandType() == \"C_ARITHMETIC\"):\n\n \n code.writeArithmetic(parser.arg1())\n\n elif(parser.commandType() == \"C_LABEL\"):\n\n code.writeLabel(parser.arg1())\n\n elif(parser.commandType() == \"C_GOTO\"):\n\n code.writeGoto(parser.arg1())\n\n elif(parser.commandType() == \"C_IF\"):\n\n code.writeIf(parser.arg1())\n\n elif(parser.commandType() == \"C_FUNCTION\"):\n\n code.writeFunction(parser.arg1(),parser.arg2())\n\n elif(parser.commandType() == \"C_CALL\"):\n\n code.writeCall(parser.arg1(),parser.arg2())\n\n elif(parser.commandType() == \"C_RETURN\"):\n\n code.writeReturn()\n\n code.Close()\n\n \n\n if(len(vm_files)==1):\n exit(0)\n\n f = open(os.path.join(sys.argv[1],Directory_Name+\".asm\"),\"w\")\n \n all_files = os.listdir(path_name)\n\n for file in all_files:\n \n if file.endswith(\".asm\"):\n \n asm_files.append(file)\n \n if \"Sys.asm\" in asm_files:\n\n Sys_file = open(os.path.join(sys.argv[1],\"Sys.asm\"),\"r\")\n\n for line in 
Sys_file:\n f.write(line)\n \n Sys_file.close()\n\n if \"Main.asm\" in asm_files:\n\n Main_file = open(os.path.join(sys.argv[1],\"Main.asm\"),\"r\")\n for line in Main_file:\n\n f.write(line)\n\n Main_file.close()\n\n for asmfile in asm_files:\n\n if(asmfile != Directory_Name+\".asm\" and asmfile != \"Sys.asm\" and asmfile != \"Main.asm\"):\n\n new_file = open(os.path.join(sys.argv[1],asmfile),\"r\")\n\n for line in new_file:\n\n f.write(line)\n\n new_file.close()\n\n f.write(\"(END)\\n@END\\n0;JMP\\n\")\n\n f.close()\n\n if(os.path.isfile(sys.argv[1])):\n\n parser = Parser(sys.argv[1])\n code = CodeWriter(sys.argv[1].split(\".vm\")[0]+\".asm\")\n \n while parser.hasMoreCommands():\n parser.advance()\n code.file.write(\"\\n\\n//\"+parser.curr_command+\"\\n\\n\")\n \n if(parser.commandType()==\"C_PUSH\" or parser.commandType()==\"C_POP\"):\n code.WritePushPop(parser.commandType(),parser.arg1(),parser.arg2())\n\n elif(parser.commandType() == \"C_ARITHMETIC\"):\n\n \n code.writeArithmetic(parser.arg1())\n\n elif(parser.commandType() == \"C_LABEL\"):\n\n code.writeLabel(parser.arg1())\n\n elif(parser.commandType() == \"C_GOTO\"):\n\n code.writeGoto(parser.arg1())\n\n elif(parser.commandType() == \"C_IF\"):\n\n code.writeIf(parser.arg1())\n\n elif(parser.commandType() == \"C_FUNCTION\"):\n\n code.writeFunction(parser.arg1(),parser.arg2())\n\n elif(parser.commandType() == \"C_CALL\"):\n\n code.writeCall(parser.arg1(),parser.arg2())\n\n elif(parser.commandType() == \"C_RETURN\"):\n\n code.writeReturn()\n\n code.Close()\n\n\n\n\nif __name__ == \"__main__\":\n\n VMTranslator()\n","sub_path":"projects/08/VMTranslator/VMTranslator.py","file_name":"VMTranslator.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"322524129","text":"import django_tables2 as tables \nfrom .models import CustomsInventory, UploadedSales\n\nclass CustomsInventoryTable(tables.Table):\n class Meta:\n model = CustomsInventory\n fields = ['sku', 'tariff', 'quantity', 'description',\\\n 'weight', 'cost', 'office', 'doc_type', 'doc_number',\\\n 'year', 'line', 'country']\n\nclass UploadedSalesTable(tables.Table):\n class Meta:\n model = UploadedSales\n fields = ['first_name', 'last_name', 'rec_number', \\\n 'sku', 'quantity', 'status']","sub_path":"sad/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"129294596","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 7 01:37:18 2021\n\n@author: Admin\n\"\"\"\n\ndef bahola(ismlar):\n baholar={}\n while ismlar:\n ism = ismlar.pop()\n baho = input(f\"talaba {ism.title()}ni bahosini kiriting\")\n baholar[ism]=int(baho)\n return baholar\ntalabalar =['ali','vali'] \nbaholar =bahola(talabalar[:])\nprint(baholar)","sub_path":"funksiyaga_royxat_uzatish.py","file_name":"funksiyaga_royxat_uzatish.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"282956381","text":"import pygame\n# define some colors (R, G, B)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nDARKGREY = (40, 40, 40) \nLIGHTGREY = (100, 100, 100)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nYELLOW = (255, 255, 0)\n\n# game settings\nWIDTH = 24 * 64 # 16 * 64 or 32 * 32 or 64 * 16\nHEIGHT = 24 * 32 # 16 * 48 or 32 * 24 or 64 * 12\nFPS = 60\nTITLE = \"Combining Dungeon and Game\"\nBGCOLOR = DARKGREY\n\n\nDEVLOG 
= False #True\nTILESIZE = 24\nGRIDWIDTH = int( WIDTH / TILESIZE)\nGRIDHEIGHT = int(HEIGHT / TILESIZE)\n\n\n\nCONTROLS = {\n 'UP': pygame.K_w,\n 'LEFT': pygame.K_a,\n 'RIGHT':pygame.K_d,\n 'DOWN': pygame.K_s,\n\n 'QUIT': pygame.K_q,\n }\n\n\n","sub_path":"testing/combine/scripts/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"234150399","text":"import datetime\nimport json\nimport logging\nimport os\nimport shutil\nimport sys\n\nimport grpc\nimport requests\n\nfrom descarteslabs.client.version import __version__\nfrom descarteslabs.common.graft import client as graft_client, syntax as graft_syntax\nfrom descarteslabs.common.registry import registry\nfrom descarteslabs.common.proto.job import job_pb2\nfrom descarteslabs.common.proto.types import types_pb2\nfrom descarteslabs.common.proto.destinations import destinations_pb2\nfrom descarteslabs.common.proto.formats import formats_pb2\nfrom descarteslabs.common.workflows.outputs import (\n user_format_to_proto,\n user_destination_to_proto,\n)\nfrom descarteslabs.common.workflows.proto_munging import (\n which_has,\n has_proto_to_user_dict,\n)\nfrom descarteslabs.common.workflows.arrow_serialization import deserialize_pyarrow\n\nfrom descarteslabs import catalog\n\nfrom ..cereal import deserialize_typespec\nfrom ..client import get_global_grpc_client, default_grpc_retry_predicate\nfrom ..execution import to_computable\nfrom ..result_types import unmarshal\nfrom ..types import GeoContext, ProxyTypeError, Any\nfrom .exceptions import error_code_to_exception, JobTimeoutError, JobCancelled\nfrom .utils import (\n in_notebook,\n pb_milliseconds_to_datetime,\n pb_timestamp_to_datetime,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef _write_to_io_or_widget(io, string):\n if io is not None:\n # try/except avoids having to import ipywidgets just for an isinstance check\n try:\n io.append_stdout(string)\n except AttributeError:\n io.write(string)\n io.flush()\n\n\nclass Job:\n \"\"\"\n A `Job` represents the computation of a proxy object within a `~.geospatial.GeoContext`,\n with values (arguments) set for any parameters it depends on.\n\n If the proxy object depends on any parameters (``obj.params`` is not empty),\n it's first internally converted to a `.Function` that takes those parameters\n (using `.Function.from_object`).\n\n Example\n -------\n >>> import descarteslabs.workflows as wf\n >>> num = wf.Int(1) + wf.parameter(\"x\", wf.Int)\n >>> job = num.compute(block=False, x=1) # doctest: +SKIP\n >>> job # doctest: +SKIP\n \n >>> job.id # doctest: +SKIP\n '3754676080bbb2b857fbc04a3e48f6312732e1bc42e0bd7b'\n >>> job.result() # doctest: +SKIP\n 2\n >>> same_job = wf.Job.get('3754676080bbb2b857fbc04a3e48f6312732e1bc42e0bd7b') # doctest: +SKIP\n >>> same_job.stage # doctest: +SKIP\n 'STAGE_DONE'\n >>> same_job.result() # doctest: +SKIP\n 2\n >>> same_job.arguments() # doctest: +SKIP\n {'x': 1}\n \"\"\"\n\n def __init__(\n self,\n obj,\n geoctx=None,\n format=\"pyarrow\",\n destination=\"download\",\n cache=True,\n _ruster=None,\n _trace=False,\n client=None,\n **arguments,\n ):\n \"\"\"\n Creates a new `Job` to compute the provided proxy object with the given\n arguments.\n\n Parameters\n ----------\n obj: Proxytype\n Proxy object to compute, or list/tuple of proxy objects.\n If it depends on parameters, ``obj`` is first converted\n to a `.Function` that takes those parameters.\n geoctx: 
`~.workflows.types.geospatial.GeoContext`, or None\n The GeoContext parameter under which to run the computation.\n Almost all computations will require a `~.workflows.types.geospatial.GeoContext`,\n but for operations that only involve non-geospatial types,\n this parameter is optional.\n format: str or dict, default \"pyarrow\"\n The serialization format for the result.\n destination: str or dict, default \"download\"\n The destination for the result.\n cache: bool, default True\n Whether to use the cache for this job.\n client: `.workflows.client.Client`, optional\n Allows you to use a specific client instance with non-default\n auth and parameters\n **arguments: Any\n Values for all parameters that ``obj`` depends on\n (or arguments that ``obj`` takes, if it's a `.Function`).\n Can be given as Proxytypes, or as Python objects like numbers,\n lists, and dicts that can be promoted to them.\n These arguments cannot depend on any parameters.\n\n Example\n -------\n >>> from descarteslabs.workflows import Job, Int, parameter\n >>> my_int = Int(1) + parameter(\"other_int\", Int)\n >>> job = Job(my_int, other_int=10) # doctest: +SKIP\n >>> job.stage # doctest: +SKIP\n QUEUED\n \"\"\"\n if client is None:\n client = get_global_grpc_client()\n\n if geoctx is not None:\n try:\n geoctx = GeoContext._promote(geoctx)\n except ProxyTypeError as e:\n raise TypeError(f\"Invalid GeoContext {geoctx!r}: {e}\")\n\n obj, argument_grafts, typespec, result_type = to_computable(obj, arguments)\n\n format_proto = user_format_to_proto(format)\n destination_proto = user_destination_to_proto(destination)\n\n message = client.api[\"CreateJob\"](\n job_pb2.CreateJobRequest(\n serialized_graft=json.dumps(obj.graft),\n typespec=typespec,\n arguments={\n name: json.dumps(arg) for name, arg in argument_grafts.items()\n },\n geoctx_graft=json.dumps(geoctx.graft) if geoctx is not None else None,\n no_ruster=_ruster is False,\n channel=client._wf_channel,\n client_version=__version__,\n no_cache=not cache,\n trace=_trace,\n type=types_pb2.ResultType.Value(result_type),\n format=format_proto,\n destination=destination_proto,\n ),\n timeout=client.DEFAULT_TIMEOUT,\n )\n\n self._message = message\n self._client = client\n self._object = obj\n self._arguments = None\n\n @classmethod\n def get(cls, id, client=None):\n \"\"\"\n Get a currently-running `Job` by its ID.\n\n Parameters\n ----------\n id: string\n The ID of a running job.\n client : `.workflows.client.Client`, optional\n Allows you to use a specific client instance with non-default\n auth and parameters\n\n Example\n -------\n >>> from descarteslabs.workflows import Job\n >>> job = Job.get('3754676080bbb2b857fbc04a3e48f6312732e1bc42e0bd7b') # doctest: +SKIP\n \"\"\"\n if client is None:\n client = get_global_grpc_client()\n\n message = client.api[\"GetJob\"](\n job_pb2.GetJobRequest(id=id), timeout=client.DEFAULT_TIMEOUT\n )\n return cls._from_proto(message, client)\n\n @classmethod\n def _from_proto(cls, proto_message, client=None):\n \"\"\"\n Low-level constructor for creating a Job from a Protobuf message.\n\n Do not use this method directly; use `Job.__init__` or `Job.get` instead.\n\n Parameters\n ----------\n proto_message: job_pb2.Job message\n Job Protobuf message\n client : `.workflows.client.Client`, optional\n Allows you to use a specific client instance with non-default\n auth and parameters\n \"\"\"\n obj = cls.__new__(cls)\n obj._message = proto_message\n\n if client is None:\n client = get_global_grpc_client()\n\n obj._client = client\n obj._object = 
None\n obj._arguments = None\n return obj\n\n def refresh(self):\n \"\"\"\n Refresh the attributes and state of the job.\n\n Example\n -------\n >>> from descarteslabs.workflows import Job, Int\n >>> job = Job(Int(1)) # doctest: +SKIP\n >>> job.stage # doctest: +SKIP\n QUEUED\n >>> job.refresh() # doctest: +SKIP\n >>> job.stage # doctest: +SKIP\n SUCCEEDED\n \"\"\"\n message = self._client.api[\"GetJob\"](\n job_pb2.GetJobRequest(id=self.id), timeout=self._client.DEFAULT_TIMEOUT\n )\n self._message = message\n\n def resubmit(self):\n \"\"\"\n Resubmit this job, returning a new `Job` object.\n\n Example\n -------\n >>> from descarteslabs.workflows import Job, Int\n >>> job = Job(Int(1)) # doctest: +SKIP\n >>> job.id # doctest: +SKIP\n abc123\n >>> job.result() # doctest: +SKIP\n 1\n >>> new_job = job.resubmit() # doctest: +SKIP\n >>> new_job.id # doctest: +SKIP\n xyz456\n >>> new_job.result() # doctest: +SKIP\n 1\n \"\"\"\n if self.version != __version__:\n raise NotImplementedError(\n f\"Resubmitting a Job from a different version is not supported. \"\n f\"This Job {self.id!r} was created by client version {self.version!r}, \"\n f\"but you're currently running {__version__!r}.\"\n )\n\n return Job(\n self.object,\n geoctx=self.geoctx,\n format=self.format,\n destination=self.destination,\n client=self._client,\n cache=self.cache_enabled,\n _ruster=not self._message.no_ruster,\n _trace=self._message.trace,\n **self.arguments,\n )\n\n def cancel(self):\n \"\"\"\n Cancel a running job.\n\n Example\n -------\n >>> from descarteslabs.workflows import Job, Int\n >>> my_int = Int(1)\n >>> job = Job(my_int) # doctest: +SKIP\n >>> job.cancel() # doctest: +SKIP\n \"\"\"\n self._client.api[\"CancelJob\"](\n job_pb2.CancelJobRequest(id=self.id), timeout=self._client.DEFAULT_TIMEOUT\n )\n\n def watch(self, timeout=None):\n \"\"\"\n Generator that yields ``self`` each time an update to the Job occurs.\n\n Parameters\n ----------\n timeout: int, optional\n The number of seconds to watch for Job updates. Defaults to\n self._client.STREAM_TIMEOUT, which is also the maximum allowed.\n\n Example\n -------\n >>> from descarteslabs.workflows import Job, Int\n >>> job = Job(Int(1)) # doctest: +SKIP\n >>> for job in job.watch(): # doctest: +SKIP\n ... print(job.stage)\n QUEUED\n PREPARING\n RUNNING\n RUNNING\n SAVING\n SUCCEEDED\n \"\"\"\n if timeout is None:\n timeout = self._client.STREAM_TIMEOUT\n else:\n # Take the shortest of the user-specified timeout and the client default\n # stream timeout.\n timeout = min(timeout, self._client.STREAM_TIMEOUT)\n\n stream = self._client.api[\"WatchJob\"](\n job_pb2.WatchJobRequest(id=self.id), timeout=timeout\n )\n\n for state in stream:\n self._message.state.CopyFrom(state)\n yield self\n\n def result(self, timeout=None, progress_bar=None):\n \"\"\"\n Get the result of the job. This blocks until the job is\n complete.\n\n Only the \"download\" destination can be retrieved.\n Raises NotImplementedError for other destinations.\n\n Parameters\n ----------\n timeout: int, optional\n The number of seconds to wait for the result.\n progress_bar: bool, optional\n Flag to draw the progress bar. Default is to ``True`` if in\n Jupyter Notebook.\n\n Returns\n -------\n result: Python object or bytes\n When the Job's format is \"pyarrow\", returns a Python object representing\n the result, either as a plain Python type, or object from `descarteslabs.workflows.result_types`.\n For other formats, returns raw bytes. 
Consider using `result_to_file` in that case\n to save the results to a file.\n\n Example\n -------\n >>> from descarteslabs.workflows import Job, Int\n >>> job = Job(Int(1)) # doctest: +SKIP\n >>> job.result() # doctest: +SKIP\n 1\n \"\"\"\n handler = get_loader(self._message.destination)\n self.wait(timeout=timeout, progress_bar=progress_bar)\n return handler(self)\n\n def result_to_file(self, file, timeout=None, progress_bar=None):\n \"\"\"\n Save the result of the job to a file. This blocks until the job is\n complete.\n\n Only the \"download\" destination can be written to a file.\n For destinations like \"catalog\", where the data is handed off\n to another service, you'll need to use that service to retrieve it.\n (In the \"catalog\" case, that's `Raster` and `Metadata`.)\n\n Parameters\n ----------\n file: path or file-like object\n Path or file where results will be written\n timeout: int, optional\n The number of seconds to wait for the result.\n progress_bar: bool, optional\n Flag to draw the progress bar. Default is to ``True`` if in\n Jupyter Notebook.\n\n Example\n -------\n >>> from descarteslabs.workflows import Job, Int\n >>> job = Job(Int(1), format=\"json\") # doctest: +SKIP\n >>> job.result_to_file(\"one.json\") # doctest: +SKIP\n\n >>> import io\n >>> from descarteslabs.workflows import Job, Int\n >>> job = Job(Int(2), format=\"json\") # doctest: +SKIP\n >>> bytestream = io.BytesIO() # doctest: +SKIP\n >>> job.result_to_file(bytestream) # doctest: +SKIP\n >>> print(bytestream.read()) # doctest: +SKIP\n b'2'\n \"\"\"\n destination_name = which_has(self._message.destination)\n if destination_name not in (\"download\", \"email\"):\n raise NotImplementedError(\n \"Not possible to automatically write results to a file for \"\n \"output destination {}. You'll need to load the data and write it \"\n \"out yourself.\".format(destination_name)\n )\n\n if hasattr(file, \"read\"):\n close_file = False\n else:\n # assume it's a path\n file = open(os.path.expanduser(file), \"wb\")\n close_file = True\n\n try:\n self.wait(timeout=timeout, progress_bar=progress_bar)\n\n response = requests.get(self.url, stream=True)\n response.raise_for_status()\n # TODO error handling; likely the result has expired\n\n response.raw.decode_content = True\n shutil.copyfileobj(response.raw, file)\n # https://stackoverflow.com/a/13137873/10519953\n\n finally:\n if close_file:\n file.close()\n\n def wait(\n self,\n timeout=None,\n progress_bar=False,\n cancel_on_timeout=True,\n cancel_on_interrupt=True,\n ):\n \"\"\"\n Block until the Job is complete, optionally displaying a progress bar.\n\n Raises any error that occurs with the `Job`, or `JobTimeoutError` if\n the timeout passes before the `Job` is complete.\n\n Parameters\n ----------\n timeout: int, optional\n The number of seconds to wait for the result.\n progress_bar: bool, optional\n Flag to draw the progress bar. Default is to ``True`` if in\n Jupyter Notebook.\n cancel_on_timeout: bool, optional\n Whether to cancel the job on client timeout. Default is True.\n cancel_on_interrupt: bool, optional\n Whether to cancel the job on interrupt (e.g. ctrl + c). 
Default is True.\n\n Example\n -------\n >>> import descarteslabs.workflows as wf\n >>> job = wf.Int(1).compute(block=False) # doctest: +SKIP\n >>> job.wait() # doctest: +SKIP\n >>> # ^ blocks until `job` is done\n >>> job.result() # doctest: +SKIP\n 1\n \"\"\"\n if progress_bar is None:\n progress_bar = in_notebook()\n\n stream = self.watch(timeout)\n\n show_progress = progress_bar is not False\n if show_progress:\n progress_bar_io = sys.stdout if progress_bar is True else progress_bar\n if not self.done:\n _write_to_io_or_widget(\n progress_bar_io, \"\\nJob ID: {}\\n\".format(self.id)\n )\n\n try:\n while not self.done:\n try:\n next(stream)\n except grpc.RpcError as e:\n if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED:\n if cancel_on_timeout:\n if show_progress:\n _write_to_io_or_widget(\n progress_bar_io,\n \"\\nCancelling job {}\\n\".format(self.id),\n )\n self.cancel()\n raise JobTimeoutError(\n \"Timed out waiting for Job {}\".format(self.id)\n )\n elif default_grpc_retry_predicate(e):\n stream = self.watch(timeout)\n else:\n raise\n except Exception as e:\n if isinstance(e, StopIteration):\n stream = self.watch(timeout)\n else:\n raise\n\n if show_progress:\n self._draw_progress_bar(output=progress_bar_io)\n else:\n if self._message.state.stage == job_pb2.Job.Stage.SUCCEEDED:\n return\n if self._message.state.stage == job_pb2.Job.Stage.FAILED:\n raise self.error\n if self._message.state.stage == job_pb2.Job.Stage.CANCELLED:\n raise JobCancelled(\"Job {} was cancelled.\".format(self.id))\n except KeyboardInterrupt:\n if cancel_on_interrupt:\n if show_progress:\n _write_to_io_or_widget(\n progress_bar_io, \"\\nCancelling job {}\\n\".format(self.id)\n )\n self.cancel()\n raise\n\n @property\n def object(self):\n \"Proxytype: The proxy object this Job computes.\"\n if self.version != __version__:\n raise NotImplementedError(\n f\"Accessing the `object` of a Job from a different version is not supported. \"\n f\"This Job {self.id!r} was created by client version {self.version!r}, \"\n f\"but you're currently running {__version__!r}.\"\n )\n\n if self._object is None:\n self._object = _proxy_object_from_message(self._message)\n return self._object\n\n @property\n def arguments(self):\n \"The arguments of the Job, as a dict of names to Python primitives or Workflows objects.\"\n if self.version != __version__:\n raise NotImplementedError(\n f\"Accessing the `arguments` of a Job from a different version is not supported. \"\n f\"This Job {self.id!r} was created by client version {self.version!r}, \"\n f\"but you're currently running {__version__!r}.\"\n )\n\n if self._arguments is None:\n if len(self._message.arguments) == 0:\n arg_grafts = {}\n else:\n arg_grafts = {}\n kwarg_types = self.object.kwarg_types\n for name, json_graft in self._message.arguments.items():\n try:\n graft = json.loads(json_graft)\n except json.JSONDecodeError as e:\n raise ValueError(\n f\"Invalid JSON in graft for argument {name!r}: {e}. 
Value: {json_graft!r}.\"\n )\n\n obj = (\n kwarg_types.get(name, Any)._from_graft(\n graft_client.isolate_keys(graft)\n )\n if not (\n graft_syntax.is_literal(graft)\n or graft_syntax.is_quoted_json(graft)\n )\n else graft\n )\n arg_grafts[name] = obj\n\n self._arguments = arg_grafts\n\n return self._arguments\n\n @property\n def geoctx(self):\n \"\"\"The Workflows `~.geospatial.GeoContext` the Job was run within, or None\"\"\"\n graft_json = self._message.geoctx_graft\n if not graft_json:\n return None\n\n return GeoContext._from_graft(graft_client.isolate_keys(json.loads(graft_json)))\n\n @property\n def type(self):\n \"\"\"type: The type of the proxy object.\"\"\"\n return type(self.object)\n\n @property\n def result_type(self):\n \"str: Name of the type of object that will be used to hold the result\"\n return types_pb2.ResultType.Name(self._message.type)\n\n @property\n def id(self):\n \"\"\"\n str or None: The globally unique identifier for the Job,\n or None if it hasn't been executed yet.\n \"\"\"\n return self._message.id or None\n\n @property\n def channel(self):\n \"\"\"str: The channel name where this Job will execute.\"\"\"\n return self._message.channel\n\n @property\n def version(self):\n \"\"\"str: The ``descarteslabs`` client version that constructed this Job.\"\"\"\n return self._message.client_version\n\n @property\n def stage(self):\n \"\"\"\n The current stage of the Job (queued, preparing, running, saving, succeeded,\n failed).\n \"\"\"\n return job_pb2.Job.Stage.Name(self._message.state.stage)\n\n @property\n def done(self):\n \"\"\"bool: Whether the Job has completed or not.\"\"\"\n return _is_job_done(self._message.state.stage)\n\n @property\n def cancelled(self):\n \"\"\"Whether the job has been cancelled.\"\"\"\n return self._message.state.stage == job_pb2.Job.Stage.CANCELLED\n\n @property\n def cache_enabled(self):\n \"\"\"Whether caching is enabled for this job.\"\"\"\n return not self._message.no_cache\n\n @property\n def created_datetime(self):\n \"\"\"datetime: The time the Job was created.\"\"\"\n return pb_milliseconds_to_datetime(self._message.timestamp)\n\n @property\n def updated_datetime(self):\n \"\"\"datetime: The time of the most recent Job update.\"\"\"\n return pb_milliseconds_to_datetime(self._message.state.timestamp)\n\n @property\n def expires_datetime(self) -> datetime.datetime:\n \"\"\"\n datetime.datetime: The UTC date this Job will be expired.\n \"\"\"\n return pb_timestamp_to_datetime(self._message.expires_timestamp)\n\n @property\n def runtime(self):\n \"\"\"datetime: The total time it took the Job to run.\"\"\"\n if self.updated_datetime is None or self.created_datetime is None:\n return None\n else:\n return self.updated_datetime - self.created_datetime\n\n @property\n def format(self):\n \"\"\"The serialization format of the Job, as a dictionary.\"\"\"\n return has_proto_to_user_dict(self._message.format)\n\n @property\n def destination(self):\n \"\"\"The destination for the Job results, as a dictionary.\"\"\"\n return has_proto_to_user_dict(self._message.destination)\n\n @property\n def url(self):\n \"\"\"\n The download URL for this Job's results.\n\n If `format` is not \"download\" or \"email\", `url` will be None.\n \"\"\"\n destination = which_has(self._message.destination)\n if destination not in (\"download\", \"email\"):\n return None\n\n return getattr(self._message.destination, destination).result_url\n\n @property\n def error(self):\n \"\"\"The error of the Job, or None if it finished successfully.\"\"\"\n error_code = 
self._message.state.error.code\n exc = error_code_to_exception(error_code)\n return exc(self) if exc is not None else None\n\n def _draw_progress_bar(self, output=None):\n \"\"\"\n Draw the progress bar of a running job.\n\n Parameters\n ----------\n output: ipywidgets Output, file-like object\n The output widget/stream to write a job's progress bar to.\n \"\"\"\n _draw_progress_bar(\n finished=self._message.state.tasks_progress.finished.value,\n total=sum(\n (\n self._message.state.tasks_progress.waiting.value,\n self._message.state.tasks_progress.ready.value,\n self._message.state.tasks_progress.running.value,\n self._message.state.tasks_progress.finished.value,\n )\n ),\n stage=self.stage,\n output=output,\n )\n\n\ndef _draw_progress_bar(finished, total, stage, output, width=6):\n if total == 0:\n percent = 0\n else:\n percent = finished / total\n\n if _is_job_done(stage):\n bar = \"#\" * int(width)\n else:\n bar = \"#\" * int(width * percent)\n\n progress_output = (\n \"\\r[{bar:<{width}}] | Steps: {finished}/{total} | Stage: {stage}\".format(\n bar=bar, width=width, finished=finished, total=total, stage=stage\n )\n )\n\n _write_to_io_or_widget(output, \"{:<79}\".format(progress_output))\n\n\ndef _proxy_object_from_message(message):\n typespec = message.typespec\n proxytype = deserialize_typespec(typespec)\n graft = json.loads(message.serialized_graft)\n isolated = graft_client.isolate_keys(graft)\n # TODO what about params? Job doesn't store them right now.\n # Anything that had params would have become a Function anyway.\n return proxytype._from_graft(isolated)\n\n\ndef _is_job_done(stage):\n return stage in (\n job_pb2.Job.Stage.SUCCEEDED,\n job_pb2.Job.Stage.FAILED,\n job_pb2.Job.Stage.CANCELLED,\n )\n\n\nLOADERS, register = registry()\n\n\ndef get_loader(output_destination: destinations_pb2.Destination):\n specific_destination = getattr(output_destination, which_has(output_destination))\n try:\n return LOADERS[type(specific_destination)]\n except KeyError:\n raise NotImplementedError(\n \"Not possible to load results for output destination {}\".format(\n type(specific_destination).__name__\n )\n )\n\n\n@register(destinations_pb2.Download)\n# NOTE(gabe): Disabling email as a downloadable destination for now, because it's confusing\n# when `.compute(destination=\"email\")` returns the data in your notebook.\n# Especially if that data is 30mb of binary GeoTIFF dumped into your terminal.\n# @register(destinations_pb2.Email)\ndef download(job: Job):\n response = requests.get(job.url)\n response.raise_for_status()\n # TODO error handling; likely the result has expired\n data = response.content\n\n message = job._message\n specific_format = getattr(message.format, which_has(message.format))\n\n if isinstance(specific_format, formats_pb2.Pyarrow):\n codec = response.headers[\"x-goog-meta-X-Arrow-Codec\"]\n result_type = job.result_type\n marshalled = deserialize_pyarrow(data, codec)\n return unmarshal.unmarshal(result_type, marshalled)\n\n return data\n\n\n@register(destinations_pb2.Catalog)\ndef catalog_image(job: Job):\n destination = job.destination\n return catalog.Image.get(destination[\"product_id\"] + \":\" + destination[\"name\"])\n","sub_path":"descarteslabs/workflows/models/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":27530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"449624044","text":"__author__ = 'Jim Fasarakis-Hilliard'\nimport os\n\nDB_ROOT_PATH = os.path.dirname(os.path.realpath(__file__))\n\n# 
MONGO CONNECTION PARAMETERS (default-local)\nMONGO_HOST = 'localhost'\nMONGO_PORT = 27017\n\n# Twitter DB\nMONGO_TWITTER_DB_NAME = 'twitter_db'\n# Twitter Fields used when exporting to csv/json\nTWITTER_FIELDS_FILE = DB_ROOT_PATH + '/retweet_network_fields.txt'\n\n# EXPORT FILE NAME\nRETWEET_NETWORK_NAME = 'rt_network'","sub_path":"src/db/db_config.py","file_name":"db_config.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"19544119","text":"def triangles():\n n=1\n while True:\n yield t(n)\n n=n+1\n \n \n \ndef t(n):\n l=[]\n if n==1:\n return [x for x in range(1,2)]\n l1=t(n-1)\n l.append(1)\n for i in range(1,len(l1)):\n l.append(l1[i-1]+l1[i])\n l.append(1)\n return l\n\ndef triangles1():\n lst = [1]\n while True:\n yield lst\n lst.append(0) # reserve a slot first, then fill in the updated value at each position\n lst = [lst[i-1] + lst[i] for i in range(len(lst))]\n \n\ndef test():\n n=0\n for t in triangles1():\n print(t)\n n=n+1\n if n==10:\n break\n\n \nif __name__==\"__main__\":\n test()\n","sub_path":"triangles.py","file_name":"triangles.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"257465684","text":"import numpy as np\nimport logging\n\n\nlogger = logging.getLogger()\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s:%(levelname)s:%(message)s\"\n )\n\nclass Gene(object):\n def __init__(self):\n self.names = None\n self.totalSpots = None\n self.nG = None\n self.totalBackground = None\n # self._totalZero = None\n self.expectedGamma = None\n self.expression = None\n self.logExpression = None\n\n # @property\n # def totalZero(self, spots):\n # self._totalZero = np.bincount(spots.geneNo, spots.zeroProb)\n # return self._totalZero\n\n def updateGamma(self, cells, spots, klasses, ini):\n # pSpotZero = spots.zeroKlassProb(klasses, cells)\n TotPredictedZ = spots.TotPredictedZ(spots.geneNo, cells.classProb[:, -1])\n\n TotPredicted = cells.geneCountsPerKlass(self, spots, klasses, ini)\n\n nom = ini['rGene'] + self.totalSpots - spots.TotPredictedB - TotPredictedZ\n denom = ini['rGene'] + TotPredicted\n self.expectedGamma = nom / denom\n\n def setKlassExpressions(self, klasses, ini, gSet):\n MeanClassExp = np.zeros([self.nG, klasses.nK])\n # temp = gSet.GeneSubset(self.names).ScaleCell(0)\n for k in range(klasses.nK - 1):\n # print('k = ', k)\n val = ini['Inefficiency'] * np.mean(gSet.CellSubset(klasses.name[k]).GeneExp, 1)\n MeanClassExp[:, k] = val[None, :]\n # MeanClassExp = MeanClassExp, (1, self.nG, klasses.nK)\n expression = MeanClassExp\n logExpression = np.log(MeanClassExp + ini['SpotReg'])\n self.expression = np.reshape(expression, (1, self.nG, klasses.nK))\n self.logExpression = np.reshape(logExpression, (1, self.nG, klasses.nK))\n\n def totalZero(self, spots, zeroProb):\n out = np.bincount(spots.geneNo, zeroProb)\n # self._totalZero = out\n # np.bincount(spots.geneNo, pSpotZero)\n return out\n","sub_path":"src/gene.py","file_name":"gene.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"49222449","text":"import os\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\nimport pygame as pg\nimport time\n\nWIDTH = 700\nHEIGHT = 500\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 220, 0)\nBLUE = (0, 0, 255)\n\nup_key = False\ndown_key = False\nleft_key = False\nright_key = False\n\nobstacle = 
pg.Rect(int(WIDTH/4), HEIGHT-100, int(WIDTH/2), 30) # height was 30, \n\ndef update_screen():\n screen.fill(WHITE)\n pg.draw.rect(screen, GREEN, obstacle)\n myPlayer.draw()\n pg.display.update()\n\nclass Player():\n def __init__(self, color=BLUE):\n self.PLAYER_WIDTH = 30\n self.PLAYER_HEIGHT = 30\n\n self.color = color\n\n self.x_vel = 0\n self.y_vel = 0\n\n self.max_vel = 12\n\n self.friction = 0.85\n self.power = 8\n\n self.jump_power = 27\n self.gravity = -0.95\n\n self.rect = pg.Rect(0, 0, self.PLAYER_WIDTH, self.PLAYER_HEIGHT)\n\n self.x = int(WIDTH / 2)\n self.y = int(self.PLAYER_HEIGHT + 10)\n\n self.update_location()\n\n def draw(self):\n pg.draw.rect(screen, self.color, self.rect)\n \n def check_collision(self): # Returns True if touching obstacle\n if self.rect.colliderect(obstacle):\n return True\n else:\n return False\n\n def update_location(self):\n self.rect.center = self.x, self.y\n\n def touching_floor(self):\n if self.y + (self.PLAYER_HEIGHT/2) > HEIGHT:\n return True\n else:\n return False\n\n def step(self, change_x, change_y):\n if change_y > 0:\n for _ in range(change_y):\n self.y += 1\n self.update_location()\n if self.check_collision():\n self.y -= 1\n self.update_location()\n # print('reached a wall, stepped back one and still touching') if self.check_collision() else print('reached a wall, stepped back one and not touching any more')\n break\n elif change_y < 0:\n for _ in range(-change_y):\n self.y -= 1\n self.update_location()\n if self.check_collision():\n self.y += 1\n self.update_location()\n # print('reached a wall, stepped back one and still touching') if self.check_collision() else print('reached a wall, stepped back one and not touching any more')\n self.y_vel = 0\n break\n\n if change_x > 0:\n for _ in range(change_x):\n self.x += 1\n self.update_location()\n if self.check_collision():\n self.x -= 1\n self.update_location()\n # print('reached a wall, stepped back one and still touching') if self.check_collision() else print('reached a wall, stepped back one and not touching any more')\n break\n elif change_x < 0:\n for _ in range(-change_x):\n self.x -= 1\n self.update_location()\n if self.check_collision():\n self.x += 1\n self.update_location()\n # print('reached a wall, stepped back one and still touching') if self.check_collision() else print('reached a wall, stepped back one and not touching any more')\n break\n\n def next_move(self, up, down, left, right):\n\n # GRAVITY\n self.y_vel -= self.gravity\n if self.y_vel > self.max_vel:\n self.y_vel = self.max_vel\n\n # JUMPING\n self.y += 1\n self.update_location()\n if up and (self.check_collision() or self.touching_floor()):\n self.y_vel -= self.jump_power\n self.y -= 1\n\n # X AXIS FRICTION\n self.x_vel = self.x_vel * self.friction\n\n # LEFT AND RIGHT KEYS\n if left and not right:\n self.x_vel = -self.power\n elif right and not left:\n self.x_vel = self.power\n\n # MOVE BY PLAYER VELOCITIES\n myPlayer.step(int(self.x_vel), int(self.y_vel))\n\n # ADD A FLOOR (temporary)\n if self.touching_floor():\n self.y = int(HEIGHT - (self.PLAYER_HEIGHT/2))\n self.update_location()\n\n # CHECK IF TOUCHING A SURFACE (for debuggings)\n if self.check_collision():\n print('this is bad')\n else:\n pass\n\npg.init()\npg.display.set_caption('Platformer 6')\nscreen = pg.display.set_mode([WIDTH, HEIGHT])\n\nmyPlayer = Player()\n\nrunning = True\nwhile running:\n\n # PROCESS INPUTS\n for e in pg.event.get():\n if e.type == pg.QUIT:\n running = False\n\n elif e.type == pg.KEYDOWN:\n if e.key == pg.K_LEFT:\n left_key = True\n 
elif e.key == pg.K_RIGHT:\n right_key = True\n elif e.key == pg.K_DOWN:\n down_key = True\n elif e.key == pg.K_UP:\n up_key = True\n\n elif e.type == pg.KEYUP:\n if e.key == pg.K_LEFT:\n left_key = False\n elif e.key == pg.K_RIGHT:\n right_key = False\n elif e.key == pg.K_DOWN:\n down_key = False\n elif e.key == pg.K_UP:\n up_key = False\n\n # HANDLE EVENTS\n myPlayer.next_move(up_key, down_key, left_key, right_key)\n\n # RENDER SCREEN\n update_screen()\npg.quit()\n","sub_path":"car06.py","file_name":"car06.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"643348232","text":"import os\nimport socket\nimport subprocess\nimport shlex\nimport time\nimport ssl\nimport random\nimport json\nimport threading\nimport pickle\nimport datetime\nimport Adafruit_DHT # required by sensor_args and GetReading() below; missing in the original\n\nclientname = socket.gethostname()\nclientipv4 = socket.gethostbyname(clientname)\n\nhost = '192.168.43.66'\nport = 8989\n\nserver_sni_hostname = 'RPMS'\nserver_cert = 'server.crt'\nclient_cert = 'adityaclientcertchainf.crt'\n#client_key = 'client.key'\n\n\nsensor_args = {'11': Adafruit_DHT.DHT11,\n '22': Adafruit_DHT.DHT22,\n '2302': Adafruit_DHT.AM2302}\n\n# ssl.SSLContext() only accepts a protocol argument; purpose/cafile belong to create_default_context()\ncontext = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=server_cert)\ncontext.load_cert_chain(certfile=client_cert)\ncipher = 'TLS_ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384'\ncontext.set_ciphers(cipher)\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns = context.wrap_socket(s, server_side=False,\n server_hostname=server_sni_hostname)\n\nmodel = None\nwith open(\"model\", \"rb\") as infile:\n model = pickle.load(infile)\n\n\ndef ConnectToHost():\n global s\n # while True:\n try:\n # s is already TLS-wrapped at module level; wrapping it again here would attempt TLS-in-TLS\n s.connect((host, port))\n print(\"[ OK ] Connected to host : \", str(host))\n # Clientdata=input()\n # s.send(str.encode(Clientdata))\n while True:\n # time.sleep(5)\n try:\n cmd = s.recv(1024) # decode before use\n cmd = cmd.decode(\"utf-8\")\n if(cmd != \"testsig\"):\n cmd = shlex.split(cmd)\n out = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n stdout, stderr = out.communicate()\n # print(type(stdout))\n stdoutJSON = json.dumps({\"stdout\": stdout.decode(\"utf-8\")})\n # print(len(stdoutJSON))\n sendLen = str(len(stdoutJSON)).rjust(4)\n s.send(str.encode(sendLen+stdoutJSON))\n # s.send(stdout)\n except socket.error as errmssg:\n stdoutJSON = json.dumps({\"stdout\": str(errmssg)})\n # print(len(stdoutJSON))\n sendLen = str(len(stdoutJSON)).rjust(4)\n s.send(str.encode(sendLen+stdoutJSON))\n print(\"[ FAILED ] \" + str(errmssg))\n except KeyboardInterrupt:\n print(\"BYE----------\")\n s.shutdown(socket.SHUT_RDWR) # socket.shutdown() requires a how argument\n s.close()\n except socket.error as errmssg:\n print(\"[ FAILED ] Failed To connect To Host : \"+str(errmssg))\n\n\ndef getTimeStamp():\n return time.strftime(\"%H:%M:%S\", time.localtime())\n\n\ndef GetReading():\n sensor = sensor_args['11']\n pin = '17'\n humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\n return [humidity, temperature]\n\n\ndef getMonth():\n index = int(time.strftime(\"%m\", time.localtime()))\n return index\n\n\ndef getDay():\n return datetime.datetime.today().weekday() + 1\n\n\ndef SendReading():\n while True:\n hum = random.randint(1, 99)\n temp = float(random.randint(1, 
50))\n print(\"{}%\\t{}*\".format(hum, temp))\n month = \"jan feb mar apr may jun jul aug sep oct nov dec\".split(\" \")[\n getMonth()-1]\n day = \"mon tue wed thu fri sat sun\".split(\" \")[getDay()-1]\n xcoord = random.randint(1, 10)\n ycoord = random.randint(1, 10)\n FFMC = random.random()*random.randint(1, 100)\n DMC = random.random()*random.randint(1, 100)\n DC = random.random()*random.randint(1, 100)\n ISI = random.random()*random.randint(1, 100)\n wind = random.random()*random.randint(1, 50)\n rain = random.random()\n x_test = [xcoord, ycoord, getMonth(), getDay(), FFMC, DMC, DC,\n ISI, temp, hum, wind, rain]\n # print(x_test)\n y_pred_test = model.predict([x_test])\n print(\"Prediction = {}\".format(int(y_pred_test[0])))\n print(\"*\"*20)\n reading = {\n \"data\": {\n \"ip\": clientname,\n \"month\": month,\n \"day\": day,\n \"FFMC\": FFMC,\n \"DMC\": DMC,\n \"DC\": DC,\n \"ISI\": ISI,\n \"temp\": temp,\n \"RH\": hum,\n \"wind\": wind,\n \"rain\": rain,\n \"prediction\": int(y_pred_test[0]),\n \"timestamp\": getTimeStamp()\n }\n }\n\n readingJSON = json.dumps(reading)\n # print(len(readingJSON))\n sendLen = str(len(readingJSON)).rjust(4)\n s.send(str.encode(sendLen+readingJSON))\n time.sleep(5)\n\n\ndef checkExit():\n while True:\n try:\n cmd = input()\n if cmd == \"q\":\n s.shutdown(socket.SHUT_RDWR)\n s.close()\n exit()\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n try:\n tc1 = threading.Thread(target=ConnectToHost)\n tc2 = threading.Thread(target=SendReading)\n tc3 = threading.Thread(target=checkExit)\n tc1.start()\n time.sleep(2)\n tc2.start()\n tc3.start()\n tc1.join()\n tc2.join()\n tc3.join()\n except (KeyboardInterrupt, SystemExit):\n print(\"BYE----------\")\n s.shutdown(socket.SHUT_RDWR)\n s.close()\n exit()\n except Exception as e:\n print(e)\n","sub_path":"RPMS/local/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"143267715","text":"from flask import Flask, render_template, request\r\nfrom WorldData import WorldData\r\n\r\nWorldData = WorldData()\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n\treturn render_template('index.html', WorldData=WorldData)\r\n\r\n@app.route('/create_biome', methods=['POST', 'GET'])\r\ndef create_biome():\r\n\tif request.method == 'POST':\r\n\t\tbiome_name = request.form['biomeName']\r\n\t\tbiome_colour = request.form['biomeColour']\r\n\t\tbiome_desc = request.form['biomeDescription']\r\n\t\tbiome_effects = request.form['biomeEffects']\r\n\r\n\t\tWorldData.CreateBiome(biome_name, biome_desc, biome_colour, biome_effects)\r\n\r\n\t\treturn render_template('index.html', WorldData=WorldData)\r\n\treturn render_template('Biome.html')\r\n\r\n\r\n@app.route('/create_natural', methods=['POST', 'GET'])\r\ndef create_natural():\r\n\tif request.method == 'POST':\r\n\t\tnatural_name = request.form['naturalName']\r\n\t\tnatural_desc = request.form['naturalDescription']\r\n\r\n\t\tWorldData.Formation.CreateNatural(natural_name, natural_desc)\r\n\r\n\t\treturn render_template('index.html', WorldData=WorldData)\r\n\treturn render_template('Natural.html')\r\n\r\n\r\n@app.route('/create_structure', methods=['POST', 'GET'])\r\ndef create_structure():\r\n\tif request.method == 'POST':\r\n\t\tstructure_name = request.form['structureName']\r\n\t\tstructure_desc = request.form['structureDescription']\r\n\r\n\t\tWorldData.Formation.CreateStructure(structure_name, 
structure_desc)\r\n\r\n\t\treturn render_template('index.html', WorldData=WorldData)\r\n\treturn render_template('Structure.html')\r\n\r\n@app.route('/create_utility', methods=['POST', 'GET'])\r\ndef create_utility():\r\n\tif request.method == 'POST':\r\n\t\tutility_name = request.form['utilityName']\r\n\t\tutility_desc = request.form['utilityDescription']\r\n\t\tutility_goods = request.form['utilityGoods']\r\n\r\n\t\tWorldData.Formation.CreateUtility(utility_name, utility_desc, utility_goods)\r\n\r\n\t\treturn render_template('index.html', WorldData=WorldData)\r\n\treturn render_template('Utility.html')\r\n\r\n@app.route('/create_dungeon', methods=['POST', 'GET'])\r\ndef create_dungeon():\r\n\tif request.method == 'POST':\r\n\t\tdungeon_name = request.form['dungeonName']\r\n\t\tdungeon_desc = request.form['dungeonDescription']\r\n\r\n\t\tWorldData.Formation.CreateDungeon(dungeon_name, dungeon_desc, \"N/A\")\r\n\r\n\t\treturn render_template('index.html', WorldData=WorldData)\r\n\treturn render_template('Dungeon.html')\r\n\r\n\r\n@app.route('/create_settlement', methods=['POST', 'GET'])\r\ndef create_settlement():\r\n\tif request.method == 'POST':\r\n\r\n\t\tsettlement_name = request.form['settlementName']\r\n\t\tsettlement_desc = request.form['settlementDescription']\r\n\t\tsettlement_pop = request.form['settlementPopulation']\r\n\t\tsettlement_size = request.form['settlementSize']\r\n\t\tsettlement_wealth = request.form['settlementWealth']\r\n\t\tsettlement_hierarchy = request.form['settlementHierarchy']\r\n\r\n\t\tsettlement_structures = (request.form['settlementStructures']).split(', ')\r\n\t\tsettlement_utilities = (request.form['settlementUtilities']).split(', ')\r\n\r\n\t\tWorldData.Formation.CreateSettlement(settlement_name, settlement_desc, settlement_pop, settlement_size, settlement_wealth, settlement_hierarchy, \"N/A\")\r\n\r\n\t\tfor structure in settlement_structures:\r\n\t\t\tfor made_structure in WorldData.Formation.structures:\r\n\t\t\t\tif structure == made_structure.structure_name:\r\n\t\t\t\t\tWorldData.Formation.settlements[-1].AddStructure(made_structure)\r\n\r\n\r\n\t\tfor utility in settlement_utilities:\r\n\t\t\tfor made_utility in WorldData.Formation.utilities:\r\n\t\t\t\tif utility == made_utility.utility_name:\r\n\t\t\t\t\tWorldData.Formation.settlements[-1].AddUtility(made_utility)\r\n\r\n\r\n\t\treturn render_template('index.html', WorldData=WorldData)\r\n\treturn render_template('Settlement.html', WorldData=WorldData)\r\n\r\n\r\n\r\n@app.route('/create_ruins', methods=['POST', 'GET'])\r\ndef create_ruins():\r\n\tif request.method == 'POST':\r\n\r\n\t\truins_name = request.form['ruinsName']\r\n\t\truins_desc = request.form['ruinsDescription']\r\n\r\n\t\truins_structures = (request.form['ruinsStructures']).split(', ')\r\n\r\n\t\tWorldData.Formation.CreateRuins(ruins_name, ruins_desc, \"N/A\")\r\n\r\n\t\tfor structure in ruins_structures:\r\n\t\t\tfor made_structure in WorldData.Formation.structures:\r\n\t\t\t\tif structure == made_structure.structure_name:\r\n\t\t\t\t\tWorldData.Formation.ruins[-1].AddStructure(made_structure)\r\n\r\n\r\n\t\treturn render_template('index.html', WorldData=WorldData)\r\n\treturn render_template('Ruins.html', WorldData=WorldData)\r\n\r\nif __name__ == \"__main__\":\r\n\tapp.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"328019930","text":"num_init = int(input(\"Enter a positive integer: \"))\r\ngreatest_dig = num_init % 10\r\nnum = num_init\r\nwhile num > 0:\r\n digit = num % 10\r\n if digit > greatest_dig:\r\n greatest_dig = digit # keep the running maximum (the original never updated it)\r\n if greatest_dig == 9:\r\n break # 9 is the largest possible digit, so stop early\r\n num = num // 10\r\nprint(f\"The greatest digit in {num_init} is {greatest_dig}\")","sub_path":"lesson-1.4.py","file_name":"lesson-1.4.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"397449852","text":"import numpy as np\nimport torch\nimport scipy.io as sio\n\nnum = 501\nlen = 1600\nshape_x = 40 \nshape_y = 40\ndef reshape(matlabdata): # takes in one trace (signal) and returns a 40*40 matrix\n matlabdata = matlabdata[0:len].reshape(shape_x,shape_y) \n return matlabdata \n\nlabel = np.ones(num) \nmatFile = './shot.mat'\ndatas = sio.loadmat(matFile)\nmatlabdata = datas['trace'] #shape[1601:501]\nlabel_onehot = np.zeros(shape=(num,len))\nfor i in range(num):\n singal = matlabdata[:,i]\n #print(singal)\n for j in range(len):\n if singal[j] != 0.0 :\n label[i] = int(j/4)\n label_onehot[i,j] = 1\n break\n np.set_printoptions(threshold=np.inf)\nlabel_onehot = label_onehot.reshape(num,shape_x,shape_y) # reshape into 40*40 matrices\nTlabel = torch.from_numpy(label)\nTlabel_onehot = torch.from_numpy(label_onehot) #40*40\n","sub_path":"get_label.py","file_name":"get_label.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"283196767","text":"import abc\nimport sys # module-level import: the encode* error paths below print to sys.stderr even when xie is installed\n\nfrom coding.Base import CodeInfo\nfrom coding.Base import CodeInfoEncoder\nfrom coding.Base import CodingRadixParser\nfrom coding.Base import CodeMappingInfoInterpreter\n\ntry:\n\timport xie\nexcept ImportError:\n\tmessage = \"\"\"\n\tDynamic character composition uses Xie to render glyphs.\n\tDownload the latest release from https://github.com/xrloong/Xie and install it with pip, or run:\n\t$ pip3 install https://github.com/xrloong/Xie/releases/download/v0.0.9/Xie-0.0.9-py3-none-any.whl\n\"\"\"\n\tprint(message, file=sys.stderr)\n\tsys.exit(1)\n\nfrom xie.graphics.shape import Pane\nfrom xie.graphics.canvas import BaseTextCanvasController\nfrom xie.graphics.drawing import DrawingSystem\nfrom xie.graphics.stroke import Character\nfrom xie.graphics.stroke import StrokeGroup\nfrom xie.graphics.stroke import StrokeGroupInfo\nfrom xie.graphics.factory import ShapeFactory\n\n\nclass DCStrokeGroup:\n\tshapeFactory = ShapeFactory()\n\n\tdef __init__(self, strokeGroup):\n\t\tself.strokeGroup=strokeGroup\n\t\tself.extraPaneDB={DCCodeInfo.PANE_NAME_DEFAULT : strokeGroup.getStatePane()}\n\n\tdef getCount(self):\n\t\treturn self.strokeGroup.getCount()\n\n\tdef getStrokeGroup(self):\n\t\treturn self.strokeGroup\n\n\tdef generateStrokeGroup(self, pane):\n\t\tshapeFactory=DCStrokeGroup.shapeFactory\n\t\treturn shapeFactory.generateStrokeGroupByStrokeGroupPane(self.strokeGroup, pane)\n\n\tdef setExtraPaneDB(self, extraPaneDB):\n\t\tself.extraPaneDB=extraPaneDB\n\t\tself.extraPaneDB[DCCodeInfo.PANE_NAME_DEFAULT]=self.strokeGroup.getStatePane()\n\n\tdef setExtraPane(self, paneName, extraPane):\n\t\tself.extraPaneDB[paneName]=extraPane\n\n\tdef getExtraPane(self, paneName):\n\t\treturn self.extraPaneDB.get(paneName, None)\n\n\t@staticmethod\n\tdef generateDefaultStrokeGroup(dcStrokeGroupPanePairList):\n\t\tshapeFactory=DCStrokeGroup.shapeFactory\n\t\tstrokeGroupPanePair=[(dcStrokeGroup.getStrokeGroup(), pane) for dcStrokeGroup, pane in 
dcStrokeGroupPanePairList]\n\t\tstrokeGroup=shapeFactory.generateStrokeGroupByStrokeGroupPanePairList(strokeGroupPanePair)\n\t\tdcStrokeGroup=DCStrokeGroup(strokeGroup)\n\t\treturn dcStrokeGroup\n\n\t@staticmethod\n\tdef generateStrokeGroupByStrokeList(strokeList):\n\t\tshapeFactory=DCStrokeGroup.shapeFactory\n\t\tstrokeGroup = shapeFactory.generateStrokeGroupByStrokeList(strokeList)\n\t\treturn DCStrokeGroup(strokeGroup)\n\nclass DCCodeInfo(CodeInfo):\n\tPANE_NAME_DEFAULT=\"瑲珩預設範圍名稱\"\n\n\tPANE_NAME_LOOP=\"回\"\n\tPANE_NAME_QI=\"起\"\n\tPANE_NAME_LIAO=\"廖\"\n\tPANE_NAME_DAO=\"斗\"\n\tPANE_NAME_ZAI=\"載\"\n\n\tPANE_NAME_MU_1=\"畞:1\"\n\tPANE_NAME_MU_2=\"畞:2\"\n\n\tPANE_NAME_YOU_1=\"幽:1\"\n\tPANE_NAME_YOU_2=\"幽:2\"\n\n\tPANE_NAME_LIANG_1=\"㒳:1\"\n\tPANE_NAME_LIANG_2=\"㒳:2\"\n\n\tPANE_NAME_JIA_1=\"夾:1\"\n\tPANE_NAME_JIA_2=\"夾:2\"\n\n\tPANE_NAME_ZUO_1=\"㘴:1\"\n\tPANE_NAME_ZUO_2=\"㘴:2\"\n\n\tSTROKE_GROUP_NAME_DEFAULT=\"瑲珩預設筆劃組名稱\"\n\n\tSTROKE_GROUP_NAME_LOOP=\"回\"\n\tSTROKE_GROUP_NAME_QI=\"起\"\n\tSTROKE_GROUP_NAME_LIAO=\"廖\"\n\tSTROKE_GROUP_NAME_DAO=\"斗\"\n\tSTROKE_GROUP_NAME_ZAI=\"載\"\n\n\tSTROKE_GROUP_NAME_MU=\"畞\"\n\tSTROKE_GROUP_NAME_YOU=\"幽\"\n\tSTROKE_GROUP_NAME_LIANG=\"㒳\"\n\tSTROKE_GROUP_NAME_JIA=\"夾\"\n\tSTROKE_GROUP_NAME_ZUO=\"㘴\"\n\n\tdef __init__(self, strokeGroupDB):\n\t\tsuper().__init__()\n\n\t\tself.strokeGroupDB=strokeGroupDB\n\n\t@staticmethod\n\tdef generateDefaultCodeInfo(strokeGroupPanePair):\n\t\tstrokeGroup=DCStrokeGroup.generateDefaultStrokeGroup(strokeGroupPanePair)\n\t\tstrokeGroupDB={DCCodeInfo.STROKE_GROUP_NAME_DEFAULT : strokeGroup}\n\n\t\tcodeInfo=DCCodeInfo(strokeGroupDB)\n\t\treturn codeInfo\n\n\tdef toCode(self):\n\t\tstrokeGroup=self.getStrokeGroup()\n\t\treturn strokeGroup\n\n\tdef setExtraPane(self, strokeGroupName, paneName, extraPane):\n\t\tstrokeGroup=self.getStrokeGroup(strokeGroupName)\n\n\t\tif strokeGroup==None:\n\t\t\tstrokeGroup=self.getStrokeGroup()\n\n\t\tstrokeGroup.setExtraPane(paneName, extraPane)\n\n\tdef getExtraPane(self, strokeGroupName, paneName):\n\t\tstrokeGroup=self.getStrokeGroup(strokeGroupName)\n\n\t\tif strokeGroup==None:\n\t\t\tstrokeGroup=self.getStrokeGroup()\n\n\t\treturn strokeGroup.getExtraPane(paneName)\n\n\tdef getStrokeGroup(self, strokeGroupName=STROKE_GROUP_NAME_DEFAULT):\n\t\tstrokeGroup=self.strokeGroupDB.get(strokeGroupName)\n\t\tif strokeGroupName!=DCCodeInfo.STROKE_GROUP_NAME_DEFAULT and strokeGroup==None:\n\t\t\tstrokeGroup=self.getStrokeGroup(DCCodeInfo.STROKE_GROUP_NAME_DEFAULT)\n\t\treturn strokeGroup\n\n\tdef getStrokeCount(self):\n\t\treturn self.getStrokeGroup().getCount()\n\nclass DCCodeInfoEncoder(CodeInfoEncoder):\n\tdef generateDefaultCodeInfo(self, strokeGroupPanePair):\n\t\treturn DCCodeInfo.generateDefaultCodeInfo(strokeGroupPanePair)\n\n\tdef isAvailableOperation(self, codeInfoList):\n\t\tisAllWithCode=all(map(lambda x: x.getStrokeCount()>0, codeInfoList))\n\t\treturn isAllWithCode\n\n\tdef extendStrokeGroupNameList(self, strokeGroupNameList, codeInfoList):\n\t\tlenNameList=len(strokeGroupNameList)\n\t\tlenCodeInfoList=len(codeInfoList)\n\t\textendingList=[]\n\t\tif lenCodeInfoList>lenNameList:\n\t\t\tdiff=lenCodeInfoList-lenNameList\n\t\t\textendingList=[DCCodeInfo.STROKE_GROUP_NAME_DEFAULT for i in range(diff)]\n\t\treturn strokeGroupNameList+extendingList\n\n\tdef splitLengthToList(self, length, weightList):\n\t\ttotalWeight=sum(weightList)\n\t\tunitLength=length*1./totalWeight\n\n\t\tpointList=[]\n\t\tnewStrokeGroupList=[]\n\t\tbase=0\n\t\tfor weight in 
weightList:\n\t\t\tpointList.append(int(base))\n\t\t\tbase=base+unitLength*weight\n\t\tpointList.append(int(base))\n\t\treturn pointList\n\n\tdef encodeByEmbed(self, codeInfoList, strokeGroupNameList, paneNameList):\n\t\tif len(codeInfoList)<2:\n\t\t\treturn self.encodeAsInvalidate(codeInfoList)\n\n\t\tcontainerCodeInfo=codeInfoList[0]\n\n\t\tnewStrokeGroupList=[]\n\t\tfor [strokeGroupName, paneName, codeInfo] in zip(strokeGroupNameList, paneNameList, codeInfoList):\n\t\t\textraPane=containerCodeInfo.getExtraPane(strokeGroupName, paneName)\n\t\t\tassert extraPane!=None, \"extraPane must not be None. %s: %s\"%(paneName, str(containerCodeInfo))\n\n\t\t\tstrokeGroup=codeInfo.getStrokeGroup(strokeGroupName).generateStrokeGroup(extraPane)\n\t\t\tnewStrokeGroupList.append(strokeGroup)\n\n\t\tpaneList=[]\n\t\tfor [strokeGroupName, paneName] in zip(strokeGroupNameList, paneNameList):\n\t\t\textraPane=containerCodeInfo.getExtraPane(strokeGroupName, paneName)\n\t\t\tassert extraPane!=None, \"extraPane must not be None. %s: %s\"%(paneName, str(containerCodeInfo))\n\t\t\tpaneList.append(extraPane)\n\n\t\tstrokeGroupList=[]\n\t\tfor [strokeGroupName, codeInfo] in zip(strokeGroupNameList, codeInfoList):\n\t\t\tstrokeGroup=codeInfo.getStrokeGroup(strokeGroupName)\n\t\t\tstrokeGroupList.append(strokeGroup)\n\n\t\tstrokeGroupPanePair=zip(strokeGroupList, paneList)\n\t\tcodeInfo=self.generateDefaultCodeInfo(strokeGroupPanePair)\n\t\treturn codeInfo\n\n\n\tdef encodeAsTurtle(self, codeInfoList):\n\t\t\"\"\"Operation \"龜\" \"\"\"\n\t\tprint(\"Invalid operation: 龜\", file=sys.stderr)\n\t\tcodeInfo=self.encodeAsInvalidate(codeInfoList)\n\t\treturn codeInfo\n\n\tdef encodeAsLoong(self, codeInfoList):\n\t\t\"\"\"Operation \"龍\" \"\"\"\n\t\tprint(\"Invalid operation: 龍\", file=sys.stderr)\n\t\tcodeInfo=self.encodeAsInvalidate(codeInfoList)\n\t\treturn codeInfo\n\n\tdef encodeAsSparrow(self, codeInfoList):\n\t\t\"\"\"Operation \"雀\" \"\"\"\n\t\tprint(\"Invalid operation: 雀\", file=sys.stderr)\n\t\tcodeInfo=self.encodeAsInvalidate(codeInfoList)\n\t\treturn codeInfo\n\n\tdef encodeAsEqual(self, codeInfoList):\n\t\t\"\"\"Operation \"爲\" \"\"\"\n\t\tfirstCodeInfo=codeInfoList[0]\n\t\treturn firstCodeInfo\n\n\n\tdef encodeAsLoop(self, codeInfoList):\n\t\tfirstCodeInfo=codeInfoList[0]\n\t\tcodeInfo=self.encodeByEmbed(codeInfoList,\n\t\t\t[DCCodeInfo.STROKE_GROUP_NAME_LOOP, DCCodeInfo.STROKE_GROUP_NAME_LOOP],\n\t\t\t[DCCodeInfo.PANE_NAME_DEFAULT, DCCodeInfo.PANE_NAME_LOOP])\n\t\t# 颱=(起 風台), 是=(回 [風外]䖝)\n\t\tif firstCodeInfo.getExtraPane(DCCodeInfo.STROKE_GROUP_NAME_QI, DCCodeInfo.PANE_NAME_QI):\n\t\t\tcodeInfo.setExtraPane(DCCodeInfo.STROKE_GROUP_NAME_QI, DCCodeInfo.PANE_NAME_QI, firstCodeInfo.getExtraPane(DCCodeInfo.STROKE_GROUP_NAME_QI, DCCodeInfo.PANE_NAME_QI))\n\t\treturn codeInfo\n\n\tdef encodeAsSilkworm(self, codeInfoList):\n\t\tdef genPaneList(weightList):\n\t\t\tpane=Pane.BBOX\n\t\t\tpointList=self.splitLengthToList(pane.getHeight(), weightList)\n\t\t\tpaneList=[]\n\t\t\toffset=pane.getTop()\n\t\t\tfor [pointStart, pointEnd] in zip(pointList[:-1], pointList[1:]):\n\t\t\t\theight=pointEnd-pointStart\n\t\t\t\ttargetHeight=int(height*0.90)\n\t\t\t\toffset=int(height-targetHeight)//2\n\t\t\t\ttmpPane=Pane(pane.getLeft(), pointStart+offset, pane.getRight(), pointEnd-offset)\n\t\t\t\ttmpPane.offsetTopAndBottom(offset)\n\t\t\t\tpaneList.append(tmpPane)\n\t\t\treturn paneList\n\n\t\tweightList=list(map(lambda x: x.getStrokeCount()+1, codeInfoList))\n\t\tpaneList=genPaneList(weightList)\n\n\t\tstrokeGroupNameList=self.extendStrokeGroupNameList(['蚕'], codeInfoList)\n\n\t\tstrokeGroupList=[]\n\t\tfor 
[strokeGroupName, codeInfo] in zip(strokeGroupNameList, codeInfoList):\n\t\t\tstrokeGroup=codeInfo.getStrokeGroup(strokeGroupName)\n\t\t\tstrokeGroupList.append(strokeGroup)\n\n\t\tstrokeGroupPanePair=zip(strokeGroupList, paneList)\n\t\tcodeInfo=self.generateDefaultCodeInfo(strokeGroupPanePair)\n\n\t\tlastCodeInfo=codeInfoList[-1]\n\t\t# 題=(起 是頁), 是=(志 日[是下])\n\t\tif lastCodeInfo.getExtraPane(DCCodeInfo.STROKE_GROUP_NAME_QI, DCCodeInfo.PANE_NAME_QI):\n\t\t\tcodeInfo.setExtraPane(DCCodeInfo.STROKE_GROUP_NAME_QI, DCCodeInfo.PANE_NAME_QI, lastCodeInfo.getExtraPane(DCCodeInfo.STROKE_GROUP_NAME_QI, DCCodeInfo.PANE_NAME_QI))\n\n\t\treturn codeInfo\n\n\tdef encodeAsGoose(self, codeInfoList):\n\t\tdef genPaneList(weightList):\n\t\t\tpane=Pane.BBOX\n\t\t\tpointList=self.splitLengthToList(pane.getWidth(), weightList)\n\t\t\tpaneList=[]\n\t\t\toffset=pane.getLeft()\n\t\t\tfor [pointStart, pointEnd] in zip(pointList[:-1], pointList[1:]):\n\t\t\t\twidth=pointEnd-pointStart\n\t\t\t\ttargetWidth=int(width*0.90)\n\t\t\t\toffset=int(width-targetWidth)//2\n\t\t\t\ttmpPane=Pane(pointStart+offset, pane.getTop(), pointEnd-offset, pane.getBottom())\n\t\t\t\ttmpPane.offsetLeftAndRight(offset)\n\t\t\t\tpaneList.append(tmpPane)\n\t\t\treturn paneList\n\n\t\tweightList=list(map(lambda x: x.getStrokeCount(), codeInfoList))\n\t\tpaneList=genPaneList(weightList)\n\n\t\tstrokeGroupNameList=self.extendStrokeGroupNameList(['鴻'], codeInfoList)\n\n\t\tstrokeGroupList=[]\n\t\tfor [strokeGroupName, codeInfo] in zip(strokeGroupNameList, codeInfoList):\n\t\t\tstrokeGroup=codeInfo.getStrokeGroup(strokeGroupName)\n\t\t\tstrokeGroupList.append(strokeGroup)\n\n\t\tstrokeGroupPanePair=zip(strokeGroupList, paneList)\n\t\tcodeInfo=self.generateDefaultCodeInfo(strokeGroupPanePair)\n\t\treturn codeInfo\n\n\tdef encodeAsQi(self, codeInfoList):\n\t\treturn self.encodeByEmbed(codeInfoList,\n\t\t\t[DCCodeInfo.STROKE_GROUP_NAME_QI, DCCodeInfo.STROKE_GROUP_NAME_QI],\n\t\t\t[DCCodeInfo.PANE_NAME_DEFAULT, DCCodeInfo.PANE_NAME_QI])\n\n\tdef encodeAsLiao(self, codeInfoList):\n\t\tcodeInfo=self.encodeByEmbed(codeInfoList,\n\t\t\t[DCCodeInfo.STROKE_GROUP_NAME_LIAO, DCCodeInfo.STROKE_GROUP_NAME_LIAO],\n\t\t\t[DCCodeInfo.PANE_NAME_DEFAULT, DCCodeInfo.PANE_NAME_LIAO])\n\n\t\tlastCodeInfo=codeInfoList[-1]\n\t\t# 屗=(起 尾寸), 尾=(志 尸毛)\n\t\tif lastCodeInfo.getExtraPane(DCCodeInfo.STROKE_GROUP_NAME_QI, DCCodeInfo.PANE_NAME_QI):\n\t\t\tcodeInfo.setExtraPane(DCCodeInfo.STROKE_GROUP_NAME_QI, DCCodeInfo.PANE_NAME_QI, lastCodeInfo.getExtraPane(DCCodeInfo.STROKE_GROUP_NAME_QI, DCCodeInfo.PANE_NAME_QI))\n\t\treturn codeInfo\n\n\tdef encodeAsZai(self, codeInfoList):\n\t\treturn self.encodeByEmbed(codeInfoList,\n\t\t\t[DCCodeInfo.STROKE_GROUP_NAME_ZAI, DCCodeInfo.STROKE_GROUP_NAME_ZAI],\n\t\t\t[DCCodeInfo.PANE_NAME_DEFAULT, DCCodeInfo.PANE_NAME_ZAI])\n\n\tdef encodeAsDou(self, codeInfoList):\n\t\t# DCCodeInfo defines the 斗 constants as *_DAO; the original *_DOU names do not exist\n\t\treturn self.encodeByEmbed(codeInfoList,\n\t\t\t[DCCodeInfo.STROKE_GROUP_NAME_DAO, DCCodeInfo.STROKE_GROUP_NAME_DAO],\n\t\t\t[DCCodeInfo.PANE_NAME_DEFAULT, DCCodeInfo.PANE_NAME_DAO])\n\n\n\tdef encodeAsMu(self, codeInfoList):\n\t\treturn self.encodeByEmbed(codeInfoList,\n\t\t\t[DCCodeInfo.STROKE_GROUP_NAME_MU, DCCodeInfo.STROKE_GROUP_NAME_MU, DCCodeInfo.STROKE_GROUP_NAME_MU],\n\t\t\t[DCCodeInfo.PANE_NAME_DEFAULT, DCCodeInfo.PANE_NAME_MU_1, DCCodeInfo.PANE_NAME_MU_2])\n\n\tdef encodeAsZuo(self, codeInfoList):\n\t\treturn self.encodeByEmbed(codeInfoList,\n\t\t\t[DCCodeInfo.STROKE_GROUP_NAME_ZUO, DCCodeInfo.STROKE_GROUP_NAME_ZUO, 
DCCodeInfo.STROKE_GROUP_NAME_ZUO],\n\t\t\t[DCCodeInfo.PANE_NAME_DEFAULT, DCCodeInfo.PANE_NAME_ZUO_1, DCCodeInfo.PANE_NAME_ZUO_2])\n\n\tdef encodeAsYou(self, codeInfoList):\n\t\treturn self.encodeByEmbed(codeInfoList,\n\t\t\t[DCCodeInfo.STROKE_GROUP_NAME_YOU, DCCodeInfo.STROKE_GROUP_NAME_YOU, DCCodeInfo.STROKE_GROUP_NAME_YOU],\n\t\t\t[DCCodeInfo.PANE_NAME_DEFAULT, DCCodeInfo.PANE_NAME_YOU_1, DCCodeInfo.PANE_NAME_YOU_2])\n\n\tdef encodeAsLiang(self, codeInfoList):\n\t\treturn self.encodeByEmbed(codeInfoList,\n\t\t\t[DCCodeInfo.STROKE_GROUP_NAME_LIANG, DCCodeInfo.STROKE_GROUP_NAME_LIANG, DCCodeInfo.STROKE_GROUP_NAME_LIANG],\n\t\t\t[DCCodeInfo.PANE_NAME_DEFAULT, DCCodeInfo.PANE_NAME_LIANG_1, DCCodeInfo.PANE_NAME_LIANG_2])\n\n\tdef encodeAsJia(self, codeInfoList):\n\t\treturn self.encodeByEmbed(codeInfoList,\n\t\t\t[DCCodeInfo.STROKE_GROUP_NAME_JIA, DCCodeInfo.STROKE_GROUP_NAME_JIA, DCCodeInfo.STROKE_GROUP_NAME_JIA],\n\t\t\t[DCCodeInfo.PANE_NAME_DEFAULT, DCCodeInfo.PANE_NAME_JIA_1, DCCodeInfo.PANE_NAME_JIA_2])\n\nclass DCRadixParser(CodingRadixParser):\n\tTAG_STROKE_GROUP='筆劃組'\n\tTAG_STROKE='筆劃'\n\tTAG_GEOMETRY='幾何'\n\tTAG_SCOPE='範圍'\n\tTAG_NAME='名稱'\n\tTAG_EXTRA_SCOPE='補充範圍'\n\tTAG_TYPE='類型'\n\tTAG_START_POINT='起始點'\n\tTAG_PARAMETER='參數'\n\tTAG_BBOX='字面框'\n\n\tTAG_CODE_INFORMATION='編碼資訊'\n\tATTRIB_CODE_EXPRESSION='資訊表示式'\n\n\tTAG_CHARACTER_SET='字符集'\n\tTAG_CHARACTER='字符'\n\n\tdef __init__(self):\n\t\tself.shapeFactory=ShapeFactory()\n\t\tself.templateManager=TemplateManager(self.shapeFactory)\n\n\t# polymorphic override of the base parser\n\tdef convertRadixDescToCodeInfo(self, radixDesc):\n\t\tcodeInfo=self.convertRadixDescToCodeInfoByExpression(radixDesc)\n\t\treturn codeInfo\n\n\tdef convertRadixDescToCodeInfoByExpression(self, radixInfo):\n\t\telementCodeInfo=radixInfo.getCodeElement()\n\n\t\tstrokeGroupDB={}\n\n\t\tstrokeGroupNodeList=elementCodeInfo.get(DCRadixParser.ATTRIB_CODE_EXPRESSION)\n\t\tfor strokeGroupNode in strokeGroupNodeList:\n\t\t\t[strokeGroupName, strokeGroup]=self.parseStrokeGroup(strokeGroupNode)\n\n\t\t\textraPaneDB=self.parseExtraScopeDB(strokeGroupNode)\n\t\t\tstrokeGroup.setExtraPaneDB(extraPaneDB)\n\n\t\t\tif strokeGroupName==None:\n\t\t\t\tstrokeGroupName=DCCodeInfo.STROKE_GROUP_NAME_DEFAULT\n\t\t\tstrokeGroupDB[strokeGroupName]=strokeGroup\n\n\t\tcodeInfo=DCCodeInfo(strokeGroupDB)\n\t\treturn codeInfo\n\n\tdef parseRadixInfo(self, rootNode):\n\t\tcharacterSetNode=rootNode.get(DCRadixParser.TAG_CHARACTER_SET)\n\t\tfor characterNode in characterSetNode:\n\t\t\tcharName=characterNode.get(DCRadixParser.TAG_NAME)\n\t\t\tradixDescription=self.parseRadixDescription(characterNode)\n\n\t\t\tself.radixDescriptionManager.addDescription(charName, radixDescription)\n\n\tdef parseExtraScopeDB(self, elementCodeInfo):\n\t\textraPaneDB={}\n\n\t\textraScopeNodeList=elementCodeInfo.get(DCRadixParser.TAG_EXTRA_SCOPE)\n\t\tif extraScopeNodeList != None:\n\t\t\tfor extraScopeNode in extraScopeNodeList:\n\t\t\t\tpaneName=extraScopeNode.get(DCRadixParser.TAG_NAME)\n\t\t\t\tpane=self.parseExtraScope(extraScopeNode)\n\n\t\t\t\textraPaneDB[paneName]=pane\n\n\t\treturn extraPaneDB\n\n\tdef parseExtraScope(self, extraScopeNode):\n\t\tdescriptionRegion=extraScopeNode.get(DCRadixParser.TAG_SCOPE)\n\t\tpane=self.parsePane(descriptionRegion)\n\t\treturn pane\n\n\tdef parseGeometry(self, geometryNode):\n\t\tdescriptionRegion=geometryNode.get(DCRadixParser.TAG_SCOPE)\n\t\tpane=self.parsePane(descriptionRegion)\n\t\treturn pane\n\n\tdef parseStrokeGroup(self, 
strokeGroupNode):\n\t\tstrokeGroupName=strokeGroupNode.get(DCRadixParser.TAG_NAME)\n\n\t\tt=strokeGroupNode.get(DCRadixParser.TAG_STROKE_GROUP)\n\t\tstrokeList=self.parseStrokeList(t)\n\n\t\tstrokeGroup=DCStrokeGroup.generateStrokeGroupByStrokeList(strokeList)\n\t\treturn [strokeGroupName, strokeGroup]\n\n\tdef parseStrokeList(self, strokeGroupNode):\n\t\tstrokeList=[]\n\t\tstrokeNodeList=strokeGroupNode.get(DCRadixParser.TAG_STROKE)\n\t\tfor strokeNode in strokeNodeList:\n\t\t\tmethod=strokeNode.get(TemplateManager.TAG_METHOD, TemplateManager.TAG_METHOD__DEFINITION)\n\t\t\tif method==TemplateManager.TAG_METHOD__REFERENCE:\n\t\t\t\ttempStrokes=self.templateManager.parseStrokeByReference(strokeNode, self.templateManager)\n\t\t\t\tstrokeList.extend(tempStrokes)\n\t\t\telif method==TemplateManager.TAG_METHOD__DEFINITION:\n\t\t\t\tstroke=DCRadixParser.fromStrokeNode(strokeNode, self.shapeFactory)\n\t\t\t\tstrokeList.append(stroke)\n\t\treturn strokeList\n\n\t@staticmethod\n\tdef fromStrokeNode(strokeNode, shapeFactory):\n\t\tname=strokeNode.get(DCRadixParser.TAG_TYPE)\n\n\t\tstartPoint=strokeNode.get(DCRadixParser.TAG_START_POINT)\n\n\t\tparameterList = strokeNode.get(DCRadixParser.TAG_PARAMETER)\n\n\t\treturn shapeFactory.generateStroke(name, startPoint, parameterList)\n\n\tdef parsePane(self, descriptionRegion):\n\t\tleft=int(descriptionRegion[0:2], 16)\n\t\ttop=int(descriptionRegion[2:4], 16)\n\t\tright=int(descriptionRegion[4:6], 16)\n\t\tbottom=int(descriptionRegion[6:8], 16)\n\t\treturn Pane(left, top, right, bottom)\n\nclass AbsTemplateManager(object, metaclass=abc.ABCMeta):\n\tdef __init__(self):\n\t\tpass\n\n\tdef put(self, name, template):\n\t\traise NotImplementedError('users must define put to use this base class')\n\n\tdef get(self, name):\n\t\traise NotImplementedError('users must define get to use this base class')\n\t\treturn None\n\nclass AnchorTemplateManager(AbsTemplateManager):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.anchors={}\n\n\tdef put(self, name, template):\n\t\tassert isinstance(template, StrokeGroup)\n\t\tself.anchors[name]=template\n\n\tdef get(self, name):\n\t\treturn self.anchors.get(name)\n\nclass CompositionTemplateManager(AbsTemplateManager):\n\tdef __init__(self, templateManagers):\n\t\tsuper().__init__()\n\t\tself.templateManagers=templateManagers\n\n\t\"\"\"\n\tdef put(self, name, template):\n\t\tassert isinstance(template, StrokeGroup)\n\t\tself.anchors[name]=template\n\t\"\"\"\n\n\tdef get(self, name):\n\t\tfor templateManager in self.templateManagers:\n\t\t\tsg=templateManager.get(name)\n\t\t\tif sg:\n\t\t\t\treturn sg\n\nclass TemplateManager(AbsTemplateManager):\n\tTAG_TEMPLATE_SET = \"樣式集\"\n\tTAG_STROKE_GROUP='筆劃組'\n\tTAG_STROKE='筆劃'\n\tTAG_NAME='名稱'\n\n\tTAG_METHOD='方式'\n\tTAG_TYPE='類型'\n\tTAG_START_POINT='起始點'\n\tTAG_PARAMETER='參數'\n\n\tTAG_METHOD__DEFINITION='定義'\n\tTAG_METHOD__REFERENCE='引用'\n\tTAG_METHOD__ANCHOR='錨點'\n\n\tTAG_REFRENCE_NAME='引用名稱'\n\tTAG_ORDER='順序'\n\tTAG_TRANSFORMATION='變換'\n\n\tTAG_POSITION='定位'\n\tTAG_TRANSLATION='平移'\n\tTAG_SCALING='縮放'\n\n\tTAG_PIVOT='樞軸點'\n\tTAG_RATIO='比例'\n\n\tdef __init__(self, shapeFactory):\n\t\tsuper().__init__()\n\t\tself.shapeFactory=shapeFactory\n\t\tself.templates={}\n\t\tself.load()\n\n\tdef put(self, name, template):\n\t\tassert isinstance(template, StrokeGroup)\n\t\tself.templates[name]=template\n\n\tdef get(self, name):\n\t\treturn self.templates.get(name)\n\n\tdef getStroke(self, name, index):\n\t\tstrokeGroup=self.templates.get(name)\n\t\treturn 
strokeGroup.getStrokeList()[index]\n\n\tdef getStrokes(self, name, start, end):\n\t\tstrokeGroup=self.templates.get(name)\n\t\treturn strokeGroup.getStrokeList()[start:end+1]\n\n\tdef load(self):\n\t\tfrom . import CodingTemplateFile\n\t\ttemplate_file = CodingTemplateFile\n\t\tself.parseTemplateFromYAML(template_file)\n\n\tdef parseTemplateFromYAML(self, filename):\n\t\timport ruamel.yaml as yaml\n\t\trootNode=yaml.load(open(filename), Loader=yaml.SafeLoader)\n\t\tself.parseTemplateSet(rootNode)\n\n\tdef parseTemplateSet(self, rootNode):\n\t\ttemplateSetNode=rootNode.get(TemplateManager.TAG_TEMPLATE_SET)\n\t\tfor templateNode in templateSetNode:\n\t\t\ttemplateName=templateNode.get(TemplateManager.TAG_NAME)\n\t\t\tstrokeGroupNode=templateNode.get(TemplateManager.TAG_STROKE_GROUP)\n\t\t\tstrokeGroup=self.parseStrokeGroup(strokeGroupNode)\n\t\t\tself.put(templateName, strokeGroup)\n\n\tdef parseStrokeGroup(self, strokeGroupNode):\n\t\tstrokes=[]\n\t\tanchorTemplateManager = AnchorTemplateManager()\n\t\tcompositionTemplateManager = CompositionTemplateManager((anchorTemplateManager, self,))\n\t\tfor strokeNode in strokeGroupNode.get(TemplateManager.TAG_STROKE):\n\t\t\tmethod=strokeNode.get(TemplateManager.TAG_METHOD, TemplateManager.TAG_METHOD__DEFINITION)\n\t\t\tif method==TemplateManager.TAG_METHOD__REFERENCE:\n\t\t\t\ttempStrokes=self.parseStrokeByReference(strokeNode, compositionTemplateManager)\n\t\t\t\tstrokes.extend(tempStrokes)\n\t\t\telif method==TemplateManager.TAG_METHOD__ANCHOR:\n\t\t\t\tanchorName=strokeNode.get(TemplateManager.TAG_NAME)\n\t\t\t\tstrokeGroup=self.parseStrokeByAnchor(strokeNode, anchorTemplateManager)\n\t\t\t\tanchorTemplateManager.put(anchorName, strokeGroup)\n\t\t\telif method==TemplateManager.TAG_METHOD__DEFINITION:\n\t\t\t\tstroke=self.parseStroke(strokeNode)\n\t\t\t\tstrokes.append(stroke)\n\t\t\telse:\n\t\t\t\tassert False\n\t\tstrokeGroup=self.shapeFactory.generateStrokeGroupByStrokeList(strokes)\n\t\treturn strokeGroup\n\n\tdef parseStroke(self, strokeNode):\n\t\tstrokeType=strokeNode.get(TemplateManager.TAG_TYPE)\n\t\tstartPoint=strokeNode.get(TemplateManager.TAG_START_POINT)\n\t\tparams=strokeNode.get(TemplateManager.TAG_PARAMETER)\n\t\tstroke=self.shapeFactory.generateStroke(strokeType, startPoint, params)\n\t\treturn stroke\n\n\tdef parseStrokeByReference(self, strokeNode, templateManager):\n\t\tstrokeType=strokeNode.get(TemplateManager.TAG_TYPE)\n\t\ttemplateName=strokeNode.get(TemplateManager.TAG_REFRENCE_NAME)\n\t\torders=strokeNode.get(TemplateManager.TAG_ORDER)\n\n\t\tstrokeGroup=templateManager.get(templateName)\n\t\tstrokes=list((strokeGroup.getStroke(index) for index in orders))\n\n\t\ttransformationNode=strokeNode.get(TemplateManager.TAG_TRANSFORMATION)\n\t\tif transformationNode is not None:\n\t\t\tstrokeGroupInfo = StrokeGroupInfo(strokes)\n\t\t\tstatePane = strokeGroupInfo.getInfoPane().clone()\n\t\t\tfor node in transformationNode:\n\t\t\t\tif TemplateManager.TAG_POSITION in node:\n\t\t\t\t\tposition = node.get(TemplateManager.TAG_POSITION)\n\t\t\t\t\tstatePane = Pane(*position)\n\t\t\t\telif TemplateManager.TAG_TRANSLATION in node:\n\t\t\t\t\ttranslation = node.get(TemplateManager.TAG_TRANSLATION)\n\t\t\t\t\tstatePane.translateBy(translation)\n\t\t\t\telif TemplateManager.TAG_SCALING in node:\n\t\t\t\t\tscalingNode = node.get(TemplateManager.TAG_SCALING)\n\t\t\t\t\tpivot = scalingNode.get(TemplateManager.TAG_PIVOT)\n\t\t\t\t\tratio = scalingNode.get(TemplateManager.TAG_RATIO)\n\t\t\t\t\tstatePane.scale(pivot, 
ratio)\n\n\t\t\tstrokes=list((stroke.generateCopyToApplyNewPane(strokeGroupInfo.getInfoPane(), statePane) for stroke in strokes))\n\n\t\treturn strokes\n\n\tdef parseStrokeByAnchor(self, strokeNode, anchromTemplateName):\n\t\treferenceName=strokeNode.get(TemplateManager.TAG_REFRENCE_NAME)\n\t\tstrokeGroup=self.get(referenceName)\n\n\t\ttransformationNode=strokeNode.get(TemplateManager.TAG_TRANSFORMATION)\n\t\tif transformationNode != None:\n\t\t\tstrokes=list(strokeGroup.getStrokeList())\n\n\t\t\tstrokeGroupInfo = StrokeGroupInfo(strokes)\n\t\t\tstatePane = strokeGroupInfo.getInfoPane().clone()\n\t\t\tfor node in transformationNode:\n\t\t\t\tif TemplateManager.TAG_POSITION in node:\n\t\t\t\t\tposition = node.get(TemplateManager.TAG_POSITION)\n\t\t\t\t\tstatePane = Pane(*position)\n\t\t\t\telif TemplateManager.TAG_TRANSLATION in node:\n\t\t\t\t\ttranslation = node.get(TemplateManager.TAG_TRANSLATION)\n\t\t\t\t\tstatePane.translateBy(translation)\n\t\t\t\telif TemplateManager.TAG_SCALING in node:\n\t\t\t\t\tscalingNode = node.get(TemplateManager.TAG_SCALING)\n\t\t\t\t\tpivot = scalingNode.get(TemplateManager.TAG_PIVOT)\n\t\t\t\t\tratio = scalingNode.get(TemplateManager.TAG_RATIO)\n\t\t\t\t\tstatePane.scale(pivot, ratio)\n\n\t\t\tstrokes=list((stroke.generateCopyToApplyNewPane(strokeGroupInfo.getInfoPane(), statePane) for stroke in strokes))\n\t\t\tstrokeGroup=strokeGroup.generateCopyToApplyNewPane(statePane)\n\n\t\treturn strokeGroup\n\nclass YamlCanvasController(BaseTextCanvasController):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.strokes = []\n\n\tdef getStrokes(self):\n\t\treturn self.strokes\n\n\tdef onPreDrawCharacter(self, character):\n\t\tself.strokes=[]\n\n\tdef onPreDrawStroke(self, stroke):\n\t\tself.clearStrokeExpression()\n\n\tdef onPostDrawStroke(self, stroke):\n\t\te=self.getStrokeExpression()\n\t\tif e:\n\t\t\tattrib={\n\t\t\t\t\"名稱\": stroke.getName(),\n\t\t\t\t\"描繪\": e,\n\t\t\t\t}\n\t\t\tself.strokes.append(attrib)\n\n\n# YAML writer for drawing methods\nclass DmCodeMappingInfoInterpreter(CodeMappingInfoInterpreter):\n\tdef __init__(self, codingType):\n\t\tsuper().__init__(codingType)\n\n\tdef interpreteCodeMappingInfo(self, codeMappingInfo):\n\t\tcharName = codeMappingInfo.getName()\n\t\tdcStrokeGroup = codeMappingInfo.getCode()\n\t\tvariance = codeMappingInfo.getVariance()\n\n\t\tstrokeGroup = dcStrokeGroup.getStrokeGroup()\n\t\tcharacter = Character(charName, strokeGroup)\n\n\t\tcontroller = YamlCanvasController()\n\t\tds = DrawingSystem(controller)\n\n\t\tds.draw(character)\n\n\t\tcode = controller.getStrokes()\n\n\t\treturn {\"字符\": charName, \"類型\":variance, \"字圖\":code}\n\n","sub_path":"codings/DynamicComposition/coding/DynamicComposition/DynamicComposition.py","file_name":"DynamicComposition.py","file_ext":"py","file_size_in_byte":24238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"346238814","text":"import math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the twoStrings function below.\r\n\r\n#takes in two strings s1 and s2. 
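# A minimal standalone sketch of the check implemented in the record below
# (the helper name is hypothetical): any common substring implies a shared
# single character, so a character-set intersection is sufficient and runs
# in O(len(s1) + len(s2)).
def have_common_substring(s1: str, s2: str) -> str:
    return "YES" if set(s1) & set(s2) else "NO"

assert have_common_substring("hello", "world") == "YES"  # shares 'l' and 'o'
assert have_common_substring("hi", "ab") == "NO"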
#Returns \"YES\" if there exists a common substring; otherwise \"NO\".\r\n#strings are alphabetic a-z; the lengths of s1 and s2 can be up to 10^5\r\n#a common substring can be as short as length 1\r\ndef twoStrings(s1, s2):\r\n    result = set(s1).intersection(s2)\r\n    if len(result) > 0:\r\n        return \"YES\"\r\n    else:\r\n        return \"NO\"\r\n\r\ndef main():\r\n    s1 = \"t\"\r\n    s2 = \"ab\"\r\n\r\n    print(twoStrings(s1, s2))\r\n\r\nmain()","sub_path":"two_strings.py","file_name":"two_strings.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"240354920","text":"# # Given a 32-bit signed integer, reverse digits of an integer.\n\n# # Example 1:\n# # Input: 123\n# # Output: 321\n\n# # Example 2:\n# # Input: -123\n# # Output: -321\n# \n# # Example 3:\n# # Input: 120\n# # Output: 21\n# # Note:\n# # Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−2^31, 2^31 − 1]. \n# # For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.\n\nclass Solution:\n    def reverse(self, data: int) -> int:\n        if data < 0:\n            inp = -data\n        elif data > 0:\n            inp = data\n        else:\n            return 0\n        rev = 0\n        while inp != 0:\n            rem = int(inp % 10)\n            rev = rev*10 + rem\n            inp = int(inp/10)\n        if ((-2**31) <= rev <= (2**31 - 1)) and (data < 0):\n            return -rev\n        elif ((-2**31) <= rev <= (2**31 - 1)) and (data > 0):\n            return rev\n        else:\n            return 0\n\nif __name__ == \"__main__\":\n    output = Solution()\n    data = int(input(\"Enter your digit: \"))\n    print(output.reverse(data))\n\n    ","sub_path":"leetcode/7_reverse_integer/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"185262776","text":"\"\"\"\nEngine component.\nLogic to implement:\nbuild the requests from start_urls in the spider,\nhand them to the scheduler for storage and later take them back out;\ngive each request object taken out to the downloader, which returns a response;\nhand the response to the spider module for parsing and extract the result;\nif the result is a request object, give it back to the scheduler; if it is an item object, hand it to the pipeline\n\"\"\"\n\nfrom scrapy_plus.http.request import Request  # import the Request class\nfrom scrapy_plus.middlewares.spider_middlewares import SpiderMiddleware\nfrom scrapy_plus.middlewares.downloader_middlewares import DownloaderMiddleware\n\nfrom .scheduler import Scheduler\nfrom .downloader import Downloader\nfrom .pipeline import Pipeline\nfrom .spider import Spider\nfrom scrapy_plus.utils.log import logger\nfrom datetime import datetime\n\n\nclass Engine(object):\n    '''\n    a. provides the entry point for the whole program\n    b. calls the interfaces exposed by the other components in turn, driving the whole framework\n    '''\n\n    def __init__(self):\n        \"\"\"Instantiate each of the other components\"\"\"\n        self.spider = Spider()  # the spider object\n        self.scheduler = Scheduler()  # initialize the scheduler\n        self.downloader = Downloader()  # initialize the downloader\n        self.pipeline = Pipeline()  # initialize the pipeline\n\n        self.spider_mid = SpiderMiddleware()  # initialize the spider middleware\n        self.downloader_mid = DownloaderMiddleware()  # initialize the downloader middleware\n\n    def start(self):\n        '''Entry point that starts the whole engine'''\n\n        # exercise the logging facility\n        start_time = datetime.now()\n        logger.info('Spider started: {}'.format(start_time))\n\n        self._start_engine()\n        end_time = datetime.now()\n        print(\"Spider finished: {}\".format(end_time))\n        print('Spider ran for {} seconds'.format((end_time - start_time).total_seconds()))  # total_seconds\n\n    def _start_engine(self):\n        '''Call the interfaces exposed by the other components in turn, driving the whole framework'''\n        # 1. the spider module issues the initial request: call the spider's method\n        start_request = self.spider.start_requests()\n\n        # 2. add the initial request to the scheduler (queue)\n\n        # preprocess the request object with the spider middleware\n        start_request = self.spider_mid.process_request(start_request)\n\n        self.scheduler.add_request(start_request)\n        # 3. take a request object from the scheduler, ready to hand to the downloader for a response object\n        request = self.scheduler.get_request()\n\n        # preprocess the request object with the downloader middleware\n        request = self.downloader_mid.process_request(request)\n\n        # 4. issue the request via the downloader\n        response = self.downloader.get_response(request)\n\n        # 5. handle the response with the spider's parse method to obtain the result\n        result = self.spider.parse(response)\n        # 6. inspect the result object\n        # 6.1 if it is a request object, hand it back to the scheduler\n        if isinstance(result, Request):\n            # preprocess the request object with the spider middleware\n            result = self.spider_mid.process_request(result)\n            self.scheduler.add_request(result)\n        # 6.2 otherwise, hand it to the pipeline\n        else:\n            self.pipeline.process_item(result)\n","sub_path":"MyScrapy/scrapy_plus/core/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"378171610","text":"import uuid\nimport random\nimport errno\nimport os\n\ndef buildPath(path):\n    try:\n        os.makedirs(path)\n    except OSError as exception:\n        if exception.errno != errno.EEXIST:\n            raise\n\ndef buildFile(percentRepeated):\n    repeated_uid = str(uuid.uuid1())\n    for docNum in xrange(1000):\n        percent = str(int(percentRepeated * 100)) + 'PercentRepeated'\n        directory = 'Unique/%s' % percent\n        if docNum == 0:\n            buildPath(directory)\n        path = directory + '/doc-%d' % (docNum)\n        with open(path, 'w') as outfile:\n            outstring = ''\n            for wordCount in xrange(2000):\n                if (random.random() < percentRepeated):\n                    outstring = outstring + repeated_uid + ' '\n                else:\n                    outstring = outstring + str(uuid.uuid1()) + ' '\n            outfile.write(outstring)\n\ndef main():\n    buildFile(.05)\n    buildFile(.25)\n    buildFile(.5)\n    buildFile(.75)\n    buildFile(1)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"src/collate/repeat_data.py","file_name":"repeat_data.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"512263193","text":"from tkinter import *\r\n\r\nlabel = \"\"\r\n\r\ndef press(num):\r\n\r\n    global label\r\n\r\n    label = label + str(num)\r\n\r\n    eksekusi.set(label)\r\n\r\n\r\ndef equalpress():\r\n\r\n    try:\r\n\r\n        global label\r\n\r\n        total = str(eval(label))\r\n\r\n        eksekusi.set(total)\r\n        label = str(total)\r\n\r\n    except:\r\n\r\n        eksekusi.set(\" error \")\r\n        label = \"\"\r\n\r\ndef hapus():\r\n    global label\r\n    label = \"\"\r\n    eksekusi.set(\"\")\r\n\r\n\r\n# Driver code\r\nif __name__ == \"__main__\":\r\n    gui = Tk()\r\n\r\n    gui.configure(background=\"black\")\r\n\r\n    gui.title(\"Calculator Reset\")\r\n\r\n    gui.geometry(\"330x150\")\r\n\r\n    eksekusi = StringVar()\r\n\r\n    label_field = Entry(gui, textvariable=eksekusi)\r\n\r\n    label_field.grid(columnspan=4, ipadx=85)\r\n\r\n    button1 = Button(gui, text=' 1 ', fg='black', bg='grey',\r\n                     command=lambda: press(1), height=1, width=7)\r\n    button1.grid(row=3, column=0)\r\n\r\n    button2 = Button(gui, text=' 2 ', fg='black', bg='grey',\r\n                     command=lambda: press(2), height=1, width=7)\r\n    button2.grid(row=3, column=1)\r\n\r\n    button3 = Button(gui, text=' 3 ', fg='black', bg='grey',\r\n                     command=lambda: press(3), height=1, width=7)\r\n    button3.grid(row=3, column=2)\r\n\r\n    button4 = Button(gui, text=' 4 ', fg='black', bg='grey',\r\n                     command=lambda: press(4), height=1, width=7)\r\n    button4.grid(row=2, column=0)\r\n\r\n    button5 = Button(gui, text=' 5 ', fg='black', bg='grey',\r\n                     command=lambda: press(5), height=1, width=7)\r\n    button5.grid(row=2, column=1)\r\n\r\n    button6 = Button(gui, text=' 6 ', fg='black', bg='grey',\r\n                     command=lambda: press(6), height=1, width=7)\r\n    button6.grid(row=2, column=2)\r\n\r\n    button7 = Button(gui, text=' 7 ', fg='black', bg='grey',\r\n                     command=lambda: press(7), height=1, 
width=7) \n button7.grid(row=1, column=0) \n \n button8 = Button(gui, text=' 8 ', fg='black', bg='grey', \n command=lambda: press(8), height=1, width=7) \n button8.grid(row=1, column=1) \n \n button9 = Button(gui, text=' 9 ', fg='black', bg='grey', \n command=lambda: press(9), height=1, width=7) \n button9.grid(row=1, column=2) \n \n button0 = Button(gui, text=' 0 ', fg='black', bg='grey', \n command=lambda: press(0), height=1, width=28) \n button0.grid(row=4, column=0, columnspan=3) \n \n tambah = Button(gui, text=' + ', fg='black', bg='grey', \n command=lambda: press(\"+\"), height=1, width=7) \n tambah.grid(row=1, column=3) \n \n kurang = Button(gui, text=' - ', fg='black', bg='grey', \n command=lambda: press(\"-\"), height=1, width=7) \n kurang.grid(row=2, column=3) \n \n hasil = Button(gui, text=' = ', fg='black', bg='grey', \n command=equalpress, height=1, width=7) \n hasil.grid(row=3, column=3) \n \n hapus = Button(gui, text='Clear', fg='black', bg='grey', \n command=hapus, height=1, width=7) \n hapus.grid(row=4, column='3') \n \n # start the GUI \n gui.mainloop() \n","sub_path":"calcreset.py","file_name":"calcreset.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"281213125","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 3 19:37:26 2021\r\n\r\n@author: Hao Zheng\r\n\"\"\"\r\n\r\nimport torch \r\nimport torch.nn as nn\r\nimport numpy as np\r\nfrom importlib import import_module \r\nimport os\r\nimport nibabel\r\nimport cv2\r\n\r\ntorch.manual_seed(777) # cpu\r\ntorch.cuda.manual_seed(777) #gpu\r\nnp.random.seed(777) #numpy\r\n\r\ndef dice_loss(pred, target):\r\n smooth = 1.\r\n iflat = pred\r\n tflat = target\r\n intersection = ((iflat) * tflat).sum() \r\n return 1-((2. 
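# A hedged numeric sketch of the smoothed ("soft") Dice loss defined in the
# function interrupted here: dice = (2*|A.B| + s) / (|A| + |B| + s) and
# loss = 1 - dice, where the smoothing term s keeps the ratio defined even
# when both masks are empty.
import numpy as np

def dice_loss_np(pred, target, smooth=1.0):
    intersection = (pred * target).sum()
    return 1.0 - (2.0 * intersection + smooth) / (pred.sum() + target.sum() + smooth)

print(dice_loss_np(np.ones(4), np.ones(4)))   # 0.0 for perfect overlap
print(dice_loss_np(np.ones(4), np.zeros(4)))  # 0.8 here; tends to 1 as masks grow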
* intersection + smooth)/((iflat).sum() + (tflat).sum() + smooth))\r\n\r\ndef Tversky_loss(pred, target):\r\n smooth = 1.0\r\n alpha = 0.05\r\n beta = 1-alpha\r\n intersection = (pred*target).sum()\r\n FP = (pred*(1-target)).sum()\r\n FN = ((1-pred)*target).sum()\r\n return 1-(intersection + smooth)/(intersection + alpha*FP + beta*FN + smooth)\r\n\r\ndef root_Tversky_loss(pred, target, dist):\r\n alpha0 = 1\r\n beta0 = 1\r\n alpha = 0.05\r\n beta = 1 - alpha\r\n weight = (0.95*dist+0.05)*alpha0*target + beta0*(1-target)*dist\r\n #weight = 1\r\n smooth = 1.0\r\n sigma1 = 0.0001\r\n sigma2 = 0.0001\r\n weight_i = target*sigma1 + (1-target)*sigma2\r\n intersection = (weight*((pred+weight_i)**0.7)*target).sum()\r\n intersection2 = (weight*(alpha*pred + beta*target)).sum()\r\n return 1-(intersection + smooth)/(intersection2 + smooth)\r\n\r\n\r\ndef save_gradients(path, layer=0): \r\n #load module\r\n casemodel = import_module('WingsNet')\r\n config2, case_net = casemodel.get_model()\r\n checkpoint = torch.load('005_0.ckpt') \r\n case_net.load_state_dict(checkpoint['state_dict'])\r\n case_net = case_net.cuda()\r\n case_net.train() \r\n \r\n grad_in = []\r\n def hook_fn_backward_output(module, grad_input, grad_output):\r\n grad_in.append(grad_output)\r\n \r\n def hook_fn_backward_input(module, grad_input, grad_output):\r\n if module.kernel_size[0]==3:\r\n grad_in.append(grad_output)\r\n \r\n for name, module in list(case_net.named_children()):\r\n if isinstance(module, nn.MaxPool3d):\r\n continue\r\n elif isinstance(module, nn.Conv3d):\r\n module.register_backward_hook(hook_fn_backward_output)\r\n elif isinstance(module, nn.Sigmoid):\r\n continue\r\n else:\r\n for name1, module1 in list(module.named_children()):\r\n if isinstance(module1, nn.Conv3d):\r\n module1.register_backward_hook(hook_fn_backward_input)\r\n \r\n #load data\r\n file_list = os.listdir(path)\r\n file_list.sort()\r\n for idx in range(len(file_list)//6):\r\n img = np.load(os.path.join(path, file_list[6*idx]))\r\n label = np.load(os.path.join(path, file_list[6*idx+2]))\r\n weight = np.load(os.path.join(path, file_list[6*idx+1]))\r\n weight = weight**2.5\r\n weight = weight*label + (1-label)\r\n \r\n #calculate gradients\r\n img = img[np.newaxis,np.newaxis,...]\r\n label = label[np.newaxis,np.newaxis,...]\r\n weight = weight[np.newaxis,np.newaxis,...]\r\n x = torch.from_numpy(img.astype(np.float32)).cuda()\r\n y = torch.from_numpy(label.astype(np.float32)).cuda()\r\n w = torch.from_numpy(weight.astype(np.float32)).cuda()\r\n \r\n cube_size = 128\r\n step = 64\r\n pred = np.zeros(x.shape)\r\n pred_num = np.zeros(x.shape)\r\n grads = np.zeros(x.shape)\r\n grads_num = np.zeros(x.shape)\r\n #sliding window\r\n xnum = (x.shape[2]-cube_size)//step + 1 if (x.shape[2]-cube_size)%step==0 else (x.shape[2]-cube_size)//step + 2\r\n ynum = (x.shape[3]-cube_size)//step + 1 if (x.shape[3]-cube_size)%step==0 else (x.shape[3]-cube_size)//step + 2\r\n znum = (x.shape[4]-cube_size)//step + 1 if (x.shape[4]-cube_size)%step==0 else (x.shape[4]-cube_size)//step + 2\r\n for xx in range(xnum):\r\n xl = step*xx\r\n xr = step*xx + cube_size\r\n if xr > x.shape[2]:\r\n xr = x.shape[2]\r\n xl = x.shape[2]-cube_size\r\n for yy in range(ynum):\r\n yl = step*yy\r\n yr = step*yy + cube_size\r\n if yr > x.shape[3]:\r\n yr = x.shape[3]\r\n yl = x.shape[3] - cube_size\r\n for zz in range(znum):\r\n zl = step*zz\r\n zr = step*zz + cube_size\r\n if zr > x.shape[4]:\r\n zr = x.shape[4]\r\n zl = x.shape[4] - cube_size\r\n \r\n x_input = x[:,:,xl:xr,yl:yr,zl:zr]\r\n p0, 
p = case_net(x_input)\r\n p_numpy = p.cpu().detach().numpy()\r\n pred[:,:,xl:xr,yl:yr,zl:zr] += p_numpy\r\n pred_num[:,:,xl:xr,yl:yr,zl:zr] += 1\r\n \r\n if label[:,:,xl:xr,yl:yr,zl:zr].sum()>0:\r\n loss = root_Tversky_loss(p, y[:,:,xl:xr,yl:yr,zl:zr], w[:,:,xl:xr,yl:yr,zl:zr]) \\\r\n + 10*root_Tversky_loss(p0, y[:,:,xl:xr,yl:yr,zl:zr], w[:,:,xl:xr,yl:yr,zl:zr])\r\n loss.backward()\r\n grad_ec = grad_in[layer][0]\r\n #print(grad_ec3.shape)\r\n grad_ec = grad_ec.cpu().detach().numpy()\r\n grad_ec = np.squeeze(grad_ec, 0)\r\n grad_ec_abs = np.abs(grad_ec)\r\n grad_ec_sum = np.sum(grad_ec_abs, axis=0)\r\n grad_ec_norm = grad_ec_sum/grad_ec_sum.max()\r\n grads[:,:,xl:xr,yl:yr,zl:zr] += grad_ec_norm\r\n grads_num[:,:,xl:xr,yl:yr,zl:zr] += 1\r\n grad_in = []\r\n \r\n \r\n pred = pred/pred_num\r\n pred[pred>=0.5] = 1\r\n pred[pred<0.5] = 0\r\n pred = np.squeeze(pred)\r\n \r\n grads_num[grads_num==0] = 1\r\n grads = grads/grads_num\r\n grads = np.squeeze(grads)\r\n \r\n #for i in range(20):\r\n # grad_ec = grad_in[i][0]\r\n # #print(grad_ec.shape)\r\n # grad_ec = grad_ec.cpu().detach().numpy()\r\n # grad_ec = np.squeeze(grad_ec, 0)\r\n # grad_ec_abs = np.abs(grad_ec)\r\n # grad_ec_sum = np.sum(grad_ec_abs, axis=0)\r\n # grad_ec_norm = grad_ec_sum/grad_ec_sum.max()\r\n # #grad_ec_norm = (grad_ec_sum>(grad_ec_sum.max()/5)).astype(np.uint8)\r\n # #print(grad_ec_sum.max())\r\n # \r\n # down_sample = 128//grad_ec_norm.shape[1]\r\n # ec_slice = grad_ec_norm[:,:,50//down_sample]\r\n # if down_sample>1:\r\n # ec_slice = ndimage.zoom(ec_slice, down_sample)\r\n # save_name = \"F:\\\\airway\\\\gradients\\\\figures\\\\WingsNet_0\\\\\" + \"hh%02d\"%(i) + \".bmp\"\r\n # cv2.imwrite(save_name, 255*ec_slice)\r\n \r\n \r\n save_path = \"data/\"\r\n save_name_grad = save_path + file_list[6*idx+4].split('_')[0] + \"_grad.nii.gz\"\r\n save_name_label = save_path + file_list[6*idx+4].split('_')[0] + \"_label.nii.gz\"\r\n save_name_img = save_path + file_list[6*idx+4].split('_')[0] + \"_img.nii.gz\"\r\n save_name_pred = save_path + file_list[6*idx+4].split('_')[0] + \"_pred.nii.gz\"\r\n \r\n \r\n grad_nii = nibabel.Nifti1Image((grads).astype(np.float32), np.eye(4))\r\n nibabel.save(grad_nii, save_name_grad)\r\n label = label.squeeze()\r\n img = img.squeeze()\r\n label_nii = nibabel.Nifti1Image(label, np.eye(4))\r\n nibabel.save(label_nii, save_name_label)\r\n img_nii = nibabel.Nifti1Image(img, np.eye(4))\r\n nibabel.save(img_nii, save_name_img)\r\n pred_nii = nibabel.Nifti1Image(pred.astype(np.uint8), np.eye(4))\r\n nibabel.save(pred_nii, save_name_pred)\r\n\r\n\r\n","sub_path":"save_gradients.py","file_name":"save_gradients.py","file_ext":"py","file_size_in_byte":7776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"520895229","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the flatlandSpaceStations function below.\ndef flatlandSpaceStations(n, c):\n maxi = 0\n ma = 0\n if n == m:\n return 0\n c.sort()\n for i in range(len(c) - 1):\n if c[i+1] - c[i] - 1 > ma:\n ma = c[i+1] - c[i] - 1\n if ma % 2 == 0:\n maxi = ma // 2\n else:\n maxi = ma // 2 + 1\n maxi = max(maxi, min(c), n - max(c) - 1)\n return maxi\n \nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nm = input().split()\n\n n = int(nm[0])\n\n m = int(nm[1])\n\n c = list(map(int, input().rstrip().split()))\n\n result = flatlandSpaceStations(n, c)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"Flatland Space 
Stations.py","file_name":"Flatland Space Stations.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"126741994","text":"def regex_to_nfa(regex):\n nfa = [{'a':[], 'b':[], 'e':[]} for i in range(len(regex)+1)]\n ops = []\n for idx, r in enumerate(regex):\n # match-transition edge\n if r == 'a' or r == 'b':\n nfa[idx][r].append(idx+1)\n\n # (, ), *\n if r in ['(', ')', '*']:\n nfa[idx]['e'].append(idx+1)\n\n # ( ), |\n start_idx = idx\n if r in ['(', '|']:\n ops.append(idx)\n elif r == ')':\n popped_idx = ops.pop()\n popped_r = regex[popped_idx]\n if popped_r == '|':\n start_idx = ops.pop() # start_r == '('\n nfa[start_idx]['e'].append(popped_idx+1)\n nfa[popped_idx]['e'].append(idx)\n else: # popped_r == '('\n start_idx = popped_idx\n\n # *\n if idx < (len(regex)-1):\n if regex[idx+1] == '*':\n nfa[start_idx]['e'].append(idx+1)\n nfa[idx+1]['e'].append(start_idx)\n\n return nfa\n\ndef nfa_to_dfa(nfa):\n def e_closure(s):\n visit = set()\n found = set(s)\n def dfs(v):\n if v in visit:\n return\n visit.add(v)\n for next_v in nfa[v]['e']:\n found.add(next_v)\n dfs(next_v)\n\n for v in s:\n dfs(v)\n return tuple(list(found))\n \n dfa = []\n state_idx_map = {}\n def recursive_state_finding(state):\n for path in ['a', 'b']:\n new_state = e_closure(set(i for v in state for i in nfa[v][path]))\n if new_state:\n if new_state not in state_idx_map:\n state_idx_map[new_state] = len(dfa)\n dfa.append({'a':set(), 'b':set()})\n recursive_state_finding(new_state)\n dfa[state_idx_map[state]][path].add(state_idx_map[new_state])\n\n start_state = e_closure({0})\n state_idx_map[start_state] = 0\n dfa.append({'a':set(), 'b':set()})\n recursive_state_finding(start_state)\n\n for state, idx in state_idx_map.items():\n dfa[idx]['is_final'] = (len(nfa)-1) in state\n\n return dfa\n\ndef dfa_to_adj_matrix(dfa):\n size = len(dfa)\n adj_matrix = dict({(i, j): 0 for i in range(size) for j in range(size)}, size=size)\n for start in range(size):\n for path in ['a', 'b']:\n for finish in dfa[start][path]:\n adj_matrix[(start, finish)] += 1\n return adj_matrix\n\ndef matrix_exp(mat, n):\n def matrix_mult(m1, m2):\n size = m1['size']\n return dict({(i, j): sum([m1[(i,k)] * m2[(k,j)] for k in range(size)]) % 1000000007 for i in range(size) for j in range(size)}, size=size)\n\n size = mat['size']\n if n == 0:\n return dict({(i, j): 1 if i==j else 0 for i in range(size) for j in range(size)}, size=size)\n elif n == 1:\n return mat\n\n half = matrix_exp(mat, n//2)\n m = matrix_mult(half, half)\n if n % 2 == 1:\n m = matrix_mult(m, mat)\n return m\n\ndef solve(regex, str_len):\n dfa = nfa_to_dfa(regex_to_nfa(regex))\n adj = matrix_exp(dfa_to_adj_matrix(dfa), str_len)\n return sum([adj[(0, i)] for i in range(adj['size']) if dfa[i]['is_final']]) % 1000000007\n\n\ntestcases = int(input())\nfor _ in range(testcases):\n regex, str_len = input().strip().split()\n print(solve(regex, int(str_len)))\n\n","sub_path":"juice500/week1/count_strings.py","file_name":"count_strings.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"553260164","text":"import tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, BatchNormalization\nfrom tensorflow.keras.utils import to_categorical\nimport pickle\nimport cv2\n\nX = pickle.load(open(\"X.pickle\", 
\"rb\"))\ny = pickle.load(open(\"y.pickle\", \"rb\"))\n\n# X = X/255.0 # scale imread data [0,1], pixel values range from 0-255\nX = X / 127.5 - 1 # scale imread data [-1,1], pixel values range from 0-255\n\nmodel = Sequential()\nmodel.add(Conv2D(32,(3,3),padding=\"same\",input_shape=X.shape[1:]))\nmodel.add(Activation(\"relu\"))\nmodel.add(BatchNormalization(axis=-1))\nmodel.add(MaxPooling2D(pool_size=(3,3)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64,(3,3),padding=\"same\"))\nmodel.add(Activation(\"relu\"))\nmodel.add(BatchNormalization(axis=-1))\nmodel.add(Conv2D(64,(3,3),padding=\"same\"))\nmodel.add(Activation(\"relu\"))\nmodel.add(BatchNormalization(axis=-1))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64,(3,3),padding=\"same\"))\nmodel.add(Activation(\"relu\"))\nmodel.add(BatchNormalization(axis=-1))\nmodel.add(Conv2D(64,(3,3),padding=\"same\"))\nmodel.add(Activation(\"relu\"))\nmodel.add(BatchNormalization(axis=-1))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(1024))\nmodel.add(Activation(\"relu\"))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(6))\nmodel.add(Activation(\"softmax\"))\n\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\nhistory = model.fit(X, y, batch_size=5, epochs=5, validation_split=0.1)\nmodel.save('respiratory-disease-classifier.h5')\n\n# plot loss and accuracy during training\nfrom matplotlib import pyplot\n\npyplot.subplot(211)\npyplot.title('Loss')\npyplot.plot(history.history['loss'], label='train')\npyplot.plot(history.history['val_loss'], label='test')\npyplot.legend()\n\npyplot.subplot(212)\npyplot.title('Accuracy')\npyplot.plot(history.history['accuracy'], label='train')\npyplot.plot(history.history['val_accuracy'], label='test')\npyplot.legend()\npyplot.show()","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"474999265","text":"import numpy as np \r\nimport pandas as pd\r\nimport os\r\nimport math\r\nimport random\r\nimport cv2\r\nfrom time import time\r\n\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nimport torch.utils.data as Data\r\n# import wandb\r\n\r\nimport ColorLog as debug\r\n\r\nfrom load_data import labelFpsDataLoader, labelTestDataLoader\r\n# wandb.init(project='plateRecog_crnn', entity='leleleooonnn')\r\n# config = wandb.config\r\n\r\nLR = 0.001\r\nPERSPECTIVE = False\r\nPLATESIZE = (100,32)#(256,96)\r\nBATCHSIZE = 128\r\nEPOCH = 300\r\n\r\n\r\nprovinces = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\", \"鄂\", \"湘\", \"粤\", \"桂\", \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", \"O\"]\r\nalphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',\r\n 'X', 'Y', 'Z', 'O']\r\nads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',\r\n 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\r\nchars = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\",\r\n \"鄂\", \"湘\", \"粤\", 
\"桂\", \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", 'A',\r\n 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\r\n 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\r\n\r\nNUM_PROV = len(provinces)\r\nNUM_ALPB = len(alphabets)\r\nNUM_ADS = len(ads)\r\nNUM_CHAR = len(chars)\r\n\r\n\r\nTRAINDIR = ['CCPD2019/train']\r\nTESTDIR = ['CCPD2019/test']\r\nVALDIR = ['CCPD2019/val']\r\nVALTXT = \"CCPD2019/splits/val.txt\"\r\nTRAINTXT = \"CCPD2019/splits/train.txt\"\r\nTESTTXT = \"CCPD2019/splits/test.txt\"\r\n\r\n# data loading\r\n# image size 720x1160x3\r\nconfig.input_size = PLATESIZE\r\nconfig.perspectiveTrans = PERSPECTIVE\r\n\r\nimage_types = (\".jpg\", \".jpeg\", \".png\", \".bmp\", \".tif\", \".tiff\")\r\n\r\ndef persp_crop(img, corners, height, width):\r\n dst_points = np.array([(width, height), (0, height), (0, 0), (width, 0)], np.float32)\r\n transform_matrix = cv2.getPerspectiveTransform(corners, dst_points)\r\n dst = cv2.warpPerspective(img, transform_matrix, (width, height),flags=cv2.INTER_CUBIC)\r\n #dst = cv2.cvtColor(dst, cv2.COLOR_BGR2YUV)\r\n #dst[:,:,0] = cv2.equalizeHist(dst[:,:,0])\r\n #dst = cv2.cvtColor(dst, cv2.COLOR_YUV2BGR)\r\n return dst\r\n\r\ndef decode(preds):\r\n char_list = []\r\n code_list = []\r\n for i in range(len(preds)):\r\n if preds[i] != NUM_CHAR-1 and (not (i>0 and preds[i] == preds[i-1])):\r\n char_list.append(chars[preds[i]])\r\n code_list.append(preds[i])\r\n return code_list, char_list\r\n\r\ndef label_trans(label_list):\r\n assert len(label_list)==7\r\n out = [0]*7\r\n for ii, el in enumerate(label_list):\r\n if ii==0:\r\n out[ii] = int(el)\r\n if out[ii] == NUM_PROV-1:\r\n out[ii] = NUM_CHAR-1\r\n elif ii ==1:\r\n out[ii] = int(el)+NUM_PROV-1\r\n if out[ii] == NUM_ALPB-1:\r\n out[ii] = NUM_CHAR-1\r\n else:\r\n out[ii] = int(el)+NUM_PROV-1\r\n if out[ii] == NUM_ADS-1:\r\n out[ii] = NUM_CHAR-1\r\n return out\r\n\r\n\r\ndef list_images(basePath, contains=None):\r\n # return the set of files that are valid\r\n print(debug.INFO+\"Loading data under %s\"%basePath)\r\n return list_files(basePath, validExts=image_types, contains=contains)\r\n\r\n\r\ndef list_files(basePath, validExts=None, contains=None):\r\n # loop over the directory structure\r\n for (rootDir, dirNames, filenames) in os.walk(basePath):\r\n # loop over the filenames in the current directory\r\n for filename in filenames:\r\n # if the contains string is not none and the filename does not contain\r\n # the supplied string, then ignore the file\r\n if contains is not None and filename.find(contains) == -1:\r\n continue\r\n\r\n # determine the file extension of the current file\r\n ext = filename[filename.rfind(\".\"):].lower()\r\n\r\n # check to see if the file is an image and should be processed\r\n if validExts is None or ext.endswith(validExts):\r\n # construct the path to the image and yield it\r\n imagePath = os.path.join(rootDir, filename)\r\n yield imagePath\r\n\r\nclass labelFpsDataLoader(Data.Dataset):\r\n def __init__(self, img_dir, imgSize, is_transform=None):\r\n self.img_dir = img_dir\r\n self.img_paths = []\r\n for i in range(len(img_dir)):\r\n self.img_paths += [el for el in list_images(img_dir[i])]\r\n # self.img_paths = os.listdir(img_dir)\r\n # print self.img_paths\r\n self.img_size = imgSize\r\n self.is_transform = is_transform\r\n\r\n def __len__(self):\r\n return len(self.img_paths)\r\n\r\n def __getitem__(self, index):\r\n img_name = self.img_paths[index]\r\n img = 
cv2.imread(img_name)\r\n# plt.imshow(img[:,:,::-1])\r\n# plt.show()\r\n # img = img.astype('float32')\r\n lbl = img_name.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\r\n\r\n iname = img_name.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\r\n # fps = [[int(eel) for eel in el.split('&')] for el in iname[3].split('_')]\r\n # leftUp, rightDown = [min([fps[el][0] for el in range(4)]), min([fps[el][1] for el in range(4)])], [\r\n # max([fps[el][0] for el in range(4)]), max([fps[el][1] for el in range(4)])]\r\n\r\n# print(debug.DEBUG,iname)\r\n\r\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\r\n ori_w, ori_h = [float(int(el)) for el in [img.shape[1], img.shape[0]]]\r\n new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\r\n (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\r\n croppedImage = img[leftUp[1]:rightDown[1],leftUp[0]:rightDown[0]]\r\n resizedImage = cv2.resize(croppedImage, self.img_size)\r\n# cv2.imshow('plate',resizedImage)\r\n# cv2.waitKey(0)\r\n# print(resizedImage.shape)\r\n resizedImage = np.transpose(resizedImage, (2,0,1))\r\n resizedImage = resizedImage.astype('float32')\r\n resizedImage /= 255.0\r\n# plt.imshow(np.transpose(resizedImage, (1,2,0)))\r\n# plt.show()\r\n\r\n# cv2.imshow('plate',np.transpose(resizedImage, (1,2,0)))\r\n# cv2.waitKey(0)\r\n\r\n return resizedImage, new_labels, lbl, img_name, iname\r\n\r\nclass labelFpsPathDataLoader(Data.Dataset):\r\n def __init__(self, pathtxt, baseDir, imgSize, is_transform=False):\r\n# self.img_dir = img_dir\r\n# self.img_paths = []\r\n# for i in range(len(img_dir)):\r\n# self.img_paths += [el for el in list_images(img_dir[i])]\r\n # self.img_paths = os.listdir(img_dir)\r\n # print self.img_paths\r\n print(debug.INFO+\"Loading data under %s\"%pathtxt)\r\n f = open(pathtxt)\r\n self.img_paths = [os.path.join(baseDir, line.rstrip('\\n')) for line in f.readlines()]\r\n f.close()\r\n# print(\"init\")\r\n# print(self.img_paths)\r\n\r\n self.img_size = imgSize\r\n self.is_transform = is_transform\r\n\r\n def __len__(self):\r\n return len(self.img_paths)\r\n\r\n def __getitem__(self, index):\r\n img_name = self.img_paths[index]\r\n img = cv2.imread(img_name,cv2.IMREAD_GRAYSCALE)\r\n lbl = img_name.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\r\n# old_lbl = lbl.split('_')[:7]\r\n# print(\"lbl\",len(old_lbl))\r\n# new_lbl = label_trans(lbl.split('_')[:7])\r\n# print([chars[x] for x in new_lbl])\r\n iname = img_name.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\r\n\r\n# plt.imshow(img[:,:,::-1])\r\n# plt.imshow(img)\r\n# plt.show()\r\n\r\n# print(debug.DEBUG,iname)\r\n\r\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\r\n ori_w, ori_h = [float(int(el)) for el in [img.shape[1], img.shape[0]]]\r\n new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\r\n (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\r\n# print(img.shape)\r\n if not self.is_transform:\r\n croppedImage = img[leftUp[1]:rightDown[1],leftUp[0]:rightDown[0]]\r\n resizedImage = cv2.resize(croppedImage, self.img_size)\r\n else:\r\n corners = np.array([[int(eel) for eel in el.split('&')] for el in iname[3].split('_')], np.float32)\r\n resizedImage = persp_crop(img, corners, self.img_size[1], self.img_size[0])\r\n# print(croppedImage.shape)\r\n resizedImage = np.expand_dims(resizedImage,0)\r\n resizedImage = resizedImage.astype('float32')\r\n 
resizedImage /= 255.0\r\n# plt.imshow(np.transpose(resizedImage, (1,2,0)))\r\n# plt.show()\r\n\r\n return resizedImage, new_labels, lbl, img_name, iname\r\n\r\nclass labelLoader(Data.Dataset):\r\n def __init__(self, img_dir, imgSize, is_transform=None):\r\n self.img_dir = img_dir\r\n self.img_paths = []\r\n for i in range(len(img_dir)):\r\n self.img_paths += [el for el in list_images(img_dir[i])]\r\n # self.img_paths = os.listdir(img_dir)\r\n # print self.img_paths\r\n self.img_size = imgSize\r\n self.is_transform = is_transform\r\n\r\n def __len__(self):\r\n return len(self.img_paths)\r\n\r\n def __getitem__(self, index):\r\n img_name = self.img_paths[index]\r\n# img = cv2.imread(img_name)\r\n# # img = img.astype('float32')\r\n# resizedImage = cv2.resize(img, self.img_size)\r\n# resizedImage = np.transpose(resizedImage, (2,0,1))\r\n# resizedImage = resizedImage.astype('float32')\r\n# resizedImage /= 255.0\r\n lbl = img_name.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\r\n\r\n iname = img_name.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\r\n # fps = [[int(eel) for eel in el.split('&')] for el in iname[3].split('_')]\r\n # leftUp, rightDown = [min([fps[el][0] for el in range(4)]), min([fps[el][1] for el in range(4)])], [\r\n # max([fps[el][0] for el in range(4)]), max([fps[el][1] for el in range(4)])]\r\n\r\n# print(debug.DEBUG,iname)\r\n\r\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\r\n# ori_w, ori_h = [float(int(el)) for el in [img.shape[1], img.shape[0]]]\r\n# new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\r\n# (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\r\n\r\n return lbl, img_name, iname\r\n\r\nclass BidirectionalLSTM(nn.Module):\r\n\r\n def __init__(self, nIn, nHidden, nOut):\r\n super(BidirectionalLSTM, self).__init__()\r\n\r\n self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)\r\n self.embedding = nn.Linear(nHidden * 2, nOut)\r\n\r\n def forward(self, input):\r\n recurrent, _ = self.rnn(input)\r\n T, b, h = recurrent.size()\r\n t_rec = recurrent.view(T * b, h)\r\n\r\n output = self.embedding(t_rec) # [T * b, nOut]\r\n output = output.view(T, b, -1)\r\n\r\n return output\r\n\r\n\r\nclass CRNN(nn.Module):\r\n\r\n def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False):\r\n super(CRNN, self).__init__()\r\n assert imgH % 16 == 0, 'imgH has to be a multiple of 16'\r\n\r\n ks = [3, 3, 3, 3, 3, 3, 2]\r\n ps = [1, 1, 1, 1, 1, 1, 0]\r\n ss = [1, 1, 1, 1, 1, 1, 1]\r\n nm = [64, 128, 256, 256, 512, 512, 512]\r\n\r\n cnn = nn.Sequential()\r\n\r\n def convRelu(i, batchNormalization=False):\r\n nIn = nc if i == 0 else nm[i - 1]\r\n nOut = nm[i]\r\n cnn.add_module('conv{0}'.format(i),\r\n nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))\r\n if batchNormalization:\r\n cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))\r\n if leakyRelu:\r\n cnn.add_module('relu{0}'.format(i),\r\n nn.LeakyReLU(0.2, inplace=True))\r\n else:\r\n cnn.add_module('relu{0}'.format(i), nn.ReLU(True))\r\n\r\n convRelu(0)\r\n cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2)) # 64x16x64\r\n convRelu(1)\r\n cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2)) # 128x8x32\r\n convRelu(2, True)\r\n convRelu(3)\r\n cnn.add_module('pooling{0}'.format(2),\r\n nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 256x4x16\r\n convRelu(4, True)\r\n convRelu(5)\r\n cnn.add_module('pooling{0}'.format(3),\r\n nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 512x2x16\r\n convRelu(6, True) # 
512x1x16\r\n\r\n self.cnn = cnn\r\n self.rnn = nn.Sequential(\r\n BidirectionalLSTM(512, nh, nh),\r\n BidirectionalLSTM(nh, nh, nclass))\r\n\r\n def forward(self, input):\r\n # conv features\r\n conv = self.cnn(input)\r\n b, c, h, w = conv.size()\r\n assert h == 1, \"the height of conv must be 1\"\r\n conv = conv.squeeze(2)\r\n conv = conv.permute(2, 0, 1) # [w, b, c]\r\n\r\n # rnn features\r\n output = self.rnn(conv)\r\n\r\n return output\r\n\r\n\r\ndef eval(model, test_tar):\r\n use_gpu = True\r\n count, error, correct = 0, 0, 0\r\n dst = labelFpsPathDataLoader(test_tar,\"CCPD2019\", PLATESIZE)\r\n# dst = labelFpsDataLoader(test_tar, PLATESIZE)\r\n bsz = BATCHSIZE\r\n testloader = Data.DataLoader(dst, batch_size=bsz, shuffle=True, num_workers=8)\r\n start = time()\r\n# corrs_eachchar = np.zeros((7))\r\n corrs_eachinst =[]\r\n for i, (XI,_, labels, ims, _) in enumerate(testloader):\r\n \r\n corr_eachinst =[]\r\n# assert len(labels) == bsz\r\n count += len(labels)\r\n# YI = [[int(ee) for ee in el.split('_')[:7]] for el in labels]\r\n# labelGT = np.array([[int(ee) for ee in el.split('_')[:7]] for el in labels])\r\n YI = np.array([label_trans(el.split('_')[:7]) for el in labels])\r\n if use_gpu:\r\n x = Variable(XI.cuda())\r\n lbl = Variable(torch.LongTensor(YI).cuda())\r\n else:\r\n x = Variable(XI)\r\n lbl = Variable(torch.LongTensor(YI))\r\n # Forward pass: Compute predicted y by passing x to the model\r\n\r\n y_pred = model(x)\r\n# print(y_pred.shape)\r\n# input()\r\n# outputY = [el.data.cpu().numpy().tolist() for el in y_pred]\r\n# labelPred = [t[0].index(max(t[0])) for each in btch for btch in outputY]\r\n \r\n _, preds = y_pred.max(2)\r\n preds = preds.transpose(1, 0).contiguous()\r\n for i in range(lbl.shape[0]):\r\n n_correct = 0\r\n sim_preds, _ = decode(preds[i].data)\r\n# print(sim_preds,lbl[i].data)\r\n for pred, target in zip(sim_preds, lbl[i].data):\r\n if pred == target:\r\n n_correct += 1\r\n corr_eachinst.append(n_correct)\r\n \r\n\r\n# labelPred = np.array([np.argmax(branch, axis=1) for branch in outputY])\r\n# print(labelPred)\r\n# scoreboard = (labelPred.T == labelGT)\r\n# corr_eachinst = np.sum(scoreboard, axis=1)\r\n# corr_eachchar = np.sum(scoreboard, axis=0)\r\n# print(corr_eachinst,len(corr_eachinst))\r\n# assert len(corr_eachinst) == bsz\r\n# assert len(corr_eachchar) == 7\r\n corrs_eachinst = np.append(corrs_eachinst,corr_eachinst)\r\n# corrs_eachchar = corrs_eachchar+corr_eachchar\r\n \r\n# tmp = corr_eachchar/len(labels)\r\n# table = wandb.Table(data=list(corr_eachchar/len(labels)),columns=['1','2','3','4','5','6','7'])\r\n \r\n \r\n if i%10 ==1:\r\n print(debug.INFO+\"image: {}, inst:{}\".format(count,np.mean(corrs_eachinst)))#, corrs_eachchar/count))\r\n# def isEqual(labelGT, labelP):\r\n# compare = [1 if int(labelGT[i]) == int(labelP[i]) else 0 for i in range(7)]\r\n# # print(sum(compare))\r\n# return sum(compare)\r\n\r\n# # compare YI, outputY\r\n# try:\r\n# if isEqual(labelPred, YI[0]) == 7:\r\n# correct += 1\r\n# else:\r\n# pass\r\n# except:\r\n# print(debug.WARN+\"val fails\")\r\n# error += 1\r\n# correct, error, float(correct) / count,\r\n # wandb.log({'val':{\r\n # 'image#':count,\r\n # 'corr_in_instance':np.mean(corrs_eachinst),\r\n # 'accu_instance':np.mean(corrs_eachinst)/7,\r\n # 'accu_all_corr':len(corrs_eachinst[corrs_eachinst==7]),\r\n # 'corr_distrb':wandb.Histogram(corrs_eachinst),\r\n # 'corr_inst':corrs_eachinst\r\n # }}) \r\n \r\n return count, corrs_eachinst, np.mean(corrs_eachinst)/7, (time()-start) / count\r\n\r\n\r\ndef 
train_model(model, trainloader, criterion, optimizer,batchSize, testDirs,storeName, num_epochs=25, logFile=\"./train_log.txt\"):\r\n # since = time.time()\r\n use_gpu = True\r\n lrScheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1, verbose=True)\r\n cnt = 0\r\n\r\n \r\n for epoch in range(num_epochs):\r\n lossAver = []\r\n model.train(True)\r\n lrScheduler.step()\r\n start = time()\r\n print(debug.INFO+\"Epoch {} started at {}\".format(epoch,start))\r\n\r\n for i, (XI, _, labels, _, _) in enumerate(trainloader):\r\n cnt +=1\r\n if not len(XI) == batchSize:\r\n \r\n continue\r\n \r\n YI = [label_trans(el.split('_')[:7]) for el in labels]\r\n# Y = np.array([el.numpy() for el in Y]).T\r\n if use_gpu:\r\n x = Variable(XI.cuda())\r\n lbl = Variable(torch.LongTensor(YI).cuda())\r\n# y = Variable(torch.FloatTensor(Y).cuda(), requires_grad=False)\r\n else:\r\n x = Variable(XI)\r\n lbl = Variable(torch.LongTensor(YI))\r\n \r\n# print(debug.INFO+\"input shape {}\".format(x.shape))\r\n y_pred = model(x)\r\n# print(debug.INFO+\"output size:\",y_pred.shape)\r\n# print(debug.INFO+\"output shape {}\".format([yy.shape for yy in y_pred]))\r\n# try:\r\n# y_pred = model(x)\r\n# print(debug.INFO+\"output shape {}\".format(y_pred.shape))\r\n \r\n# except:\r\n# print(debug.WARN+\"iter %d model prediction fails\"%i)\r\n# continue\r\n \r\n # Compute and print loss\r\n# loss = 0.0\r\n# train_correct = []\r\n# loss += 0.8 * nn.L1Loss().cuda()(fps_pred[:][:2], y[:][:2])\r\n# loss += 0.2 * nn.L1Loss().cuda()(fps_pred[:][2:], y[:][2:])\r\n y_pred = F.log_softmax(y_pred,dim=2)\r\n preds_size = Variable(torch.IntTensor([y_pred.size(0)] * batchSize))\r\n tars_size = Variable(torch.IntTensor([7] * batchSize))\r\n# print(\"loss input\",y_pred,lbl,preds_size,tars_size)\r\n loss = criterion(y_pred,lbl,preds_size,tars_size)\r\n# for j in range(7):\r\n# l = lbl[:,j]\r\n# loss += criterion(y_pred[j], l)\r\n# train_correct.append(np.argmax(y_pred[j],axis=1))\r\n# acc = len(train_correct[train_correct==0])/len(train_correct)\r\n\r\n# def isEqual(labelGT, labelP):\r\n# compare = [1 if int(labelGT[i]) == int(labelP[i]) else 0 for i in range(7)]\r\n# # print(sum(compare))\r\n# return sum(compare)\r\n \r\n# for ii in range(batchSize):\r\n# if isEqual(labelPred, YI[ii]) == 7:\r\n# correct += 1\r\n\r\n\r\n \r\n # Zero gradients, perform a backward pass, and update the weights.\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n# print(loss)\r\n lossAver.append(loss.item())\r\n\r\n# try:\r\n# print(loss)\r\n# lossAver.append(loss.data[0])\r\n# except:\r\n# print(debug.ERR+\"iter %d lossAver append error\"%i)\r\n # if cnt % 100 == 0:\r\n # wandb.log({'train':{\r\n # 'cur_loss':loss,\r\n # 'ave_loss':np.mean(lossAver)\r\n # }}) \r\n \r\n if i % 50 == 1:\r\n print('trained %s images, use %s seconds, loss %s\\n' % (i*batchSize, time() - start, sum(lossAver) / len(lossAver) if len(lossAver)>0 else 'NoLoss'))\r\n with open(logFile, 'a') as outF:\r\n outF.write('trained %s images, use %s seconds, loss %s\\n' % (i*batchSize, time() - start, sum(lossAver) / len(lossAver) if len(lossAver)>0 else 'NoLoss'))\r\n torch.save(model.state_dict(), storeName)\r\n print ('*************Epoch %s Avrg Training loss %s Elapsed %s\\n' % (epoch, sum(lossAver) / len(lossAver), time()-start))\r\n \r\n model.eval()\r\n count, correct, precision, avgTime = eval(model, testDirs)\r\n with open(logFile, 'a') as outF:\r\n outF.write('Epoch %s Avrg Training loss %s Elapsed %s\\n' % (epoch, sum(lossAver) / len(lossAver), time() - 
start))\r\n outF.write('************* Validation: total %s precision %s avgTime %s\\n' % (count, precision, avgTime))\r\n torch.save(model.state_dict(), storeName + str(epoch))\r\n print('************* Validation: total %s precision %s avgTime %s\\n' % (count, precision, avgTime))\r\n return model\r\n\r\n# training\r\ntorch.cuda.set_device(0)\r\ntorch.cuda.empty_cache()\r\n\r\n# model = DigitRecog(PLATESIZE).cuda()\r\nmodel = CRNN(imgH=32, nc=1, nclass=NUM_CHAR, nh=256, n_rnn=2, leakyRelu=False).cuda()\r\n# criterion = nn.CrossEntropyLoss()\r\n# wandb.watch(model)\r\ncriterion = nn.CTCLoss(blank=NUM_CHAR-1,reduction='mean').cuda()\r\noptimizer = optim.Adam(model.parameters(),lr=LR)\r\n# lrScheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)\r\n# optimizer_conv = optim.RMSprop(model_conv.parameters(), lr=0.01, momentum=0.9)\r\n# optimizer_conv = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\r\nprint(debug.INFO+\"Start loading dataset...\")\r\n# dst = labelFpsDataLoader(TRAINDIR, PLATESIZE)\r\ndst = labelFpsPathDataLoader(TRAINTXT,\"CCPD2019\", PLATESIZE)\r\nprint(debug.INFO+\"Got dataset size %d\"%len(dst))\r\ntrainloader = Data.DataLoader(dst, batch_size=BATCHSIZE, shuffle=True, num_workers=8)\r\nprint(debug.INFO+\"Done loading dataset\")\r\n\r\nprint(debug.INFO+\"Start training\")\r\nmodel = train_model(model=model, trainloader=trainloader, criterion=criterion, optimizer=optimizer,\r\n batchSize=BATCHSIZE, testDirs=VALTXT,storeName='./weight/crnn.pth', num_epochs=EPOCH, logFile=\"./train_log.txt\")\r\n","sub_path":"crnn/train_crnn_100_32.py","file_name":"train_crnn_100_32.py","file_ext":"py","file_size_in_byte":23181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"89241138","text":"import datetime\nimport docopt\nimport logging\nimport os\nimport pathlib\nimport shutil\nimport sys\n\nfrom . 
import config\n\nlog = logging.getLogger(__name__)\n\n\ndef get_template(base_dir: pathlib.Path) -> pathlib.Path:\n    \"\"\"Return the absolute path to the raw template for daily logs.\"\"\"\n\n    log.debug(\"get_template\")\n    return base_dir.joinpath(\"daily_schedule-j.md\")\n\n\ndef get_log_folder_for_month(\n    log_folder_base: pathlib.Path, now: datetime.date\n) -> pathlib.Path:\n    \"\"\"Get the folder to contain today's log.\"\"\"\n\n    log.debug(\"get_log_folder_for_month\")\n\n    log_epoch_foldername = \"day-tracking-juniper\"\n    log_month_foldername = now.strftime(\"%m_%B_%y\")\n    log.debug(f\"folder name = {log_epoch_foldername}/{log_month_foldername}.\")\n    return log_folder_base.joinpath(log_epoch_foldername, log_month_foldername)\n\n\ndef create_new_log(\n    now: datetime.date,\n    template_file: pathlib.Path,\n    log_folder: pathlib.Path,\n    force: bool = False,\n):\n    \"\"\"Copy the template to the log folder and update any dynamic elements within the file.\"\"\"\n\n    log.debug(\"create_new_log\")\n\n    log_file_name = now.strftime(\"%b_%d_%y.md\").lower()\n    log_file_path = log_folder.joinpath(log_file_name)\n    if not log_file_path.exists() or force:\n        log.debug(f\"Creating log file at {log_file_path}\")\n        log_folder.mkdir(parents=True, exist_ok=True)\n        shutil.copy2(template_file, log_file_path)\n\n\ndef create_new_day(cfg: config.MyDailyLogConfig):\n    log.debug(\"create_new_day\")\n\n    log_day = datetime.date.today() - datetime.timedelta(days=int(cfg.day_offset))\n    log.debug(f\"Log file day is being set to {log_day}.\")\n\n    base_folder: pathlib.Path = pathlib.Path(__file__).resolve().parent\n    template: pathlib.Path = get_template(base_folder)\n    log_folder: pathlib.Path = get_log_folder_for_month(base_folder, log_day)\n    create_new_log(log_day, template, log_folder)\n","sub_path":"my-daily-log/new_day.py","file_name":"new_day.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"230235444","text":"# The MIT License (MIT)\r\n#\r\n# Original work Copyright (c) 2016 Taehoon Kim\r\n# Modified work Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
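# A small sketch of the strftime naming scheme used in the daily-log record
# above (the date value is hypothetical):
import datetime

d = datetime.date(2021, 3, 5)
print(d.strftime("%m_%B_%y"))             # 03_March_21 -> monthly log folder
print(d.strftime("%b_%d_%y.md").lower())  # mar_05_21.md -> daily log file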
IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n\r\nimport tensorflow as tf\r\nfrom model import Tower\r\nfrom utils import model_property\r\n\r\nimage_summary = tf.summary.image\r\nscalar_summary = tf.summary.scalar\r\nhistogram_summary = tf.summary.histogram\r\nmerge_summary = tf.summary.merge\r\nSummaryWriter = tf.summary.FileWriter\r\n\r\n\r\nclass batch_norm(object):\r\n \"\"\"\r\n This class creates an op that composes the specified tensor with a batch\r\n normalization layer.\r\n \"\"\"\r\n\r\n def __init__(self, epsilon=1e-5, momentum=0.9, name=\"batch_norm\"):\r\n \"\"\"Instance initialization\"\"\"\r\n with tf.variable_scope(name):\r\n self.epsilon = epsilon\r\n self.momentum = momentum\r\n self.name = name\r\n\r\n def __call__(self, x, train=True):\r\n \"\"\"\r\n Functional interface\r\n\r\n Args:\r\n x: tensor to compose\r\n train: set to True during training and False otherwise\r\n \"\"\"\r\n return tf.contrib.layers.batch_norm(x,\r\n decay=self.momentum,\r\n updates_collections=None,\r\n epsilon=self.epsilon,\r\n scale=True,\r\n is_training=train,\r\n scope=self.name)\r\n\r\n\r\ndef conv_cond_concat(x, y):\r\n \"\"\"\r\n Concatenate conditioning matrix across channel axis.\r\n\r\n The specified input tensor is concatenated with K feature maps (K = number of classes)\r\n across the channel dimension. Each of the K feature maps is set to all-zeros except for\r\n the one whose index matches the target class (which is set to all-ones).\r\n\r\n Args:\r\n x: non-conditioned tensor. Shape: [N, H, W, C]\r\n y: one-hot encoded conditioning matrix. Shape: [N, K]\r\n\r\n Returns:\r\n conditioned feature map. Shape: [N, H, W, C + K]\r\n \"\"\"\r\n x_shapes = x.get_shape()\r\n y_shapes = y.get_shape()\r\n batch_size = tf.shape(x)[0]\r\n return tf.concat([x, y * tf.ones([batch_size, int(x_shapes[1]), int(x_shapes[2]), int(y_shapes[3])])], 3)\r\n\r\n\r\ndef conv2d(input_, output_dim,\r\n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\r\n name=\"conv2d\"):\r\n \"\"\"\r\n Compose specified symbol with 2D convolution layer\r\n\r\n Args:\r\n input_: tensor to compose. Shape: [N, H, W, C]\r\n output_dim: number of output features maps\r\n k_h: kernel height\r\n k_w: kernel width\r\n d_h: horizontal stride\r\n d_w: vertical stride\r\n stddev: standard deviation of gaussian distribution to use for random weight initialization\r\n name: name scope\r\n\r\n Returns:\r\n Composed tensor.\r\n \"\"\"\r\n with tf.variable_scope(name):\r\n w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],\r\n initializer=tf.truncated_normal_initializer(stddev=stddev))\r\n conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')\r\n\r\n biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))\r\n conv = tf.nn.bias_add(conv, biases)\r\n\r\n return conv\r\n\r\n\r\ndef deconv2d(input_, output_shape,\r\n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\r\n name=\"deconv2d\", with_w=False):\r\n \"\"\"\r\n Compose specified symbol with 2D *transpose* convolution layer\r\n\r\n Args:\r\n input_: tensor to compose. 
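# A hedged NumPy sketch of the conv_cond_concat trick documented above: the
# one-hot label y is broadcast to K constant feature maps and concatenated
# onto x along the channel axis (all shapes here are assumed for illustration).
import numpy as np

N, H, W, C, K = 2, 4, 4, 3, 10
x = np.zeros((N, H, W, C), dtype=np.float32)
labels = np.array([3, 7])
y = np.eye(K, dtype=np.float32)[labels].reshape(N, 1, 1, K)  # one-hot per sample
cond = np.concatenate([x, y * np.ones((N, H, W, K), np.float32)], axis=3)
assert cond.shape == (N, H, W, C + K)  # conditioned map: [N, H, W, C + K]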
Shape: [N, H, W, C]\r\n output_shape: output shape\r\n k_h: kernel height\r\n k_w: kernel width\r\n d_h: horizontal stride\r\n d_w: vertical stride\r\n stddev: standard deviation of gaussian distribution to use for random weight initialization\r\n name: name scope\r\n\r\n Returns:\r\n Composed tensor.\r\n \"\"\"\r\n with tf.variable_scope(name):\r\n # filter : [height, width, output_channels, in_channels]\r\n w = tf.get_variable('w',\r\n [k_h, k_w, output_shape[-1],\r\n input_.get_shape()[-1]],\r\n initializer=tf.random_normal_initializer(stddev=stddev))\r\n deconv = tf.nn.conv2d_transpose(input_, w,\r\n output_shape=output_shape,\r\n strides=[1, d_h, d_w, 1])\r\n\r\n biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))\r\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape)\r\n\r\n if with_w:\r\n return deconv, w, biases\r\n else:\r\n return deconv\r\n\r\n\r\ndef lrelu(x, leak=0.2, name=\"lrelu\"):\r\n \"\"\"Compose specified tensor with leaky Rectifier Linear Unit\"\"\"\r\n return tf.maximum(x, leak*x)\r\n\r\n\r\ndef linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):\r\n \"\"\"\r\n Compose specified tensor with linear (fully-connected) layer\r\n\r\n Args:\r\n input_: tensor to compose. Shape: [N, M]\r\n output_size: number of output neurons\r\n scope: name scope\r\n stddev: standard deviation of gaussian distribution to use for random weight initialization\r\n name: name scope\r\n with_w: whether to also return parameter variables\r\n\r\n Returns:\r\n Composed tensor. Shape: [N, output_size]\r\n \"\"\"\r\n shape = input_.get_shape().as_list()\r\n\r\n with tf.variable_scope(scope or \"Linear\"):\r\n matrix = tf.get_variable(\"Matrix\", [shape[1], output_size], tf.float32,\r\n tf.random_normal_initializer(stddev=stddev))\r\n bias = tf.get_variable(\"bias\", [output_size],\r\n initializer=tf.constant_initializer(bias_start))\r\n if with_w:\r\n return tf.matmul(input_, matrix) + bias, matrix, bias\r\n else:\r\n return tf.matmul(input_, matrix) + bias\r\n\r\n\r\nclass UserModel(Tower):\r\n \"\"\"\r\n User Model definition\r\n\r\n DIGITS creates an instance of this class for every tower it needs\r\n to create. This includes:\r\n - one for training,\r\n - one for validation,\r\n - one for testing.\r\n\r\n In the case of multi-GPU training, one training instance is created\r\n for every GPU. DIGITS takes care of doing the gradient averaging\r\n across GPUs so this class only needs to define the inference op\r\n and desired loss/cost function.\r\n \"\"\"\r\n\r\n def __init__(self, *args, **kwargs):\r\n \"\"\"\r\n Identify the correct input nodes.\r\n\r\n In the parent class, DIGITS conveniently sets the following fields:\r\n - self.is_training: whether this is a training graph\r\n - self.is_inference: whether this graph is created for inference/testing\r\n - self.x: input node. Shape: [N, H, W, C]\r\n - self.y: label. 
Shape: [N] for scalar labels, [N, H, W, C] otherwise.\r\n Only defined if self._is_training is True\r\n \"\"\"\r\n super(UserModel, self).__init__(*args, **kwargs)\r\n\r\n # initialize graph with parameters for MNIST\r\n self.dcgan_init(image_size=28,\r\n y_dim=10,\r\n output_size=28,\r\n c_dim=1)\r\n\r\n @model_property\r\n def inference(self):\r\n \"\"\"op to use for inference\"\"\"\r\n\r\n # inference op is the output of the generator after rescaling\r\n # to the 8-bit range\r\n return tf.to_int32(self.G * 255)\r\n\r\n @model_property\r\n def loss(self):\r\n \"\"\"\r\n Loss function\r\n\r\n Returns either an op or a list of dicts.\r\n If the returned value is an op then DIGITS will optimize against this op\r\n with respect to all trainable variables.\r\n If the returned value is a list then DIGITS will optimize against each\r\n loss in the list with respect to the specified variables.\r\n \"\"\"\r\n\r\n # here we are returning a list because we want to alternately optimize the\r\n # discriminator on real samples, the discriminator on fake samples and the\r\n # generator.\r\n losses = [\r\n {'loss': self.d_loss_real, 'vars': self.d_vars},\r\n {'loss': self.d_loss_fake, 'vars': self.d_vars},\r\n {'loss': self.g_loss, 'vars': self.g_vars}\r\n ]\r\n return losses\r\n\r\n def dcgan_init(self, image_size=108,\r\n output_size=64, y_dim=None, z_dim=100, gf_dim=64, df_dim=64,\r\n gfc_dim=1024, dfc_dim=1024, c_dim=3):\r\n \"\"\"\r\n Create the model\r\n\r\n Args:\r\n output_size: (optional) The resolution in pixels of the images. [64]\r\n y_dim: (optional) Dimension of dim for y. [None]\r\n z_dim: (optional) Dimension of dim for Z. [100]\r\n gf_dim: (optional) Dimension of gen filters in first conv layer. [64]\r\n df_dim: (optional) Dimension of discrim filters in first conv layer. [64]\r\n gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]\r\n dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]\r\n c_dim: (optional) Dimension of image color. For grayscale input, set to 1. 
[3]\r\n \"\"\"\r\n self.image_size = image_size\r\n self.output_size = output_size\r\n\r\n self.y_dim = y_dim\r\n self.z_dim = z_dim\r\n\r\n self.gf_dim = gf_dim\r\n self.df_dim = df_dim\r\n\r\n self.gfc_dim = gfc_dim\r\n self.dfc_dim = dfc_dim\r\n\r\n self.c_dim = c_dim\r\n\r\n self.batch_size = tf.shape(self.x)[0]\r\n\r\n # batch normalization : deals with poor initialization helps gradient flow\r\n self.d_bn1 = batch_norm(name='d_bn1')\r\n self.d_bn2 = batch_norm(name='d_bn2')\r\n\r\n self.g_bn0 = batch_norm(name='g_bn0')\r\n self.g_bn1 = batch_norm(name='g_bn1')\r\n self.g_bn2 = batch_norm(name='g_bn2')\r\n\r\n self.build_model()\r\n\r\n def build_model(self):\r\n \"\"\"Create the main ops\"\"\"\r\n\r\n if not self.is_inference:\r\n # create both the generator and the discriminator\r\n # self.x is a batch of images - shape: [N, H, W, C]\r\n # self.y is a vector of labels - shape: [N]\r\n\r\n # sample z from a normal distribution\r\n self.z = tf.random_normal(shape=[self.batch_size, self.z_dim], dtype=tf.float32, seed=None, name='z')\r\n\r\n # rescale x to [0, 1]\r\n x_reshaped = tf.reshape(self.x, shape=[self.batch_size, self.image_size, self.image_size, self.c_dim],\r\n name='x_reshaped')\r\n self.images = x_reshaped / 255.\r\n\r\n # one hot encode the label - shape: [N] -> [N, self.y_dim]\r\n self.y = tf.one_hot(self.y, self.y_dim, name='y_onehot')\r\n\r\n # create the generator\r\n self.G = self.generator(self.z, self.y)\r\n\r\n # create one instance of the discriminator for real images (the input is\r\n # images from the dataset)\r\n self.D, self.D_logits = self.discriminator(self.images, self.y, reuse=False)\r\n\r\n # create another instance of the discriminator for fake images (the input is\r\n # the discriminator). Note how we are reusing variables to share weights between\r\n # both instances of the discriminator\r\n self.D_, self.D_logits_ = self.discriminator(self.G, self.y, reuse=True)\r\n\r\n # aggregate losses across batch\r\n\r\n # we are using the cross entropy loss for all these losses\r\n d_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,\r\n labels=tf.ones_like(self.D),\r\n name=\"loss_D_real\")\r\n self.d_loss_real = tf.reduce_mean(d_real)\r\n d_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,\r\n labels=tf.zeros_like(self.D_),\r\n name=\"loss_D_fake\")\r\n self.d_loss_fake = tf.reduce_mean(d_fake)\r\n self.d_loss = (self.d_loss_real + self.d_loss_fake) / 2.\r\n # the typical GAN set-up is that of a minimax game where D is trying to minimize\r\n # its own error and G is trying to maximize D's error however note how we are flipping G labels here:\r\n # instead of maximizing D's error, we are minimizing D's error on the 'wrong' label\r\n # this trick helps produce a stronger gradient\r\n g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,\r\n labels=tf.ones_like(self.D_),\r\n name=\"loss_G\")\r\n self.g_loss = tf.reduce_mean(g_loss)\r\n\r\n # create some summaries for debug and monitoring\r\n self.summaries.append(histogram_summary(\"z\", self.z))\r\n self.summaries.append(histogram_summary(\"d\", self.D))\r\n self.summaries.append(histogram_summary(\"d_\", self.D_))\r\n self.summaries.append(image_summary(\"G\", self.G, max_outputs=5))\r\n self.summaries.append(image_summary(\"X\", self.images, max_outputs=5))\r\n self.summaries.append(histogram_summary(\"G_hist\", self.G))\r\n self.summaries.append(histogram_summary(\"X_hist\", self.images))\r\n self.summaries.append(scalar_summary(\"d_loss_real\", 
self.d_loss_real))\r\n self.summaries.append(scalar_summary(\"d_loss_fake\", self.d_loss_fake))\r\n self.summaries.append(scalar_summary(\"g_loss\", self.g_loss))\r\n self.summaries.append(scalar_summary(\"d_loss\", self.d_loss))\r\n\r\n # all trainable variables\r\n t_vars = tf.trainable_variables()\r\n # G's variables\r\n self.g_vars = [var for var in t_vars if 'g_' in var.name]\r\n # D's variables\r\n self.d_vars = [var for var in t_vars if 'd_' in var.name]\r\n\r\n # Extra hook for debug: log chi-square distance between G's output histogram and the dataset's histogram\r\n value_range = [0.0, 1.0]\r\n nbins = 100\r\n hist_g = tf.histogram_fixed_width(self.G, value_range, nbins=nbins, dtype=tf.float32) / nbins\r\n hist_images = tf.histogram_fixed_width(self.images, value_range, nbins=nbins, dtype=tf.float32) / nbins\r\n chi_square = tf.reduce_mean(tf.div(tf.square(hist_g - hist_images), hist_g + hist_images + 1e-5))\r\n self.summaries.append(scalar_summary(\"chi_square\", chi_square))\r\n else:\r\n # Create only the generator\r\n\r\n # self.x is the conditioned latent representation - shape: [self.batch_size, 1, self.z_dim + self.y_dim]\r\n self.x = tf.reshape(self.x, shape=[self.batch_size, self.z_dim + self.y_dim])\r\n # extract z and y\r\n self.y = self.x[:, self.z_dim:self.z_dim + self.y_dim]\r\n self.z = self.x[:, :self.z_dim]\r\n # create an instance of the generator\r\n self.G = self.generator(self.z, self.y)\r\n\r\n def discriminator(self, image, y=None, reuse=False):\r\n \"\"\"\r\n Create the discriminator\r\n\r\n This creates a string of layers:\r\n - input - [N, 28, 28, 1]\r\n - concat conditioning - [N, 28, 28, 11]\r\n - conv layer with 11 5x5 kernels and 2x2 stride - [N, 14, 14, 11]\r\n - leaky relu - [N, 14, 14, 11]\r\n - concat conditioning - [N, 14, 14, 21]\r\n - conv layer with 74 5x5 kernels and 2x2 stride - [N, 7, 7, 74]\r\n - batch norm - [N, 14, 14, 64]\r\n - leaky relu - [N, 14, 14, 64]\r\n - flatten - [N, 3626]\r\n - concat conditioning - [N, 3636]\r\n - linear layer with 1014 output neurons - [N, 1024]\r\n - batch norm - [N, 1024]\r\n - leaky relu - [N, 1024]\r\n - concat conditioning - [N, 1034]\r\n - linear layer with 1 output neuron - [N, 1]\r\n\r\n Args:\r\n image: batch of input images - shape: [N, H, W, C]\r\n y: batch of one-hot encoded labels - shape: [N, K]\r\n reuse: whether to re-use previously created variables\r\n \"\"\"\r\n with tf.variable_scope(\"discriminator\") as scope:\r\n if reuse:\r\n # re-use (share) variables\r\n scope.reuse_variables()\r\n\r\n yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])\r\n x = conv_cond_concat(image, yb)\r\n\r\n h0 = lrelu(conv2d(x, self.c_dim + self.y_dim, name='d_h0_conv'))\r\n h0 = conv_cond_concat(h0, yb)\r\n\r\n h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv'), train=self.is_training))\r\n sz = h1.get_shape()\r\n h1 = tf.reshape(h1, [self.batch_size, int(sz[1] * sz[2] * sz[3])])\r\n h1 = tf.concat([h1, y], 1)\r\n\r\n h2 = lrelu(self.d_bn2(linear(h1, self.dfc_dim, 'd_h2_lin'), train=self.is_training))\r\n h2 = tf.concat([h2, y], 1)\r\n\r\n h3 = linear(h2, 1, 'd_h3_lin')\r\n\r\n return tf.nn.sigmoid(h3), h3\r\n\r\n def generator(self, z, y=None):\r\n \"\"\"\r\n Create the generator\r\n\r\n This creates a string of layers:\r\n - input - [N, 100]\r\n - concatenate conditioning - [N, 110]\r\n - linear layer with 1024 output neurons - [N, 1024]\r\n - batch norm - [N, 1024]\r\n - relu - [N, 1024]\r\n - concatenate conditioning - [N, 1034]\r\n - linear layer with 7*7*128=6272 output 
neurons - [N, 6272]\r\n - reshape 7x7 feature maps - [N, 7, 7, 128]\r\n - concatenate conditioning - [N, 7, 7, 138]\r\n - transpose convolution with 128 filters and stride 2 - [N, 14, 14, 128]\r\n - batch norm - [N, 14, 14, 128]\r\n - relu - [N, 14, 14, 128]\r\n - concatenate conditioing - [N, 14, 14, 138]\r\n - transpose convolution with 1 filter and stride 2 - [N, 28, 28, 1]\r\n \"\"\"\r\n with tf.variable_scope(\"generator\"):\r\n\r\n s = self.output_size\r\n s2, s4 = int(s/2), int(s/4)\r\n\r\n yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])\r\n z = tf.concat([z, y], 1)\r\n\r\n h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'), train=self.is_training))\r\n h0 = tf.concat([h0, y], 1)\r\n\r\n h1 = tf.nn.relu(self.g_bn1(linear(h0, self.gf_dim*2*s4*s4, 'g_h1_lin'), train=self.is_training))\r\n h1 = tf.reshape(h1, [self.batch_size, s4, s4, self.gf_dim * 2])\r\n\r\n h1 = conv_cond_concat(h1, yb)\r\n h2 = tf.nn.relu(self.g_bn2(deconv2d(h1, [self.batch_size, s2, s2, self.gf_dim * 2], name='g_h2'),\r\n train=self.is_training))\r\n h2 = conv_cond_concat(h2, yb)\r\n\r\n return tf.nn.sigmoid(deconv2d(h2, [self.batch_size, s, s, self.c_dim], name='g_h3'))\r\n","sub_path":"examples/gan/network-mnist.py","file_name":"network-mnist.py","file_ext":"py","file_size_in_byte":20090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"283996284","text":"#### Code to print sum of two singly linked list\n\n\nclass Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n self.last_node = None\n\n def append(self,data):\n if self.last_node is None:\n self.head = Node(data)\n self.last_node = self.head\n\n else:\n self.last_node.next = Node(data)\n self.last_node = self.last_node.next\n\n def display(self):\n current = self.head\n while current:\n print(current.data,end=' ')\n current = current.next\n print(end='\\n')\n\n\ndef sum_list(list1,list2):\n reverse_llist(list1)\n reverse_llist(list2)\n list1.display()\n list2.display()\n\n\ndef reverse_llist(llist):\n before = None\n current = llist.head\n if current is None:\n return\n after = current.next\n while after:\n current.next = before\n before = current\n current = after\n after = after.next\n\n current.next = before\n llist.head = current\n\n\n\none_list = LinkedList()\ntwo_list = LinkedList()\ndata_list_1 = input('PLease enter list1: ').split()\nfor data in data_list_1:\n one_list.append(int(data))\n\ndata_list_2 = input('PLease enter list 2: ').split()\nfor data in data_list_2:\n two_list.append(int(data))\n\none_list.display()\ntwo_list.display()\n\nsum_list(one_list,two_list)\n\n\n","sub_path":"linkedList/sum_of_two_singly_linked_list.py","file_name":"sum_of_two_singly_linked_list.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"610418510","text":"\"\"\"Calculate heterogeneity and sub-clonal populations for complex input samples.\n\nUse allele frequencies, copy number calls and structural variants to infer\nsub-clonal populations within a potentially mixed population. 
This attempts\nto infer these sub-clones to help improve variant calls and interpretation\nespecially in complex cancer samples.\n\"\"\"\nimport collections\n\nfrom bcbio.heterogeneity import theta\nfrom bcbio.pipeline import datadict as dd\nfrom bcbio.variation import vcfutils\n\ndef _get_cnvs(data):\n \"\"\"Retrieve CNV calls to use for heterogeneity analysis.\n \"\"\"\n supported = set([\"cnvkit\"])\n out = []\n for sv in data.get(\"sv\", []):\n if sv[\"variantcaller\"] in supported:\n out.append(sv)\n return out\n\ndef _get_variants(data):\n \"\"\"Retrieve set of variant calls to use for heterogeneity analysis.\n \"\"\"\n supported = set([\"ensemble\", \"vardict\", \"freebayes\", \"mutect\"])\n out = []\n for v in data.get(\"variants\", []):\n if v[\"variantcaller\"] in supported:\n out.append(v)\n return out\n\ndef _ready_for_het_analysis(items):\n \"\"\"Check if a sample has input information for heterogeneity analysis.\n\n We currently require a tumor/normal sample containing both CNV and variant calls.\n \"\"\"\n paired = vcfutils.get_paired_bams([dd.get_align_bam(d) for d in items], items)\n if paired and paired.normal_bam:\n return _get_variants(paired.tumor_data) and _get_cnvs(paired.tumor_data)\n\ndef _get_batches(data):\n batches = dd.get_batch(data) or dd.get_sample_name(data)\n if not isinstance(batches, (list, tuple)):\n batches = [batches]\n return batches\n\ndef _group_by_batches(items):\n out = collections.OrderedDict()\n for data in (xs[0] for xs in items):\n for b in _get_batches(data):\n try:\n out[b].append(data)\n except KeyError:\n out[b] = [data]\n return out\n\ndef estimate(items, batch, config):\n \"\"\"Estimate heterogeneity for a pair of tumor/normal samples. Run in parallel.\n\n XXX In progress, currently uses THetA but not yet turned on\n \"\"\"\n paired = vcfutils.get_paired_bams([dd.get_align_bam(d) for d in items], items)\n cnvs = _get_cnvs(paired.tumor_data)\n new_cnvs = theta.run(cnvs[0], paired)\n print(new_cnvs)\n\n out = []\n for data in items:\n if batch == _get_batches(data)[0]:\n out.append([data])\n return out\n\ndef run(items, run_parallel):\n \"\"\"Top level entry point for calculating heterogeneity, handles organization and job distribution.\n \"\"\"\n to_process = []\n extras = []\n for batch, cur_items in _group_by_batches(items).items():\n if _ready_for_het_analysis(cur_items):\n to_process.append((batch, cur_items))\n else:\n for data in cur_items:\n extras.append([data])\n processed = run_parallel(\"heterogeneity_estimate\", ([xs, b, xs[0][\"config\"]] for b, xs in to_process))\n return extras + processed","sub_path":"bcbio/heterogeneity/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"125492184","text":"# -*- coding: utf-8 -*-\n# author: guifeng tang\nimport numpy as np\nfrom numpy import array\nfrom itertools import combinations_with_replacement, permutations\nfrom repDNA.nac import RevcKmer\nfrom repDNA.psenac import PCPseDNC, PCPseTNC, SCPseDNC, SCPseTNC\nimport time\n\n\ndef GetSequences(f):\n seqslst = []\n while True:\n s = f.readline()\n if not s:\n break\n else:\n if '>' not in s:\n seq = s.split('\\n')[0]\n seqslst.append(seq)\n return seqslst\n\n\ndef GetKmerDict(alphabet,k):\n kmerlst = []\n partkmers = list(combinations_with_replacement(alphabet, k))\n for element in partkmers:\n elelst = set(permutations(element, k))\n strlst = [''.join(ele) for ele in elelst]\n kmerlst += strlst\n kmerlst = 
np.sort(kmerlst)\n kmerdict = {kmerlst[i]:i for i in range(len(kmerlst))}\n return kmerdict\n\n\n############################### Spectrum Profile ##############################\ndef GetSpectrumProfile(instances, alphabet, k):\n kmerdict = GetKmerDict(alphabet, k)\n X = []\n for sequence in instances:\n vector = GetSpectrumProfileVector(sequence, kmerdict, k)\n X.append(vector)\n X = array(X)\n return X\n\n\ndef GetSpectrumProfileVector(sequence, kmerdict, k): \n vector = np.zeros((1, len(kmerdict)))\n n = len(sequence)\n for i in range(n-k+1):\n subsequence = sequence[i:i+k]\n position = kmerdict.get(subsequence)\n vector[0, position] += 1\n return list(vector[0])\n\n\n############################### Mismatch Profile ##############################\ndef GetMismatchProfile(instances, alphabet, k, m):\n kmerdict = GetKmerDict(alphabet, k)\n X = []\n for sequence in instances:\n vector = GetMismatchProfileVector(sequence, alphabet, kmerdict, k)\n X.append(vector) \n X = array(X)\n return X\n\n\ndef GetMismatchProfileVector(sequence, alphabet, kmerdict, k): \n vector = np.zeros((1, len(kmerdict)))\n n = len(sequence)\n for i in range(n-k+1):\n subsequence = sequence[i:i+k]\n position = kmerdict.get(subsequence)\n vector[0, position] += 1\n for j in range(k):\n substitution = subsequence\n for letter in set(alphabet) ^ set(subsequence[j]):\n substitution = list(substitution)\n substitution[j] = letter\n substitution = ''.join(substitution)\n position = kmerdict.get(substitution)\n vector[0, position] += 1\n return list(vector[0])\n\n\n########################### Reverse Compliment Kmer ###########################\ndef GetRevcKmer(k):\n rev_kmer = RevcKmer(k=k)\n pos_vec = rev_kmer.make_revckmer_vec(open(posi_samples_file))\n neg_vec = rev_kmer.make_revckmer_vec(open(nega_samples_file))\n X = array(pos_vec + neg_vec)\n return X\n\n\n############ Parallel Correlation Pseudo Dinucleotide Composition #############\ndef GetPCPseDNC(lamada, phyche_list):\n pc_psednc = PCPseDNC(lamada=lamada, w=0.05)\n pos_vec = pc_psednc.make_pcpsednc_vec(open(posi_samples_file), phyche_index=phyche_list)\n neg_vec = pc_psednc.make_pcpsednc_vec(open(nega_samples_file), phyche_index=phyche_list)\n X = array(pos_vec + neg_vec) \n return X\n\n\n############ Parallel Correlation Pseudo Trinucleotide Composition ############\ndef GetPCPseTNC(lamada):\n pc_psetnc = PCPseTNC(lamada=lamada, w=0.05)\n pos_vec = pc_psetnc.make_pcpsetnc_vec(open(posi_samples_file), all_property=True)\n neg_vec = pc_psetnc.make_pcpsetnc_vec(open(nega_samples_file), all_property=True)\n X = array(pos_vec + neg_vec)\n return X\n\n\n############## Series Correlation Pseudo Dinucleotide Composition #############\ndef GetSCPseDNC(lamada, phyche_list):\n sc_psednc = SCPseDNC(lamada=lamada, w=0.05)\n pos_vec = sc_psednc.make_scpsednc_vec(open(posi_samples_file), phyche_index=phyche_list)\n neg_vec = sc_psednc.make_scpsednc_vec(open(nega_samples_file), phyche_index=phyche_list)\n X = array(pos_vec + neg_vec)\n return X \n\n\n############## Series Correlation Pseudo Trinucleotide Composition ############\ndef GetSCPseTNC(lamada):\n sc_psetnc = SCPseTNC(lamada=lamada, w=0.05)\n pos_vec = sc_psetnc.make_scpsetnc_vec(open(posi_samples_file), all_property=True)\n neg_vec = sc_psetnc.make_scpsetnc_vec(open(nega_samples_file), all_property=True)\n X = array(pos_vec + neg_vec)\n return X \n\n\n###############################################################################\nif __name__ == '__main__':\n global posi_samples_file\n global nega_samples_file\n 
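# input text files with the positive/negative sample sequences (GetSequences skips '>' header lines)\n    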
posi_samples_file = 'SLT2_posi_samples.txt'\n nega_samples_file = 'SLT2_nega_samples.txt'\n fp = open(posi_samples_file, 'r')\n posis = GetSequences(fp)\n fn = open(nega_samples_file, 'r')\n negas = GetSequences(fn)\n instances = array(posis+negas)\n alphabet = ['A', 'C', 'G', 'T']\n \n # Spectrum Profile for k=1,2,3,4,5\n for k in range(1, 6):\n print('..........................................................................')\n print('Coding for feature:'+str(k)+'-Spectrum Profile, beginning')\n tic = time.clock()\n X = GetSpectrumProfile(instances, alphabet, k)\n np.savetxt(str(k)+'-SpectrumProfileFeature'+'.txt', X)\n toc = time.clock()\n print('Coding time:%.3f minutes' % ((toc-tic)/60))\n \n # Mismatch Profile for (k,m)=(3,1),(4,1),(5,1)\n for (k, m) in [(3, 1), (4, 1), (5, 1)]:\n print('..........................................................................')\n print('Coding for feature:'+str((k, m))+'-Mismatch Profile, beginning')\n tic = time.clock()\n X = GetMismatchProfile(instances, alphabet, k, m)\n np.savetxt(str((k, m))+'-MismatchProfileFeature'+'.txt', X)\n toc = time.clock()\n print('Coding time:%.3f minutes' % ((toc-tic)/60))\n\n # Reverse Compliment Kmer for k=1,2,3,4,5\n for k in range(1, 6):\n print('..........................................................................')\n print('Coding for feature:'+str(k)+'-RevcKmer, beginning')\n tic = time.clock()\n X = GetRevcKmer(k)\n np.savetxt(str(k)+'-RevcKmerFeature'+'.txt', X)\n toc = time.clock()\n print('Coding time:%.3f minutes' % ((toc-tic)/60))\n \n # Parallel Correlation Pseudo Dinucleotide Composition \n print('..........................................................................')\n print('Coding for feature:PCPseDNC, beginning')\n tic = time.clock()\n X = GetPCPseDNC(9, phyche_list=['Twist', 'Tilt', 'Roll', 'Shift', 'Slide', 'Rise'])\n np.savetxt('PCPseDNCFeature'+'.txt', X)\n toc = time.clock()\n print('Coding time:%.3f minutes' % ((toc-tic)/60))\n\n # Parallel Correlation Pseudo Trinucleotide Composition \n print('..........................................................................')\n print('Coding for feature:PCPseTNC, beginning')\n tic = time.clock()\n X = GetPCPseTNC(1)\n np.savetxt('PCPseTNCFeature'+'.txt', X)\n toc = time.clock()\n print('Coding time:%.3f minutes' % ((toc-tic)/60))\n \n # Series Correlation Pseudo Dinucleotide Composition \n print('..........................................................................')\n print('Coding for feature:SCPseDNC, beginning')\n tic = time.clock()\n X = GetSCPseDNC(15, phyche_list=['Twist', 'Tilt', 'Roll', 'Shift', 'Slide', 'Rise'])\n np.savetxt('SCPseDNCFeature'+'.txt', X)\n toc = time.clock()\n print('Coding time:%.3f minutes' % ((toc-tic)/60))\n \n # Series Correlation Pseudo Trinucleotide Composition \n print('..........................................................................')\n print('Coding for feature:SCPseTNC, beginning')\n tic = time.clock()\n X = GetSCPseTNC(1)\n np.savetxt('SCPseTNCFeature'+'.txt', X)\n toc = time.clock()\n print('Coding time:%.3f minutes' % ((toc-tic)/60))\n\n # all features - 17\n","sub_path":"code/GetVariousFeatures.py","file_name":"GetVariousFeatures.py","file_ext":"py","file_size_in_byte":7940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"97533377","text":"import re\n\nf = open(\"regex_sum_300111.txt\",\"r\")\n\ntotal = 0\ncount = 0\nfor line in f.readlines():\n if not re.search('[0-9]+',line): continue\n num = re.findall('([0-9]+)',line)\n 
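# tally and sum every number matched on this line\n    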
for n in num:\n        count += 1\n        total += int(n)\nprint(total,count)","sub_path":"regulerexp2.py","file_name":"regulerexp2.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"221629501","text":"import random\n\nclass Card(object):\n    def __init__(self,type,isFirst):\n        self.type = type \n        self.age = 0\n        self.selected = False\n        self.active = True\n        self.isFirst = isFirst\n        self.mouse_over = False\n    \n    def is_same_card(self, other):\n        return self.type == other.type\n    \nclass Board(object):\n    def __init__(self):\n        self.new_game()\n    \n    # --- Board functions ---\n    def new_game(self):\n        self.board = []\n        for i in range(18):\n            # add two of each type\n            self.board.append(Card(i, True))\n            self.board.append(Card(i, False))\n        random.shuffle(self.board)\n        self.unknown = list(self.board)\n        self.known = []\n        self.selected = []\n    \n    def get_cards(self):\n        return self.board\n    \n    def select_card(self,c):\n        c.age=0\n        self.board.remove(c)\n        if c in self.unknown:\n            self.unknown.remove(c)\n        elif c in self.known:\n            self.known.remove(c)\n        self.selected.append(c)\n        c.selected = True\n    \n    def end_of_turn(self):\n        pair = None\n        if len(self.selected):\n            if self.selected[0].type != self.selected[1].type:\n                for c in self.selected:\n                    c.selected = False\n                    self.known.append(c)\n                    self.board.append(c)\n            else:\n                for c in self.selected:\n                    c.active = False\n                pair = list(self.selected) \n            self.selected = []\n        for c in self.known:\n            c.age += 1\n        return pair\n    \n    def is_game_over(self):\n        if not len(self.board):\n            return True\n        return False\n    \n    # --- Help function for AI --- \n    def _build_temp_lists(self,maxage):\n        k = []\n        u = list(self.unknown)\n        for c in self.known:\n            if maxage is None:\n                k.append(c)\n            else:\n                if c.age <= maxage:\n                    k.append(c)\n                else:\n                    u.append(c)\n        return (k,u)\n\n    def search_known_for_pairs(self,maxage=None):\n        known,unknown = self._build_temp_lists(maxage)\n        for c1 in known:\n            for c2 in known:\n                if c2 != c1 and c2.type == c1.type:\n                    return c1\n        return None\n    \n    def search_known_for_match(self,maxage=None,card=None):\n        if card is None:\n            card = self.selected[0]\n        known,unknown = self._build_temp_lists(maxage)\n        for c in known:\n            if c == card:\n                continue\n            if c.type == card.type:\n                return c\n        return None\n    \n    def get_known(self,maxage=None):\n        known,unknown = self._build_temp_lists(maxage)\n        return known\n    \n    def select_unknown(self,maxage=None):\n        known,unknown = self._build_temp_lists(maxage) \n        if len(unknown):\n            return(random.choice(unknown))\n        else:\n            return None\n    \n    def select_known(self,maxage=None):\n        known,unknown = self._build_temp_lists(maxage) \n        if len(known):\n            return(random.choice(known))\n        else:\n            return None\n    \n    def select_any(self):\n        if len(self.board):\n            return(random.choice(self.board))\n        else:\n            return None\n    \n","sub_path":"MyMemory/lib/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"164776550","text":"import numpy as np\nimport pandas as pd\n\ndef importdata(filecsv,nCols):\n    if (nCols<400):\n        nCh = 300\n    if (nCols>1200):\n        nCh = 1200\n\n    data = np.float32(pd.read_csv(filecsv , usecols = np.arange(0,nCh,1) , header = None).values)\n    var = np.array( pd.read_csv(filecsv , usecols = np.arange(nCh,nCols,1), header = None).values , dtype = np.float32)\n\n    return data, var\n    \ndef importvars(filecsv,nCols):\n    var = np.array( pd.read_csv(filecsv , usecols = np.arange(0,nCols), 
header = None).values , dtype = np.float32)\n    return var\n\n\ndef filterdata(data, cut1,cut2):\n    data = data [ np.where( np.logical_and( cut1, cut2 ) ) [0] ]\n    \n    return data\n","sub_path":"funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"179294052","text":"'''\nExercise 2: Write another program that prompts for a list of numbers as \nabove and at the end prints out both the maximum and minimum of the \nnumbers instead of the average.\n'''\n\nfrom __future__ import division\n\ndef num_check(value):\n    '''Checks to see if value can be cast as a float.\n    Returns: value as a float, if possible\n             'DONE' or 'Invalid input' (strings) otherwise'''\n    return_value = None\n    try:\n        return_value = float(value)\n    except ValueError:\n        if str.upper(value) == 'DONE':\n            return_value = 'DONE'\n        else:\n            return_value = 'Invalid input'\n    return return_value \n\ndef calc_values(numbers):\n    '''Generate some summary statistics from numbers and print them'''\n    numbers.sort()\n    total = sum(numbers)\n    count = len(numbers)\n    min_n = numbers[0]\n    max_n = numbers[-1]\n    print(\"Total = {}, Count = {}, Min = {}, Max = {}\".format(total, count, min_n, max_n))\n\ndef main():\n    numbers = []\n    while True:\n        value = raw_input('Enter a number:')\n        checked_value = num_check(value)\n        if checked_value == 'DONE':\n            break\n        elif checked_value == 'Invalid input':\n            print(checked_value)\n        else:\n            numbers.append(checked_value)\n    calc_values(numbers)\n    \n\nif __name__ == '__main__':\n    main()\n","sub_path":"ra_chp_5/ch_5_2.py","file_name":"ch_5_2.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"275100257","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django_countries.fields import CountryField\nfrom geoposition.fields import GeopositionField\n\n# Create your models here.\n\nGENDER_CHOICES = (\n\t(\"M\" , \"Male\"),\n\t(\"F\" , \"Female\"),\n\t)\n\nclass CofifiUser(models.Model):\n\tuser = models.OneToOneField(User)\n\tfriends = models.ManyToManyField('self',related_name=\"user_friends\")\n\tbio = models.TextField(default=\"\")\n\timage = models.ImageField(upload_to=\"static/user\")\n\tcountry = CountryField()\n\tgender = models.CharField(max_length=1,choices=GENDER_CHOICES)\n\tbirth_date= models.DateField()\n\tdef __unicode__(self):\n\t\treturn self.user.username\n\nclass Geoposition(models.Model):\n\twho = models.ForeignKey(CofifiUser)\n\twhen = models.DateTimeField(auto_now_add=True)\n\twhere = GeopositionField()\n\n\tdef __unicode__(self):\n\t\treturn \"At \" + str(self.when)","sub_path":"user_profile/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"180529200","text":"from datetime import datetime\nimport time\n\nfrom basepy.cache import memoized\n\n\n# decorator that times function execution\ndef timeit(func):\n    def inner(*args, **kwargs):\n        time_1 = datetime.now()\n        rv = func(*args, **kwargs)\n        time_2 = datetime.now()\n        return rv, (time_2-time_1).total_seconds()\n    return inner\n\n\n# plain (uncached) function\ndef some_function(n):\n    \"\"\"Return n doubled, sleeping 1s to simulate expensive work.\"\"\"\n    time.sleep(1)\n    return n*2\n\n\n@memoized\ndef some_function_2(n):\n    \"\"\"Return n doubled, sleeping 2s on a cache miss (memoized).\"\"\"\n    time.sleep(2)\n    return n*2\n\n\ndef test_memorized():\n    func1 = 
timeit(some_function)\n func2 = timeit(some_function_2)\n _, t = func1(100)\n assert t >= 1.0\n _, t = func2(1002)\n assert t >= 1.0\n _, t = func2(1002)\n assert t <= 0.5\n","sub_path":"tests/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"565952352","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Jul 20, 2017\r\n@author: Ancient Abysswalker\r\n\"\"\"\r\n\r\nfrom math import factorial as fact\r\n\r\ndef binom(n,k):\r\n\tif n == k:\r\n\t\treturn 1\r\n\tif k > n:\r\n\t\treturn 0\r\n\treturn int(fact(n)/fact(k)/fact(n-k))\r\n\r\n#lattice grid is 1 larger than grid size\r\nlatGrid=20\r\n\r\n#print the number of paths\r\npathCnt=binom(2*latGrid,latGrid)\r\nprint(pathCnt)\r\n","sub_path":"Euler Projekt 015 - Lattice paths/EulerProjekt_15.py","file_name":"EulerProjekt_15.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"125961586","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\n\n# Create your views here.\n\ndef echo(request):\n response = ''\n if request.method == 'GET':\n if request.GET:\n response += 'get '\n for each in request.GET:\n response += f'{each}: {request.GET[each]} '\n\n if request.method == 'POST':\n if request.POST:\n response += 'post '\n for each in request.POST:\n response += f'{each}: {request.POST[each]} '\n\n try:\n response += f'statement is {request.META[\"HTTP_X-PRINT-STATEMENT\"]}'\n except KeyError:\n response += 'statement is empty'\n return HttpResponse(content=response, status=200)\n\n\ndef filters(request):\n return render(request, 'filters.html', context={\n 'a': request.GET.get('a', 1),\n 'b': request.GET.get('b', 1)\n })\n\n\ndef extend(request):\n return render(request, 'extend.html', context={\n 'a': request.GET.get('a'),\n 'b': request.GET.get('b')\n })\n","sub_path":"coursera/python_mailru/course03/week4/routing_practice/template/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"321517018","text":"class ListNode(object):\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.next = None\n\n\ndef buildnodes(l):\n\thead=ListNode(l[0])\n\tnode=head\n\tfor i in range(1,len(l)):\n\t\tnode.next=ListNode(l[i])\n\t\tnode=node.next\n\treturn head\n\n\n#### Delete Node\n\ndef delnode(head,node):\n\tif node.next:\n\t\tnode.val=node.next.val\n\t\tnode.next=node.next.next\n\telse:\n\t\twhile head.next.next:\n\t\t\thead=head.next\n\t\thead.next=None\n\ndef reverKnode(k,head):\n\tp1=head\n\tp2=head\n\twhile k:\n\t\tp1=p1.next\n\t\tk=k-1\n\twhile p1.next:\n\t\tp1=p1.next\n\t\tp2.p2.next\n\treturn p2\n\n\ndef circlenode(head):\n\tp1=head.next.next\n\tp2=head.next\n\twhile p1!=p2:\n\t\tp1=p1.next.next\n\t\tp2=p2.next\n\tp2=head\n\twhile p1!=p2:\n\t\tp1=p1.next\n\t\tp2=p2.next\n\treturn p1\n\n\ndef reversnodes(head):\n\tp1=None\n\tc=head\n\twhile c.next:\n\t\tp2=c.next\n\t\tc.next=p1\n\t\tp1=c\n\t\tc=p2\n\tc.next=p1\n\treturn c\n\nc=reversnodes(buildnodes([1,2,3,4,5,6]))\nwhile c:\n\tprint(c.val)\n\tc=c.next\n\n\ndef mergenodes(h1,h2):\n\tif h1.val/([^\\/]*)/'\r\n # scan all content and return the first matched content with ignoring the l_u case letter\r\n matchObject = re.search(regularExpression, html, re.I)\r\n # if get\r\n if matchObject:\r\n # if phonetic exist\r\n 
if matchObject.group(1):\r\n                self.phoneticSpelling = matchObject.group(1)\r\n            else:\r\n                self.phoneticSpelling = None\r\n            # the list already holds the word; now append its phonetic\r\n            list.append(self.phoneticSpelling)\r\n            # write into the excel.xls, straight into the folder\r\n            write_excel_xls_append(self._filePath, list)\r\n\r\n\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# get the word phonetic in excel.xls; path may be absolute or relative (this is shown in the game)\r\ndef get_word_pho(path, word):\r\n    # open workbook\r\n    workbook = xlrd.open_workbook(path)\r\n    # get all sheets by sheet names\r\n    sheets = workbook.sheet_names()\r\n    # get the first sheet\r\n    worksheet = workbook.sheet_by_name(sheets[0])\r\n    # get the corresponding content: first fetch the words in the first column\r\n    words = worksheet.col_values(0)\r\n    # if word in words\r\n    if word in words:\r\n        # if the word exists, return its index\r\n        word_index = words.index(word)\r\n        # return the phonetic at that index\r\n        return worksheet.cell_value(word_index, 1)\r\n    else:\r\n        return None\r\n","sub_path":"words_phonetic_library.py","file_name":"words_phonetic_library.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"322354386","text":"from quimb import *\nfrom quimb.tensor import *\nimport quimb as qu\nimport numpy as np\nimport quimb.tensor as qtn\nimport matplotlib.pyplot as plt\nfrom numpy import linalg as LA\nimport matplotlib as mpl\n\n\nmpl.rcParams['xtick.major.size'] = 10\nmpl.rcParams['xtick.major.width'] = 1\nmpl.rcParams['xtick.minor.size'] = 5\nmpl.rcParams['xtick.minor.width'] = 1\nmpl.rcParams['ytick.major.size'] = 10\nmpl.rcParams['ytick.major.width'] = 1\nmpl.rcParams['ytick.minor.size'] = 5\nmpl.rcParams['ytick.minor.width'] = 1\n\n\ndef sort_low(error):\n    val_min=1.e8\n    error_f=[]\n    for i in range(len(error)-1):\n        if error[i]>=error[i+1] and error[i]<=val_min :\n            error_f.append(error[i])\n            val_min=error[i+1]*1.0\n            pass\n\n    return error_f\n\n\nR=np.loadtxt(\"lay10rand.txt\")\nxr10=R[:,0]\nyr10=R[:,1]\nerrorr10=R[:,2]\nerrorr10=sort_low(errorr10) \n\n\nR=np.loadtxt(\"lay10good.txt\")\nxg10=R[:,0]\nyg10=R[:,1]\nerrorg10=R[:,2]\nerrorg10=sort_low(errorg10) \n\nR=np.loadtxt(\"lay11good.txt\")\nx11=R[:,0]\ny11=R[:,1]\nerror11=R[:,2]\nerror11=sort_low(error11) \n\n\nR=np.loadtxt(\"lay6good.txt\")\nx6=R[:,0]\ny6=R[:,1]\nerror6=R[:,2]\nerror6=sort_low(error6) \n\n\n\nR=np.loadtxt(\"lay4good.txt\")\nx4=R[:,0]\ny4=R[:,1]\nerror4=R[:,2]\nerror4=sort_low(error4) \n\n\n\nR=np.loadtxt(\"lay8good.txt\")\nx8=R[:,0]\ny8=R[:,1]\nerror8=R[:,2]\nerror8=sort_low(error8) \n\nR=np.loadtxt(\"lay9good.txt\")\nx9=R[:,0]\ny9=R[:,1]\nerror9=R[:,2]\nerror9=sort_low(error9) \n\n\n\nR=np.loadtxt(\"lay8rand.txt\")\nx8r=R[:,0]\ny8r=R[:,1]\nerror8r=R[:,2]\nerror8r=sort_low(error8r) \n\n\nR=np.loadtxt(\"lay12good.txt\")\nx12=R[:,0]\ny12=R[:,1]\nerror12=R[:,2]\nerror12=sort_low(error12) \n\n\nR=np.loadtxt(\"lay13good.txt\")\nx13=R[:,0]\ny13=R[:,1]\nerror13=R[:,2]\nerror13=sort_low(error13) \n\n\nplt.figure(figsize=(8, 6))\n\n\nplt.loglog( errorr10, '-',lw=4, color = '#e3360b', label=r'$\\tau=10$, random')\nplt.loglog( error8r, '-.', lw=4,color = '#72cfbb', label=r'$\\tau=8$, random')\n#plt.loglog( error9, '3', color = '#cf729d', label='lay=9, good')\nplt.loglog( errorg10, '--',lw=4, color = '#0b8de3', label=r'$\\tau=10$, from $\\tau=8$')\n#plt.loglog( error11, '--', lw=4,color = '#e3570b', label=r'$\\tau=11$')\n#plt.loglog( error12, '--',lw=4, color = '#729fcf', 
label=r'$\\tau=12$')\n#plt.loglog( error13, '--', lw=4,color = '#96068a', label=r'$\\tau=13$')\n\n#plt.loglog( error6, 'x', color = '#e30b69', label='lay=6')\n\n#plt.loglog( error4, '2', color = '#729fcf', label='lay=4')\n\n\n#plt.loglog( error8r, '2', color = '#cf729d', label='lay=8, random')\n\n#plt.title('brickwall circuit')\nplt.ylabel(r'$\\delta$ E',fontsize=20)\nplt.xlabel(r'$iterations$',fontsize=20)\n#plt.axhline(0.00422,color='black', label='D=4')\n\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.legend(loc=\"upper right\", prop={'size': 18})\n\nplt.xlim([1,5.5e3])\nplt.ylim([7.2e-3, 10.e-2])\n\n\n#plt.grid(True)\nplt.savefig('plot1.pdf')\nplt.clf()\n","sub_path":"plot/brick-Hisenberg/plot1.py","file_name":"plot1.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"288268726","text":"import math as m\n\np1 = input().split(\" \")\np2 = input().split(\" \")\n\nx1, y1 = p1\nx2, y2 = p2\n\ndistancia = m.sqrt( (float(x2) - float(x1))**2 + (float(y2) - float(y1))**2 )\n\nprint(\"{0:.4f}\".format(distancia))","sub_path":"uri/iniciante/1015.py","file_name":"1015.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"69348825","text":"from flask import Flask,render_template,request,jsonify\nimport sqlite3 as sql\nfrom urllib.request import urlopen,Request\nfrom bs4 import BeautifulSoup\nimport numpy as np\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nimport pandas as pd\nfrom collections import Counter\nimport plotly\nimport plotly.express as px\nimport json\n\n\n#new words in sentiments\nnew_words = {\n 'rise':50,\n 'high':10,\n 'jump':45,\n 'drop':-100,\n 'slip':-10,\n 'fall':-100,\n 'gain':20,\n 'crush': 10,\n 'beat': 5,\n 'miss': -5,\n 'trouble': -10,\n 'fall': -100,\n 'drop':-10,\n 'buy':20,\n 'sell':-10,\n 'bullish':10,\n 'bull':10,\n }\n # Instantiate the sentiment intensity analyzer with the existing lexicon\nvader = SentimentIntensityAnalyzer()\n # Update the lexicon\nvader.lexicon.update(new_words)\n\napp=Flask(__name__)\n\n#homepage\n@app.route('/')\ndef index():\n return render_template('index.html')\n \n#add Nifty as a special page\n# show the main link of all the stocks of the topstock \n@app.route('/topstock/', methods=['POST'])\ndef topstock():\n if request.method=='POST':\n text=request.form['search']\n if text=='NIFTY - S&P CNX NIFTY - INDEX':\n db,cur=connect('tsa')\n cur.execute('SELECT * FROM companies WHERE title==?',(text,))\n url_text=cur.fetchone()\n db.close()\n url=url_text[2]\n req= Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n data= urlopen(req).read()\n # return render_template('test.html',data=data)\n soup=BeautifulSoup(data,'lxml')\n li=soup.find_all('div',{'class':'col-lg-4 col-md-4 col-sm-4'})[1:]\n val_list=[]\n final_dict={}\n h3=li[0].find('h3').text\n new_li=li[0].find('table').find_all('td')\n for val in new_li:\n val_list.append(val.text)\n val_dic={val_list[i]:val_list[i+1] for i in range(0,len(val_list),2)}\n final_dict[h3]= val_dic\n\n for div in li[1:4]:\n val_list=[]\n h3=div.find('h3').text\n new_li=div.find('table').find_all('td')\n for val in new_li:\n val_dic={}\n if len(val.attrs)!=0 :\n if val.attrs['colspan']=='3':\n pass\n else:\n \n val_list.append(val.text.strip())\n else:\n val_list.append(val.text.strip())\n val_dic={val_list[i]:val_list[i+1:i+3] for i in range(0,len(val_list),3)}\n final_dict[h3]= 
val_dic\n\n val_list=[]\n h3=li[4].find('h3').text\n new_li=li[4].find('table').find_all('td')\n val_dic={}\n for val in new_li:\n if len(val.attrs)!=0 :\n if val.attrs['colspan']=='2':\n pass\n else:\n val_list.append(val.text.strip())\n else:\n val_list.append(val.text.strip())\n val_dic={val_list[i]:val_list[i+1] for i in range(0,len(val_list),2)}\n final_dict[h3]= val_dic\n\n for div in li[4:8]:\n val_list=[]\n h3=div.find('h3').text\n new_li=div.find('table').find_all('td')\n for val in new_li:\n val_dic={}\n if len(val.attrs)!=0 :\n if val.attrs['colspan']=='2':\n pass\n else:\n val_list.append(val.text.strip())\n else:\n val_list.append(val.text.strip())\n val_dic={val_list[i]:val_list[i+1] for i in range(0,len(val_list),2)}\n final_dict[h3]= val_dic\n\n val_list=[]\n h3=li[8].find('h3').text\n new_li=li[8].find('table').find_all('td')\n for val in new_li:\n val_dic={}\n if len(val.attrs)!=0 :\n if val.attrs['colspan']=='3':\n pass\n else:\n val_list.append(val.text.strip())\n else:\n val_list.append(val.text.strip())\n val_dic={val_list[i]:val_list[i+1:i+3] for i in range(0,len(val_list),3)}\n final_dict[h3]= val_dic\n\n li=soup.find('div',{'class':'col-lg-8 col-md-8 col-sm-8'})\n val_list=[]\n h3=li.find('h3').text.strip()\n for val in li.find_all('td'):\n if len(val.attrs)!=0 :\n if val.attrs['colspan']=='1':\n pass\n else:\n val_list.append(val.text.strip())\n else:\n val_list.append(val.text.strip())\n final_dict[h3]= val_list\n final={}\n i=1\n for val in final_dict.items():\n final[i]=val\n i=i+1\n \n return render_template('nifty.html',data=final)\n else:\n db,cur=connect('tsa')\n cur.execute('SELECT * FROM companies WHERE title==?',(text,))\n url_text=cur.fetchone()\n db.close()\n url=url_text[2]\n req= Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n data= urlopen(req).read()\n # return render_template('test.html',data=data)\n soup=BeautifulSoup(data,'lxml')\n li=soup.find_all('table',{'class':'table table-bordered table-striped table-hover'})\n final=[]\n for val in li:\n final.append(val.find_all('td'))\n #for var in val.find_all('td')\n final1=[]\n arr=np.array(final)\n final=arr.take([0,4,8,9,10])\n final1=arr.take([2,3])\n final2=arr.take(12)\n final=final.tolist()\n final1=final1.tolist()\n i=1\n final_dict={}\n for val in final:\n lib=[]\n if len(val)%2==0:\n for value in val:\n lib.append(value.text.strip())\n dic={lib[k]:lib[k+1] for k in range(0,len(lib),2)}\n final_dict[i]=dic\n i=i+1\n \n else:\n lis=val[0:(len(val)-1)]\n for value in lis:\n lib.append(value.text.strip())\n dic={lib[k]:lib[k+1] for k in range(0,len(lib),2)}\n final_dict[i]=dic\n i=i+1\n\n i=1\n final_dict1={}\n for val in final1:\n lib=[]\n if len(val)%3==0:\n for value in val:\n lib.append(value.text.strip())\n dic1={lib[k]:lib[k+1:k+3] for k in range(0,len(lib),3)}\n final_dict1[i]=dic1\n i=i+1\n \n else:\n lis1=val[0:(len(val)-1)]\n for value in lis1:\n lib.append(value.text.strip())\n dic1={lib[k]:lib[k+1:k+3] for k in range(0,len(lib),3)}\n final_dict1[i]=dic1\n i=i+1\n highlights=[]\n for val in final2:\n highlights.append(val.text.strip())\n return render_template('topstock.html',data1=final_dict, data2=final_dict1,highlight=highlights[0:-1])\n\n#show the fundamentals link\n@app.route('/fund_ac/', methods=['POST'])\ndef screener():\n if request.method=='POST':\n text=request.form['search']\n db,cur=connect('tsa')\n cur.execute('SELECT * FROM companies WHERE title==?',(text,))\n url_text=cur.fetchone()\n db.close()\n url=url_text[3]\n req= Request(url, headers={'User-Agent': 
'Mozilla/5.0'})\n data= urlopen(req).read()\n url_text=urlopen(req).read()\n soup=BeautifulSoup(url_text,'lxml')\n tab_li=soup.find_all('div',{'class':'table-responsive'})[0:4]\n fin_dic={}\n for val in tab_li[0:2]:\n dic={}\n td_li=[]\n h3=val.find('h3').text\n for th in val.find_all('th'):\n td_li.append(th.text.strip()) \n for td in val.find_all('td'):\n td_li.append(td.text.strip())\n for val1,val2 in zip(td_li[0::2],td_li[1::2]):\n dic[val1]=val2\n fin_dic[h3]=dic\n for val in tab_li[2:4]:\n td_li=[]\n h3=val.find('h3').text\n for th in val.find_all('th'):\n td_li.append(th.text.strip()) \n for td in val.find_all('td'):\n td_li.append(td.text.strip())\n dic1={td_li[k]:td_li[k+1:k+4] for k in range(0,len(td_li),4)}\n fin_dic[h3]=dic1\n data=[]\n for val in fin_dic.items():\n data.append(val)\n \n return render_template('screener.html',data=data)\n # url='https://www.screener.in/company/RELIANCE/consolidated/'\n # data=urlopen(url)\n # soup=BeautifulSoup(data,'lxml')\n # li=soup.find_all('ul',{'class':'row-full-width'})\n # orde=[]\n # for val in li:\n # for var in val.find_all('li'):\n # orde.append(var.text.split(\"\\n\"))\n # orde1=[]\n # for val in orde:\n # orde1.append(list(filter(lambda item:item.strip(' '), val)))\n # orde=[]\n # for val in orde1:\n # if (val[0].strip()=='Listed on') or (val[0].strip()=='Company Website'):\n # pass\n # elif len(val[1:])>1:\n # new_val=val[1].strip() +\" \"+ val[2].strip()\n # orde.append((val[0].strip(),new_val))\n # else:\n # orde.append((val[0].strip(),val[1].strip()))\n \n\n # url_peer='https://www.screener.in/api/company/6598251/peers/'\n # data=urlopen(url_peer)\n # soup=BeautifulSoup(data,'lxml')\n # peer=soup.find_all('table',{'class':'data-table text-nowrap striped'})\n # orde1=[]\n # for val in peer:\n # for var in val.find_all('tr'):\n # orde1.append(var.text)\n # peer_data=[]\n # for var in orde1:\n # peer_data.append(var.split(\"\\n\"))\n # orde1=[]\n # for var in peer_data[1:3]:\n # str_list=list(filter(lambda item:item.strip(), var))\n # orde1.append(str_list[1:])\n # url='https://www.screener.in/company/RELIANCE/consolidated/'\n # data=urlopen(url)\n # soup=BeautifulSoup(data,'lxml')\n # li=soup.find_all('table',{'class':'three columns ranges-table'})\n # compound=[]\n # for val in li:\n # data=val.find_all('td')\n # for td in data[-2:]:\n # compound.append(td.text.strip())\n # data=zip(compound[0::2],compound[1::2])\n\n # cash_flow=soup.find('section',{'id':'cash-flow'}).find('table')\n # th=cash_flow.find_all('th')\n # years=[]\n # for val in th[-3:]:\n # years.append(val.text)\n # cash_flow_dic={}\n # cash_flow_dic[0]=years\n # td=cash_flow.find_all('tr') \n # cost=[]\n # for val in td[1:]:\n # cost.append(val)\n # for i in range(1,len(cost)+1):\n # cash_flow_dic[i]=cost[i-1].text.split('\\n')[-4:-1]\n\n # share_hold=soup.find('section',{'id':'shareholding'}).find('table')\n # th=share_hold.find_all('th')\n # years=[]\n # for val in th[4:]:\n # years.append(val.text.strip())\n # share_dic={}\n # share_dic[0]=years\n # tr=share_hold.find_all('tr')\n # share=[]\n # for val in tr[1:]:\n # share.append(val)\n # for i in range(1,len(share)+1):\n # share_dic[i]=share[i-1].text.split('\\n')[-10:-1]\n\n # return render_template('screener.html',data1=orde,peer=enumerate(orde1,start=1),compound=data,cash_data=cash_flow_dic,\n # share=share_dic)\n\n#show the technicals links\n@app.route('/trend_ac/', methods=['POST'])\ndef trend():\n if request.method=='POST':\n text=request.form['search']\n db,cur=connect('tsa')\n cur.execute('SELECT * 
FROM companies WHERE title==?',(text,))\n url_text=cur.fetchone()\n db.close()\n url=url_text[4]\n req= Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n data= urlopen(req).read()\n url_text=urlopen(req).read()\n soup=BeautifulSoup(data,'lxml')\n tables=soup.find_all('div',{'id':'datagrid'})\n final_dic={}\n val_list=[]\n for th in tables[0].find_all('th'):\n val_list.append(th.text.strip())\n for val in tables[0].find_all('td'):\n val_list.append(val.text.strip())\n if len(val_list)==12:\n dic={val_list[i]:val_list[i+1:i+6] for i in range(0,len(val_list),6)}\n elif len(val_list)==14:\n dic={val_list[i]:val_list[i+1:i+7] for i in range(0,len(val_list),7)}\n elif len(val_list)==18:\n dic={val_list[i]:val_list[i+1:i+9] for i in range(0,len(val_list),9)}\n elif len(val_list)==16:\n dic={val_list[i]:val_list[i+1:i+8] for i in range(0,len(val_list),8)}\n\n \n final_dic['table']=dic\n\n\n val_list=[]\n h3=tables[1].find('h3').text.strip()\n for val in tables[1].find_all('td'):\n if len(val.attrs)!=0 :\n if val.attrs['colspan']=='1':\n pass\n else:\n val_list.append(val.text.strip())\n else:\n val_list.append(val.text.strip())\n final_dic[h3]=val_list\n signals=[]\n for val in tables[2:20]:\n val_list=[]\n dic={}\n h3=val.find('h3').text.strip()\n for th in val.find_all('th'):\n temp_td=val.find_all('td')\n if len(temp_td[2].text.strip())==0 and (th.text.strip()=='%K'):\n pass\n else:\n if th.find('a'):\n pass\n else:\n if (th.text.strip()=='View In Chart') :\n pass\n else:\n val_list.append(th.text.strip())\n for td in val.find_all('td'):\n if ('Volume Trend' in h3) and len(td.text.strip())==0:\n val_list.append(\" \")\n else:\n if len(td.text)==1:\n pass\n else:\n val_list.append(td.text.strip())\n if len(val_list)==16:\n dic={val_list[i]:val_list[i+1:i+4] for i in range(0,16,4)}\n if len(val_list)==20:\n dic={val_list[i]:val_list[i+1:i+5] for i in range(0,20,5)}\n if len(val_list)==24:\n dic={val_list[i]:val_list[i+1:i+6] for i in range(0,24,6)}\n if len(val_list)==32:\n dic={val_list[i]:val_list[i+1:i+4] for i in range(0,32,4)}\n \n sign=list(dic.values())\n for val in sign[1:]:\n signals.append(val[-1])\n\n final_dic[h3]=dic\n i=1\n data={}\n for val in final_dic.items():\n data[i]=val\n i=i+1\n \n sig_count=Counter(signals[0:-13])\n sig=dict(sig_count)\n df_dic={}\n for var in ['Bearish', 'Mild Bearish','Strong Bearish', 'Neutral', 'Mild Bullish', 'Bullish', 'Strong Bullish']:\n if var in sig:\n df_dic[var]=sig[var]\n df=pd.DataFrame(df_dic.items(),columns=['Signals','Value'])\n fig = px.bar(df, x=\"Signals\", y=\"Value\",color='Value',\n barmode='group',\n height=400,) \n plot=json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n return render_template('comp_trend.html',data=data,plot=plot)\n\n#connect to database\ndef connect(dbname):\n try:\n db=sql.connect(f\"{dbname}.db\")\n cur=db.cursor()\n return db,cur\n except Exception as e:\n print(\"Error\" ,e)\n exit(2)\n\n#fetch news from the cnbc and moneycontrol website\ndef fetchnews(dbname):\n if dbname=='cnbctv18':\n db,cur=connect('cnbctv18')\n cur.execute('SELECT * FROM News ORDER BY id DESC LIMIT 1')\n lastrec=cur.fetchone()\n newslist=[]\n for var in range(1,5):\n url='https://www.cnbctv18.com/market/stocks/page-{0}/'.format(var)\n data=urlopen(url)\n soup=BeautifulSoup(data,'lxml')\n li=soup.find_all('div',{'class':'list_title'})\n for div in li: \n link=div.find('a')['href']\n text=list(div.find('a').text.split('\\n'))\n if text[0]==lastrec[1]:\n break\n score=vader.polarity_scores(text[0])['compound']\n if score<0:\n 
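# VADER compound score below zero marks the headline as negative\n                    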
senti='negative'\n                elif score==0:\n                    senti='neutral'\n                else:\n                    senti='positive'\n                newslist.append((link,text,senti))\n            if text[0]==lastrec[1]:\n                break\n        if len(newslist)!=0: \n            newslist=newslist[::-1]\n\n            for val in newslist:\n                cur.execute(\"\"\"INSERT INTO News(title,description,link,sentiment)\n                VALUES(?,?,?,?)\"\"\",(val[1][0],val[1][1],val[0],val[2]))\n                db.commit()\n        db.close()\n        return None\n    \n    elif dbname=='moneycontrol':\n        db,cur=connect('moneycontrol')\n        cur.execute('SELECT * FROM News ORDER BY id DESC LIMIT 1')\n        lastrec=cur.fetchone()\n        newslist=[]\n        for var in range(1,3):\n            url='https://www.moneycontrol.com/news/business/stocks/page-{0}'.format(var)\n            data=urlopen(url)\n            soup=BeautifulSoup(data,'lxml')\n            li=soup.find_all('li',{'class':'clearfix'})\n            for div in li:\n                try:\n                    link=div.find('a')['href']\n                    title=div.find('a')['title']\n                    desc=div.find('p').text\n                    if title==lastrec[1]:\n                        break\n                    else:\n                        score=vader.polarity_scores(title)['compound']\n                        if score<0:\n                            senti='negative'\n                        elif score==0:\n                            senti='neutral'\n                        else:\n                            senti='positive'\n                        newslist.append((title,desc,senti,link))\n                except:\n                    pass\n            if title==lastrec[1]:\n                break\n\n        if len(newslist)!=0: \n            newslist=newslist[::-1]\n            for val in newslist:\n                cur.execute(\"\"\"INSERT INTO News(title,description,sentiment,link)\n                VALUES(?,?,?,?)\"\"\",(val[0].strip(),val[1].strip(),val[2],val[3]))\n                db.commit()\n            db.close()\n\n\n#Show the News\n@app.route('/news/')\ndef news():\n    fetchnews('cnbctv18')\n    db,cur=connect('cnbctv18')\n    cur.execute(\"\"\"SELECT * FROM (\n                SELECT * FROM News ORDER BY id DESC LIMIT 5\n                )Var1 ORDER BY id ASC;\"\"\")\n    news1=cur.fetchall()\n    news1=news1[::-1]\n    db.close()\n\n    fetchnews('moneycontrol')\n    db,cur=connect('moneycontrol')\n    cur.execute(\"\"\"SELECT * FROM (\n                SELECT * FROM News ORDER BY id DESC LIMIT 5\n                )Var1 ORDER BY id ASC;\"\"\")\n    news2=cur.fetchall()\n    news2=news2[::-1]\n    db.close()\n    return render_template('news.html',data1=enumerate(news1,start=1),data2=enumerate(news2,start=1))\n\n@app.route('/cnbctv/')\ndef cnbctv18news():\n    db,cur=connect('cnbctv18')\n    cur.execute(\"SELECT * FROM News\")\n    news=cur.fetchall()\n    news=news[::-1]\n    db.close()\n    return render_template('cnbctv18.html',data=enumerate(news,start=1))\n\n@app.route('/part_cnbc_news/', methods=['POST'])\ndef part_cnbc_news():\n    if request.method == 'POST':\n        word=request.form['search']\n        db,cur=connect('cnbctv18')\n        cur.execute('Select * from News')\n        news_list=cur.fetchall()\n        word=word.lower()\n        data=[]\n        for news in news_list:\n            if word in news[1].lower():\n                data.append(news)\n        db.close()\n        return render_template(\"cnbc_newser.html\",data=enumerate(data,start=1))\n\n\n@app.route('/money_news/')\ndef moneynews():\n    db,cur=connect('moneycontrol')\n    cur.execute(\"SELECT * FROM News\")\n    news=cur.fetchall()\n    news=news[::-1]\n    db.close()\n    return render_template('money_control.html',data=enumerate(news,start=1))\n\n\n@app.route('/part_money_news/', methods=['POST'])\ndef part_money_news():\n    if request.method == 'POST':\n        word=request.form['search']\n        db,cur=connect('moneycontrol')\n        cur.execute('Select * from News')\n        news_list=cur.fetchall()\n        word=word.lower()\n        data=[]\n        for news in news_list:\n            if word in news[1].lower():\n                data.append(news)\n        db.close()\n        return render_template(\"money_search.html\",data=enumerate(data,start=1))\n\n\n@app.route('/sgd/')\ndef receive_data():\n    return render_template('tsa.html')\n\n@app.route('/fundamentals/')\ndef fund_data():\n    return 
render_template('fundamentals.html')\n\n@app.route('/trends/')\ndef trend_data():\n return render_template('trend.html')\n\n# show the corporate csv file\n@app.route('/export/')\ndef export_lim():\n df=pd.read_csv('https://www1.nseindia.com/corporates/datafiles/BM_All_Forthcoming.csv').iloc[:10,:]\n data=list(df.values)\n return render_template('export_lim.html',data=data)\n\n@app.route('/export_all/')\ndef export():\n df=pd.read_csv('https://www1.nseindia.com/corporates/datafiles/BM_All_Forthcoming.csv')\n data=list(df.values)\n return render_template('export_all.html',data=data)\n\n@app.route('/export_part/', methods=['POST'])\ndef export_part():\n if request.method == 'POST':\n word=request.form['search']\n df=pd.read_csv('https://www1.nseindia.com/corporates/datafiles/BM_All_Forthcoming.csv')\n df=df[df['Company'].str.contains(word,case=False)]\n data=list(df.values)\n return render_template('part_export.html',data=data)\n\n\n#show the graph in corporate page\n\n\nif __name__ == \"__main__\": \n app.run(debug = True) ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":22391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"311192100","text":"#!/usr/bin/env python3.6\n# coding=utf-8\n\nimport skimpy\n\nfrom pip._internal.req import parse_requirements\nfrom pip._internal.download import PipSession\n\nfrom setuptools import setup, find_packages\n\nparsed_requirements = parse_requirements(\n 'requirements.txt',\n session=PipSession()\n)\n\nrequirements = [str(ir.req) for ir in parsed_requirements]\n\nsetup(\n name='skimpy',\n version=skimpy.__version__,\n description='',\n long_description='',\n py_modules=['skimpy'],\n include_package_data=True,\n install_requires=requirements,\n zip_safe=False,\n keywords='skimpy',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"594826305","text":"import pandas as pd\nfrom collections import Counter\nfrom math import log\n\n\ndef cal(dictionary):\n s = 0.0\n sum_of_type = sum(dictionary.values())\n for i in dictionary:\n tt = dictionary[i]/sum_of_type\n s -= tt*log(tt, 2)\n return s\n\n\ndef decisiontree(dataframe, max_gain=0.0, max_feature=None, delta=0.1):\n features = dataframe.columns[:-1]\n print(features)\n target = dataframe[dataframe.columns[-1]]\n features_dict = Counter(features)\n base_entropy = cal(Counter(target))\n\n for feature in features:\n row_data = dataframe[feature]\n s = 0.0\n temp = Counter(row_data)\n if len(temp) > len(features)/2:\n continue\n sum_temp = sum(temp.values())\n # print(temp)\n for j in temp:\n index = row_data == j\n sub_traget = target[index]\n s += temp[j] / sum_temp * cal(Counter(sub_traget))\n if base_entropy - s > max_gain:\n max_feature = feature\n max_gain = base_entropy - s\n print(max_gain, max_feature)\n\n if max_feature is not None or max_gain <= delta:\n new_features = features.drop(max_feature)\n new_data = dataframe[\n new_features.append(\n pd.Index([dataframe.columns[-1]]))\n ]\n return decisiontree(new_data)\n else:\n return \"finish catagory\"\n\n\ndatapath = \"./data.csv\"\ndata = pd.read_csv(datapath)\nprint(decisiontree(data))\n","sub_path":"decisiontree/decisiontree_ID3.py","file_name":"decisiontree_ID3.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"261119758","text":"# vim: expandtab\n# -*- coding: utf-8 -*-\nfrom django.contrib import admin\n\nfrom poleno.utils.misc import decorate\nfrom poleno.utils.admin import simple_list_filter_factory, admin_obj_format\n\nfrom .models import Invitation, InvitationSupply\n\n\n@admin.register(Invitation, site=admin.site)\nclass InvitationAdmin(admin.ModelAdmin):\n date_hierarchy = u'created'\n list_display = [\n u'id',\n u'email',\n u'created',\n decorate(\n lambda o: u'Accepted' if o.is_accepted else\n u'Expired' if o.is_expired else\n u'Pending' if o.is_pending else u'--',\n short_description=u'State',\n ),\n decorate(\n lambda o: admin_obj_format(o.invitor,\n u'{obj.first_name} {obj.last_name} <{obj.email}>'),\n short_description=u'Invitor',\n admin_order_field=u'invitor__email',\n ),\n decorate(\n lambda o: admin_obj_format(o.invitee,\n u'{obj.first_name} {obj.last_name} <{obj.email}>'),\n short_description=u'Invitee',\n admin_order_field=u'invitee__email',\n ),\n ]\n list_filter = [\n u'created',\n simple_list_filter_factory(u'State', u'state', [\n (u'1', u'Accepted', lambda qs: qs.accepted()),\n (u'2', u'Expired', lambda qs: qs.expired()),\n (u'3', u'Pending', lambda qs: qs.pending()),\n ]),\n ]\n search_fields = [\n u'=id',\n u'email',\n u'invitor__first_name',\n u'invitor__last_name',\n u'invitor__email',\n u'invitee__first_name',\n u'invitee__last_name',\n u'invitee__email',\n ]\n ordering = [\n u'-created',\n u'-id',\n ]\n exclude = [\n ]\n readonly_fields = [\n ]\n raw_id_fields = [\n u'invitor',\n u'invitee',\n u'message',\n ]\n inlines = [\n ]\n\n def get_queryset(self, request):\n queryset = super(InvitationAdmin, self).get_queryset(request)\n queryset = queryset.select_related(u'invitor')\n queryset = queryset.select_related(u'invitee')\n return queryset\n\n@admin.register(InvitationSupply, site=admin.site)\nclass InvitationSupplyAdmin(admin.ModelAdmin):\n date_hierarchy = None\n list_display = [\n u'id',\n decorate(\n lambda o: admin_obj_format(o.user,\n u'{obj.first_name} {obj.last_name} <{obj.email}>'),\n short_description=u'User',\n admin_order_field=u'user__email',\n ),\n u'enabled',\n u'unlimited',\n u'supply',\n ]\n list_filter = [\n u'enabled',\n u'unlimited',\n ]\n search_fields = [\n u'=id',\n u'user__first_name',\n u'user__last_name',\n u'user__email',\n ]\n ordering = [\n u'id',\n ]\n exclude = [\n ]\n readonly_fields = [\n ]\n raw_id_fields = [\n u'user',\n ]\n inlines = [\n ]\n\n def get_queryset(self, request):\n queryset = super(InvitationSupplyAdmin, self).get_queryset(request)\n queryset = queryset.select_related(u'user')\n return queryset\n","sub_path":"poleno/invitations/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"269209410","text":"#! 
/usr/bin/env python\nimport os\nimport argparse\nimport datetime\nimport torch\nimport model\nimport train\nfrom logger import get_logger\n\nfrom mydatasets import get_loader\n\nparser = argparse.ArgumentParser(description='CNN text classificer')\n#parser = argparse.get_args()\n# learning\nparser.add_argument('-lr', type=float, default=1e-6, help='initial learning rate [default: 0.001]') # CNN or RNN: 1e-3, DocBert: 1e-6\nparser.add_argument('-epochs', type=int, default=50, help='number of epochs for train [default: 256]')\nparser.add_argument('-batchsize', type=int, default=8, help='batch size for training [default: 64]') # CNN or RNN: 50, DocBert: 8\nparser.add_argument('-accumulation-steps',\n type=int, default=2, help='batch size for training [default: 64]') # CNN or RNN: 1, DocBert: 2\n\nparser.add_argument('-log-interval', type=int, default=10, help='how many steps to wait before logging training status [default: 1]')\nparser.add_argument('-test-interval', type=int, default=500, help='how many steps to wait before testing [default: 100]')\nparser.add_argument('-save-interval', type=int, default=500, help='how many steps to wait before saving [default:500]')\nparser.add_argument('-save-dir', type=str, default='snapshot', help='where to save the snapshot')\nparser.add_argument('-early-stop', type=int, default=1000, help='iteration numbers to stop without performance increasing')\nparser.add_argument('-save-best', type=bool, default=True, help='whether to save when get best performance')\n\n# data\nparser.add_argument('-shuffle', action='store_true', default=True, help='shuffle the data every epoch')\nparser.add_argument('-train_dir', type=str, default='../dataset/gmlda_mtl_bert_dataset.pkl', help='Train Dir')\n#cnn/rnn:'../dataset/gmlda_bert_dataset.pkl' bert: '../dataset/gmlda_mtl_bert_dataset.pkl'\nparser.add_argument('-dataset', type=str, default='mti_data')\nparser.add_argument('-index2word', type=list, default=[])\n\n# model\nparser.add_argument('-model', type=str, default='DocBertMtl', help='the model name')\nparser.add_argument('-dropout', type=float, default=0.5, help='the probability for dropout [default: 0.5]')\nparser.add_argument('-max-norm', type=float, default=3.0, help='l2 constraint of parameters [default: 3.0]')\nparser.add_argument('-embed-dim', type=int, default=300, help='number of embedding dimension [default: 128]')\nparser.add_argument('-kernel-num', type=int, default=150, help='number of each kind of kernel')\nparser.add_argument('-kernel-sizes', type=str, default='3', help='comma-separated kernel size to use for convolution')\nparser.add_argument('-query-kernel-num', type=int, default=150, help='query channel num')\nparser.add_argument('-layer-num', type=int, default=2, help='dpcnn layer num for convolution')\nparser.add_argument('-static', action='store_true', default=False, help='fix the embedding')\nparser.add_argument('-vocab-num', type=int, default=128, help='number of embedding dimension [default: 128]')\nparser.add_argument('-PositionalEmbedding', type=bool, default=False, help='if using Positional Embedding')\n\n# optimizer\nparser.add_argument('-optimizer', type=str, default='adam', help='number of embedding dimension [default: 128]')\nparser.add_argument('-weight_decay', type=float, default=1e-5, help='number of embedding dimension [default: 128]')\nparser.add_argument('-sent_loss', type=bool, default=False, help='number of embedding dimension [default: 128]')\nparser.add_argument('-optimizer_warper', type=bool, default=False, help='number of 
embedding dimension [default: 128]')\n\n# device\nparser.add_argument('-obv_device', type=str, default='0', help='device to use for iterate data, -1 mean cpu [default: -1]')\nparser.add_argument('-device', type=int, default=0, help='device to use for iterate data, -1 mean cpu [default: -1]')\nparser.add_argument('-no-cuda', action='store_true', default=False, help='disable the gpu')\n\n# option\nparser.add_argument('-snapshot', type=str, default=None, help='filename of model snapshot [default: None]')\nparser.add_argument('-predict', type=str, default=None, help='predict the sentence given')\nparser.add_argument('-test', action='store_true', default=train, help='train or test')\n# args = parser.parse_args(args=[])\nargs = parser.parse_args()\n\n# usage\nlogger = get_logger('log/' + args.model + '_' + args.dataset + '.log')\nlogger.info('start training!')\n\n\n# load data\nprint(\"\\n Loading data...\")\n\ntrain_iter, vocab_size, class_num, index2word = get_loader(args.train_dir, batch_size=args.batchsize, train_flag=True)\ntest_iter, vocab_size, class_num, index2word = get_loader(args.train_dir, batch_size=args.batchsize, train_flag=False)\n\nargs.index2word = index2word\n# update args and print\n\nargs.vocab_num = vocab_size\nargs.class_num = class_num\nargs.cuda = (not args.no_cuda) and torch.cuda.is_available()\nargs.kernel_sizes = [int(k) for k in args.kernel_sizes.split(',')]\nargs.save_dir = os.path.join(args.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n\nprint(\"\\n Parameters:\")\nfor attr, value in sorted(args.__dict__.items()):\n print(\"\\t{}={}\".format(attr.upper(), value))\n logger.info(\"\\t{}={}\".format(attr.upper(), value))\n\n# model\nif args.model == 'Text_CNN':\n cnn = model.Text_CNN(args)\nelif args.model == 'Text_CNN_att':\n cnn = model.Text_CNN_att(args)\nelif args.model == 'DPCNN':\n cnn = model.DPCNN(args)\nelif args.model == 'DPCNN_att':\n cnn = model.DPCNN_att(args)\nelif args.model == 'DPCNN_multi':\n cnn = model.DPCNN_multi(args)\nelif args.model == 'Text_CNN_CBAM':\n cnn = model.Text_CNN_CBAM(args)\nelif args.model == 'Text_LSTM':\n cnn = model.Text_LSTM(args)\nelif args.model == 'Text_MLP':\n cnn = model.Text_MLP(args)\nelif args.model == 'Text_CNN_Style':\n cnn = model.Text_CNN_Style(args)\nelif args.model == 'WV_Text_CNN':\n cnn = model.WV_Text_CNN(args)\nelif args.model == 'Text_LSTM_Style':\n cnn = model.Text_LSTM_Style(args)\nelif args.model == 'Text_CNN_att_MTL':\n cnn = model.Text_CNN_att_MTL(args)\nelif args.model == 'Text_LSTM_att_MTL':\n cnn = model.Text_LSTM_att_MTL(args)\nelif args.model == 'DocBertMtl':\n cnn = model.DocBertMtl(args)\n\n\nif args.snapshot is not None:\n print('\\nLoading model from {}...'.format(args.snapshot))\n cnn.load_state_dict(torch.load(args.snapshot))\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.obv_device\nargs.device = 'cuda' if args.cuda else 'cpu'\n\n\ntrain.train(train_iter, test_iter, cnn, args, logger)\nlogger.info('finish training!')\n","sub_path":"Multi-domain-sentiment-classfication/EnsLM-mATM-layer-wise/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"594753720","text":"# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries\r\n# SPDX-FileCopyrightText: Copyright (c) 2022 ladyada for Adafruit Industries\r\n#\r\n# SPDX-License-Identifier: Unlicense\r\n\r\nimport time\r\nimport board\r\nimport adafruit_pcf8574\r\n\r\nprint(\"PCF8574 
digitalio LED blink test\")\r\n\r\ni2c = board.I2C() # uses board.SCL and board.SDA\r\n# i2c = board.STEMMA_I2C() # For using the built-in STEMMA QT connector on a microcontroller\r\npcf = adafruit_pcf8574.PCF8574(i2c)\r\n\r\n# get a 'digitalio' like pin from the pcf\r\nled = pcf.get_pin(7)\r\n\r\n# Setup pin7 as an output that's at a high logic level default\r\nled.switch_to_output(value=True)\r\n\r\nwhile True:\r\n led.value = True\r\n time.sleep(0.2)\r\n led.value = False\r\n time.sleep(0.2)\r\n","sub_path":"Circuit_Playground/CircuitPython/libraries/adafruit-circuitpython-bundle-7.x-mpy-20230406/examples/pcf8574_simpletest.py","file_name":"pcf8574_simpletest.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"639920742","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^users$', views.users, name=\"users\"),\n url(r'^create_user$', views.create_user, name=\"create_user\"),\n url(r'^show/(?P<id>\d+)$', views.show, name=\"show\"),\n url(r'^removeInterest/(?P<id>\d+)$', views.removeInterest, name=\"removeInterest\"),\n url(r'^$', views.index, name=\"landing\"),\n]","sub_path":"charissa_johnson/interests/apps/interests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"556293175","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ndef f(grid):\n\n # 1 Label each region and record its area\n # Define the points adjacent to (i,j)\n def neighbor(grid, i, j):\n for x,y in [(i-1,j), (i+1, j), (i, j-1), (i,j+1)]:\n if 0<= x < len(grid) and 0<= y < len(grid[0]):\n yield (x,y)\n\n def mark(grid, i, j, num):\n \"\"\"\n num -> the current region label\n \"\"\"\n total = 1\n grid[i][j] = num\n queue = [(i,j)]\n while queue:\n a = queue.pop(0)\n for x,y in neighbor(grid, a[0], a[1]):\n if grid[x][y] == 1:\n queue.append((x,y))\n grid[x][y] = num\n total += 1\n return total\n\n num = 2\n record = {}\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n record[num] = mark(grid, i, j, num)\n num += 1\n\n # 2 Compute the maximum area\n print(grid)\n ans = max(record.values() or [0])\n for r in xrange(len(grid)):\n for c in xrange(len(grid[1])):\n if grid[r][c] == 0:\n seen = {grid[nr][nc] for nr, nc in neighbor(grid, r, c) if grid[nr][nc] > 1}\n ans = max(ans, 1 + sum(record[i] for i in seen))\n return ans\n\n\n\ngrid = [[1, 1], [1, 1]]\nprint(f(grid))\n","sub_path":"demo/code/2021-11-02/f1.py","file_name":"f1.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"238707522","text":"# O(n*c) time | O(n*c) space\ndef knapsackProblem(items, capacity):\n dp_arr = [[[0,[]] for _ in range(capacity + 1)] for _ in range(len(items))]\n for w in range(1,capacity + 1):\n for i in range(len(items)):\n if w < items[i][1]: #can't take the item\n if i > 0:\n dp_arr[i][w] = dp_arr[i-1][w].copy()\n continue\n if i > 0:\n new_weight = w - items[i][1]\n prev_solution = dp_arr[i-1][w]\n remainder_solution = dp_arr[i-1][new_weight]\n if prev_solution[0] > remainder_solution[0] + items[i][0]:\n dp_arr[i][w] = dp_arr[i-1][w].copy()\n else:\n dp_arr[i][w][0] = items[i][0] + remainder_solution[0]\n dp_arr[i][w][1].extend([i, *remainder_solution[1]])\n else:\n dp_arr[i][w][0] = items[i][0]\n dp_arr[i][w][1].append(i)\n return dp_arr[-1][-1]\n \nassert knapsackProblem([[1, 2], [4, 3], [5, 6], [6, 7]], 10) == [10, [1, 
3]]","sub_path":"Dynamic Programming/knapsack_0_1.py","file_name":"knapsack_0_1.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"416929837","text":"import queue_array\nimport stack_array\n\nclass Vertex:\n \n def __init__(self, key):\n \"\"\"Initialize a vertex with the properties below\"\"\"\n self.id = key\n self.connected_to = {}\n self.color = 'white'\n self.dist = 0\n self.pred = None\n \n def add_neighbor(self, nbr, weight=0):\n \"\"\"Connects two vertices by adding a path between them\"\"\"\n self.connected_to[nbr] = weight\n\n def set_distance(self, num):\n \"\"\"Sets distance from the root vertex to another vertex\"\"\"\n self.dist = num\n\n def set_color(self, color):\n \"\"\"Sets the color of a vertex\"\"\"\n self.color = color\n\n def set_pred(self, node):\n \"\"\"Sets the predecessor (parent) of a vertex\"\"\"\n self.pred = node\n\n def __str__(self):\n return str(self.id) + ' connected to: ' + str([x.id for x in self.connected_to])\n\n def get_connections(self):\n \"\"\"Returns a list of all the keys to which the vertex is connected\"\"\"\n return self.connected_to.keys()\n\n def get_id(self):\n \"\"\"Returns the id (label) of the vertex\"\"\"\n return self.id\n\n def get_weight(self, nbr):\n \"\"\"Returns the weight (cost) of an edge (path)\"\"\"\n return self.connected_to[nbr]\n\n def get_distance(self):\n \"\"\"Returns distance from vertex to root vertex\"\"\"\n return self.dist\n\n def get_color(self):\n \"\"\"Returns color of vertex\"\"\"\n return self.color\n\n def get_pred(self):\n \"\"\"Returns predecessor (parent) of vertex\"\"\"\n return self.pred\n\nclass Graph:\n\n def __init__(self):\n \"\"\"Initialize empty graph\"\"\"\n self.vert_list = {}\n self.num_vertices = 0\n\n def add_vertex(self, key):\n \"\"\"Adds vertex to the dictionary of vertices\"\"\"\n self.num_vertices += 1\n new_vertex = Vertex(key)\n self.vert_list[key] = new_vertex\n return new_vertex\n\n def get_vertex(self, key):\n \"\"\"If the key corresponds to a vertex in the vertex dictionary, return\n the vertex it corresponds to. 
If no vertex corresponds to the key, \n return None\"\"\"\n if key in self.vert_list:\n return self.vert_list[key]\n return None\n\n def __contains__(self, key):\n \"\"\"Checks if a key corresponds to a vertex in the vertex dictionary\"\"\"\n return key in self.vert_list\n\n def add_edge(self, v1, v2):\n \"\"\"Adds an edge (path) from one vertex to another with weight 0\"\"\"\n if v1 not in self.vert_list:\n self.add_vertex(v1)\n if v2 not in self.vert_list:\n self.add_vertex(v2)\n self.vert_list[v1].add_neighbor(self.vert_list[v2])\n self.vert_list[v2].add_neighbor(self.vert_list[v1])\n\n def get_vertices(self):\n \"\"\"Returns id's of all vertices in the vertex dictionary\"\"\"\n return self.vert_list.keys()\n \n def __iter__(self):\n return iter(self.vert_list.values())\n\n def bfs(self, start):\n \"\"\"Breadth First Search that returns a list of all connected vertices\"\"\"\n start.set_distance(0)\n start.set_pred(None)\n vert_queue = queue_array.Queue(1000)\n vert_queue.enqueue(start)\n conn_verts = []\n while vert_queue.size() > 0:\n current_vert = vert_queue.dequeue()\n for neighbor in current_vert.get_connections():\n if neighbor.get_color() == 'white':\n neighbor.set_color('grey')\n neighbor.set_distance(current_vert.get_distance() + 1)\n neighbor.set_pred(current_vert)\n vert_queue.enqueue(neighbor)\n current_vert.set_color('black')\n conn_verts.append(current_vert.id)\n return conn_verts\n\n def dfs(self, start):\n \"\"\"Depth first search that checks if a graph is bipartite or not\"\"\"\n start.set_distance(0)\n start.set_pred(None)\n vert_stack = stack_array.Stack(1000)\n vert_stack.push(start)\n blue = []\n red = []\n while not vert_stack.is_empty():\n current_vert = vert_stack.pop()\n if current_vert.get_pred() is None:\n blue.append(current_vert)\n else:\n if current_vert.get_pred() in blue:\n red.append(current_vert)\n elif current_vert.get_pred() in red:\n blue.append(current_vert)\n for neighbor in current_vert.get_connections():\n if neighbor.get_color() == 'white' and neighbor not in red and neighbor not in blue:\n neighbor.set_color('grey')\n neighbor.set_distance(current_vert.get_distance() + 1)\n neighbor.set_pred(current_vert)\n vert_stack.push(neighbor)\n elif neighbor in blue:\n if current_vert in blue:\n return False\n elif neighbor in red:\n if current_vert in red:\n return False\n current_vert.set_color('black')\n\n def depth_first_search_recur(self, node):\n \"\"\"Recursive depth first search\"\"\"\n node.set_color('black')\n for neighbor in node.get_connections():\n if neighbor.get_color() == 'white':\n self.depth_first_search_recur(neighbor)\n\n def conn_components(self):\n \"\"\"Returns a list of lists, with each new list within a list denoting\n a separate, connected tree of vertices\"\"\"\n for key in self.vert_list:\n vertex = self.vert_list[key]\n vertex.set_color('white')\n conn_comp = []\n for key in self.vert_list:\n vertex = self.vert_list[key]\n if vertex.get_color() == 'white':\n conn_tree = self.bfs(vertex)\n conn_comp.append(conn_tree)\n return conn_comp\n\n def bicolor(self):\n \"\"\"Returns True if a Graph is bipartite (able to use\n no more than two colors for each vertex while having no adjacent\n vertices the same color). 
Else, returns False.\"\"\"\n for key in self.vert_list:\n vertex = self.vert_list[key]\n vertex.set_color('white')\n for key in self.vert_list:\n vertex = self.vert_list[key]\n if vertex.get_color() == 'white':\n if self.dfs(vertex) == False:\n return False\n return True\n\n\n","sub_path":"graphs/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":6437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"466931806","text":"from pprint import pprint\n\nperson = dict()\nperson['name'] = 'Mike Swift'\nperson['age'] = 37\nperson['isAlive'] = True\nperson['phone'] = [{'type': 'office', 'number': '608-123-4567'},\n {'type': 'home', 'number': '608-987-6543'}]\nperson['address'] = {'street': '1210 West Dayton Street',\n 'city': 'Madison',\n 'state': 'WI',\n 'zip': 53706}\n# pretty print\npprint(person)\n","sub_path":"tyler/cs301/fall18/materials3/code/lec-14-json-tuples/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"43026717","text":"from __future__ import division #1/2 = float, 1//2 = integer, python 3.0 behaviour in 2.6, to make future port to 3 easier.\nimport datetime\nimport libxml2\nimport os\nclass Task(object):\n\tdef __init__(self,taskfile):\n\t\tself.taskfile=taskfile\n\t\t#print(\"Parsing:\"+taskfile)\n\t\txmldoc=libxml2.parseFile(taskfile)\n\t\tfor node in xmldoc.xpathEval('/task/uid'):\n\t\t\tself.uid = node.content\n\t\tfor node in xmldoc.xpathEval('/task/creation'):\n\t\t\tself.creation = datetime.datetime.utcfromtimestamp(float(node.content))\n\t\tfor node in xmldoc.xpathEval('/task/name'):\n\t\t\tself.name = unicode(node.content,\"utf-8\")\n\t\tfor node in xmldoc.xpathEval('/task/period'):\n\t\t\tself.period = datetime.timedelta(seconds=(float(node.content)))\n\t\tfor node in xmldoc.xpathEval('/task/last'):\n\t\t\tself.last = datetime.datetime.utcfromtimestamp(float(node.content))\n\t@staticmethod\n\tdef writexml(taskspath,uid,creation,name,period,last):\n\t\txmldoc = libxml2.parseDoc('<task/>')\n\t\troot = xmldoc.getRootElement()\n\t\tnode = libxml2.newNode('uid')\n\t\tnode.setContent(uid)\n\t\troot.addChild(node)\n\t\tnode = libxml2.newNode('creation')\n\t\tnode.setContent(str((creation - datetime.datetime(1970, 1, 1)).total_seconds()))\n\t\troot.addChild(node)\n\t\tnode = libxml2.newNode('name')\n\t\tnode.setContent(name)\n\t\troot.addChild(node)\n\t\tnode = libxml2.newNode('period')\n\t\tnode.setContent(str(period))\n\t\troot.addChild(node)\n\t\tnode = libxml2.newNode('last')\n\t\tnode.setContent(str(last))\n\t\troot.addChild(node)\n\t\ttaskpath=os.path.join(taskspath,uid+\".xml\")\n\t\tf = open(taskpath,'w')\n\t\txmldoc.saveTo(f,encoding=\"utf-8\",format=1)\n\t\tf.close()\n\t\treturn taskpath\n\tdef due(self):\n\t\treturn (datetime.datetime.utcnow()-self.last).total_seconds()/self.period.total_seconds()\n\tdef doitnow(self):\n\t\txmldoc=libxml2.parseFile(self.taskfile)\n\t\tself.last = datetime.datetime.utcnow()\n\t\ttimestamp = (self.last-datetime.datetime(1970, 1, 1)).total_seconds()\n\t\tfor node in xmldoc.xpathEval('/task/last'):\n\t\t\tnode.setContent(str(timestamp))\n\t\tf = open(self.taskfile,'w')\n\t\txmldoc.saveTo(f,encoding=\"utf-8\",format=1)\n\t\tf.close()\n\t\treturn timestamp\n","sub_path":"server/Task.py","file_name":"Task.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"257577416","text":"from collections import Counter\n\nnb_shoes = int(input())\ncounter = Counter(list(map(int, input().split())))\nnb_customers = int(input())\nearnings = 0\nfor _ in range(nb_customers):\n size, price = map(int, input().split())\n if (counter.get(size, 0) != 0):\n counter[size] -= 1\n earnings += price\nprint(earnings)\n","sub_path":"python/collections-counter/collections-counter.py","file_name":"collections-counter.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"397788408","text":"from events import Events\nfrom time import sleep\nfrom threading import Thread\nfrom typing import Tuple\n\n\nclass Timer:\n \"\"\"\n A timer that counts seconds, minutes and hours and raises a event when a unit time occur.\n\n Attributes\n ----------\n alive : bool\n Causes the timers counts if True or stop if False.\n\n elapsed_seconds : int\n Number of seconds elapsed since the timer begun.\n\n elapsed_minutes: int\n Number of minutes elapsed since the timer begun.\n\n elapsed_hours: int\n Number of hours elapsed since the timer begun.\n\n events: Events\n The event trigger. Contains the event names on_second, on_minute, on_hour.\n\n \"\"\"\n\n def __init__(self):\n self.alive = False\n self.elapsed_seconds = 0\n self.elapsed_minutes = 0\n self.elapsed_hours = 0\n self.events = Events(('on_second', 'on_minute', 'on_hour'))\n\n def count_seconds(self) -> None:\n \"\"\"Raises the on_second event, every second and counts the seconds elapsed.\"\"\"\n\n while self.alive:\n sleep(1) # sleeping the thread a second\n self.elapsed_seconds += 1 # incrementing elapsed seconds by 1\n self.events.on_second() # raising the event\n\n def count_minutes(self) -> None:\n \"\"\"Raises the on_minute event, every minute and counts the minutes elapsed.\"\"\"\n\n while self.alive:\n sleep(60) # sleeping the thread a minute\n self.elapsed_minutes += 1 # incrementing elapsed minutes by 1\n self.events.on_minute() # raising the event\n\n def count_hours(self) -> None:\n \"\"\"Raises the on_hour event, every hour and counts the hours elapsed.\"\"\"\n\n while self.alive:\n sleep(3600) # sleeping the thread a hour\n self.elapsed_hours += 1 # incrementing elapsed hours by 1\n self.events.on_hour() # raising the event\n\n\ndef time_units(total_seconds: int) -> Tuple[int, int, int]:\n \"\"\"Convert a given number of seconds to hours, minutes and seconds.\n\n Parameters\n ----------\n total_seconds : int\n Total number of seconds to convert.\n\n Returns\n -------\n int, int, int\n Three integers representing the resultant seconds, minutes and hour of the conversion\n\n \"\"\"\n\n hours = total_seconds // 3600\n minutes = (total_seconds // 60) % 60\n seconds = total_seconds % 60\n\n return seconds, minutes, hours\n\n\nclass Requestmeter:\n \"\"\"\n This class works like a speedometer for requests. The members declared will calculate the request ratios made\n by unit of time (seconds, minutes, hours).\n\n Attributes\n ----------\n total_requests: int\n Counter of the total requests made.\n\n requests_by_second: list\n A two elements list that stores the requests made in each second. It functions like a log that registers\n the accumulated requests made in the previous second and the differential requests made in the last second.\n\n requests_by_minute: list\n A two elements list that stores the requests made in each minute. 
It functions like a log that registers\n the accumulated requests made in the previous minute and the differential requests made in the last minute.\n\n requests_by_hour: list\n A two elements list that stores the requests made in each hour. It functions like a log that registers\n the accumulated requests made in the previous hour and the differential requests made in the last hour.\n\n events: Events\n The event trigger. Contains the event names second_speed_limit_exceeded, minute_speed_limit_exceeded,\n hour_speed_limit_exceeded. Raises the corresponding event each time the speed limit has been exceeded.\n\n timer: Timer\n The timer that will count each second, minute and hour elapsed.\n\n Class Attributes\n ----------------\n speed_limits : list\n Maximum number of requests that must be sent per second, minute and hour, correspondingly.\n\n Notes\n -----\n The difference between the terms \"by\" and \"per\" used in the members of this class is clearly explained in\n https://english.stackexchange.com/a/22693\n\n \"\"\"\n\n speed_limits = () # maximum number of requests per second, minute and hour, correspondingly\n\n def __init__(self, limits):\n Requestmeter.speed_limits = limits if limits is not None else (2, 9, 540)\n self.total_requests = 0\n self.requests_by_second = [0, 0]\n self.requests_by_minute = [0, 0]\n self.requests_by_hour = [0, 0]\n\n self.events = Events(('s_speed_limit_exceeded', 'm_speed_limit_exceeded', 'h_speed_limit_exceeded'))\n\n self.timer = Timer()\n\n # Event subscriptions\n self.timer.events.on_second = self.requests_by_second_counter\n self.timer.events.on_minute = self.requests_by_minute_counter\n self.timer.events.on_hour = self.requests_by_hour_counter\n\n def start(self):\n self.timer.alive = True\n seconds_thread = Thread(target=self.timer.count_seconds)\n minutes_thread = Thread(target=self.timer.count_minutes)\n hours_thread = Thread(target=self.timer.count_hours)\n\n seconds_thread.setDaemon(True)\n minutes_thread.setDaemon(True)\n hours_thread.setDaemon(True)\n\n seconds_thread.start()\n minutes_thread.start()\n hours_thread.start()\n\n def finish(self):\n # finishing all 3 threads\n self.timer.alive = False\n\n # returning elapsed time\n return self.timer.elapsed_seconds, self.timer.elapsed_minutes, self.timer.elapsed_hours\n\n def count(self):\n self.total_requests += 1\n\n # region Speed calculators\n def requests_per_second(self):\n return sum(self.requests_by_second[:]) / self.timer.elapsed_seconds\n\n def requests_per_minute(self):\n return sum(self.requests_by_minute[:]) / self.timer.elapsed_minutes\n\n def requests_per_hour(self):\n return sum(self.requests_by_hour[:]) / self.timer.elapsed_hours\n # endregion Speed calculators\n\n # region Timer event handlers\n def requests_by_second_counter(self):\n self.requests_by_second[0] = sum(self.requests_by_second[:])\n self.requests_by_second[-1] = self.total_requests - self.requests_by_second[0]\n print(\"s:\", self.requests_by_second[-1])\n\n if self.requests_by_second[-1] > Requestmeter.speed_limits[0]:\n self.events.s_speed_limit_exceeded((self.requests_by_second[-1] / self.speed_limits[0])-1)\n\n def requests_by_minute_counter(self):\n self.requests_by_minute[0] = sum(self.requests_by_minute[:])\n self.requests_by_minute[-1] = self.total_requests - self.requests_by_minute[0]\n print(\"m:\", self.requests_by_minute[-1])\n\n if self.requests_by_minute[-1] > Requestmeter.speed_limits[1]:\n self.events.m_speed_limit_exceeded((self.requests_by_minute[-1] / self.speed_limits[1])-1)\n\n def 
requests_by_hour_counter(self):\n self.requests_by_hour[0] = sum(self.requests_by_hour[:])\n self.requests_by_hour[-1] = self.total_requests - self.requests_by_hour[0]\n print(\"h:\", self.requests_by_hour[-1])\n\n if self.requests_by_hour[-1] > Requestmeter.speed_limits[2]:\n self.events.h_speed_limit_exceeded((self.requests_by_hour[-1] / self.speed_limits[2])-1)\n # endregion Timer event handlers\n\n def summary(self):\n p_seconds, p_minutes, p_hours = time_units(self.timer.elapsed_seconds)\n\n print(\"Total requests: \", self.total_requests)\n print(f\"Elapsed time: {self.timer.elapsed_hours} h | {self.timer.elapsed_minutes} m | \"\n f\"{self.timer.elapsed_seconds} s\")\n print(f\"Pretty time: {self.timer.elapsed_seconds} s = {p_hours}:{p_minutes}:{p_seconds}\")\n print(f\"rps: {self.total_requests/self.timer.elapsed_seconds} requests/second\")\n\n print(\"Requests by second\")\n print(self.requests_by_second)\n\n print(\"Requests by minute\")\n print(self.requests_by_minute)\n\n print(\"Requests by hour\")\n print(self.requests_by_hour)\n","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"426489614","text":"import pygame\nimport time\nimport random\n\npygame.init()\n\nWIDTH = 350\nHEIGHT = 300\nSCREEN = pygame.display.set_mode((WIDTH, HEIGHT))\n\npygame.display.set_caption('My Game')\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\n\npygame.font.init()\n\nSCREEN.fill(WHITE)\n\ncords = {\n \"CenterCol\" : 175.0,\n \"RightCol\" : 291.6666666666667,\n \"LeftCol\" : 58.333333333333336,\n \"Top\" : 50.0,\n \"Middle\" : 150.0,\n \"Bottom\" : 250.0,\n}\n\nboxes = {\n}\n\n# check if 3 in row with dict (cords : name) print(175, 50) = x or o\ndef checkcondition(cords):\n possiblewinRow()\n possiblewinCol()\n possiblewinCross()\n if len(boxes) == 9:\n win('DRAW')\n \ndef possiblewinRow():\n col = [59, 175, 291]\n row = [50, 150, 250]\n top = []\n middle = []\n bottom = []\n for i in range(0, 3):\n for j in range(0, 3):\n temp = col[j], row[i]\n tup = tuple(temp)\n if tup in boxes:\n if row[i] == 50:\n top.append(boxes[tup])\n elif row[i] == 150:\n middle.append(boxes[tup])\n elif row[i] == 250:\n bottom.append(boxes[tup])\n else:\n 'nohere'\n\n if len(top) == 3:\n if 'o' in top:\n if 'x' not in top:\n win('o')\n else:\n win('x')\n if len(middle) == 3:\n if 'o' in middle:\n if 'x' not in middle:\n win('o')\n else:\n win('x')\n if len(bottom) == 3:\n if 'o' in bottom:\n if 'x' not in bottom:\n win('o')\n else:\n win('x')\n\ndef possiblewinCol():\n col = [59, 175, 291]\n row = [50, 150, 250]\n left = []\n middleCol = []\n right = []\n for i in range(0, 3):\n for j in range(0, 3):\n temp = col[i], row[j]\n tup = tuple(temp)\n if tup in boxes:\n if row[i] == 50:\n left.append(boxes[tup])\n elif row[i] == 150:\n middleCol.append(boxes[tup])\n elif row[i] == 250:\n right.append(boxes[tup])\n else:\n 'nohere'\n \n if len(left) == 3:\n if 'o' in left:\n if 'x' not in left:\n win('o')\n else:\n win('x')\n if len(middleCol) == 3:\n if 'o' in middleCol:\n if 'x' not in middleCol:\n win('o')\n else:\n win('x')\n if len(right) == 3:\n if 'o' in right:\n if 'x' not in right:\n win('o')\n else:\n win('x')\n\ndef possiblewinCross():\n col = [59, 175, 291]\n row = [50, 150, 250]\n leftright = []\n rightleft = []\n for i in range(0, 3):\n temp = col[i], row[i]\n tup = tuple(temp)\n if tup in boxes:\n leftright.append(boxes[tup])\n for i in range(0, 3):\n if i == 0:\n temp = 
col[i], row[2]\n tup = tuple(temp)\n if tup in boxes:\n rightleft.append(boxes[tup])\n elif i == 1:\n temp = col[i], row[1]\n tup = tuple(temp)\n if tup in boxes:\n rightleft.append(boxes[tup])\n else:\n temp = col[i], row[0]\n tup = tuple(temp)\n if tup in boxes:\n rightleft.append(boxes[tup])\n \n if len(leftright) == 3:\n if 'o' in leftright:\n if 'x' not in leftright:\n win('o')\n else:\n win('x')\n if len(rightleft) == 3:\n if 'o' in rightleft:\n if 'x' not in rightleft:\n win('o')\n else:\n win('x')\n\n \n\ndef checkpos(posx, posy):\n ycolumns = [50, 150, 250]\n for y in ycolumns:\n if y - 50 <= posy <= y + 50:\n drawy = y\n xcolumns = [59, 175, 291]\n for x in xcolumns:\n if x - 59 <= posx <= x + 59:\n drawx = x\n return(drawx, drawy)\n\ndef win(winstate):\n if winstate == 'DRAW':\n print(f'{winstate} - you tied')\n winstate = f'{winstate} - you tied'\n else:\n print(f'{winstate} won this match')\n winstate= f'{winstate} won this match'\n\n blankwin(winstate)\n\ndef blankwin(winstate):\n time.sleep(2)\n SCREEN.fill((0, 0, 0))\n myfont = pygame.font.SysFont('Comic Sans MS', 30)\n textsurface = myfont.render(winstate, False, (255, 255, 255))\n SCREEN.blit(textsurface,(100, 0))\n pygame.display.update()\n\nturns = 1 \n\nis_running = True\nwhile is_running:\n pygame.display.flip()\n\n class lines(object):\n for Lines in range(1, 3):\n pygame.draw.line(SCREEN, BLACK, (WIDTH/3 * Lines, 0), (WIDTH/3 * Lines, HEIGHT), 4)\n pygame.draw.line(SCREEN, BLACK, (0, HEIGHT/3 * Lines), (WIDTH, HEIGHT/3 * Lines), 4)\n\n done = False\n \n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n while done == False:\n botx = random.randint(0, WIDTH)\n boty = random.randint(0, HEIGHT)\n botdraw = checkpos(botx, boty)\n if turns % 2 == 0:\n if botdraw not in boxes:\n done = True\n turns += 1\n pygame.draw.circle(SCREEN, BLACK, botdraw, 10, 2)\n boxes.update({botdraw : 'o'})\n pygame.display.flip()\n checkcondition(botdraw)\n else:\n if botdraw not in boxes:\n done = True\n turns += 1\n pygame.draw.lines(SCREEN, BLACK, False, [(botdraw[0]+10, botdraw[1]+10), (botdraw[0]-10, botdraw[1]-10), (botdraw[0], botdraw[1]), (botdraw[0]-10, botdraw[1]+10), (botdraw[0]+10, botdraw[1]-10)], 4)\n boxes.update({botdraw : 'x'})\n pygame.display.flip()\n checkcondition(botdraw)\n if event.type == pygame.QUIT:\n is_running = False\n\npygame.quit()\n","sub_path":"botnc.py","file_name":"botnc.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"130783545","text":"# This program creates a deck of cards, shuffles them and then prints five cards.\n\nimport itertools, random\n\n\ndef getAllCards():\n # deck is a list of tuples\n deck = list(itertools.product(range(1,14),['Spade', 'Heart', 'Diamond', 'Club']))\n return deck\n\ndef shuffleDeck(deck):\n # this will do inplace shuffling of the list\n random.shuffle(deck)\n print(\"You Got :\")\n for i in range(5):\n print(deck[i][0] , \" of \", deck[i][1])\n\nmydeck = getAllCards()\nshuffleDeck(mydeck)","sub_path":"Akshar_Basic/CardDeck.py","file_name":"CardDeck.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"157648564","text":"import time\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef 
get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n try:\n city = input('Enter a city: ').lower()\n if city in ('chicago', 'new york city', 'washington'):\n break\n else:\n print('\\nNo city with this name: try again!')\n except ValueError:\n print('That\\'s not a valid name!')\n\n # get user input for month (all, january, february, ... , june)\n while True:\n try:\n month = input('Choose one month to analyze or type \\'all\\' to get all of them: ').lower()\n if month in ('all', 'january', 'february', 'march', 'april', 'may', 'june'):\n break\n else:\n print('That\\'s not a valid month! Try again!')\n except ValueError:\n print('That\\'s not a valid month!')\n \n # get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n try:\n day = input('Choose one day of the week or type \\'all\\' to get all of them: ').lower()\n \n if day in ('all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'):\n break\n else:\n print('That\\'s not a valid day')\n except ValueError:\n print('That\\'s not a valid day!')\n \n print('-'*40)\n return city, month, day\n\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n df['month'] = df['Start Time'].dt.month\n\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month \n df = df[df['month'] == month]\n\n # filter by day of week \n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n #add hour column:\n df['hour'] = df['Start Time'].dt.hour\n\n return df\n\ndef raw_data(df):\n \"\"\"Display raw data if the user wants. 
\"\"\"\n x = 5\n while True:\n raw_dt_display = input('\\nDo you want to see raw data?\\n').lower()\n \n if raw_dt_display == 'yes' and x < len(df.index):\n print(df.head(x))\n x += 5\n elif x > len(df.index): #and x % 5 < 5:\n print(df.head((x + (x % 5))))\n print('\\nYou have reached the last line of the dataset!\\n')\n break\n else:\n break\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n print('The most common month is: ', df['month'].mode()[0])\n\n\n # display the most common day of week\n print('The most common day is: ', df['day_of_week'].mode()[0])\n\n # display the most common start hour\n\n print('The most common start hour is: ', df['hour'].mode()[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n m_startstation = df['Start Station'].mode()[0]\n print('The most commonly used start station is {}'.format(m_startstation))\n\n # display most commonly used end station\n m_endstation = df['End Station'].mode()[0]\n print('The most commonly used end station is {}'.format(m_endstation))\n\n # display most frequent combination of start station and end station trip\n most_freq = df.groupby(['Start Station', 'End Station']).size().nlargest(1).reset_index(name='count')\n print('The most frequent combination of start station and end station trip is {} and {}'.format(most_freq['Start Station'], most_freq['End Station']))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print('Total travel time is {} seconds'.format(df['Trip Duration'].sum()))\n\n # display mean travel time\n print('Mean travel time is {} seconds'.format(df['Trip Duration'].mean())) \n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df, city):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print('Users by type: \\n{}'.format(user_types))\n\n # Display counts of gender\n if city != 'washington':\n user_gender = df['Gender'].value_counts()\n print('Users by gender: {}'.format(user_gender))\n else:\n print('There is no gender data in Washington dataset!')\n\n\n # Display earliest, most recent, and most common year of birth\n if city != 'washington':\n earliest = df['Birth Year'].min()\n most_recent = df['Birth Year'].max()\n most_common = df['Birth Year'].mode()\n print('The earliest, most recent, and most common year of birth are {}, {}, {}, respectively'.format(earliest, most_recent, most_common))\n else:\n print('There is no birth year data in Washington dataset!')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef plot_data(df):\n \"\"\"Displays plots if user choose yes.\"\"\"\n\n plot_true = input('\\nWould you like to see some calculed statistics with plots? 
Enter yes or no\\n').lower()\n \n #display number of users per type with a bar plot\n if plot_true == 'yes':\n df['User Type'].value_counts().plot(kind='barh')\n plt.ylabel('User Type')\n plt.xlabel('Number of users')\n plt.title('Number of users per type')\n plt.show()\n #display number of users per gender\n df['Gender'].value_counts().plot(kind='barh')\n plt.ylabel('Gender')\n plt.xlabel('Number of users')\n plt.title('Number of users per gender')\n plt.show()\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n raw_data(df)\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df, city)\n plot_data(df)\n #ask if user wants to restar the script\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n \n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare_2_Juliana_v2.py","file_name":"bikeshare_2_Juliana_v2.py","file_ext":"py","file_size_in_byte":8087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"587572364","text":"'''\r\n无频域,只有时序和形态特征\r\n'''\r\nfrom time import time\r\nimport keras\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom keras import Input, Model\r\nfrom keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPool1D, Dropout, Bidirectional, Lambda\r\nfrom keras.optimizers import Adam, SGD\r\nfrom keras.utils import to_categorical\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.svm import SVC\r\nfrom keras import backend as K\r\nimport pandas as pd\r\nfrom sklearn.manifold import TSNE\r\nfrom sklearn.feature_selection import RFE\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn import metrics\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn import metrics\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.decomposition import PCA\r\n\r\n# 写一个LossHistory类,保存loss和acc\r\nclass LossHistory(keras.callbacks.Callback):\r\n def on_train_begin(self, logs={}):\r\n self.losses = {'batch': [], 'epoch': []}\r\n self.accuracy = {'batch': [], 'epoch': []}\r\n # 按照batch来进行追加数据\r\n\r\n def on_batch_end(self, batch, logs={}):\r\n # 每一个batch完成后向容器里面追加loss,acc\r\n self.losses['batch'].append(logs.get('loss'))\r\n self.accuracy['batch'].append(logs.get('acc'))\r\n\r\n\r\n def on_epoch_end(self, epoch, logs={}):\r\n # 每一个epoch完成后向容器里面追加loss,acc\r\n self.losses['epoch'].append(logs.get('loss'))\r\n self.accuracy['epoch'].append(logs.get('acc'))\r\n\r\n def loss_plot(self, loss_type):\r\n iters = range(len(self.losses[loss_type]))\r\n plt.figure()\r\n # acc\r\n plt.plot(iters, self.accuracy[loss_type], 'r')\r\n print('Accuracy', self.accuracy[loss_type])\r\n plt.xlabel(loss_type)\r\n plt.ylabel('Accuracy')\r\n plt.show()\r\n # loss\r\n plt.plot(iters, self.losses[loss_type], 'g')\r\n print('loss', self.losses[loss_type])\r\n # plt.grid(True)\r\n plt.xlabel(loss_type)\r\n plt.ylabel('loss')\r\n plt.show()\r\n\r\n\r\ndef model_23():\r\n input2_ = Input(shape=(500, 16), name='input2') # [batch, in_width, in_channels]\r\n x2 = Conv1D(10, kernel_size=100, strides=5, activation='relu', padding='same')(input2_) # 100, 10\r\n x2 = MaxPool1D(pool_size=2)(x2) # 50, 10\r\n x2 = Conv1D(16, kernel_size=50, activation='relu', padding='same')(x2) # 50, 16\r\n x2 = Flatten()(x2) # 800\r\n x2 = Lambda(lambda x: x * 0.5)(x2)\r\n\r\n input3_ = 
Input(shape=(16, 500), name='input3')# 第一个时间序列timesteps,第二个是特征数\r\n # x3 = Bidirectional(LSTM(250, return_sequences=True))(input3_)\r\n x3 = Bidirectional(LSTM(25, return_sequences=True))(input3_)\r\n x3 = Flatten()(x3)\r\n x3 = Lambda(lambda x: x * 0.5)(x3)\r\n # added = concatenate([x2, x3])\r\n added = keras.layers.Add()([x2, x3])\r\n added = Dropout(0.6)(added)\r\n output_ = Dense(4, activation='softmax', name='output')(added)\r\n model = Model(inputs=[input2_, input3_], outputs=[output_])\r\n model.summary()\r\n return model\r\n\r\n\r\ndef model_02():\r\n input2_ = Input(shape=(500, 16), name='input2') # [batch, in_width, in_channels]\r\n x2 = Conv1D(10, kernel_size=100, strides=5, activation='relu', padding='same')(input2_) # 100, 10\r\n x2 = MaxPool1D(pool_size=2)(x2) # 50, 10\r\n x2 = Conv1D(16, kernel_size=50, activation='relu', padding='same')(x2) # 50, 16\r\n x2 = Flatten()(x2) # 800\r\n x2 = Dropout(0.7)(x2)\r\n output_ = Dense(4, activation='softmax', name='output')(x2)\r\n model = Model(inputs=[input2_], outputs=[output_])\r\n model.summary()\r\n return model\r\n\r\n\r\ndef model_03():\r\n input3_ = Input(shape=(16, 500), name='input3') # 第一个时间序列timesteps,第二个是特征数\r\n # x3 = Bidirectional(LSTM(250, return_sequences=True))(input3_)\r\n x3 = Bidirectional(LSTM(25, return_sequences=True))(input3_)# 16, 20\r\n x3 = Flatten()(x3)#320\r\n x3 = Dropout(0.7)(x3)\r\n output_ = Dense(4, activation='softmax', name='output')(x3)\r\n model = Model(inputs=[input3_], outputs=[output_])\r\n model.summary()\r\n return model\r\n\r\n\r\ndef svc(traindata, trainlabel, testdata, testlabel):\r\n print(\"Start training SVM...\")\r\n svcClf = SVC(C=0.1, kernel=\"rbf\", cache_size=300)\r\n svcClf.fit(traindata, trainlabel)\r\n pred_testlabel = svcClf.predict(testdata)\r\n num = len(pred_testlabel)\r\n accuracy = len([1 for i in range(num) if testlabel[i] == pred_testlabel[i]]) / float(num)\r\n print(\"cnn-svm Accuracy:\", accuracy)\r\n print('acc', accuracy_score(testlabel, pred_testlabel))\r\n print('precision_score', metrics.precision_score(testlabel, pred_testlabel, average='macro'))\r\n print('recall_score', metrics.recall_score(testlabel, pred_testlabel, average='macro'))\r\n print('f1_score', metrics.f1_score(testlabel, pred_testlabel, average='weighted'))\r\n\r\n\r\ndef rf(traindata, trainlabel, testdata, testlabel):\r\n print(\"Start training Random Forest...\")\r\n rfClf = RandomForestClassifier(n_estimators=200, criterion='entropy')\r\n rfClf.fit(traindata, trainlabel)\r\n pred_testlabel = rfClf.predict(testdata)\r\n num = len(pred_testlabel)\r\n accuracy = len([1 for i in range(num) if testlabel[i] == pred_testlabel[i]]) / float(num)\r\n print(\"cnn-rf Accuracy:\", accuracy)\r\n print('acc', accuracy_score(testlabel, pred_testlabel))\r\n print('precision_score', metrics.precision_score(testlabel, pred_testlabel, average='macro'))\r\n print('recall_score', metrics.recall_score(testlabel, pred_testlabel, average='macro'))\r\n print('f1_score', metrics.f1_score(testlabel, pred_testlabel, average='weighted'))\r\n\r\n\r\ndef knn(traindata, trainlabel, testdata, testlabel):\r\n print(\"Start training KNN...\")\r\n knn = KNeighborsClassifier(n_neighbors=5, p=2,leaf_size=30,weights='distance', algorithm='auto')\r\n knn.fit(traindata, trainlabel)\r\n pred_testlabel = knn.predict(testdata)\r\n num = len(pred_testlabel)\r\n accuracy = len([1 for i in range(num) if testlabel[i] == pred_testlabel[i]]) / float(num)\r\n print(\"cnn-knn Accuracy:\", accuracy)\r\n print('acc', accuracy_score(testlabel, 
pred_testlabel))\r\n print('precision_score', metrics.precision_score(testlabel, pred_testlabel, average='macro'))\r\n print('recall_score', metrics.recall_score(testlabel, pred_testlabel, average='macro'))\r\n print('f1_score', metrics.f1_score(testlabel, pred_testlabel, average='weighted'))\r\n\r\n\r\ndef plot_confusion_matrix(cm, title='Confusion Matrix', cmap=plt.cm.binary):\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n xlocation = np.array(range(len(labels)))\r\n plt.xticks(xlocation, labels, rotation=90)\r\n plt.yticks(xlocation, labels)\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n X_train_2d = np.load(file=\"X_train_2d_0.5.npy\")\r\n X_test_2d = np.load(file=\"X_test_2d_0.5.npy\")\r\n X_train_1d = np.load(file=\"X_train_1d_0.5.npy\")\r\n X_test_1d = np.load(file=\"X_test_1d_0.5.npy\")\r\n y_train = np.load(file=\"y_train_0.5.npy\") - 1\r\n y_test = np.load(file=\"y_test_0.5.npy\") - 1\r\n X_train_2d_T = X_train_2d.transpose(0, 2, 1)\r\n X_test_2d_T = X_test_2d.transpose(0, 2, 1)\r\n # X_test_1d_T = X_test_1d.reshape(1600, 8000, 1)\r\n # X_train_1d_T = X_train_1d.reshape(2400, 8000, 1)\r\n print(X_train_2d.shape)# 500,16\r\n print(X_test_2d.shape)\r\n\r\n labels = ['TYPE1', 'TYPE2', 'TYPE3', 'TYPE4']\r\n tick_marks = np.array(range(len(labels))) + 0.5\r\n\r\n # one-hot类型\r\n y_train_hot, y_test_hot = to_categorical(y_train, 4), to_categorical(y_test, 4)\r\n # X_train_pic = list_pic('D:/PycharmProjects/untitled/img_train/X_train_1d' + '*.png')\r\n # X_test_pic = list_pic('D:/PycharmProjects/untitled/img_test/X_test_1d' + '*.png')\r\n # X_val_pic = list_pic('D:/PycharmProjects/untitled/img_val/X_val_1d' + '*.png')\r\n\r\n model_23 = model_23()\r\n lh_23 = LossHistory()\r\n model_23.compile(optimizer=Adam(lr=0.0007), loss='categorical_crossentropy', metrics=['acc']) #Adam((lr=0.0007))\r\n model_23.fit([X_train_2d, X_train_2d_T], y_train_hot, batch_size=40, epochs=1, verbose=1, shuffle=True, callbacks=[lh_23])\r\n\r\n score_23, acc_23 = model_23.evaluate([X_test_2d, X_test_2d_T], y_test_hot, verbose=0)\r\n y_pre = model_23.predict([X_test_2d, X_test_2d_T])\r\n y_pre = np.argmax(y_pre, axis=1)\r\n num = len(y_pre)\r\n accuracy = len([1 for i in range(num) if y_pre[i] == y_test[i]]) / float(num)\r\n print('loss_23', lh_23.losses)\r\n print('acc_23', lh_23.accuracy)\r\n lh_23.loss_plot('batch')\r\n print('acc', accuracy_score(y_test, y_pre))\r\n print('precision_score', metrics.precision_score(y_test, y_pre, average='macro'))\r\n print('recall_score', metrics.recall_score(y_test, y_pre, average='macro'))\r\n print('f1_score', metrics.f1_score(y_test, y_pre, average='weighted'))\r\n print('混淆矩阵', confusion_matrix(y_test, y_pre))\r\n target_names = ['class 0', 'class 1', 'class 2', 'class 3']\r\n print(classification_report(y_test, y_pre, target_names=target_names))\r\n cm = confusion_matrix(y_test, y_pre)\r\n np.set_printoptions(precision=2)\r\n cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n plt.figure()\r\n ind_array = np.arange(len(labels))\r\n x, y = np.meshgrid(ind_array, ind_array)\r\n for x_val, y_val in zip(x.flatten(), y.flatten()):\r\n c = cm_normalized[y_val][x_val]\r\n if c > 0.01:\r\n plt.text(x_val, y_val, \"%0.2f\" % (c,), color='white', fontsize=15, va='center', ha='center')\r\n # offset the tick\r\n plt.gca().set_xticks(tick_marks, minor=True)\r\n plt.gca().set_yticks(tick_marks, minor=True)\r\n 
plt.gca().xaxis.set_ticks_position('none')\r\n plt.gca().yaxis.set_ticks_position('none')\r\n plt.grid(True, which='minor', linestyle='-')\r\n plt.gcf().subplots_adjust(bottom=0.15)\r\n plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')\r\n plt.show()\r\n\r\n # ################################################## 提取FC特征 ###################################################\r\n get_feature23_FC = K.function([model_23.layers[0].input, model_23.layers[3].input], [model_23.layers[10].output])#CNN3 LSRM4\r\n FC_train_feature23_FC = get_feature23_FC([X_train_2d, X_train_2d_T])[0]\r\n FC_test_feature23_FC = get_feature23_FC([X_test_2d, X_test_2d_T])[0]\r\n # SCM RF\r\n # svc(FC_train_feature23_FC, y_train, FC_test_feature23_FC, y_test)\r\n # rf(FC_train_feature23_FC, y_train, FC_test_feature23_FC, y_test)\r\n # knn(FC_train_feature23_FC, y_train, FC_test_feature23_FC, y_test)\r\n # fc层t-sne\r\n y_train = pd.DataFrame(y_train)\r\n FC_train_feature23_FC = pd.DataFrame(FC_train_feature23_FC)\r\n r = pd.concat([FC_train_feature23_FC, y_train], axis=1)\r\n r.columns = list(FC_train_feature23_FC.columns) + [r'聚类类别']\r\n t0 = time()\r\n tsne = TSNE(n_components=2, init=\"pca\", perplexity=500, random_state=1, learning_rate=50)\r\n tsne.fit_transform(FC_train_feature23_FC.iloc[:1000, :]) # 进行数据降维,降成两维\r\n t1 = time()\r\n print(\"t-SNE: %.2g sec\" % (t1 - t0)) # 算法用时\r\n tsne = pd.DataFrame(tsne.embedding_, index=FC_train_feature23_FC.iloc[:1000, :].index) # 转换数据格式\r\n d = tsne[r[r'聚类类别'] == 0]\r\n plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n d = tsne[r[r'聚类类别'] == 1]\r\n # plt.plot(d[0],d[1],'go')#o\r\n plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n d = tsne[r[r'聚类类别'] == 2]\r\n # plt.plot(d[0],d[1],'b*')#*\r\n plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n d = tsne[r[r'聚类类别'] == 3]\r\n # plt.plot(d[0], d[1],'y+')#*\r\n plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.legend(['TYPE1', 'TYPE2', 'TYPE3', 'TYPE4'])\r\n plt.title(\"FC t-SNE (%.2g sec)\" % (t1 - t0))\r\n plt.show()\r\n\r\n #################################################输入聚类######################################################3\r\n FC_train_feature23_In = np.reshape(X_train_2d, (-1, 8000))\r\n FC_test_feature23_In = np.reshape(X_test_2d, (-1, 8000))\r\n # SCM RF knn\r\n # svc(FC_train_feature23_In, y_train, FC_test_feature23_In, y_test)\r\n # rf(FC_train_feature23_In, y_train, FC_test_feature23_In, y_test)\r\n # knn(FC_train_feature23_In, y_train, FC_test_feature23_In, y_test)\r\n # Input层t-sne\r\n y_train = pd.DataFrame(y_train)\r\n FC_train_feature23_In = pd.DataFrame(FC_train_feature23_In)\r\n r = pd.concat([FC_train_feature23_In, y_train], axis=1)\r\n r.columns = list(FC_train_feature23_In.columns) + [r'聚类类别']\r\n t0 = time()\r\n tsne = TSNE(n_components=2, init=\"pca\", perplexity=500, random_state=1, learning_rate=50)\r\n tsne.fit_transform(FC_train_feature23_In.iloc[:1000, :]) # 进行数据降维,降成两维\r\n t1 = time()\r\n print(\"t-SNE: %.2g sec\" % (t1 - t0)) # 算法用时\r\n tsne = pd.DataFrame(tsne.embedding_, index=FC_train_feature23_In.iloc[:1000, :].index) # 转换数据格式\r\n d = tsne[r[r'聚类类别'] == 1]\r\n plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n d = tsne[r[r'聚类类别'] == 2]\r\n # plt.plot(d[0],d[1],'go')#o\r\n plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n d = tsne[r[r'聚类类别'] == 3]\r\n # plt.plot(d[0],d[1],'b*')#*\r\n plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n d = tsne[r[r'聚类类别'] == 4]\r\n # plt.plot(d[0], d[1],'y+')#*\r\n 
    plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n    plt.xticks([])\r\n    plt.yticks([])\r\n    plt.legend(['TYPE1', 'TYPE2', 'TYPE3', 'TYPE4'])\r\n    plt.title(\"Input t-SNE (%.2g sec)\" % (t1 - t0))\r\n    plt.show()\r\n\r\n    # ############################################# Extract CNN features ####################################################\r\n    get_feature23_CNN = K.function([model_23.layers[0].input, model_23.layers[3].input], [model_23.layers[4].output]) # CNN3, LSTM4\r\n    FC_train_feature23_CNN = get_feature23_CNN([X_train_2d, X_train_2d_T])[0]\r\n    FC_test_feature23_CNN = get_feature23_CNN([X_test_2d, X_test_2d_T])[0]\r\n    for i in range(16):\r\n        plt.subplot(4, 4, i + 1)\r\n        plt.plot(FC_train_feature23_CNN[0, :, i])\r\n        plt.xticks([])\r\n        plt.yticks([])\r\n    plt.suptitle('TYPE3')\r\n    plt.show()\r\n\r\n    for i in range(16):\r\n        plt.subplot(4, 4, i + 1)\r\n        plt.plot(FC_train_feature23_CNN[1, :, i])\r\n        plt.xticks([])\r\n        plt.yticks([])\r\n    plt.suptitle('TYPE2')\r\n    plt.show()\r\n\r\n    for i in range(16):\r\n        plt.subplot(4, 4, i + 1)\r\n        plt.plot(FC_train_feature23_CNN[2, :, i])\r\n        plt.xticks([])\r\n        plt.yticks([])\r\n    plt.suptitle('TYPE4')\r\n    plt.show()\r\n\r\n    for i in range(16):\r\n        plt.subplot(4, 4, i + 1)\r\n        plt.plot(FC_train_feature23_CNN[5, :, i])\r\n        plt.xticks([])\r\n        plt.yticks([])\r\n    plt.suptitle('TYPE1')\r\n    plt.show()\r\n    # ############################################## CNN-feature clustering #############################################################\r\n    FC_train_feature23_CNN = np.reshape(FC_train_feature23_CNN, (-1, 800))\r\n    # SVM / RF / kNN\r\n    # svc(FC_train_feature23_CNN, y_train, FC_test_feature23_CNN, y_test)\r\n    # rf(FC_train_feature23_CNN, y_train, FC_test_feature23_CNN, y_test)\r\n    # knn(FC_train_feature23_CNN, y_train, FC_test_feature23_CNN, y_test)\r\n    # t-SNE on the CNN-layer features\r\n    y_train = pd.DataFrame(y_train)\r\n    FC_train_feature23_CNN = pd.DataFrame(FC_train_feature23_CNN)\r\n    r = pd.concat([FC_train_feature23_CNN, y_train], axis=1)\r\n    r.columns = list(FC_train_feature23_CNN.columns) + [r'cluster_label']\r\n    t0 = time()\r\n    tsne = TSNE(n_components=2, init=\"pca\", perplexity=500, random_state=1, learning_rate=50)\r\n    tsne.fit_transform(FC_train_feature23_CNN.iloc[:1000, :]) # reduce the features to two dimensions\r\n    t1 = time()\r\n    print(\"t-SNE: %.2g sec\" % (t1 - t0)) # algorithm runtime\r\n    tsne = pd.DataFrame(tsne.embedding_, index=FC_train_feature23_CNN.iloc[:1000, :].index) # convert the embedding to a DataFrame\r\n    d = tsne[r[r'cluster_label'] == 0]\r\n    plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n    d = tsne[r[r'cluster_label'] == 1]\r\n    # plt.plot(d[0],d[1],'go')#o\r\n    plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n    d = tsne[r[r'cluster_label'] == 2]\r\n    # plt.plot(d[0],d[1],'b*')#*\r\n    plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n    d = tsne[r[r'cluster_label'] == 3]\r\n    # plt.plot(d[0], d[1],'y+')#*\r\n    plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n    plt.xticks([])\r\n    plt.yticks([])\r\n    plt.legend(['TYPE1', 'TYPE2', 'TYPE3', 'TYPE4'])\r\n    plt.title(\"1DCNN t-SNE (%.2g sec)\" % (t1 - t0))\r\n    plt.show()\r\n    #\r\n    # # ############################################## Extract LSTM features ##########################################################\r\n    get_feature23_L = K.function([model_23.layers[0].input, model_23.layers[3].input], [model_23.layers[5].output]) # CNN3, LSTM4\r\n    FC_train_feature23_L = get_feature23_L([X_train_2d, X_train_2d_T])[0]\r\n    FC_test_feature23_L = get_feature23_L([X_test_2d, X_test_2d_T])[0]\r\n    FC_train_feature23_L = FC_train_feature23_L.transpose(0, 2, 1)\r\n    for i in range(16):\r\n
        plt.subplot(4, 4, i + 1)\r\n        plt.plot(FC_train_feature23_L[0, :, i])\r\n        plt.xticks([])\r\n        plt.yticks([])\r\n    plt.suptitle('TYPE3')\r\n    plt.show()\r\n\r\n    for i in range(16):\r\n        plt.subplot(4, 4, i + 1)\r\n        plt.plot(FC_train_feature23_L[1, :, i])\r\n        plt.xticks([])\r\n        plt.yticks([])\r\n    plt.suptitle('TYPE2')\r\n    plt.show()\r\n\r\n    for i in range(16):\r\n        plt.subplot(4, 4, i + 1)\r\n        plt.plot(FC_train_feature23_L[2, :, i])\r\n        plt.xticks([])\r\n        plt.yticks([])\r\n    plt.suptitle('TYPE4')\r\n    plt.show()\r\n\r\n    for i in range(16):\r\n        plt.subplot(4, 4, i + 1)\r\n        plt.plot(FC_train_feature23_L[6, :, i])\r\n        plt.xticks([])\r\n        plt.yticks([])\r\n    plt.suptitle('TYPE1')\r\n    plt.show()\r\n    #\r\n    FC_train_feature23_L = np.reshape(FC_train_feature23_L, (-1, 800))\r\n    # SVM / RF / kNN\r\n    # svc(FC_train_feature23_L, y_train, FC_test_feature23_L, y_test)\r\n    # rf(FC_train_feature23_L, y_train, FC_test_feature23_L, y_test)\r\n    # knn(FC_train_feature23_L, y_train, FC_test_feature23_L, y_test)\r\n    # t-SNE on the LSTM-layer features\r\n    y_train = pd.DataFrame(y_train)\r\n    FC_train_feature23_L = pd.DataFrame(FC_train_feature23_L)\r\n    r = pd.concat([FC_train_feature23_L, y_train], axis=1)\r\n    r.columns = list(FC_train_feature23_L.columns) + [r'cluster_label']\r\n    t0 = time()\r\n    tsne = TSNE(n_components=2, init=\"pca\", perplexity=500, random_state=1, learning_rate=50)\r\n    tsne.fit_transform(FC_train_feature23_L.iloc[:1000, :]) # reduce the features to two dimensions\r\n    t1 = time()\r\n    print(\"t-SNE: %.2g sec\" % (t1 - t0)) # algorithm runtime\r\n    tsne = pd.DataFrame(tsne.embedding_, index=FC_train_feature23_L.iloc[:1000, :].index) # convert the embedding to a DataFrame\r\n    d = tsne[r[r'cluster_label'] == 0]\r\n    plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n    d = tsne[r[r'cluster_label'] == 1]\r\n    # plt.plot(d[0],d[1],'go')#o\r\n    plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n    d = tsne[r[r'cluster_label'] == 2]\r\n    # plt.plot(d[0],d[1],'b*')#*\r\n    plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n    d = tsne[r[r'cluster_label'] == 3]\r\n    # plt.plot(d[0], d[1],'y+')#*\r\n    plt.scatter(d[0], d[1], cmap=plt.cm.Spectral) # .\r\n    plt.xticks([])\r\n    plt.yticks([])\r\n    plt.legend(['TYPE1', 'TYPE2', 'TYPE3', 'TYPE4'])\r\n    plt.title(\"LSTM t-SNE (%.2g sec)\" % (t1 - t0))\r\n    plt.show()\r\n    #\r\n    model_02 = model_02()\r\n    lh_2 = LossHistory()\r\n    model_02.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\r\n    model_02.fit(X_train_2d, y_train_hot, batch_size=40, epochs=1, verbose=1, shuffle=True,\r\n                 callbacks=[lh_2]) # TensorBoard(log_dir='mytensorboard')\r\n    score_02, acc_02 = model_02.evaluate(X_test_2d, y_test_hot)\r\n    print('model_02----', 'Test accuracy:', acc_02, 'Score:', score_02)\r\n    print('loss_2', lh_2.losses)\r\n    print('acc_2', lh_2.accuracy)\r\n    lh_2.loss_plot('batch')\r\n    y_pre2 = model_02.predict([X_test_2d])\r\n    y_pre2 = np.argmax(y_pre2, axis=1)\r\n    print('acc', accuracy_score(y_test, y_pre2))\r\n    print('precision_score', metrics.precision_score(y_test, y_pre2, average='macro'))\r\n    print('recall_score', metrics.recall_score(y_test, y_pre2, average='macro'))\r\n    print('f1_score', metrics.f1_score(y_test, y_pre2, average='weighted'))\r\n    # get_feature2_FC = K.function([model_02.layers[0].input], [model_02.layers[4].output]) # CNN3, LSTM4\r\n    # FC_train_feature2_FC = get_feature2_FC([X_train_2d, X_train_2d_T])[0]\r\n    # FC_test_feature2_FC = get_feature2_FC([X_test_2d, X_train_2d_T])[0]\r\n    # svc(FC_train_feature2_FC, y_train, FC_test_feature2_FC, y_test)\r\n    # rf(FC_train_feature2_FC, y_train, FC_test_feature2_FC, 
y_test)\r\n # knn(FC_train_feature2_FC, y_train, FC_test_feature2_FC, y_test)\r\n\r\n model_03 = model_03()\r\n lh_3 = LossHistory()\r\n model_03.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\r\n model_03.fit(X_train_2d_T, y_train_hot, batch_size=40, epochs=1, verbose=1, shuffle=True, callbacks=[lh_3]) # TensorBoard(log_dir='mytensorboard')\r\n score_03, acc_03 = model_03.evaluate(X_test_2d_T, y_test_hot)\r\n print('loss_3', lh_3.losses)\r\n print('acc_3', lh_3.accuracy)\r\n lh_3.loss_plot('batch')\r\n y_pre3 = model_03.predict([X_test_2d_T])\r\n y_pre3 = np.argmax(y_pre3, axis=1)\r\n print('acc', accuracy_score(y_test, y_pre3))\r\n print('precision_score', metrics.precision_score(y_test, y_pre3, average='macro'))\r\n print('recall_score', metrics.recall_score(y_test, y_pre3, average='macro'))\r\n print('f1_score', metrics.f1_score(y_test, y_pre3, average='weighted'))\r\n\r\n print('model_23----', 'Test accuracy:', acc_23, 'Score:', score_23)\r\n print('model_02----', 'Test accuracy:', acc_02, 'Score:', score_02)\r\n print('model_03----', 'Test accuracy:', acc_03, 'Score:', score_03)\r\n\r\n iters_3 = range(len(lh_3.losses['batch']))\r\n iters_2 = range(len(lh_2.losses['batch']))\r\n iters_23 = range(len(lh_23.losses['batch']))\r\n\r\n plt.figure()\r\n plt.plot(iters_3, lh_3.accuracy['batch'], 'r', label='BiLSTM')\r\n plt.plot(iters_2, lh_2.accuracy['batch'], 'g', label='1DCNN')\r\n plt.plot(iters_23, lh_23.accuracy['batch'], 'b', label='1DCNN+BiLSTM')\r\n plt.xlabel('batch')\r\n plt.ylabel('acc')\r\n plt.title('Accuracy')\r\n plt.legend()\r\n plt.show()\r\n\r\n plt.figure()\r\n plt.plot(iters_3, lh_3.losses['batch'], 'r', label='BiLSTM')\r\n plt.plot(iters_2, lh_2.losses['batch'], 'g', label='1DCNN')\r\n plt.plot(iters_23, lh_23.losses['batch'], 'b', label='1DCNN+BiLSTM')\r\n plt.xlabel('batch')\r\n plt.ylabel('loss')\r\n plt.title('Loss')\r\n plt.legend()\r\n plt.show()\r\n","sub_path":"开关电源-仪器仪表学报/CNN+LSTM+特征可视化.py","file_name":"CNN+LSTM+特征可视化.py","file_ext":"py","file_size_in_byte":23211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"23274118","text":"# is_hot = False\n# is_cold = True\n#\n# if is_hot:\n# print(\"it's a hot day\")\n# print('Drink lots of water')\n# elif is_cold:\n# print('it is cold')\n# print('wear warm clothes')\n# else:\n# print('it is a nice day')\n\n# house = 1000000\n# good_credit = False\n#\n# if good_credit:\n# down_payment = 0.1 * house\n# print('You need to put down 10%')\n# else:\n# down_payment = 0.2 * house\n# print('You need to put down 20%')\n# print(f\"Down payment: ${down_payment}\")\n\n# has_high_income = False\n# has_good_credit = True\n#\n# if has_high_income or not has_good_credit:\n# print('You are eligible for a loan')\n\n# temperature = 34\n#\n# if temperature > 30:\n# print(\"it's a hot day\")\n# else:\n# print(\"it's not a hot day\")\n\n# name = input('what is your name? 
')\n#\n# if len(name) < 3:\n# print('name must be at least 3 characters long')\n# elif len(name) > 50:\n# print('name must be a max of 50 characters')\n# else:\n# print('name looks good')\n\n\nweight = int(input('weight: '))\nmeasure = input('(L)bs or (K)g: ')\n\nif measure.upper() == \"L\":\n converted = weight * 0.45\n print(f\"You are {converted} kilos\")\nelse:\n converted = weight / 0.45\n print(f\"You are {converted} pounds\")\n\n\n\n","sub_path":"cond.py","file_name":"cond.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"298735223","text":"from __future__ import division\n\n__all__ = ['oth_mobile_pose_resnet18_v1b', 'oth_mobile_pose_resnet50_v1b', 'oth_mobile_pose_mobilenet1_0',\n 'oth_mobile_pose_mobilenetv2_1_0', 'oth_mobile_pose_mobilenetv3_small', 'oth_mobile_pose_mobilenetv3_large']\n\nimport numpy as np\nimport mxnet as mx\nfrom mxnet import initializer\nfrom mxnet.gluon import nn\nfrom mxnet.gluon import contrib\nfrom mxnet.gluon.block import HybridBlock\nfrom mxnet.context import cpu\nimport gluoncv as gcv\n\n\ndef get_max_pred(batch_heatmaps):\n batch_size = batch_heatmaps.shape[0]\n num_joints = batch_heatmaps.shape[1]\n width = batch_heatmaps.shape[3]\n heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))\n idx = mx.nd.argmax(heatmaps_reshaped, 2)\n maxvals = mx.nd.max(heatmaps_reshaped, 2)\n\n maxvals = maxvals.reshape((batch_size, num_joints, 1))\n idx = idx.reshape((batch_size, num_joints, 1))\n\n preds = mx.nd.tile(idx, (1, 1, 2)).astype(np.float32)\n\n preds[:, :, 0] = (preds[:, :, 0]) % width\n preds[:, :, 1] = mx.nd.floor((preds[:, :, 1]) / width)\n\n pred_mask = mx.nd.tile(mx.nd.greater(maxvals, 0.0), (1, 1, 2))\n pred_mask = pred_mask.astype(np.float32)\n\n preds *= pred_mask\n return preds, maxvals\n\n\ndef _get_final_preds(batch_heatmaps):\n coords, maxvals = get_max_pred(batch_heatmaps)\n\n heatmap_height = batch_heatmaps.shape[2]\n heatmap_width = batch_heatmaps.shape[3]\n\n # post-processing\n for n in range(coords.shape[0]):\n for p in range(coords.shape[1]):\n hm = batch_heatmaps[n][p]\n px = int(mx.nd.floor(coords[n][p][0] + 0.5).asscalar())\n py = int(mx.nd.floor(coords[n][p][1] + 0.5).asscalar())\n if 1 < px < heatmap_width-1 and 1 < py < heatmap_height-1:\n diff = mx.nd.concat(hm[py][px+1] - hm[py][px-1],\n hm[py+1][px] - hm[py-1][px],\n dim=0)\n coords[n][p] += mx.nd.sign(diff) * .25\n\n return coords, maxvals\n\n\nclass DUC(HybridBlock):\n '''Upsampling layer with pixel shuffle\n '''\n def __init__(self,\n planes,\n upscale_factor=2,\n **kwargs):\n super(DUC, self).__init__(**kwargs)\n self.conv = nn.Conv2D(planes, kernel_size=3, padding=1, use_bias=False)\n self.bn = gcv.nn.BatchNormCudnnOff(gamma_initializer=initializer.One(),\n beta_initializer=initializer.Zero())\n self.relu = nn.Activation('relu')\n self.pixel_shuffle = contrib.nn.PixelShuffle2D(upscale_factor)\n\n def hybrid_forward(self, F, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n x = self.pixel_shuffle(x)\n return x\n\n\nclass MobilePose(HybridBlock):\n \"\"\"Pose Estimation for Mobile Device\"\"\"\n def __init__(self,\n base_name,\n base_attrs=('features',),\n num_joints=17,\n fixed_size=True,\n pretrained_base=False,\n pretrained_ctx=cpu(),\n in_channels=3,\n in_size=(256, 192),\n **kwargs):\n super(MobilePose, self).__init__(**kwargs)\n assert (in_channels == 3)\n self.in_size = in_size\n\n with self.name_scope():\n from gluoncv.model_zoo import 
get_model\n base_model = get_model(base_name, pretrained=pretrained_base,\n ctx=pretrained_ctx)\n self.features = nn.HybridSequential()\n if base_name.startswith('mobilenetv2'):\n self.features.add(base_model.features[:-1])\n elif base_name.startswith('mobilenetv3'):\n self.features.add(base_model.features[:-4])\n elif base_name.startswith('mobilenet'):\n self.features.add(base_model.features[:-2])\n else:\n for layer in base_attrs:\n self.features.add(getattr(base_model, layer))\n\n self.upsampling = nn.HybridSequential()\n self.upsampling.add(\n nn.Conv2D(256, 1, 1, 0, use_bias=False),\n DUC(512, 2),\n DUC(256, 2),\n DUC(128, 2),\n nn.Conv2D(num_joints, 1, use_bias=False,\n weight_initializer=initializer.Normal(0.001)),\n )\n\n def hybrid_forward(self, F, x):\n x = self.features(x)\n x = self.upsampling(x)\n\n batch_heatmaps = x.as_in_context(mx.cpu())\n y, maxvals = _get_final_preds(batch_heatmaps=batch_heatmaps)\n keypoints = F.concat(y, maxvals, dim=2)\n return keypoints\n\n\ndef get_mobile_pose(base_name, ctx=cpu(), pretrained=False,\n root='~/.mxnet/models', **kwargs):\n net = MobilePose(base_name, **kwargs)\n\n if pretrained:\n from gluoncv.model_zoo.model_store import get_model_file\n net.load_parameters(get_model_file('mobile_pose_%s'%(base_name),\n tag=pretrained, root=root), ctx=ctx)\n\n return net\n\n\ndef oth_mobile_pose_resnet18_v1b(pretrained=False, **kwargs):\n return get_mobile_pose('resnet18_v1b', base_attrs=['conv1', 'bn1', 'relu', 'maxpool',\n 'layer1', 'layer2', 'layer3', 'layer4'],\n pretrained=pretrained,\n **kwargs)\n\n\ndef oth_mobile_pose_resnet50_v1b(pretrained=False, **kwargs):\n return get_mobile_pose('resnet50_v1b', base_attrs=['conv1', 'bn1', 'relu', 'maxpool',\n 'layer1', 'layer2', 'layer3', 'layer4'],\n pretrained=pretrained,\n **kwargs)\n\n\ndef oth_mobile_pose_mobilenet1_0(pretrained=False, **kwargs):\n return get_mobile_pose('mobilenet1.0', base_attrs=['features'], pretrained=pretrained, **kwargs)\n\n\ndef oth_mobile_pose_mobilenetv2_1_0(pretrained=False, **kwargs):\n return get_mobile_pose('mobilenetv2_1.0', base_attrs=['features'], pretrained=pretrained, **kwargs)\n\n\ndef oth_mobile_pose_mobilenetv3_small(pretrained=False, **kwargs):\n return get_mobile_pose('mobilenetv3_small', base_attrs=['features'], pretrained=pretrained, **kwargs)\n\n\ndef oth_mobile_pose_mobilenetv3_large(pretrained=False, **kwargs):\n return get_mobile_pose('mobilenetv3_large', base_attrs=['features'], pretrained=pretrained, **kwargs)\n\n\ndef _test():\n import numpy as np\n import mxnet as mx\n\n pretrained = False\n\n models = [\n # oth_mobile_pose_resnet18_v1b,\n # oth_mobile_pose_resnet50_v1b,\n # oth_mobile_pose_mobilenet1_0,\n oth_mobile_pose_mobilenetv2_1_0,\n oth_mobile_pose_mobilenetv3_small,\n oth_mobile_pose_mobilenetv3_large,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n ctx = mx.cpu()\n if not pretrained:\n net.initialize(ctx=ctx)\n\n x = mx.nd.zeros((1, 3, 256, 192), ctx=ctx)\n y = net(x)\n # assert (y.shape == (1, 17, 64, 48))\n\n # net.hybridize()\n net_params = net.collect_params()\n weight_count = 0\n for param in net_params.values():\n if (param.shape is None) or (not param._differentiable):\n continue\n weight_count += np.prod(param.shape)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != oth_mobile_pose_resnet18_v1b or weight_count == 12858208)\n assert (model != oth_mobile_pose_resnet50_v1b or weight_count == 25582944)\n assert (model != oth_mobile_pose_mobilenet1_0 or weight_count == 5019744)\n assert 
(model != oth_mobile_pose_mobilenetv2_1_0 or weight_count == 4102176)\n        assert (model != oth_mobile_pose_mobilenetv3_small or weight_count == 2625088)\n        assert (model != oth_mobile_pose_mobilenetv3_large or weight_count == 4768336)\n\n\nif __name__ == \"__main__\":\n    _test()\n","sub_path":"gluon/gluoncv2/models/others/oth_mobile_pose.py","file_name":"oth_mobile_pose.py","file_ext":"py","file_size_in_byte":7971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"307367628","text":"import webbrowser\n\n\nclass Movie():\n    \"\"\"This class will help you make your own movie objects\n    with a couple of different attributes such as\n    title and storyline, and it will show you the trailer of the chosen movie\"\"\"\n\n    def __init__(self, movie_title, movie_storyline, movie_image,\n                 movie_trailer):\n        self.title = movie_title\n        self.storyline = movie_storyline\n        self.poster_image_url = movie_image\n        self.trailer_youtube_url = movie_trailer\n\n    def show_trailer(self):\n        webbrowser.open(self.trailer_youtube_url)\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"402426271","text":"from usbiss.spi import SPI\nimport time\nimport matplotlib.pyplot as plt\n\nport = 'COM60'\nspi = SPI(port)\nspi.mode = 1\nhz = 300000\nspi.max_speed_hz = hz\n\n#spi.open()\n\nsenddata = [0x03]\ny = []\ntry:\n    while True:\n        x = spi.xfer2(senddata)\n        print(x)\n        y.append(x)\n        plt.scatter(len(y),x)\n        time.sleep(1)\n        plt.draw()\nexcept KeyboardInterrupt:\n    print('interrupted!')\n    spi.close()\n","sub_path":"openComsV1-DESKTOP-81H4K5L.py","file_name":"openComsV1-DESKTOP-81H4K5L.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"185537432","text":"# coding: utf-8\n\n# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\n\"\"\"\nFILE: sample_helpers.py\nDESCRIPTION:\n    Helper functions used for the azure attestation samples.\n\n\"\"\"\nimport datetime\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography import x509\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.x509 import BasicConstraints, CertificateBuilder, NameOID, SubjectAlternativeName\nfrom cryptography.hazmat.primitives import hashes, serialization\nimport os\n\ndef write_banner(banner):\n    #type:(str) -> None\n    \"\"\"\n    Write a banner which can be used to visually separate the output of the samples.\n    \"\"\"\n    separator = '*'*80\n    print(\"\\n\")\n    print(separator)\n    print(\"        {}\".format(banner))\n    print(separator)\n\ndef create_rsa_key(): #type:() -> bytes\n    \"\"\"\n    Create an RSA asymmetric 2048 bit key and return it as DER-encoded PKCS8 bytes.\n    \"\"\"\n    return rsa.generate_private_key(65537, 2048, backend=default_backend()).private_bytes(\n        serialization.Encoding.DER,\n        serialization.PrivateFormat.PKCS8,\n        serialization.NoEncryption())\n\ndef create_x509_certificate(key_der, subject_name): #type:(bytes, str) -> bytes\n    \"\"\"\n    Given a DER-encoded RSA or ECDSA private key, create a self-signed X.509\n    certificate with the specified subject name signed with that key, and\n    return it as DER bytes.\n    \"\"\"\n    signing_key = serialization.load_der_private_key(key_der, password=None, backend=default_backend())\n    builder = CertificateBuilder()\n    builder = builder.subject_name(x509.Name([\n        x509.NameAttribute(NameOID.COMMON_NAME, subject_name),\n    ]))\n    builder = builder.issuer_name(x509.Name([\n        x509.NameAttribute(NameOID.COMMON_NAME, subject_name),\n    ]))\n\n    one_day = datetime.timedelta(1, 0, 0)\n    builder = builder.not_valid_before(datetime.datetime.today() - one_day)\n    builder = builder.not_valid_after(datetime.datetime.today() + (one_day * 30))\n    builder = builder.serial_number(x509.random_serial_number())\n    builder = builder.public_key(signing_key.public_key())\n    builder = builder.add_extension(SubjectAlternativeName([x509.DNSName(subject_name)]), critical=False)\n    builder = builder.add_extension(BasicConstraints(ca=False, path_length=None), critical=True)\n    return builder.sign(private_key=signing_key, algorithm=hashes.SHA256(), backend=default_backend()).public_bytes(serialization.Encoding.DER)\n\ndef create_client_credentials():\n    #type:() -> 'azure.identity.ClientSecretCredentials'\n\n    tenant_id = os.getenv(\"ATTESTATION_TENANT_ID\")\n    client_id = os.getenv(\"ATTESTATION_CLIENT_ID\")\n    secret = os.getenv(\"ATTESTATION_CLIENT_SECRET\")\n\n    if not tenant_id or not client_id or not secret:\n        raise Exception(\"Must provide client credentials.\")\n\n    # Create azure-identity class\n    from azure.identity import ClientSecretCredential\n\n    return ClientSecretCredential(\n        tenant_id=tenant_id,\n        client_id=client_id,\n        client_secret=secret)\n\ndef create_client_credentials_async():\n    #type:() -> 'azure.identity.aio.ClientSecretCredentials'\n\n    tenant_id = os.getenv(\"ATTESTATION_TENANT_ID\")\n    client_id = os.getenv(\"ATTESTATION_CLIENT_ID\")\n    secret = os.getenv(\"ATTESTATION_CLIENT_SECRET\")\n\n    if not tenant_id or not client_id or not secret:\n        raise Exception(\"Must provide client credentials.\")\n\n    # Create azure-identity class\n    from azure.identity.aio import ClientSecretCredential\n\n    return ClientSecretCredential(\n        tenant_id=tenant_id,\n        client_id=client_id,\n
        client_secret=secret)\n","sub_path":"sdk/attestation/azure-security-attestation/samples/sample_utils.py","file_name":"sample_utils.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"212661040","text":"\r\nimport sys\r\nimport webapp2\r\nfrom google.appengine.ext.webapp import template\r\nfrom models.historyDB import History\r\nfrom models.user import User\r\nimport logging\r\n\r\nclass HistoryHandler(webapp2.RequestHandler):\r\n    def get(self):\r\n        template_params = {}\r\n\r\n        user = None\r\n        if self.request.cookies.get('our_token'): # the cookie that should contain the access token!\r\n            user = User.check_token(self.request.cookies.get('our_token'))\r\n\r\n        if user:\r\n            template_params['user'] = user.username\r\n\r\n        if not user:\r\n            template_params['noaccess'] = 'PURCHASE HISTORY'\r\n            html = template.render(\"web/templates/home.html\", template_params)\r\n            self.response.write(html)\r\n\r\n        else:\r\n            template_params['histories'] = []\r\n            histories = History.getHistory()\r\n\r\n            for h in histories:\r\n                if h.username != user.username:\r\n                    continue\r\n                if h.avatar:\r\n                    template_params['histories'].append({\r\n                        \"key_urlsafe\": h.key.urlsafe(),\r\n                        \"key\": h.key.id(),\r\n                        \"date\": h.date,\r\n                        \"symbol\": h.symbol,\r\n                        \"enterprice\": h.enterprice,\r\n                        \"stoplose\": h.stoplose,\r\n                        \"takeprofit\": h.takeprofit,\r\n                        \"profitorloss\": h.profitorloss,\r\n                        \"volume\": h.volume,\r\n                        \"lstype\": h.lstype,\r\n                        \"remarks\": h.remarks,\r\n                        \"img_src_url\": \"/handlerImage?img_id=\"+h.key.urlsafe()\r\n                    })\r\n                else:\r\n                    template_params['histories'].append({\r\n                        \"key_urlsafe\": h.key.urlsafe(),\r\n                        \"key\": h.key.id(),\r\n                        \"date\": h.date,\r\n                        \"symbol\": h.symbol,\r\n                        \"enterprice\": h.enterprice,\r\n                        \"stoplose\": h.stoplose,\r\n                        \"takeprofit\": h.takeprofit,\r\n                        \"profitorloss\": h.profitorloss,\r\n                        \"volume\": h.volume,\r\n                        \"lstype\": h.lstype,\r\n                        \"remarks\": h.remarks,\r\n                        \"img_src_url\": \"../static/images/no_image.png\"\r\n                    })\r\n\r\n            html = template.render(\"web/templates/history.html\", template_params)\r\n            self.response.write(html)\r\n\r\n\r\n    def post(self):\r\n        user = None\r\n        if self.request.cookies.get('our_token'): # the cookie that should contain the access token!\r\n            user = User.check_token(self.request.cookies.get('our_token'))\r\n\r\n        symboldb = self.request.get('symbol')\r\n        enterPricedb = self.request.get('enterPrice')\r\n        stopLosedb = self.request.get('stopLose')\r\n        takeProfitdb = self.request.get('takeProfit')\r\n        profitorlossdb = self.request.get('profitOrLoss')\r\n        volumedb = self.request.get('volume')\r\n        lstypedb = self.request.get('type')\r\n        dateDB = self.request.get('date')\r\n        remarksdb = self.request.get('remarks')\r\n        imageDB = self.request.get('img')\r\n        userdb = user.username\r\n\r\n        sizeimage = sys.getsizeof(imageDB)\r\n        logging.info(sizeimage)\r\n        if sizeimage <= 1000000: # only accept images smaller than 1000000 bytes\r\n            try:\r\n                history = History(symbol=symboldb ,enterprice=enterPricedb,stoplose = stopLosedb,takeprofit=takeProfitdb\r\n                                  ,profitorloss = profitorlossdb,volume=volumedb, date=dateDB ,remarks = remarksdb,\r\n                                  lstype = lstypedb, username = userdb, avatar=imageDB)\r\n            except:\r\n                history = History(symbol=symboldb ,enterprice=enterPricedb,stoplose = stopLosedb,takeprofit=takeProfitdb\r\n                                  ,profitorloss = profitorlossdb,volume=volumedb, date=dateDB ,remarks = remarksdb,\r\n                                  lstype = lstypedb, username = userdb)\r\n            history.put()\r\n            self.redirect(\"/history\")\r\n
        else:\r\n            self.redirect(\"/error_page\")\r\n\r\n\r\n\r\n\r\n\r\napp = webapp2.WSGIApplication([\r\n    ('/history', HistoryHandler)\r\n], debug=True)","sub_path":"web/pages/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"301823071","text":"\"\"\"\nAn extension of the standard Trac CommitTicketUpdater that allows configuring\na separate ticket_prefix regex. This is helpful in situations where the default\nticket reference rules are not applicable, such as when using GitHub, which\nclaims #[0-9]+.\n\"\"\"\n\nimport re\n\nfrom genshi.builder import tag\n\nfrom trac.config import Option\nfrom trac.resource import Resource\nfrom trac.versioncontrol import RepositoryManager\nfrom trac.versioncontrol.web_ui.changeset import ChangesetModule\nfrom trac.wiki.formatter import format_to_html\nfrom tracopt.ticket.commit_updater import CommitTicketUpdater, CommitTicketReferenceMacro\n\nclass ConfigurableCommitTicketUpdater(CommitTicketUpdater):\n    \"\"\"\n    An extension of Trac's CommitTicketUpdater that listens for new commits\n    referencing tickets using a number of keywords and a configurable ticket\n    prefix.\n    \"\"\"\n\n    ticket_prefix = Option(\n        'ticket', 'commit_ticket_update_ticket_prefix',\n        CommitTicketUpdater.ticket_prefix,\n        \"\"\"Regular expression matching (but not capturing) the prefix that\n        CommitTicketUpdater should look for in commit messages.\"\"\")\n\n    @property\n    def ticket_reference(self):\n        \"\"\"\n        Return a regular expression that will match ticket references.\n        \"\"\"\n        return self.ticket_prefix + '[0-9]+'\n\n    @property\n    def ticket_command(self):\n        \"\"\"\n        Return a regular expression that will match ticket commands in the form\n        of <action> <ticket>.\n        \"\"\"\n        return (r'(?P<action>[A-Za-z]*)\\s*.?\\s*'\n                r'(?P<ticket>%s(?:(?:[, &]*|[ ]?and[ ]?)%s)*)' %\n                (self.ticket_reference, self.ticket_reference))\n\n    @property\n    def ticket_re(self):\n        \"\"\"\n        Return a compiled regular expression that will match tickets. The first\n        match group will contain the ticket number.\n        \"\"\"\n        return re.compile(self.ticket_prefix + '([0-9]+)')\n\n    def make_ticket_comment(self, repos, changeset):\n        \"\"\"Create the ticket comment from the changeset data.\"\"\"\n        rev = changeset.rev\n        revstring = str(rev)\n        drev = str(repos.display_rev(rev))\n        if repos.reponame:\n            revstring += '/' + repos.reponame\n            drev += '/' + repos.reponame\n        return \"\"\"\\\nIn [changeset:\"%s\" %s]:\n{{{\n#!ConfigurableCommitTicketReference repository=\"%s\" revision=\"%s\"\n%s\n}}}\"\"\" % (revstring, drev, repos.reponame, rev, changeset.message.strip())\n\nclass ConfigurableCommitTicketReferenceMacro(CommitTicketReferenceMacro):\n    \"\"\"\n    An extension of Trac CommitTicketUpdater's CommitTicketReferenceMacro that\n    does not search for occurrences of the ticket reference in a referenced\n    commit's message. 
This avoids the dependency on CommitTicketUpdater.\n \"\"\"\n\n # pylint: disable=abstract-method\n\n def expand_macro(self, formatter, name, content, args=None):\n # pylint: disable=too-many-function-args\n args = args or {}\n reponame = args.get('repository') or ''\n rev = args.get('revision')\n # pylint: disable=no-member\n repos = RepositoryManager(self.env).get_repository(reponame)\n try:\n changeset = repos.get_changeset(rev)\n message = changeset.message\n rev = changeset.rev\n resource = repos.resource\n except Exception: # pylint: disable=broad-except\n message = content\n resource = Resource('repository', reponame)\n if ChangesetModule(self.env).wiki_format_messages:\n message = '\\n'.join(map(lambda line: \"> \" + line, message.split('\\n')))\n return tag.div(format_to_html(\n self.env,\n formatter.context.child('changeset', rev, parent=resource),\n message, escape_newlines=True), class_='message')\n else:\n return tag.pre(message, class_='message')\n","sub_path":"trac_configurable_ctu/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"430063726","text":"from __future__ import print_function, absolute_import, unicode_literals\n\n__all__ = [\"create_app\"]\n\nimport flask\nimport flask.ext.login as login_ext\nfrom flaskext.babel import Babel\n\nimport re\nimport os\nimport logging\n\nimport redis\nimport pymongo\n\nfrom wtf.views.foursquare import api\nfrom wtf.login import create_login, login_handler, logout_handler\nfrom wtf.error_handlers import TLSSMTPHandler\nfrom wtf.models import User, Proposal\n\n\nbabel = Babel()\n\n\ndef get_locale():\n if re.search(r\"//polite\\.\", flask.request.url) is not None:\n return \"en_GB\"\n return flask.session.get(\"locale\", \"en_US\")\n\n\ndef javascript_view():\n return flask.render_template(\"wtf.js\")\n\n\ndef index_view():\n user = login_ext.current_user\n if not user.is_authenticated():\n user = None\n proposal = None\n else:\n proposal = user.find_recent()\n return flask.render_template(\"index.html\",\n google_api_key=flask.current_app.config[\"GOOGLE_WEB_KEY\"],\n user=user, proposal=proposal)\n\n\ndef about_view():\n user = login_ext.current_user\n if not user.is_authenticated():\n user = None\n return flask.render_template(\"about.html\")\n\n\ndef share_view(short_url):\n prop = Proposal.c().find_one({\"short_url\": short_url})\n if prop is not None:\n return flask.render_template(\"share.html\", proposal=Proposal(prop))\n return flask.redirect(flask.url_for(\"index\"))\n\n\ndef before_request():\n uri = os.environ.get(\"MONGOLAB_URI\", \"mongodb://localhost/wtflunch\")\n flask.g.dbc = pymongo.Connection(host=uri)\n dbname = pymongo.uri_parser.parse_uri(uri).get(\"database\", \"wtflunch\")\n flask.g.db = flask.g.dbc[dbname]\n\n # Indexing.\n c = User.c()\n c.ensure_index(\"token\")\n c.ensure_index(\"open_id\")\n\n c = Proposal.c()\n c.ensure_index(\"accepted\")\n c.ensure_index(\"date\")\n c.ensure_index(\"user_id\")\n c.ensure_index(\"short_url\")\n\n # Redis database.\n flask.g.redis = redis.StrictRedis.from_url(\n os.environ.get(\"REDISTOGO_URL\", \"redis://localhost:6379\"))\n\n\ndef teardown_request(exception):\n flask.g.dbc.close()\n\n\ndef create_app():\n app = flask.Flask(__name__)\n app.config.from_object(\"wtf.config_defaults.WTFConfig\")\n babel.init_app(app)\n babel.localeselector(get_locale)\n print(babel.list_translations())\n\n # Add the blueprint(s).\n app.register_blueprint(api, 
url_prefix=\"/api\")\n\n # Attach routes.\n app.add_url_rule(\"/\", \"index\", index_view)\n app.add_url_rule(\"/wtf.js\", \"javascript\", javascript_view)\n app.add_url_rule(\"/about\", \"about\", about_view)\n app.add_url_rule(\"/login\", \"login\", login_handler)\n app.add_url_rule(\"/logout\", \"logout\", logout_handler)\n\n # Share urls.\n app.add_url_rule(\"/share/\", \"share\", share_view)\n app.add_url_rule(\"/\", \"share\", share_view)\n\n # Pre- and post-request hooks.\n app.before_request(before_request)\n app.teardown_request(teardown_request)\n\n # Set up logins.\n oid, login_manager = create_login()\n oid.init_app(app)\n login_manager.setup_app(app)\n\n # Set up email logging.\n mail_handler = TLSSMTPHandler((\"smtp.gmail.com\", 587),\n \"Lunch Robot \",\n app.config[\"ADMIN_EMAILS\"],\n \"WTF Failed\")\n mail_handler.setLevel(logging.ERROR)\n app.logger.addHandler(mail_handler)\n\n return app\n","sub_path":"wtf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"280477324","text":"from math import sqrt, pow, ceil, log\n\n\ndef EucDist(x1, x2, y1, y2):\n \"\"\"\n Distance Function: Euclidean\n\n This is already implemented by scipy.spatial.distance.euclidean(u, v)[source]\n http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.euclidean.html\n \"\"\"\n return sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2))\n\n\ndef compute_level(l1, l2, distance):\n \"\"\"\n\n :param l1:\n :param l2:\n :param distance:\n :return:\n \"\"\"\n return ceil(log(((l1 + l2) / (2 * distance)), 2) + 1)\n\n\ndef calculate_distance(voxel_node1, voxel_node2):\n point_of_voxel_node1 = [[voxel_node1.x_left, voxel_node1.y_left],\n [voxel_node1.x_right, voxel_node1.y_left],\n [voxel_node1.x_right, voxel_node1.y_right],\n [voxel_node1.x_left, voxel_node1.y_right]]\n\n point_of_voxel_node2 = [[voxel_node2.x_left, voxel_node2.y_left],\n [voxel_node2.x_right, voxel_node2.y_left],\n [voxel_node2.x_right, voxel_node2.y_right],\n [voxel_node2.x_left, voxel_node2.y_right]]\n\n min_distance = float(\"inf\")\n max_distance = float(\"-inf\")\n for data_node1 in point_of_voxel_node1:\n for data_node2 in point_of_voxel_node2:\n calculated_distance = EucDist(data_node1[0], data_node2[0], data_node1[1], data_node2[1])\n if calculated_distance < min_distance:\n min_distance = calculated_distance\n if calculated_distance > max_distance:\n max_distance = calculated_distance\n\n return min_distance, max_distance\n","sub_path":"quadtree/QuadtreeTime/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"198796348","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport polarTools as pt\nimport pickle\nimport sys\n\nprint(\"Welcome to the ORNs !!!\")\n\n# Select the Odorant, Odor Delivery Protocol, Locust Model\nodor_path = sys.argv[1]\nprotocol_path = sys.argv[2]\nlocust_path = sys.argv[3]\n\n# Load the Odorant, Odor Delivery Protocol, Locust Model\nwith open(odor_path, 'rb') as fp:\n odor = pickle.load(fp)\nwith open(protocol_path, 'rb') as fp:\n protocol = pickle.load(fp)\nwith open(locust_path, 'rb') as fp:\n locust = pickle.load(fp)\n\n# Define ORN Response Generator\ndef generate_orn(orn_number,duration,resolution,odorVec,odorStart,odorEnd): # Function to generate single ORN Trace\n seeds = int(locust['rec_seeds'][orn_number]+12)\n 
np.random.seed(seeds)\n baseline = np.clip(locust['baseline_firing']+locust['baseline_firing_variation']*np.random.normal(),1,None)/locust['peak_firing'] # Baseline Firing Rate Ratio\n trace = baseline*np.ones(int(duration/resolution)) # Set Baseline activity for the Protocol Duration\n np.random.seed()\n rec_field = pt.generateUniform(1,odor['dim_odorspace'],seed=int(locust['rec_seeds'][orn_number])) # Receptive Field of ORNs in Odor Space\n np.random.seed()\n latency = locust['latency'][orn_number] # Latency of Response to Odor Presentation\n t_rise = locust['t_rise'][orn_number] # Time to Rise to Peak\n t_fall = locust['t_fall'][orn_number] # Response Decay Time\n tuning = locust['tuning'][orn_number]/2 # Odor Tuning-width / Sensitivity\n \n def sigmoid(x,a1=locust['a1'],a2=locust['a2']):\t# Sigmoid for Response\n return 1/(1+np.exp(-a1*(x-a2)))\n\n def tanc(x, a=0.06083939,b=0.16323569,c=1.73986923,d=0.34085669):\n return a+b*np.tan(c*x-d)\n \n odorMag = np.linalg.norm(odorVec) # Odor Concentration\n cosSim = np.dot(odorVec,rec_field)/(np.linalg.norm(odorVec)*np.linalg.norm(rec_field)) # Cosine Similarity wrt Odor\n\n if np.arccos(cosSim) < np.deg2rad(121):#locust['inh_threshold']):\t# Minimum Response Threshhold\n res_strength = (1-baseline)*tanc(odorMag*np.cos(np.arccos(cosSim)/2)**tuning)\n else:\n res_strength = -baseline*np.linalg.norm(odorVec)\n \n if locust['f_sharp'][orn_number]:\n # Generate Sharp Trace\n rise = np.arange(0,t_rise/2,resolution)\n rise = baseline+res_strength*2*np.exp(1)/t_rise*rise*np.exp(-2*rise/t_rise)\n riseStartIndex = int((odorStart+latency)/resolution)\n riseEndIndex = riseStartIndex+rise.shape[0]\n trace[riseStartIndex:riseEndIndex] = rise\n peak = rise[-1]\n fall = np.linspace(0,duration-riseEndIndex*resolution,trace.shape[0]-riseEndIndex)\n fall = (peak-baseline)*np.exp(-fall/t_fall)+baseline\n fallStartIndex = riseEndIndex\n trace[fallStartIndex:] = fall \n else:\n # Generate Broad Trace\n rise = np.arange(0,t_rise,resolution)\n rise = baseline+res_strength*np.exp(1)/t_rise*rise*np.exp(-rise/t_rise)\n riseStartIndex = int((odorStart+latency)/resolution)\n riseEndIndex = int((odorStart+latency)/resolution)+rise.shape[0]\n trace[riseStartIndex:riseEndIndex] = rise\n peak_1 = rise[-1]\n adaptation_rate = locust['adaptation_extent'][orn_number] # Amplitude of Adaptation-related Decay\n t_adaptation = locust['t_adaptation'][orn_number] # Odor Adaptation Time\n adaptation = np.arange(0,(int(odorEnd/resolution)-riseEndIndex)*resolution,resolution)\n adaptation = (peak_1-(adaptation_rate*res_strength+baseline))*np.exp(-adaptation/t_adaptation)+(adaptation_rate*res_strength+baseline)\n adaptationStartIndex = riseEndIndex\n adaptationEndIndex = adaptationStartIndex+adaptation.shape[0]\n trace[adaptationStartIndex:adaptationEndIndex] = adaptation\n peak_2 = adaptation[-1]\n fall = np.arange(0,(trace.shape[0]-adaptationEndIndex)*resolution,resolution)\n fall = (peak_2-baseline)*np.exp(-fall/t_fall) + baseline\n fallStartIndex = adaptationEndIndex\n trace[fallStartIndex:] = fall\n \n trace = trace*locust['peak_firing'] # Scale to Peak Firing Rate\n \n return trace\n\n# Generate Odor Response\nnp.random.seed()\n\nprint(\"Generating ORN Responses...\")\n\norns = []\nfor i in range(locust['ORN_types']): # Generate ORN types\n orns.append(generate_orn(i,protocol['duration'],protocol['resolution'],odor['odor_vector'],protocol['odor_start'],protocol['odor_start']+protocol['odor_duration']))\n print('{}/{} ORN Types Completed'.format(i+1,locust['ORN_types']), end = 
'\\r')\n\norns = np.array(orns*locust['ORN_replicates'])\n\nprint(\"Generation Complete.\")\n\n# Save ORN Data\nnp.save(sys.argv[4]+'/ORN Firing Data',orns[:,::100])\n\nnp.random.seed()\n\ninit_theta = np.random.uniform(size=orns.shape[0])\nrandom_normal = np.random.normal(size=orns.shape)\n\ndef spike_generator(fr,resolution,init_theta=init_theta,random_normal=random_normal):\n    spike = np.zeros(fr.shape)\n    theta = init_theta\n    for i in range(fr.shape[1]):\n        dtheta = resolution/1000*fr[:,i]\n        theta = theta + dtheta + 0.005*random_normal[:,i]\n        spike[:,i] = theta>1\n        theta = np.where(theta>1,np.zeros(theta.shape[0]),theta)\n        if i%int(1000/resolution)==0:\n            print('ORN Spiking {}/{} ms Completed'.format(int(i*resolution),int(fr.shape[1]*resolution)), end = '\\r')\n    return spike\n\norns_spike = spike_generator(orns,0.01)\nprint()\n\n# Generate Antennal Output\n\nprint(\"Generating Antennal Input...\")\n\nORN_Output_s = np.matmul(orns_spike.T,locust['ORN-AL']).T\n\np_n = int(0.75*locust['AL_n'])\n\nORN_Output_current = np.zeros(ORN_Output_s.shape)\nfor i in range(ORN_Output_s.shape[0]):\n    cfilter = 0.5*np.ones(30)\n    ORN_Output_current[i,:] = np.convolve(ORN_Output_s[i,:], cfilter,'same')\n    print('{}/{} Acetylcholine Concentration Integration Completed'.format(i+1,locust['AL_n']), end = '\\r')\nprint()\n\nep=0.01\na = 10.0\nb = 0.2\n\ndef f(o,t):\n    do = a*(1.0-o)*ORN_Output_current[:,int(t/ep)]/np.array([50]*90+[700]*30) - b*o\n    return do\n\ntime = np.arange(ORN_Output_current.shape[1])*ep\nX = np.zeros(ORN_Output_current.shape)\n\nX[:,0]= 0\n\nfor i in range(1,time.shape[0]):\n    X[:,i] = X[:,i-1] + ep*f(X[:,i-1],time[i-1])\n    if i%int(100/ep) == 0:\n        print('{}s/{}s Acetylcholine Receptor Integration Completed'.format(i*ep,time.shape[0]*ep), end = '\\r')\nprint()\n\nprint(\"Generation Complete\")\n\n# Save Current Input\nnp.save(sys.argv[4]+'/current_input',X)\n\nprint(\"'Information has been transferred to the Antennal Lobe. Thank you for using our services.' - ORNs\")\n","sub_path":"receptorLayer.py","file_name":"receptorLayer.py","file_ext":"py","file_size_in_byte":6613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"145194201","text":"import numpy as np\nimport networkx as nx\nimport cmath\n\nfrom math import sqrt\nfrom scipy import stats\nfrom scipy import signal\nfrom scipy.signal import freqz, group_delay\nfrom matplotlib import pyplot as plt\nfrom matplotlib import patches\n\n# IN THIS FILE\n\n# -------RANDOM VAR MODELS---------\n# iidG_ER(n, p, q, r)\n\n# -------GENERATION UTILITIES------\n# random_arma(p, q, k, z_radius, p_radius)\n# random_matrix(n, d)\n# block_companion(B):\n\n# -------PLOTTING METHODS----------\n# plot_matrix_ev(M, ax, mrkr = \"rx\"):\n# plot_filter_pz(b, a)\n# plot_filter(b, a, dB, plot_gd)\n# plot_filter_PSD(b, a, dB, title)\n# axis_filter_PSD(b, a, title, label, linewidth)\n# add_PSD_plot(b, a, ax, label, color, linestyle, linewidth)\n# power_transfer(w, P, sys_ba)\n\n# -------RANDOM VAR MODELS-----------\n# Generally, these VAR models are not guaranteed to be stable\n# but, we can look at the approximate distribution of evs and\n# guess the right parameters for the model to be \"probably\" stable.\n\ndef iidG_ER(n, p, q, r = 0.65):\n    \"\"\"\n    Form p matrices of iid Gaussians, multiply them all by the same\n    random Bernoulli matrix having parameter q to create an Erdos-Renyi\n    random graph. 
The Gaussian matrices are normalized by (pi/2)**.25 *\n    sqrt(r / (n - 1)) which makes r the expected Gershgorin circle\n    radius. We also return the block companion matrix\n    \"\"\"\n    dN = lambda n : np.random.normal(0, 1, size = (n, n))\n    dB = lambda n : np.random.binomial(n = 1, p = q, size = (n, n))\n    G = random_matrix(n, dB) # graph structure\n    # Normalizer\n    k = ((np.pi / 2)**.25) * np.sqrt(float(r) / (n - 1)) / float(sqrt(q)*p)\n    B = [k*random_matrix(n, dN)*G.T for i in range(p)]\n    M = block_companion(B)\n    return B, M, G\n\n\ndef iidG_gn(n, p, gain_var = 1, k = None, p_radius = 0.75):\n    def aEij(i, j, a = 1):\n        Eij = np.zeros((n,n))\n        Eij[i, j] = a\n        return Eij\n\n    G = nx.gn_graph(n, kernel=k)\n    G = nx.adjacency_matrix(G)\n    G = G.toarray()\n\n    B = [np.zeros((n,n)) for i in range(p)]\n    for i, row in enumerate(G.T):\n        for j, x in enumerate(row):\n            if x == 1:\n                b, a = random_arma(p, 0, k = np.random.normal(0, gain_var),\n                                   p_radius = p_radius)\n                B[0] = B[0] + aEij(i, j, b)\n                B[1:] = [B_tau + aEij(i, j, a_tau)\n                         for (B_tau, a_tau) in zip(B, a[1:])]\n\n    M = block_companion(B)\n    return B, M, G\n\n#-------GENERATION UTILITIES---------\n\ndef block_companion(B):\n    \"\"\"\n    Produces a block companion from the matrices B[0], B[1], ... , B[p - 1]\n    [B0, B1, B2, ... Bp-1]\n    [ I,  0,  0, ...  0 ]\n    [ 0,  I,  0, ...  0 ]\n    [ 0,  0,  I, ...  0 ]\n    [ 0,  0, ..., I,  0 ]\n    \"\"\"\n    p = len(B)\n    B = np.hstack((B[k] for k in range(p))) #The top row\n    n = B.shape[0]\n\n    I = np.eye(n*(p - 1))\n    Z = np.zeros((n*(p - 1), n))\n    R = np.hstack((I, Z))\n    B = np.vstack((B, R))\n\n    return B\n\ndef random_matrix(n, d):\n    \"\"\"Return an nxn matrix with iid entries having law d(n)\"\"\"\n    M = d(n)\n    return M\n\ndef random_arma(p, q, k = 1, z_radius = 1, p_radius = 0.75):\n    \"\"\"\n    Returns a random ARMA(p, q) filter. The parameters p and q define\n    the order of the filter where p is the number of AR coefficients\n    (poles) and q is the number of MA coefficients (zeros). k is the\n    gain of the filter. The z_radius and p_radius parameters specify the\n    maximum magnitude of the zeros and poles resp. In order for the\n    filter to be stable, we should have p_radius < 1. The poles and\n    zeros will be placed uniformly at random inside a disc of the\n    specified radius.\n\n    We also force the coefficients to be real. This is done by ensuring\n    that for every complex pole or zero, its complex conjugate is\n    also present. If p and q are even, then all the poles/zeros could\n    be complex. But if p or q is odd, then one of the poles and or\n    zeros will be purely real.\n\n    The filter must be causal. That is, we assert p >= q.\n    Finally, note that in order to generate complex numbers uniformly\n    over the disc we can't generate R and theta uniformly then transform\n    them. This will give a distribution concentrated near (0, 0). We\n    need to generate u uniformly [0, 1] then take R = sqrt(u). 
This can\n be seen by starting with a uniform joint distribution f(x, y) =\n 1/pi, then applying a transform to (r, theta) with x = rcos(theta),\n y = rsin(theta), calculating the distributions of r and theta, then\n applying inverse transform sampling.\n \"\"\"\n assert(p >= q), \"System is not causal\"\n P = []\n Z = []\n for i in range(p % 2):\n pi_r = stats.uniform.rvs(loc = -p_radius, scale = 2*p_radius)\n P.append(pi_r)\n \n for i in range((p - (p % 2)) // 2):\n pi_r = sqrt(stats.uniform.rvs(loc = 0, scale = p_radius))\n pi_ang = stats.uniform.rvs(loc = -np.pi, scale = 2*np.pi)\n P.append(cmath.rect(pi_r, pi_ang))\n P.append(cmath.rect(pi_r, -pi_ang))\n\n for i in range(q % 2):\n zi_r = stats.uniform.rvs(loc = -z_radius, scale = 2*z_radius)\n Z.append(zi_r)\n\n for i in range((q - (q % 2)) // 2):\n zi_r = stats.uniform.rvs(loc = 0, scale = z_radius)\n zi_ang = stats.uniform.rvs(loc = -np.pi, scale = 2*np.pi)\n Z.append(cmath.rect(zi_r, zi_ang))\n Z.append(cmath.rect(zi_r, -zi_ang))\n\n b, a = signal.zpk2tf(Z, P, k)\n return b, a\n\n#------PLOTTING METHODS-------------\ndef matrix_ev_ax(fig, n = None, q = None, p = None, r = None):\n ax = fig.add_subplot(1,1,1)\n uc = patches.Circle((0, 0), radius = 1, fill = False,\n color = \"black\", ls = \"solid\", linewidth = 3)\n ax.add_patch(uc)\n ax.set_aspect(\"equal\")\n ax.set_xlabel(\"Real\")\n ax.set_ylabel(\"Imaginary\")\n ax.set_title(\"Eigenvalues\")\n ax.text(0.8, 1.35, \"$M_{ij} \\in N(%d, %d)$\" % (0, 1), fontsize = 18)\n if n:\n ax.text(1.0, 1.25, \"$n = %d$\" % n, fontsize = 18)\n if q:\n ax.text(1.0, 1.15, \"$q = %f$\" % q, fontsize = 18)\n if p:\n ax.text(1.0, 1.05, \"$p = %f$\" % p, fontsize = 18)\n if r:\n ax.text(1.0, 0.95, \"$r = %r$\" % r, fontsize = 18)\n \n ax.set_xlim([-1.5, 1.5])\n ax.set_ylim([-1.5, 1.5])\n return ax\n\ndef plot_matrix_ev(M, ax, mrkr = \"rx\"):\n \"\"\"\n Plot the eigenvalues of the matrix M onto the axis ax\n \"\"\"\n EVs = np.linalg.eigvals(M)\n for ev in EVs:\n ax.plot(ev.real, ev.imag, mrkr)\n return\n\ndef plot_filter_pz(b, a):\n \"\"\"\n Creates a pole zero plot of a filter\n Modified from: http://www.dsprelated.com/showcode/244.php\n \"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n uc = patches.Circle((0, 0), radius = 1, fill = False,\n color = \"black\", ls = \"dashed\")\n ax.add_patch(uc)\n\n z = np.roots(b)\n p = np.roots(a)\n\n try:\n abs_max_z = max(abs(zi) for zi in z)\n except ValueError: #empty sequence\n abs_max_z = 0\n\n try:\n abs_max_p = max(abs(pi) for pi in p)\n except ValueError: #empty sequence\n abs_max_p = 0\n\n ax_lim = max(abs_max_p, abs_max_z)\n\n ax.plot(z.real, z.imag, \"go\", markersize = 12, mew = 3)\n ax.plot(p.real, p.imag, \"rx\", markersize = 12, mew = 3)\n ax.set_xlim([-ax_lim - 1, ax_lim + 1])\n ax.set_ylim([-ax_lim - 1, ax_lim + 1])\n\n ax.set_xlabel(\"Real\")\n ax.set_ylabel(\"Imaginary\")\n ax.set_title(\"Filter PZ Plot\")\n\n plt.gca().set_aspect(\"equal\")\n\n fig.show()\n return\n\n\ndef plot_filter(b, a, dB=True, plot_gd=False, ret_fig=False,\n ax_fig=None):\n \"\"\"\n Plots a filter's response\n \"\"\"\n w, h = freqz(b, a)\n\n if ax_fig:\n ax1, fig = ax_fig\n else:\n fig = plt.figure()\n ax1 = fig.add_subplot(1,1,1)\n ax1.set_title(\"frequency response\")\n\n if dB:\n ax1.plot(w, 20*np.log10(abs(h)), \"b\", linewidth = 2)\n ax1.set_ylabel(\"$20log_{10}|H|$\", color = \"b\")\n else:\n ax1.plot(w, abs(h), \"b\")\n ax1.set_ylabel(\"$|H|$\", color = \"b\")\n\n ax1.set_xlabel(\"frequency [Rad/Sample]\")\n ax1.grid()\n\n if plot_gd:\n ax2 = 
ax1.twinx()\n w, gd = group_delay((b, a), w)\n ax2.plot(w, gd, \"g\", linewidth = 2)\n ax2.set_ylabel(\"Group Delay [s]\", color = \"g\")\n\n if ret_fig:\n return ax1, fig\n else:\n fig.show()\n return\n\n#-------------------------------------------------------------------------------\ndef plot_filter_PSD(b, a, dB = True, title = \"PSD\"):\n \"\"\"\n Plots a filter's response\n \"\"\"\n w, h = freqz(b, a)\n h = np.abs(h) #Transfer function\n fig = plt.figure()\n ax1 = fig.add_subplot(1,1,1)\n ax1.set_title(title)\n\n if dB:\n ax1.plot(w, 20*np.log10(abs(h)), \"b\", linewidth = 2)\n ax1.set_ylabel(\"$20log_{10}|H|$\", color = \"b\")\n else:\n ax1.plot(w, abs(h)**2, \"b\")\n ax1.set_ylabel(\"$|H|$\", color = \"b\")\n\n ax1.set_xlabel(\"frequency [Rad/Sample]\")\n ax1.grid()\n fig.show()\n return\n\n#-------------------------------------------------------------------------------\ndef axis_filter_PSD(b, a, title = \"PSD\", label = \"original\", linewidth = 2):\n \"\"\"\n Returns a figure and an axis for a PSD plot\n \"\"\"\n w, h = freqz(b, a)\n h = np.abs(h) #Transfer function\n fig = plt.figure()\n ax1 = fig.add_subplot(1,1,1)\n ax1.set_title(title)\n\n ax1.plot(w, 20*np.log10(abs(h)), \"b\", linewidth = linewidth, label = label,\n color = \"b\")\n # ax1.semilogy(w, abs(h)**2, \"b\", linewidth = 2, label = label, color = \"b\")\n ax1.set_ylabel(\"$20log_{10}|H|$\")\n\n ax1.set_xlabel(\"frequency [Rad/Sample]\")\n ax1.grid()\n\n return fig, ax1, w\n\ndef add_PSD_plot(b, a, ax, label = \"new\", color = \"b\", linestyle = \"--\",\n linewidth = 1.5):\n \"\"\"\n Adds a PSD plot to the given axis\n \"\"\"\n w, h = freqz(b, a)\n h = np.abs(h)**2 #Squared transfer function\n ax.plot(w, 10*np.log10(abs(h)), \"b\", linewidth = linewidth,\n label = label, color = color, linestyle = linestyle)\n return ax\n\ndef power_transfer(w, P, sys_ba):\n \"\"\"\n Calculates the power transfer at the frequencies w of the input\n PSD P through the filter described by sys_ba = (b, a).\n \"\"\"\n w, h = signal.freqz(sys_ba[0], sys_ba[1], worN = w)\n return P*(np.abs(h)**2)\n\n#-----------------------------------------------------\n#Useful for plotting eigenvalue distributions of iidG_ER\ndef test_iidG_ER():\n N = 100\n n = 50\n p = 3\n q = 0.1\n r = 0.5\n\n #Params passed to iidG_ER\n params = (n, p, q, r / (q * p))\n\n _, M = iidG_ER(*params)\n\n plt.imshow(M)\n plt.colorbar()\n plt.show()\n\n fig = plt.figure()\n ax = matrix_ev_ax(fig, n, p, q, r)\n\n for i in range(N):\n _, M = iidG_ER(*params)\n plot_matrix_ev(M, ax, \"g+\")\n plt.show()\n return\n\nif __name__ == \"__main__\":\n test_iidG_ER()\n","sub_path":"software/pwgc/LSI_filters.py","file_name":"LSI_filters.py","file_ext":"py","file_size_in_byte":10859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"524293452","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0002_article_is_published'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ArticleTranslation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('title', models.CharField(max_length=200)),\n ('subtitle', models.CharField(max_length=200)),\n ('content', models.TextField()),\n ('languge', 
models.CharField(default=b'english', max_length=50, choices=[(b'english', b'English'), (b'spanish', b'Spanish')])),\n                ('article_id', models.ForeignKey(to='articles.Article')),\n            ],\n            options={\n                'ordering': ['-created_at'],\n                'abstract': False,\n            },\n        ),\n        migrations.RemoveField(\n            model_name='articlecontent',\n            name='article_id',\n        ),\n        migrations.DeleteModel(\n            name='ArticleContent',\n        ),\n    ]\n","sub_path":"cms/articles/migrations/0003_auto_20150913_1118.py","file_name":"0003_auto_20150913_1118.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"143182072","text":"import random\n\nclass QuickSortAlgorithm():\n    def sort(self, array):\n        self.shuffle(array)\n        self.quick_sort_recursive(array, 0, len(array) - 1)\n\n    def shuffle(self, array):\n        n = len(array)\n        for i in range(0, n - 1):  # Fisher-Yates: every index up to n-2 must get a random partner from [i, n-1]\n            random_item_index = random.randint(i, n - 1)\n            self.swap(array, i, random_item_index)\n\n    def quick_sort_recursive(self, array, left, right):\n        pivot = self.get_pivot(array, left)\n        left_write_pos = left\n        right_write_pos = right\n\n        while left_write_pos <= right_write_pos:\n\n            while self.compare(array[left_write_pos], pivot):\n                left_write_pos += 1\n\n            while self.compare(pivot, array[right_write_pos]):\n                right_write_pos -= 1\n\n            if left_write_pos <= right_write_pos:\n                self.swap(array, left_write_pos, right_write_pos)\n                left_write_pos += 1\n                right_write_pos -= 1\n\n        if left < left_write_pos - 1:\n            self.quick_sort_recursive(array, left, left_write_pos - 1)\n\n        if left_write_pos < right:\n            self.quick_sort_recursive(array, left_write_pos, right)\n\n    def get_pivot(self, array, left):\n        return array[left]\n\n    def swap(self, array, index1, index2):\n        (array[index1], array[index2]) = (array[index2], array[index1])\n\n    def compare(self, a, b):\n        return a < b","sub_path":"discnt/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"251558629","text":"#Dominion Deck Class\r\n\r\n#Import classes\r\nimport random\r\nimport os\r\nimport copy\r\nimport sys\r\nimport socket\r\nfrom dominioncards import *\r\n\r\n#Main Deck Class\r\nclass DomDeck(object):\r\n\tcellarCard = CellarCard()\r\n\tchapelCard = ChapelCard()\r\n\tmoatCard = MoatCard()\r\n\tchancellorCard = ChancellorCard()\r\n\tvillageCard = VillageCard()\r\n\twoodcutterCard = WoodcutterCard()\r\n\tworkshopCard = WorkshopCard()\r\n\tbureaucratCard = BureaucratCard()\r\n\tfeastCard = FeastCard()\r\n\tgardensCard = GardensCard()\r\n\tmilitiaCard = MilitiaCard()\r\n\tmoneylenderCard = MoneylenderCard()\r\n\tremodelCard = RemodelCard()\r\n\tsmithyCard = SmithyCard()\r\n\tspyCard = SpyCard()\r\n\tthiefCard = ThiefCard()\r\n\tthroneRoomCard = ThroneRoomCard()\r\n\tcouncilRoomCard = CouncilRoomCard()\r\n\tfestivalCard = FestivalCard()\r\n\tlaboratoryCard = LaboratoryCard()\r\n\tlibraryCard = LibraryCard()\r\n\tmarketCard = MarketCard()\r\n\tmineCard = MineCard()\r\n\twitchCard = WitchCard()\r\n\tadventurerCard = AdventurerCard()\r\n\r\n\t#Treasure Cards --- [Cost, Value]\r\n\tgoldCard = GoldCard()\r\n\tsilverCard = SilverCard()\r\n\tcopperCard = CopperCard()\r\n\tgoldCards = []\r\n\tsilverCards = []\r\n\tcopperCards = []\r\n\r\n\t#Victory Cards --- [Cost, Value]\r\n\tprovinceCard = ProvinceCard()\r\n\tduchyCard = DuchyCard()\r\n\testateCard = EstateCard()\r\n\tprovinceCards = []\r\n\tduchyCards = []\r\n\testateCards = []\r\n\r\n\t#Curse Cards --- [Cost, Value]\r\n\tcurseCard = CurseCard()\r\n\tcurseCards = []\r\n\r\n\t#kingdom Cards --- [Description, Cost, Value]\r\n
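\t# The piles below form the pool from which buildDeck() samples the 10 kingdom\r\n\t# piles used in a game. Worked example of the supply sizes that buildDeck()\r\n\t# creates for a 3-player game: 12 each of Province/Duchy/Estate, 30 Gold,\r\n\t# 40 Silver, 60 Copper, and (3 * 10) - 10 = 20 Curses.\r\n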
Value]\n\tkingdomTypes = [\n\t\tcellarCard,\n\t\tmoatCard,\n\t\tchancellorCard,\n\t\tchapelCard,\n\t\tvillageCard,\n\t\twoodcutterCard,\n\t\tworkshopCard,\n\t\tbureaucratCard,\n\t\tfeastCard,\n\t\tgardensCard,\n\t\tmilitiaCard,\n\t\tmoneylenderCard,\n\t\tremodelCard,\n\t\tspyCard,\n\t\tsmithyCard,\n\t\tthiefCard,\n\t\tthroneRoomCard,\n\t\tcouncilRoomCard,\n\t\tfestivalCard,\n\t\tlaboratoryCard,\n\t\tlibraryCard,\n\t\tmarketCard,\n\t\tmineCard,\n\t\tadventurerCard]\n\t\t\n\tkingdomCardPicks = []\n\tkingdomCards = {}\n\n\tdef __init__(self):\n\t\tpass\n\n# Method to build the starting deck, inculding treasure, victory, and kingdom cards\n# Makes a \"pile\" for each card type, saved as lists and dictionaries\n\tdef buildDeck(self, players):\n\t\tself.kingdomCardPicks = random.sample(self.kingdomTypes, 10)\n\t\tself.kingdomCardPicks.sort(key=lambda x: x.cost, reverse=True)\t\t\n\t\tfor i in range(len(self.kingdomCardPicks)):\n\t\t\tif self.kingdomCardPicks[i].victory == True:\n\t\t\t\tif players == 2:\n\t\t\t\t\tx = 8\n\t\t\t\telse:\n\t\t\t\t\tx = 12\n\t\t\t\tfor y in range(x):\n\t\t\t\t\tif y == 0:\n\t\t\t\t\t\tself.kingdomCards['card' + str(i)] = [self.kingdomCardPicks[i]]\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.kingdomCards['card' + str(i)].append(self.kingdomCardPicks[i])\n\t\t\telse:\n\t\t\t\tfor x in range(10):\n\t\t\t\t\tif x == 0:\n\t\t\t\t\t\tself.kingdomCards['card' + str(i)] = [self.kingdomCardPicks[i]]\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.kingdomCards['card' + str(i)].append(self.kingdomCardPicks[i])\n\t\tif players == 2:\n\t\t\tx = 8\n\t\telse:\n\t\t\tx = 12\n\t\tfor i in range(x):\n\t\t\tself.provinceCards.append(self.provinceCard)\n\t\t\tself.duchyCards.append(self.duchyCard)\n\t\t\tself.estateCards.append(self.estateCard)\n\t\tfor i in range(30):\n\t\t\tself.goldCards.append(self.goldCard)\n\t\tfor i in range(40):\n\t\t\tself.silverCards.append(self.silverCard)\n\t\tfor i in range(60):\n\t\t\tself.copperCards.append(self.copperCard)\n\t\tfor i in range((players * 10) - 10):\n\t\t\tself.curseCards.append(self.curseCard)\n#\t\tif any(i.potion == True for i,value in self.kingdomCardPicks[value]):\n#\t\t\tpass\n\n\tdef send_data(self, client, data):\n \tmessage = str(data)\n\t return client.sendall(message)\n\n\tdef printDeckCards(self, roster):\n\t\tfor user in roster:\n\t\t\tself.send_data(user.playerConn, \"CLRSCRN_FULL\\n\")\n\t\t\tself.send_data(user.playerConn, \"\\n\")\n\t\t\tself.send_data(user.playerConn, \" [P]\" + ProvinceCard.cardColor + ProvinceCard.cardName + \"\\033[0m (\" + str(len(self.provinceCards)) + \"): $\" + str(ProvinceCard.cost) + \" [G]\" + GoldCard.cardColor + GoldCard.cardName + \"\\033[0m (\" + str(len(self.goldCards)) + \"): $\" + str(GoldCard.cost))\n\t\t\tself.send_data(user.playerConn, \" [0]\" + self.kingdomCards['card0'][0].cardColor + self.kingdomCards['card0'][0].cardName + \"\\033[0m \" + (\" \" * (12 - len(self.kingdomCards['card0'][0].cardName))) + \" (\" + str(len(self.kingdomCards['card0'])).zfill(2) + \"): $\" + str(self.kingdomCards['card0'][0].cost))\n\t\t\tself.send_data(user.playerConn, \" [5]\" + self.kingdomCards['card5'][0].cardColor + self.kingdomCards['card5'][0].cardName + \"\\033[0m \" + (\" \" * (12 - len(self.kingdomCards['card5'][0].cardName))) + \" (\" + str(len(self.kingdomCards['card5'])).zfill(2) + \"): $\" + str(self.kingdomCards['card5'][0].cost) + \"\\n\")\n\t\t\tself.send_data(user.playerConn, \" [D]\" + DuchyCard.cardColor + DuchyCard.cardName + \"\\033[0m (\" + str(len(self.duchyCards)) + \"): $\" + str(DuchyCard.cost) + 
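The pile-sizing rule buried in `buildDeck` above, restated as a hedged sketch; the `Card` stand-in class and its fields are mine, not the record's:

```python
import random

class Card:
    def __init__(self, name, cost, victory=False):
        self.name, self.cost, self.victory = name, cost, victory

def pile_size(card, players):
    if card.victory:                      # victory-type kingdom piles (e.g. Gardens)
        return 8 if players == 2 else 12  # follow the 8-for-2-players / 12-otherwise rule
    return 10                             # ordinary kingdom piles always hold 10 cards

kingdom_types = [Card("card%d" % i, cost=i % 6) for i in range(25)]
picks = random.sample(kingdom_types, 10)  # ten distinct piles per game, as in the record
picks.sort(key=lambda c: c.cost, reverse=True)
```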
\" [S]\" + SilverCard.cardColor + SilverCard.cardName + \"\\033[0m (\" + str(len(self.silverCards)) + \"): $\" + str(SilverCard.cost))\n\t\t\tself.send_data(user.playerConn, \" [1]\" + self.kingdomCards['card1'][0].cardColor + self.kingdomCards['card1'][0].cardName + \"\\033[0m \" + (\" \" * (12 - len(self.kingdomCards['card1'][0].cardName))) + \" (\" + str(len(self.kingdomCards['card1'])).zfill(2) + \"): $\" + str(self.kingdomCards['card1'][0].cost))\n\t\t\tself.send_data(user.playerConn, \" [6]\" + self.kingdomCards['card6'][0].cardColor + self.kingdomCards['card6'][0].cardName + \"\\033[0m \" + (\" \" * (12 - len(self.kingdomCards['card6'][0].cardName))) + \" (\" + str(len(self.kingdomCards['card6'])).zfill(2) + \"): $\" + str(self.kingdomCards['card6'][0].cost) + \"\\n\")\n\t\t\tself.send_data(user.playerConn, \" [E]\" + EstateCard.cardColor + EstateCard.cardName + \"\\033[0m (\" + str(len(self.estateCards)) + \"): $\" + str(EstateCard.cost) + \" [C]\" + CopperCard.cardColor + CopperCard.cardName + \"\\033[0m (\" + str(len(self.copperCards)) + \"): $\" + str(CopperCard.cost))\n\t\t\tself.send_data(user.playerConn, \" [2]\" + self.kingdomCards['card2'][0].cardColor + self.kingdomCards['card2'][0].cardName + \"\\033[0m \" + (\" \" * (12 - len(self.kingdomCards['card2'][0].cardName))) + \" (\" + str(len(self.kingdomCards['card2'])).zfill(2) + \"): $\" + str(self.kingdomCards['card2'][0].cost))\n\t\t\tself.send_data(user.playerConn, \" [7]\" + self.kingdomCards['card7'][0].cardColor + self.kingdomCards['card7'][0].cardName + \"\\033[0m \" + (\" \" * (12 - len(self.kingdomCards['card7'][0].cardName))) + \" (\" + str(len(self.kingdomCards['card7'])).zfill(2) + \"): $\" + str(self.kingdomCards['card7'][0].cost) + \"\\n\")\n\t\t\tself.send_data(user.playerConn, \"\t\t\t\t\t \")\n\t\t\tself.send_data(user.playerConn, \" [3]\" + self.kingdomCards['card3'][0].cardColor + self.kingdomCards['card3'][0].cardName + \"\\033[0m \" + (\" \" * (12 - len(self.kingdomCards['card3'][0].cardName))) + \" (\" + str(len(self.kingdomCards['card3'])).zfill(2) + \"): $\" + str(self.kingdomCards['card3'][0].cost))\n\t\t\tself.send_data(user.playerConn, \" [8]\" + self.kingdomCards['card8'][0].cardColor + self.kingdomCards['card8'][0].cardName + \"\\033[0m \" + (\" \" * (12 - len(self.kingdomCards['card8'][0].cardName))) + \" (\" + str(len(self.kingdomCards['card8'])).zfill(2) + \"): $\" + str(self.kingdomCards['card8'][0].cost) + \"\\n\")\n\t\t\tself.send_data(user.playerConn, \" [U]\" + CurseCard.cardColor + CurseCard.cardName + \"\\033[0m (\" + str(len(self.curseCards)).zfill(2) + \"): $\" + str(CurseCard.cost) + \" \")\n\t\t\tself.send_data(user.playerConn, \" [4]\" + self.kingdomCards['card4'][0].cardColor + self.kingdomCards['card4'][0].cardName + \"\\033[0m \" + (\" \" * (12 - len(self.kingdomCards['card4'][0].cardName))) + \" (\" + str(len(self.kingdomCards['card4'])).zfill(2) + \"): $\" + str(self.kingdomCards['card4'][0].cost))\n\t\t\tself.send_data(user.playerConn, \" [9]\" + self.kingdomCards['card9'][0].cardColor + self.kingdomCards['card9'][0].cardName + \"\\033[0m \" + (\" \" * (12 - len(self.kingdomCards['card9'][0].cardName))) + \" (\" + str(len(self.kingdomCards['card9'])).zfill(2) + \"): $\" + str(self.kingdomCards['card9'][0].cost) + \"\\n\")\n\n\tdef readCard(self, number, conn):\n\t\tcardToRead = 'card' + str(int(number))\n\t\tself.send_data(conn, 'CLRSCRN_FULL\\n')\n\t\tself.send_data(conn, \"\\033[36m Card Name: \\033[0m\" + self.kingdomCards[cardToRead][0].cardColor + 
self.kingdomCards[cardToRead][0].cardName + \"\\n\")\n\t\tself.send_data(conn, \"\\033[36m Description: \\033[0m\" + self.kingdomCards[cardToRead][0].description + \"\\n\")\n\t\tself.send_data(conn, \"\\n\\033[32m Cost:\\033[0m $\" + str(self.kingdomCards[cardToRead][0].cost) + \"\\n\")\n\t\tself.send_data(conn, (\"\\n\\n\\033[1;31m Press (y) when finished...\\033[0m\\n\"))\n\t\twhile True:\n\t\t\tdone_reading = self.recv_data(conn, 1024)\n\t\t\tif done_reading != 'y':\n\t\t\t\tself.send_data(conn, \"\\n\\n\\033[1;31m Press (y) when finished...\\033[0m\\n\")\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\treturn\n","sub_path":"server/dominiondeck.py","file_name":"dominiondeck.py","file_ext":"py","file_size_in_byte":8551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"525488554","text":"##Autor: David Medina A01653311\n##Calcular rendimiento de un auto\n\n##Esta funcion es para calcular el rendimiento de km/l\ndef calcularRendimientoKmLitros(km,gas):\n rKm = km/gas\n return rKm\n\n##Esta funcion es para calcular rendimiento de millas/galones\ndef calcularRendimientoMillasGalones(km,gas):\n m = km / 1.6093\n gal = gas * 0.264\n mG = m/gal\n return mG\n\n##Esta funcion es para calcular gasolina necesaria para recorrer cierta distancia\ndef calcularGasParaRecorrerKms(km,rendimiento):\n gas = km/rendimiento\n return gas\n\n## funcion principal\ndef main():\n kmRecorridos = int(input(\"Ingresar kilometros recorridos: \"))\n gas = int(input(\"Ingresar gasolñina gastada: \"))\n\n rendimientoKmL = calcularRendimientoKmLitros(kmRecorridos,gas)\n rendimientoMG = calcularRendimientoMillasGalones(kmRecorridos, gas)\n\n print (\"El rendimiento del automóvil en kilometros/litros es: %.2f\" % rendimientoKmL, \"km/l.\")\n print (\"El rendimiento del automóvil en millas/galones es: %.2f\" % rendimientoMG, \"mi/ga.\")\n\n kmARecorrer = int(input(\"Ingresar kilometros a recorrer: \"))\n rendimiento = rendimientoKmL\n gasNecesaria = calcularGasParaRecorrerKms(kmARecorrer, rendimiento)\n\n print (\"Para recorrer\", kmARecorrer, \"km, necesitas %.2f\"% gasNecesaria , \"litros de gasolina.\")\n\nmain()\n","sub_path":"auto.py","file_name":"auto.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"397306123","text":"'''\n\nRun on GPU: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python\n\n'''\n\nfrom __future__ import print_function\nimport numpy as np\nnp.random.seed(3435) # for reproducibility, should be first\n\n\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential, Graph\nfrom keras.layers import Dropout, Activation, Flatten, \\\n Embedding, Convolution1D, MaxPooling1D, AveragePooling1D, \\\n Input, Dense, merge\nfrom keras.regularizers import l2\nfrom keras.layers.recurrent import LSTM, GRU, SimpleRNN\nfrom keras.constraints import maxnorm\nfrom keras.datasets import imdb\nfrom keras import callbacks\nfrom keras.utils import generic_utils\nfrom keras.models import Model\nfrom keras.optimizers import Adadelta\nimport time\n\n\nbatch_size = 50\nnb_filter = 200\nfilter_length = 4\nhidden_dims = nb_filter * 2\nnb_epoch = 60\nRNN = GRU\nrnn_output_size = 100\nfolds = 10\n\nprint('Loading data...')\n\nfrom process_sst_data import SentimentPhrase\nfrom sst2_data import load_data\n\nX_train, y_train, X_test, y_test, W, W2 = load_data()\nmaxlen = X_train.shape[1]\nmax_features = len(W)\nembedding_dims = len(W[0])\n\nprint('Train...')\naccs = 
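The conversions in the `auto.py` record above reduce to a single constant. A compact sketch using the record's own factors (1 mi ≈ 1.6093 km, 1 L ≈ 0.264 gal); the helper name is mine:

```python
def kml_to_mpg(km_per_litre):
    # (km / 1.6093) / (litres * 0.264)  ==  (km / litres) / (1.6093 * 0.264)
    return km_per_litre / (1.6093 * 0.264)

assert abs(kml_to_mpg(10) - 23.54) < 0.01   # 10 km/L is roughly 23.5 mpg
```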
[]\nfirst_run = True\nprint(len(X_train), 'train sequences')\nprint(len(X_test), 'test sequences')\nprint('X_train shape:', X_train.shape)\nprint('X_test shape:', X_test.shape)\nrand_idx = np.random.permutation(range(len(X_train)))\nX_train = X_train[rand_idx]\ny_train = y_train[rand_idx]\n\n\ndef build_model():\n main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')\n embedding = Embedding(max_features, embedding_dims,\n weights=[np.matrix(W)], input_length=maxlen,\n name='embedding')(main_input)\n\n embedding = Dropout(0.50)(embedding)\n\n conv4 = Convolution1D(nb_filter=nb_filter,\n filter_length=4,\n border_mode='valid',\n activation='relu',\n subsample_length=1,\n name='conv4')(embedding)\n maxConv4 = MaxPooling1D(pool_length=2,\n name='maxConv4')(conv4)\n\n conv5 = Convolution1D(nb_filter=nb_filter,\n filter_length=5,\n border_mode='valid',\n activation='relu',\n subsample_length=1,\n name='conv5')(embedding)\n maxConv5 = MaxPooling1D(pool_length=2,\n name='maxConv5')(conv5)\n\n x = merge([maxConv4, maxConv5], mode='concat')\n\n x = Dropout(0.15)(x)\n\n x = RNN(rnn_output_size)(x)\n\n x = Dense(hidden_dims, activation='relu', init='he_normal',\n W_constraint = maxnorm(3), b_constraint=maxnorm(3),\n name='mlp')(x)\n\n x = Dropout(0.10, name='drop')(x)\n\n output = Dense(1, init='he_normal',\n activation='sigmoid', name='output')(x)\n\n model = Model(input=main_input, output=output)\n model.compile(loss={'output':'binary_crossentropy'},\n optimizer=Adadelta(lr=0.95, epsilon=1e-06),\n metrics=[\"accuracy\"])\n return model\n\nmodel = build_model()\nif first_run:\n first_run = False\n print(model.summary())\n\nbest_val_acc = 0\nbest_test_acc = 0\nfor j in xrange(nb_epoch):\n a = time.time()\n his = model.fit(X_train, y_train,\n batch_size=batch_size,\n validation_data=[X_test, y_test],\n shuffle=True,\n nb_epoch=1, verbose=1)\n print('Epoch %d/%d\\t%s' % (j + 1, nb_epoch, str(his.history)))\n if his.history['val_acc'][0] >= best_val_acc:\n score, acc = model.evaluate(X_test, y_test,\n batch_size=batch_size,\n verbose=2)\n best_val_acc = his.history['val_acc'][0]\n best_test_acc = acc\n print('Got best epoch best val acc is %f test acc is %f' %\n (best_val_acc, best_test_acc))\n if len(accs) > 0:\n print('Current avg test acc:', str(np.mean(accs)))\n b = time.time()\n cost = b - a\n left = (nb_epoch - j - 1)\n print('One round cost %ds, %d round %ds %dmin left' % (cost, left,\n cost * left,\n cost * left / 60.0))\naccs.append(best_test_acc)\nprint('Avg test acc:', str(np.mean(accs)))\n","sub_path":"crnn-master_old/sst2_cnn_rnn.py","file_name":"sst2_cnn_rnn.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"642894022","text":"\"\"\"\nThis module implements the Tranform class.\n\"\"\"\n\nfrom helperfunctions import *\nfrom vraytype import *\nfrom vector import *\nfrom matrix import *\n\n\nclass Transform(VRayType):\n \"\"\"\n A class representing a transformation in 3D space (3x3 matrix + offset).\n \"\"\"\n\n ###############################################\n # Initializing methods\n\n def __init__(self, *args):\n \"\"\" Initialize the transform. 
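For the two convolution branches in `build_model` above, the usual `'valid'`-convolution length arithmetic shows how the merged tensors line up. A sketch with an illustrative sequence length of 64:

```python
def conv1d_out_len(L, k, stride=1):
    return (L - k) // stride + 1          # 'valid' convolution: no padding

def pool_out_len(L, pool):
    return L // pool                      # non-overlapping max pooling

L = 64
for k in (4, 5):                          # the two branch filter lengths in the record
    print(k, pool_out_len(conv1d_out_len(L, k), 2))   # 4 -> 30, 5 -> 30
```

Assuming Keras 1's `merge(..., mode='concat')` concatenates on the feature axis, the pooled time lengths `(L-3)//2` and `(L-4)//2` must match, which holds only for even `L` (as with 64 here).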
\"\"\"\n self.init(*args)\n\n def init(self, *args):\n \"\"\"\n Initialize the transform.\n\n Usage:\n - init() - both the transform matrix and the offset are set to zero\n - init(t) - create a transform from the given transform\n - init(1) - the matrix is set to the identity matrix and the\n offset is set to zero\n - init(m, v) - create a transform with a given matrix m and a\n given vector v\n \"\"\"\n\n self.m = Matrix()\n self.offs = Vector()\n\n if len(args) == 0:\n pass\n elif len(args) == 1:\n if isinstance(args[0], Transform):\n self.m = args[0].m.copy()\n self.offs = args[0].offs.copy()\n elif isinstance(args[0], int):\n if args[0] == 1:\n self.makeIdentity()\n elif hasattr(args[0], '__iter__') and len(args[0]) == 2:\n self.m.init(args[0][0])\n self.offs.init(args[0][1])\n else:\n raise TypeError(\"Invalid arguments.\")\n elif(len(args) == 2):\n self.m.init(args[0])\n self.offs.init(args[1])\n else:\n raise TypeError(\"Invalid arguments.\")\n\n ###############################################\n # Inherited from VRayType\n\n def __repr__(self):\n \"\"\" Thorough explanation can be found in the base class(VRayType). \"\"\"\n return 'Transform(%s, %s)' % (self.m, self.offs)\n\n def _value(self):\n \"\"\" Thorough explanation can be found in the base class(VRayType). \"\"\"\n return (self.m._value(), self.offs._value())\n\n def _desc(self):\n \"\"\" Thorough explanation can be found in the base class(VRayType). \"\"\"\n return VRayParameterType.paramtype_transform\n\n def copy(self):\n \"\"\" Thorough explanation can be found in the base class(VRayType). \"\"\"\n return Transform(self)\n\n ###############################################\n # New methods\n\n def __iadd__(self, other):\n \"\"\" Add another transform to this one. \"\"\"\n if not isinstance(other, Transform):\n return NotImplemented\n\n self.m += other.m\n self.offs += other.offs\n\n return self\n\n def __add__(self, other):\n \"\"\"\n Return the result from adding this transform with another transform.\n \"\"\"\n if not isinstance(other, Transform):\n return NotImplemented\n\n return self.copy().__iadd__(other)\n\n def __isub__(self, other):\n \"\"\" Substract another transform from this one. \"\"\"\n if not isinstance(other, Transform):\n return NotImplemented\n\n self.m -= other.m\n self.offs -= other.offs\n\n return self\n\n def __sub__(self, other):\n \"\"\" Return the result from substracting a tranform from this one. 
\"\"\"\n if not isinstance(other, Transform):\n return NotImplemented\n\n return self.copy().__isub__(other)\n\n def __imul__(self, other):\n \"\"\"\n Multipliy the transform by:\n - another transform\n - a number - multiply the matrix and the offset by this number\n \"\"\"\n if isinstance(other, Transform):\n self.offs = (self.m * other.offs + self.offs)\n self.m *= other.m\n elif isnumeric(other):\n self.m *= other\n self.offs *= other\n else:\n return NotImplemented\n\n return self\n\n def __mul__(self, other):\n \"\"\"\n Multiply a copy of the transform by:\n - another transform\n - a vector\n - a number\n and return the copy.\n \"\"\"\n if isinstance(other, Transform):\n return self.copy().__imul__(other)\n elif isinstance(other, Vector):\n return self.m * other + self.offs\n elif isnumeric(other):\n return self.copy().__imul__(other)\n else:\n return NotImplemented\n\n def __rmul__(self, other):\n \"\"\"\n Multiply:\n - another transform\n - a vector\n - a number\n by a copy of this transform and return the copy.\n \"\"\"\n if isinstance(other, Vector):\n return (other + self.offs) * self.m\n elif isnumeric(other):\n return self.copy().__imul__(other)\n else:\n return NotImplemented\n\n def __idiv__(self, other):\n \"\"\" Divide the Transform by a number. \"\"\"\n if not isnumeric(other):\n return NotImplemented\n\n self.m /= other\n self.offs /= other\n\n return self\n\n def __div__(self, other):\n \"\"\" Return the result from dividing the transform with a number. \"\"\"\n if not isnumeric(other):\n return NotImplemented\n\n return self.copy().__idiv__(other)\n\n def __eq__(self, other):\n \"\"\" Checks whether the two transforms are equal. \"\"\"\n if not isinstance(other, Transform):\n return NotImplemented\n\n return self.m == other.m and self.offs == other.offs\n\n def __ne__(self, other):\n \"\"\" Checks whether the two transforms are not equal. \"\"\"\n if not isinstance(other, Transform):\n return NotImplemented\n\n return self.m != other.m or self.offs != other.offs\n\n def __neg__(self):\n \"\"\" Return a Transform with negative values. \"\"\"\n return Transform(-self.m, -self.offs)\n\n def __pos__(self):\n \"\"\" Return a copy of the Transform. \"\"\"\n return self.copy()\n\n def makeZero(self):\n \"\"\" Make both the matrix and the vector equal to zero. \"\"\"\n self.m.makeZero()\n self.offs.makeZero()\n\n def makeIdentity(self):\n \"\"\" Make the transform equal to the identity transform. \"\"\"\n self.m.makeIdentity()\n self.offs.makeZero()\n\n def makeInverse(self):\n \"\"\" Make the transform equal to its inverse. \"\"\"\n self.m.makeInverse()\n self.offs = -self.m * self.offs\n\n def getInverse(self):\n \"\"\" Return the inverse of the transform. 
\"\"\"\n t = Transform(self)\n t.makeInverse()\n return t\n","sub_path":"WitPipeline/MayaPlugs/Vray1/3.5/2017/scripts/vray/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":6536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"107303027","text":"import numpy as np\nimport os.path as path\nimport gc\nfrom joblib import dump, load\nfrom sklearn.metrics import precision_recall_fscore_support, accuracy_score\n\nfrom utils import (\n # Constants\n DEVICE, BIRD_NAMES, DATA_PATH, MODEL_PATH, PREDICTIONS_PATH, TOLERANCE,\n\n # Data handling functions\n load_bird_data, extract_labelled_spectrograms, train_test_split,\n extract_birds, create_windows, store_birds, load_birds, store_dataset,\n load_dataset, flatten_windows_dic, standardize_data,\n\n # Other stuff\n hash_spectrograms, tuples2vector, score_predictions\n)\n\nfrom classifiers import (\n # For the cnn\n train_CNN, predict_syllables_CNN, load_cnn, wrap_cnn,\n\n # For the rnn\n train_RNN, predict_syllables_RNN, load_rnn, wrap_rnn,\n\n # For transfer learning\n get_transfer_learning_models_CNN, get_transfer_learning_models_RNN\n)\n\n\ndef predict_dataset(model, spectrograms, ground_truth_tuples, name, read_cache=True, write_cache=True):\n \"\"\"\n Makes predictions for the specified dataset consisting of spectrograms (parameter 'spectrograms')\n and labelled tuples (parameter 'ground_truth_tuples') of the form (on, off, SETindex, bird_name)\n\n If 'read_cache==True', then the\n function first checks, whether the results have already been computed (i.e.\n whether a file with name 'name' already exists) and if so, loads the data.\n\n If 'write_cache==True' then the function will store the computed results.\n \"\"\"\n base = path.dirname(path.abspath(__file__))\n p = path.join(base, PREDICTIONS_PATH + f\"predictions_{name}.data\")\n\n if read_cache and path.isfile(p):\n return load(p)\n\n results = []\n # Iterate over all spectrograms\n for index, spec in enumerate(spectrograms):\n if index % 20 == 0:\n gc.collect()\n print(index, \"/\", len(spectrograms))\n\n # Generate predictions for current spectrogram. Make sure that they are sorted\n y_pred = sorted(model(spec, index),\n key=lambda t: (t[2], t[0], t[1]))\n\n # Fetch the true tuples for this spectrogram. Make sure that they are sorted\n y_true = sorted([tup for tup in ground_truth_tuples if tup[2] == index],\n key=lambda t: (t[2], t[0], t[1]))\n\n results.append({\"y_true\": y_true, \"y_pred\": y_pred, \"spectrogram_length\": len(spec[0])})\n\n # Store the results, if write_cache = True\n if write_cache:\n dump(results, p)\n\n return results\n\n\ndef analyze_errors(predictions):\n \"\"\"\n This function compares the vocal/non-vocal predictions of a model to the true\n vocal/non-vocal intervals and keeps track of 4 different types of errors:\n 1. Border error: The prediction is a bit longer/shorter than the real error.\n - Severity: Small\n 2. Noise error: In this case the model misclassified noise as a syllable\n - Severity: Medium\n 3. Skip error: In this case the model misclassified a real syllable as noise\n (i.e. the model skipped a real syllable)\n - Severity: Medium\n 4. 
Split error: Here the model did accidentally split a syllable into 2 syllables\n - Severity: Large\n\n Furthermore, it displays a few other metrics such as a score (used for grading),\n how many columns were predicted correctly in total and other metrics\n \"\"\"\n # A few helper functions which each operate on two lists of tuples\n def set_intersect(a, b):\n \"\"\"\n Takes two lists of tuples as input and returns all tuples from 'a' whose ranges intersect\n with at least one tuple from 'b'\n \"\"\"\n return [t_a for t_a in a if\n any(map(lambda t_b:\n (t_a[0] <= t_b[0] and t_a[1] >= t_b[0]) or\n (t_a[0] <= t_b[1] and t_a[1] >= t_b[1]) or\n (t_a[0] >= t_b[0] and t_a[1] <= t_b[1]), b))]\n\n def set_minus(a, b):\n \"\"\"\n Takes two lists of tuples as input and returns all tuples from 'a' which don't\n intersect with any tuple from 'b'\n \"\"\"\n intersect_with_b = set_intersect(a, b)\n return [t_a for t_a in a if t_a not in intersect_with_b]\n\n def set_contains(a, b):\n \"\"\"\n Takes two lists of tuples as input and returns all tuples from 'a' whose interval\n is contained in at least one tuple from 'b'\n \"\"\"\n return [t_a for t_a in a if\n any(map(lambda t_b: t_a[0] >= t_b[0] and t_a[1] <= t_b[1], b))]\n\n # Store all tuples which are erroneous in their respective arrays\n type1_tuples = []\n type2_tuples = []\n type3_tuples = []\n type4_tuples = []\n\n # Store the amount of times each error occurs in these variables\n type1_amount = type2_amount = type3_amount = type4_amount = 0\n\n accuracies = []\n prediction_tuples = []\n prediction_vector = []\n solution_tuples = []\n solution_vector = []\n\n # Iterate through all spectrograms\n for index, result in enumerate(predictions):\n y_true = result['y_true']\n y_pred = result['y_pred']\n length = result['spectrogram_length']\n\n prediction_tuples.append((y_pred, length))\n solution_tuples.append((y_true, length))\n\n # If we don't have any lables, there is nothing to analyze\n if y_true == []:\n type1_tuples.append([])\n type2_tuples.append(y_pred)\n type3_tuples.append([])\n type4_tuples.append([])\n continue\n\n # Find all errors of type 1 (Border errors)\n # overlap = set_intersect(y_pred, y_true)\n # type1 = [t for t in y_pred if t in overlap and t not in y_true]\n type1 = [t for t in y_pred if len(\n {t2 for t2 in y_true if abs(t[0] - t2[0]) <= TOLERANCE and abs(t[1] - t2[1]) <= TOLERANCE}\n ) == 0]\n type1_amount += len(type1)\n type1_tuples.append(type1)\n\n # Find all errors of type 2 (Noise errors)\n type2 = set_minus(y_pred, y_true)\n type2_amount += len(type2)\n type2_tuples.append(type2)\n\n # Find all errors of type 3 (Skip errors)\n type3 = set_minus(y_true, y_pred)\n type3_amount += len(type3)\n type3_tuples.append(type3)\n\n # Find all errors of type 4 (Split errors)\n # For all consecutive pairs of tuples 'a','b' in 'y_pred', get the interval\n # of the space between 'a' and 'b'\n consecutive_tuples = list(zip(y_pred, y_pred[1:]))\n consecutive_tuples = [(t[0][1], t[1][0], t[0][2]) for t in consecutive_tuples]\n type4 = set_contains(consecutive_tuples, y_true)\n type4_amount += len(type4)\n type4_tuples.append(type4)\n\n accuracies.append(score_predictions(y_true, y_pred, tolerance=TOLERANCE))\n\n # Compute a few standard error metrics\n # First transform the tuples into a single array of 0s and 1s representing local/\n # non-vocal columns\n prediction_vector = tuples2vector(prediction_tuples)\n solution_vector = tuples2vector(solution_tuples)\n\n # Compute error metrics\n precision, recall, f1, _ = 
precision_recall_fscore_support(\n solution_vector,\n prediction_vector,\n average=\"binary\"\n )\n accuracy = accuracy_score(solution_vector, prediction_vector)\n\n return {'type1_amount': type1_amount,\n 'type2_amount': type2_amount,\n 'type3_amount': type3_amount,\n 'type4_amount': type4_amount,\n 'type1_tuples': type1_tuples,\n 'type2_tuples': type2_tuples,\n 'type3_tuples': type3_tuples,\n 'type4_tuples': type4_tuples,\n 'accuracy': accuracy,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'score_mean': np.mean(accuracies),\n 'score_std': np.std(accuracies)}\n\n\ndef compare_classifiers(dataset=None, model_dic=None, print_summary=True):\n \"\"\"\n Take the different classifiers and generates predictions for the specified dataset.\n The predictions are then analyzed and the different types of errors get listed.\n\n IF 'dataset' is 'None', the data of all birds is loaded. Else it should have the form:\n {\n \"bird_name1\": {\"Xs_train\" : Xs_train, \"tuples\" : ground_truth_tuples},\n \"bird_name2\": {\"Xs_train\" : Xs_train, \"tuples\" : ground_truth_tuples},\n ...\n }\n where \"Xs_train\" is a list of spectrograms and \"tuples\" a list of tuples of the form (on,off,SETindex,bird_name) denoting\n the start (on) and end (off) of a syllable, and the spectrogram index (SETindex) of said syllable, and the name of the bird\n (bird_name) from which the syllable stems from.\n\n 'model_dic' is a dictionary containing a mapping of model names to models:\n {\n \"model_name1\" : model1,\n \"model_name2\" : model2,\n ...\n }\n\n Note: This function assumes that the classifiers have already been trained before.\n \"\"\"\n if model_dic is None:\n raise Exception(\"You need to specify at least one model!\")\n\n # Implemented error metrics\n metrics = ['accuracy', 'precision', 'recall', 'f1', 'score_mean', 'score_std']\n\n # If dataset is not specified, use total dataset over all birds\n if dataset is None:\n dataset = load_bird_data()\n dataset, _ = extract_labelled_spectrograms(dataset)\n\n bird_names = [key for key in list(dataset.keys()) if type(key) == np.str_ or type(key) == str]\n\n summary_total = {}\n for model_name in model_dic.keys():\n summary_total[model_name] = {}\n\n # Iterate through all specified birds and generate predictions\n for bird_name in bird_names:\n gc.collect()\n bird_data = dataset[bird_name]\n spectrograms = bird_data['Xs_train']\n tuples = bird_data['tuples']\n\n results = {}\n errors = {}\n\n # Make predictions for this specific bird\n for model_name in model_dic.keys():\n print(f\"Make predictions for bird {bird_name} using the model {model_name}\")\n\n # Use a hash value in the name to check if the input file already exists or not\n name = f\"{model_name}_{bird_name}_hash_{hash_spectrograms(spectrograms)}\"\n results[model_name] = predict_dataset(model=model_dic[model_name],\n spectrograms=spectrograms, ground_truth_tuples=tuples,\n name=name)\n\n # Analyze the errors of the different predictors\n errors[model_name] = analyze_errors(results[model_name])\n\n # Add the errors of this specific model on this specific bird to the summary\n for error_type in range(1, 5, 1):\n error_name = f\"type{error_type}_amount\"\n summary_total[model_name][error_name] = summary_total[model_name].get(error_name, 0) + \\\n errors[model_name][error_name]\n # Add the other score metrics per model\n for metric in metrics:\n summary_total[model_name][metric] = summary_total[model_name].get(metric, []) \\\n + [errors[model_name][metric]]\n\n summary_total[bird_name] = errors\n\n 
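For well-formed `(start, end)` intervals, the three-case test inside `set_intersect` above reduces to the standard two-comparison overlap predicate. A minimal sketch:

```python
def overlaps(a, b):
    # Two intervals intersect iff each one starts no later than the other ends.
    return a[0] <= b[1] and b[0] <= a[1]

assert overlaps((2, 5), (4, 9))       # partial overlap
assert overlaps((2, 9), (4, 5))       # containment
assert not overlaps((2, 3), (4, 5))   # disjoint
```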
if not print_summary:\n return summary_total\n\n # Print a summary of the analysis results\n line = \"=\" * 50\n def header1(text): print(line); print(text); print(line)\n def header2(text): print(text + \":\"); print(\"-\" * (len(text) + 1))\n\n def print_overview(error_dic):\n print(\"Type 1 errors (Border errors): \", error_dic[\"type1_amount\"])\n print(\"Type 2 errors (Noise errors) : \", error_dic[\"type2_amount\"])\n print(\"Type 3 errors (Skip errors) : \", error_dic[\"type3_amount\"])\n print(\"Type 4 errors (Split errors) : \", error_dic[\"type4_amount\"], \"\\n\")\n for metric in metrics:\n print(metric + \": \" + str(np.mean(error_dic[metric])))\n print()\n\n print()\n header1(\"SUMMARY\")\n header1(\"TOTAL\")\n for model_name in model_dic.keys():\n header2(model_name)\n print_overview(summary_total[model_name])\n\n for bird_name in bird_names:\n header1(\"BIRD \"+bird_name)\n for model_name in model_dic.keys():\n header2(model_name)\n print_overview(summary_total[bird_name][model_name])\n\n return summary_total\n\n\nif __name__ == \"__main__\":\n\n # Set some general parameters\n use_feature_extraction = False\n wnd_sz = 20\n limit = 80000\n standardize = False\n online = False\n\n # If you only want to train the cnn stuff, set this to False\n rnn_should_be_trained = False\n\n # Some RNN parameters\n network_type = \"gru\" # Choose from {'rnn', 'lstm', 'gru'}\n num_layers = 1\n hidden_size = 100\n\n # The paths to the models\n base = path.dirname(path.abspath(__file__))\n cnn_name = \"CNN_features_%s_wnd_sz_%s_limit_%s_v02.model\" % (use_feature_extraction, wnd_sz, limit)\n cnn_path = path.join(base, MODEL_PATH+cnn_name)\n rnn_name = \"RNN_type_%s_num_layers_%s_hidden_size_%s_features_%s_wnd_sz_%s_limit_%s.model\" % (\n network_type, num_layers, hidden_size, use_feature_extraction, wnd_sz, limit)\n rnn_path = path.join(base, MODEL_PATH+rnn_name)\n\n if not (path.isfile(cnn_path) and (not rnn_should_be_trained or path.isfile(rnn_path))):\n # Load the data and get all labelled spectrograms\n bird_data = load_bird_data(names=[\"g17y2\"])\n\n if standardize:\n bird_data = standardize_data(bird_data, coarse_mode=\"per_spectrogram\", fine_mode=\"scalar\")\n data_labelled, _ = extract_labelled_spectrograms(bird_data)\n\n # Perform a train-validation-test split\n data_train, data_test = train_test_split(bird_data=data_labelled, configs=0.33, seed=42)\n data_val, data_test = train_test_split(bird_data=data_test, configs=0.5, seed=42)\n\n # Extract the windows from the spectrograms\n windows_train, _ = create_windows(bird_data=data_train, wnd_sizes=wnd_sz, limits=limit, on_fracs=0.5, dt=5, seed=42)\n windows_val, _ = create_windows(bird_data=data_val, wnd_sizes=wnd_sz, limits=int(limit/2), on_fracs=0.5, dt=5, seed=42)\n windows_test, _ = create_windows(bird_data=data_test, wnd_sizes=wnd_sz, limits=int(limit/2), on_fracs=0.5, dt=5, seed=42)\n\n X_train, y_train = flatten_windows_dic(windows_train[wnd_sz])\n X_val, y_val = flatten_windows_dic(windows_val[wnd_sz])\n X_test, y_test = flatten_windows_dic(windows_test[wnd_sz])\n\n dataset = {\n \"train\": (X_train, y_train),\n \"validation\": (X_val, y_val),\n \"test\": (X_test, y_test)\n }\n\n if not path.isfile(cnn_path):\n cnn = train_CNN(dataset, cnn_name, normalize_input=True, online=online)\n if rnn_should_be_trained and not path.isfile(rnn_path):\n rnn = train_RNN(dataset, rnn_name, network_type=network_type, hidden_size=hidden_size,\n num_layers=num_layers, normalize_input=True, online=online)\n\n # Load the CNN\n cnn = 
load_cnn(cnn_path, wnd_sz, online=online)\n if rnn_should_be_trained:\n rnn = load_rnn(rnn_path, network_type, nfreq=128, hidden_size=hidden_size, num_layers=num_layers, device=DEVICE)\n\n # Print the number of parameters\n print(\"CNN has \", sum(p.numel() for p in cnn.parameters()), \" parameters.\")\n if rnn_should_be_trained:\n print(\"RNN has \", sum(p.numel() for p in rnn.parameters()), \" parameters.\")\n\n cnn_wrapped = wrap_cnn(cnn, mode=\"for_spectrograms\")\n if rnn_should_be_trained:\n rnn_wrapped = wrap_rnn(rnn, mode=\"for_spectrograms\")\n # compare_classifiers(dataset=None, model_dic={\"cnn\": cnn_wrapped, \"rnn\": rnn_wrapped}, print_summary=True)\n\n transfer_model_dic_cnn = get_transfer_learning_models_CNN(\n bird_names=[\"g19o10\", \"R3428\"],\n base_model=cnn,\n arch=\"CNN\",\n wnd_sz=wnd_sz,\n limit=limit,\n retrain_layers=4,\n standardize_input=standardize)\n\n if rnn_should_be_trained:\n transfer_model_dic_rnn = get_transfer_learning_models_RNN(\n bird_names=[\"g19o10\", \"R3428\"],\n base_model=rnn,\n arch=\"RNN\",\n wnd_sz=wnd_sz,\n limit=limit,\n network_type=network_type, # Choose from [\"rnn\",\"lstm\",\"gru\"]\n hidden_size=hidden_size,\n num_layers=num_layers,\n retrain_layers=4,\n nfreq=128,\n standardize_input=standardize\n )\n\n transfer_model_dic_cnn[\"base_CNN\"] = cnn\n if rnn_should_be_trained:\n transfer_model_dic_rnn[\"base_RNN\"] = rnn\n\n for key in transfer_model_dic_cnn:\n transfer_model_dic_cnn[key] = wrap_cnn(transfer_model_dic_cnn[key], mode=\"for_spectrograms\", normalize_input=True)\n\n if rnn_should_be_trained:\n for key in transfer_model_dic_rnn:\n transfer_model_dic_rnn[key] = wrap_rnn(transfer_model_dic_rnn[key], mode=\"for_spectrograms\", normalize_input=True)\n\n compare_classifiers(dataset=None, model_dic=transfer_model_dic_cnn, print_summary=True)\n if rnn_should_be_trained:\n compare_classifiers(dataset=None, model_dic=transfer_model_dic_rnn, print_summary=True)\n","sub_path":"analyze_errors.py","file_name":"analyze_errors.py","file_ext":"py","file_size_in_byte":17184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"431110501","text":"#encoding:utf-8\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n\n\ndef start_up_driver(browser_name='Chrome'):\n if browser_name == 'Chrome':\n driver = webdriver.Chrome()\n elif browser_name == \"chrome_headless\":\n options = Options()\n options.add_argument(\"--headless\") # Runs Chrome in headless mode.\n options.add_argument('--no-sandbox') # Bypass OS security model\n options.add_argument('--disable-gpu') # applicable to windows os only\n driver = webdriver.Chrome(options=options)\n elif browser_name == 'Firefox':\n driver = webdriver.Firefox()\n elif browser_name == 'Ie':\n driver = webdriver.Ie()\n elif browser_name == 'Edge':\n driver = webdriver.Edge()\n elif browser_name == 'Safari':\n driver = webdriver.Safari()\n else:\n raise NameError(\n \"Not found %s browser,You can enter 'Chrome', 'Firefox', 'Ie', 'Edge', 'Safari','chrome-headless'.\" % browser_name)\n return driver","sub_path":"common/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"254412891","text":"import sqlite3\n\nwith sqlite3.connect(\"test_database.db\") as connection:\n\tc = connection.cursor()\n\tc.execute(\"DROP TABLE IF EXISTS Roster\")\n\tc.execute(\"CREATE TABLE Roster(Name TEXT, Species TEXT, 
IQ INT)\")\n\tinsert_values = (\n\t\t\t(\"Jean-Baptiste Zorg\", \"Human\", 122),\n\t\t\t(\"Korben Dallas\", \"Meat Popsicle\", 100),\n\t\t\t(\"Ak'not\", \"Mangalore\", -5)\n\t\t)\n\tc.executemany(\"INSERT INTO Roster VALUES(?, ?, ?)\", insert_values)\n\tc.execute(\"UPDATE Roster SET Species='Human' WHERE Name='Korben Dallas'\")\n\tc.execute(\"SELECT Name, IQ FROM Roster WHERE Species=='Human'\")\n\tfor row in c.fetchall():\n\t\tprint(row)","sub_path":"sqlite_review_exercises.py","file_name":"sqlite_review_exercises.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"11678395","text":"\"\"\"Modules of pymongo\"\"\"\nfrom pymongo import MongoClient\n\n\ndef get_copywrites(find={}, sort=[]):\n client = MongoClient()\n db = client['common_db']\n collection = db['copies_store']\n cursor = collection.find(filter=find, sort=sort)\n docs = []\n for doc in cursor:\n print(doc)\n docs.append({\n 'name': doc.get('name', 'NAME_NOT_FOUND'),\n 'content': doc.get('content', []),\n 'update_time': doc.get('update_time', \"TIME_NOT_FOUND\"),\n 'id': str(doc.get('_id'))\n })\n\n return docs\n","sub_path":"huangsite/huangsite/operate_database/get_copywrites.py","file_name":"get_copywrites.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"423083632","text":"#Copyright (C) 2020 Valentin-Gabriel SOUMAH\r\n# coding: utf-8\r\nimport os\r\nimport BAO1_Valentin as Bao1\r\nfrom sklearn.naive_bayes import GaussianNB,ComplementNB,MultinomialNB \r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom numpy import nditer\r\n\r\n#On réutilise le dictionnaire avec toutes les rubrique de la BAO1\r\ncorres = {'tout':'0,2-(.*),0|0,57-0,64-823353,0|env_sciences','une':'0,2-3208,1-0,0', 'international':'0,2-3210,1-0,0', 'europe':'0,2-3214,1-0,0', 'societe':'0,2-3224,1-0,0', 'idees':'0,2-3232,1-0,0', 'economie':'0,2-3234,1-0,0', 'actualite-medias':'0,2-3236,1-0,0', 'sport':'0,2-3242,1-0,0', 'planete':'0,2-3244,1-0,0', 'culture':'0,2-3246,1-0,0', 'livres':'0,2-3260,1-0,0', 'cinema':'0,2-3476,1-0,0', 'technologies':'0,2-3546,1-0,0', 'politique':'0,57-0,64-823353,0', 'sciences':'env_sciences'}\r\n\r\n\r\ndef constitutioncorpus():\r\n if not os.path.isdir(\"bao4donnees\"):\r\n os.mkdir(\"bao4donnees\")\r\n textes,classes=[],[] #On crée des listes vide qu'on remplira avec le contenu de chaque fil rss et leur classe associées\r\n for rubrik in corres:\r\n if rubrik == \"tout\":continue\r\n #Pour chaque rubrique on lance la Bao1, on a donc un fichier txt par rubrique.\r\n Bao1.parcours(\"2019\",\"bao4donnees\",rubrique=rubrik,extractiontype=\"lxml\")\r\n\r\n with open(\"bao4donnees/\"+rubrik+\".txt\") as fichier:\r\n #On coupe le fichier selon les fils rss originaux\r\n #La division a été marquée dans la bao1 par des tirets\r\n for filrss in fichier.read().split(\"------------------\\n\"):\r\n if not filrss: continue #On ignore les fils rss ou rien n'a été extrait (à cause des doublons)\r\n textes.append(filrss)\r\n classes.append(rubrik)\r\n return textes,classes #On renvoie les textes avec les classes associées pour l'entraînement\r\n\r\ndef entrainement(textes,classes):\r\n print(\"entraînement\")\r\n vecto = TfidfVectorizer(max_features=4500,ngram_range=(1, 2))\r\n vecteurs = vecto.fit_transform(textes).toarray() #On vectorise selon le nombre d'occurence de chaque type\r\n\r\n modele = MultinomialNB().fit(vecteurs,classes) 
#On entraîne le modèle avec les données\r\n return modele\r\nif __name__ == \"__main__\":\r\n textes,classes= constitutioncorpus()\r\n classesu=[]\r\n for classe in classes:\r\n if classe not in classesu: #On aura besoin par la suite de la liste des classes dans l'ordre\r\n classesu.append(classe)\r\n modele=entrainement(textes,classes) #On entraîne le modèle en lui donnant les titres et description de chaque fil rss avec la rubrique à laquelle ils appartiennent\r\n\r\n #Maintenant on va tester le modèle\r\n \r\n txttest=[open(\"bao4donnees/echantillontest/\"+fichier).read() for fichier in os.listdir(\"bao4donnees/echantillontest\")] #On récupère une liste de tous les contenus textuels des fichiers de l'echantillon de test\r\n vraiesrubriques = [fichier[:-4] for fichier in os.listdir(\"bao4donnees/echantillontest\") ] #On voudra afficher les vraies rubriques de chaque fichier\r\n vecteurstest = TfidfVectorizer(max_features=4500,ngram_range=(1, 2)).fit_transform(txttest).toarray() #On les vectorise aussi pour pouvoir les rendre comparables à données d'entrainements\r\n\r\n i=0\r\n with open(\"resultats/classificationrubriques.txt\", \"w\") as sortie:\r\n for vecteur in vecteurstest:\r\n sortie.write(\"vraie rubrique: \"+vraiesrubriques[i]+\"\\n\")\r\n print(\"vraie rubrique:\",vraiesrubriques[i])\r\n #On utilise notre modèle pour prédire la rubrique pour chaque fil rss\r\n resultat=modele.predict_proba(vecteur.reshape(1, -1) )[0]\r\n j=0\r\n #On affiche les probabilités estimées par notre modèle\r\n for nb in nditer(resultat):\r\n sortie.write(\"\\t\"+classesu[j] + str(nb)+\"\\n\")\r\n print(\"\\t\",classesu[j], nb)\r\n j+=1\r\n prediction=modele.predict(vecteur.reshape(1,-1))[0]\r\n sortie.write(\"rubrique prédite \"+prediction+\"\\n\")\r\n print(\"rubrique prédite:\",prediction,\"\\n\")\r\n #Est ce que notre modèle a eu juste?\r\n if prediction==vraiesrubriques[i]:\r\n sortie.write(\"Prédiction exacte!\\n\\n\")\r\n print(\"Prédiction exacte!\\n\")\r\n else:\r\n sortie.write(\"Raté\\n\\n\")\r\n print(\"Raté\\n\")\r\n i+=1\r\n\r\n","sub_path":"BAO4_Valentin.py","file_name":"BAO4_Valentin.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"448406855","text":"import students\nimport courses\ndef read_student_file(file_name,students_list):\n fi=open(file_name, \"r\") #只读模式打开\n line1=fi.readline().strip() \n while line1!=\"\":\n tem_list=line1.split(sep=\",\") #拆分成独立字符串的列表\n item=students.Students(tem_list[0], tem_list[1], tem_list[2], tem_list[3]) #调用类对象\n line2=fi.readline().strip() \n for i in range(int(line2)):\n tempLine=fi.readline().strip() \n item.add_courses(tempLine)\n students_list.append(item)\n line1=fi.readline().strip()\n fi.close()\n#读取students.txt文件\ndef read_course_file(file_name,courses_list):\n fi = open(file_name, \"r\")\n line1 = fi.readline().strip() #只读模式读取文件数据\n while line1!=\"\":\n tem_list=line1.split(sep=\",\") #拆成独立字符串的列表\n line2=fi.readline().strip()\n item=courses.Course(tem_list[0], tem_list[1],tem_list[2],tem_list[3],tem_list[4],line2)\n courses_list.append(item)\n line1=fi.readline().strip()\n fi.close()\n#读取课程信息\ndef write_student_file(file_name, students_list): #写入students数据\n fo=open(file_name, \"w\")\n for i in range(len(students_list)): \n item=students_list[i] \n str_result=\"\"\n str_result+=item.get_name()+\",\"+item.get_ID()+\",\"+item.get_gender()+\",\"+item.get_major()+\"\\n\"\n str_result+=str(item.get_course_number())+\"\\n\"\n for one in 
item.get_courses_list():\n str_result+=one+\"\\n\"\n fo.write(str_result)\n fo.flush()\n fo.close()\n#写入学生信息到新的列表\ndef write_courses_file(file_name,courses_list): #写入courses数据\n fo=open(file_name,\"w\")\n for i in range(len(courses_list)):\n item = courses_list[i]\n str_result=\"\"\n str_result+=item.get_course_name()+\",\"+item.get_course_ID()+\",\"+item.get_credit()+\",\"+item.get_teacher()+\",\"+item.get_classroom()+\"\\n\"\n str_result2=item.get_information()+\"\\n\"\n fo.write(str_result)\n fo.write(str_result2)\n fo.flush()\n fo.close()\n#写入课程信息到新文件\ndef search(courses_list):\n course_id=input(\"Please enter Course_ID: \")\n not_found=True\n i=0\n while i NUM_DECAY and NUM_DECAY > 0:\n # arithmetic/linear annealing\n new_lr = np.float32(sh_lr.get_value() - LEARNING_RATE/NUM_DECAY)\n print(\"New LR:\", new_lr)\n sh_lr.set_value(np.float32(new_lr))\n\n if (n + 1) % 5 == 0:\n # uncomment if to save the whole network\n save_dump('{}/epoch_{}.pkl'.format(output_folder, n), network_dump)\n\n if args.visualize:\n X_sample = generate_samples(generate, batch_size=tile**2)\n if args.binarize:\n X_sample = binarize(X_sample)\n save_path = '{}/sample_{}.png'.format(sample_folder, n)\n if args.dataset == 'MNIST':\n grayscale_grid_vis(X_sample[:tile**2], tiling, save_path)\n elif args.dataset == 'CIFAR10':\n color_grid_vis(X_sample[:tile**2], tiling, save_path)\n\nexcept KeyboardInterrupt:\n pass\n\nsave_dump('{}/final_epoch.pkl'.format(output_folder), network_dump)\n\nstats = np.stack([train_costs, val_dists], axis=1)\nplot_learning_curves(\n stats, legend=['train cost', 'val dist'], show_immediately=False,\n filename='{}/learn_curves'.format(output_folder)\n)","sub_path":"PixelRNN/train_pixelrnn.py","file_name":"train_pixelrnn.py","file_ext":"py","file_size_in_byte":9374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"41568543","text":"# ライブラリの読み込み\nimport pyaudio\nimport numpy as np\nimport threading\nimport time\n\n# matplotlib関連\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import Normalize\n\n# GUI関連\nimport tkinter\nfrom matplotlib.backends.backend_tkagg import (\n\tFigureCanvasTkAgg, NavigationToolbar2Tk)\n\n# mp3ファイルを読み込んで再生\nfrom pydub import AudioSegment\nfrom pydub.utils import make_chunks\n\n# サンプリングレート\nSAMPLING_RATE = 16000\n\n# フレームサイズ\nFRAME_SIZE = 4096\n\n# サイズシフト\nSHIFT_SIZE = int(SAMPLING_RATE / 20)\t# 今回は0.05秒\n\n# スペクトルをカラー表示する際に色の範囲を正規化するために\n# スペクトルの最小値と最大値を指定\n# スペクトルの値がこの範囲を超えると,同じ色になってしまう\nSPECTRUM_MIN = -5\nSPECTRUM_MAX = 10\n\n# log10を計算する際に,引数が0にならないようにするためにこの値を足す\nEPSILON = 1e-10\n\n# ハミング窓\nhamming_window = np.hamming(FRAME_SIZE)\n\n# グラフに表示する縦軸方向のデータ数\nMAX_NUM_SPECTROGRAM = int(FRAME_SIZE / 2)\n\n# グラフに表示する横軸方向のデータ数\nNUM_DATA_SHOWN = 150\n\n# GUIの開始フラグ(まだGUIを開始していないので、ここではFalseに)\nis_gui_running = False\n\n# 音高の幅\nLOW_PITCH = 55\nHIGH_PITCH = 75\n\n# 周波数の範囲\nLOW_FREQUENCY = 0\nHIGH_FREQUENCY = 1500\n\n#\n# (1) GUI / グラフ描画の処理\n#\n\n# ここでは matplotlib animation を用いて描画する\n# 毎回 figure や ax を初期化すると処理時間がかかるため\n# データを更新したら,それに従って必要な部分のみ再描画することでリアルタイム処理を実現する\n\n# matplotlib animation によって呼び出される関数\n# ここでは最新のスペクトログラムと音量のデータを格納する\n# 再描画はmatplotlib animationが行う\ndef animate(frame_index):\n\n\tax1_sub.set_array(spectrogram_data)\n\tax2_sub.set_data(time_x_data, sing_data)\t# マイク入力の音高\n\tax3_sub.set_data(time_x_data, melody_data)\t# 楽曲の音高\n\t\n\treturn ax1_sub, ax2_sub, ax3_sub\n\n# GUIで表示するための処理(Tkinter)\nroot = tkinter.Tk()\nroot.wm_title(\"粗雑採点DX(採点はしません)\")\n\n# 
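The `animate`/`blit=True` structure of the karaoke GUI record, in a minimal self-contained form; a sine wave stands in for the spectrogram data:

```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()
x = np.linspace(0, 2 * np.pi, 200)
(line,) = ax.plot(x, np.sin(x))

def animate(frame):
    line.set_ydata(np.sin(x + 0.1 * frame))  # update data only; axes are reused
    return (line,)                           # blit=True redraws just these artists

ani = animation.FuncAnimation(fig, animate, interval=100, blit=True)
plt.show()
```

Updating artists in place and returning them is what lets blitting skip the expensive full-figure redraw, exactly as the record's `animate` does with `ax1_sub`, `ax2_sub`, and `ax3_sub`.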
スペクトログラムを描画\nfig, ax1 = plt.subplots(1, 1)\ncanvas = FigureCanvasTkAgg(fig, master=root)\n\n# 横軸の値のデータ\ntime_x_data = np.linspace(0, NUM_DATA_SHOWN * (SHIFT_SIZE/SAMPLING_RATE), NUM_DATA_SHOWN)\n# 縦軸の値のデータ\nfreq_y_data = np.linspace(8000/MAX_NUM_SPECTROGRAM, 8000, MAX_NUM_SPECTROGRAM)\n\n# とりあえず初期値(ゼロ)のスペクトログラムと音高のデータを作成\n# この numpy array にデータが更新されていく\nspectrogram_data = np.zeros((len(freq_y_data), len(time_x_data)))\nsing_data = np.zeros(len(time_x_data))\nmelody_data = np.zeros(len(time_x_data))\n\n# スペクトログラムを描画する際に横軸と縦軸のデータを行列にしておく必要がある\n# これは下記の matplotlib の pcolormesh の仕様のため\nX = np.zeros(spectrogram_data.shape)\nY = np.zeros(spectrogram_data.shape)\nfor idx_f, f_v in enumerate(freq_y_data):\n\tfor idx_t, t_v in enumerate(time_x_data):\n\t\tX[idx_f, idx_t] = t_v\n\t\tY[idx_f, idx_t] = f_v\n\n# pcolormeshを用いてスペクトログラムを描画\n# 戻り値はデータの更新 & 再描画のために必要\nax1_sub = ax1.pcolormesh(\n\tX,\n\tY,\n\tspectrogram_data,\n\tshading='nearest',\t# 描画スタイル\n\tcmap='BuPu',\t\t\t# カラーマップ\n\talpha=0.7, \t# 透明度\n\tnorm=Normalize(SPECTRUM_MIN, SPECTRUM_MAX)\t# 値の最小値と最大値を指定して,それに色を合わせる\n)\n\n# 音高を表示するために反転した軸を作成\nax2 = ax1.twinx()\nax3 = ax1.twinx()\n\n# 音高をプロットする\n# 戻り値はデータの更新 & 再描画のために必要\nax2_sub, = ax2.plot(time_x_data, sing_data, c='y', linewidth = 5.0)\nax3_sub, = ax3.plot(time_x_data, melody_data, c='#D3D3D3', linewidth = 5.0)\n\n# ラベルの設定\nax1.set_xlabel('sec')\t\t\t\t# x軸のラベルを設定\nax1.set_ylabel('frequency [Hz]')\t# y軸のラベルを設定\n\n# y軸の範囲の設定\nax1.set_ylim([LOW_FREQUENCY, HIGH_FREQUENCY])\nax2.set_ylim([LOW_FREQUENCY, HIGH_FREQUENCY])\nax3.set_ylim([LOW_FREQUENCY, HIGH_FREQUENCY])\n\n# maplotlib animationを設定\nani = animation.FuncAnimation(\n\tfig,\n\tanimate,\t\t# 再描画のために呼び出される関数\n\tinterval=100,\t# 100ミリ秒間隔で再描画を行う(PC環境によって処理が追いつかない場合はこの値を大きくするとよい)\n\tblit=True\t\t# blitting処理を行うため描画処理が速くなる\n)\n\n# matplotlib を GUI(Tkinter) に追加する\ntoolbar = NavigationToolbar2Tk(canvas, root)\ncanvas.get_tk_widget().pack()\n\n# 再生位置をテキストで表示するためのラベルを作成\ntext = tkinter.StringVar()\ntext.set('0.0')\nlabel = tkinter.Label(master=root, textvariable=text, font=(\"\", 30))\nlabel.pack()\n\n# 終了ボタンが押されたときに呼び出される関数\n# ここではGUIを終了する\ndef _quit():\n\troot.quit()\n\troot.destroy()\n\n# 終了ボタンを作成\nbutton = tkinter.Button(master=root, text=\"終了\", command=_quit, font=(\"\", 30))\nbutton.pack()\n\n\n#\n# (2) マイク入力のための処理\n#\n\nx_stacked_data = np.array([])\n\n# ノートナンバーから周波数へ\ndef nn2hz(notenum):\n\treturn 440.0 * (2.0 ** ((notenum - 69) / 12.0))\n\n# 周波数からノートナンバーへ\ndef hz2nn(frequency):\n\treturn int (round (12.0 * (np.log(frequency / 440.0) / np.log (2.0)))) + 69\n\n# ノートナンバーから対応する全高調波成分の振幅の総和を計算\ndef calc_melody_likelihood(spectrum, notenum):\n\tfundamental_frequency = nn2hz(notenum)\t#ノートナンバーに対応する周波数\n\tmelody_likelihood = 0\n\n\t# 基本周波数\n\ts_idx = int(len(spectrum) * (fundamental_frequency / (SAMPLING_RATE/2)))\n\tmelody_likelihood += spectrum[s_idx]\n\t# 2倍音\n\ts_idx = int(len(spectrum) * (fundamental_frequency * 2 / (SAMPLING_RATE/2)))\n\tmelody_likelihood += spectrum[s_idx] * 0.75\n\t# 3倍音\n\ts_idx = int(len(spectrum) * (fundamental_frequency * 3 / (SAMPLING_RATE/2)))\n\tmelody_likelihood += spectrum[s_idx] * 0.50\n\n\treturn melody_likelihood\n\n# 音声波形データを受け取り,ゼロ交差数を計算する関数\ndef zero_cross(waveform):\n\n\tzc = 0\n\n\tfor i in range(len(waveform) - 1):\n\t\tif(\n\t\t\t(waveform[i] > 0.0 and waveform[i+1] < 0.0) or\n\t\t\t(waveform[i] < 0.0 and waveform[i+1] > 0.0)\n\t\t):\n\t\t\tzc += 1\n\n\treturn zc * SAMPLING_RATE / FRAME_SIZE \t# 単位時間あたりに変換\n\n# フレーム毎に呼び出される関数\ndef input_callback(in_data, frame_count, time_info, 
status_flags):\n\t\n\t# この関数は別スレッドで実行するため\n\t# メインスレッドで定義した以下の numpy array を利用できるように global 宣言する\n\t# これらにはフレーム毎のスペクトルと音量のデータが格納される\n\tglobal x_stacked_data, sing_data\n\n\t# 現在のフレームの歌声データをnumpy arrayに変換\n\tx_current_frame = np.frombuffer(in_data, dtype=np.float32)\n\n\t# 現在のフレームとこれまでに入力されたフレームを連結\n\tx_stacked_data = np.concatenate([x_stacked_data, x_current_frame])\n\n\t# フレームサイズ分のデータがあれば処理を行う\n\tif len(x_stacked_data) >= FRAME_SIZE:\n\t\t\n\t\t# フレームサイズからはみ出した過去のデータは捨てる\n\t\tx_stacked_data = x_stacked_data[len(x_stacked_data)-FRAME_SIZE:]\n\n\t\t# スペクトルを計算\n\t\tfft_spec = np.fft.rfft(x_stacked_data * hamming_window)\n\t\tfft_abs_spec = np.abs(fft_spec)\n\n\t\t# 対数振幅スペクトログラムから、ノートナンバーに対応する周波数の振幅の総和を取得\n\t\tframe_likelihood = 0\n\t\tframe_index = 0\n\t\tfor i in range(LOW_PITCH, HIGH_PITCH):\n\t\t\tmelody_likelihood = calc_melody_likelihood(fft_abs_spec, i)\n\t\t\tif melody_likelihood > frame_likelihood :\n\t\t\t\tframe_index = i\n\t\t\t\tframe_likelihood = melody_likelihood\n\n\t\tsing_data = np.roll(sing_data, -1)\n\t\t# 音量が閾値未満ならNanを配列に追加し、グラフ内に表示させない\n\t\tvol = 20 * np.log10(np.mean(x_current_frame ** 2) + EPSILON)\n\t\tif vol < -100 :\n\t\t\tsing_data[-1] = np.nan\n\t\telse :\n\t\t\tsing_data[-1] = nn2hz(frame_index)\n\t\n\t# 戻り値は pyaudio の仕様に従うこと\n\treturn None, pyaudio.paContinue\n\n# マイクからの音声入力にはpyaudioを使用\n# ここではpyaudioの再生ストリームを作成\n# 【注意】シフトサイズごとに指定された関数が呼び出される\np = pyaudio.PyAudio()\nstream = p.open(\n\tformat = pyaudio.paFloat32,\n\tchannels = 1,\n\trate = SAMPLING_RATE,\n\tinput = True,\t\t\t\t\t\t# ここをTrueにするとマイクからの入力になる \n\tframes_per_buffer = SHIFT_SIZE,\t\t# シフトサイズ\n\tstream_callback = input_callback\t# ここでした関数がマイク入力の度に呼び出される(frame_per_bufferで指定した単位で)\n)\n\n\n#\n# (3) mp3ファイル音楽を再生する処理\n#\n\naudio_stacked_data = np.array([])\n\n# mp3ファイル名\n# ここは各自の音源ファイルに合わせて変更すること\nfilename = './mp3/hotaru_no_hikari.mp3'\n\n# pydubを使用して音楽ファイルを読み込む\naudio_data = AudioSegment.from_mp3(filename)\n\n# 音声ファイルの再生にはpyaudioを使用\n# ここではpyaudioの再生ストリームを作成\np_play = pyaudio.PyAudio()\nstream_play = p_play.open(\n\tformat = p.get_format_from_width(audio_data.sample_width),\t# ストリームを読み書きするときのデータ型\n\tchannels = audio_data.channels,\t\t\t\t\t\t\t\t# チャネル数\n\trate = audio_data.frame_rate,\t\t\t\t\t\t\t\t# サンプリングレート\n\toutput = True\t\t\t\t\t\t\t\t\t\t\t\t# 出力モードに設定\n)\n\n# pydubで読み込んだ音楽ファイルを再生する部分のみ関数化する\n# 別スレッドで実行するため\ndef play_music():\n\n\t# この関数は別スレッドで実行するため\n\t# メインスレッドで定義した以下の変数を利用できるように global 宣言する\n\tglobal is_gui_running, audio_data, now_playing_sec\n\tglobal spectrogram_data, audio_stacked_data, melody_data\n\n\t# pydubのmake_chunksを用いて音楽ファイルのデータを切り出しながら読み込む\n\t# 第二引数には何ミリ秒毎に読み込むかを指定\n\n\tsize_frame_music = 100\t# 100ミリ秒毎に読み込む\n\n\tidx = 0\n\n\t# make_chunks関数を使用して一定のフレーム毎に音楽ファイルを読み込む\n\n\t# フレーム毎に計算し、楽曲のスペクトログラム配列に加える\n\t# 再生位置も計算する\n\tfor chunk in make_chunks(audio_data, size_frame_music):\n\t\t\n\t\t# GUIが終了してれば,この関数の処理も終了する\n\t\tif is_gui_running == False:\n\t\t\tbreak\n\n\t\t# 現在のフレームの楽曲データをnumpy arrayに変換\n\t\taudio_current_frame = chunk.get_array_of_samples()\n\n\t\t# 片側だけの音を抽出\n\t\taudio_current_frame = audio_current_frame[::chunk.channels]\n\n\t\t# 現在のフレームとこれまでに入力されたフレームを連結\n\t\taudio_stacked_data = np.concatenate([audio_stacked_data, audio_current_frame])\n\t\t\n\t\t# フレームサイズ分のデータがあれば処理を行う\n\t\tif len(audio_stacked_data) >= FRAME_SIZE:\n\n\t\t\t# フレームサイズからはみ出した分は捨てる\n\t\t\taudio_stacked_data = audio_stacked_data[len(audio_stacked_data)-FRAME_SIZE:]\n\n\t\t\t# スペクトルを計算\n\t\t\tfft_spec = np.fft.rfft(audio_stacked_data * hamming_window)\n\t\t\tfft_abs_spec = 
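Round-trip and vectorization checks for the pitch helpers defined in this record (`nn2hz`, `hz2nn`, `zero_cross`); the NumPy form assumes the frame length plays the role of `FRAME_SIZE`:

```python
import math
import numpy as np

def nn2hz(nn):                        # MIDI note number -> frequency (A4 = 69 = 440 Hz)
    return 440.0 * 2.0 ** ((nn - 69) / 12.0)

def hz2nn(hz):                        # inverse, rounded to the nearest semitone
    return int(round(12.0 * math.log2(hz / 440.0))) + 69

assert hz2nn(nn2hz(60)) == 60         # the mapping round-trips on note numbers
assert abs(nn2hz(69) - 440.0) < 1e-9

def zero_cross_rate(frame, sr):
    # Vectorized form of the record's loop: a strict sign change between
    # consecutive samples is exactly a negative product of their signs.
    s = np.sign(frame)
    return np.count_nonzero(s[:-1] * s[1:] < 0) * sr / len(frame)
```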
np.abs(fft_spec)\n\t\t\tfft_log_abs_spec = np.log10(fft_abs_spec + EPSILON)[:-1]\n\n\t\t\t# 2次元配列上で列方向(時間軸方向)に1つずらし(戻し)\n\t\t\t# 最後の列(=最後の時刻のスペクトルがあった位置)に最新のスペクトルデータを挿入\n\t\t\tspectrogram_data = np.roll(spectrogram_data, -1, axis=1)\n\t\t\tspectrogram_data[:, -1] = fft_log_abs_spec\n\n\t\t\t# 対数振幅スペクトログラムから、ノートナンバーに対応する周波数の振幅の総和を取得\n\t\t\tframe_likelihood = 0\n\t\t\tframe_index = 0\n\t\t\tfor i in range(LOW_PITCH, HIGH_PITCH):\n\t\t\t\tmelody_likelihood = calc_melody_likelihood(fft_abs_spec, i)\n\t\t\t\tif melody_likelihood > frame_likelihood :\n\t\t\t\t\tframe_index = i\n\t\t\t\t\tframe_likelihood = melody_likelihood\n\t\t\tmelody_data = np.roll(melody_data, -1)\n\t\t\tmelody_data[-1] = nn2hz(frame_index)\n\n\t\t\tmelody_data = np.roll(melody_data, -1)\n\t\t\t# ゼロ交差数をカウント\n\t\t\tzero_count = zero_cross(audio_stacked_data * hamming_window)\n\t\t\t# 閾値を設定。ゼロ交差数が範囲外の場合基本周波数を0とする。\n\t\t\tif zero_count < 800 or zero_count > 3000:\n\t\t\t\tmelody_data[-1] = np.nan\n\t\t\telse :\n\t\t\t\tmelody_data[-1] = nn2hz(frame_index)\n\n\t\t# pyaudioの再生ストリームに切り出した音楽データを流し込む\n\t\t# 再生が完了するまで処理はここでブロックされる\n\t\tstream_play.write(chunk._data)\n\n\t\t# 現在の再生位置を計算(単位は秒)\n\t\tnow_playing_sec = (idx * size_frame_music) / 1000.\n\t\t\n\t\tidx += 1\n\n# 再生時間の表示を随時更新する関数\ndef update_gui_text():\n\n\tglobal is_gui_running, now_playing_sec, text\n\n\twhile True:\n\t\t\n\t\t# GUIが表示されていれば再生位置(秒)をテキストとしてGUI上に表示\n\t\tif is_gui_running:\n\t\t\ttext.set('%.3f' % now_playing_sec)\n\t\t\n\t\t# 0.01秒ごとに更新\n\t\ttime.sleep(0.01)\n\n# 再生時間を表す\nnow_playing_sec = 0.0\n\n# 音楽を再生するパートを関数化したので,それを別スレッドで(GUIのため)再生開始\nt_play_music = threading.Thread(target=play_music)\nt_play_music.setDaemon(True)\t# GUIが消されたときにこの別スレッドの処理も終了されるようにするため\nt_play_music.start()\n\n# 再生時間の表示を随時更新する関数を別スレッドで開始\nt_update_gui = threading.Thread(target=update_gui_text)\nt_update_gui.setDaemon(True)\t# GUIが消されたときにこの別スレッドの処理も終了されるようにするため\nt_update_gui.start()\n\n#\n# (4) 全体の処理��実行\n#\n\n# GUIの開始フラグをTrueに\nis_gui_running = True\n\n# GUIを開始,GUIが表示されている間は処理はここでストップ\ntkinter.mainloop()\n\n# GUIの開始フラグをFalseに = 音楽再生スレッドのループを終了\nis_gui_running = False\n\n# 終了処理\nstream_play.stop_stream()\nstream_play.close()\np_play.terminate()","sub_path":"task/task2/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":14521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"484662446","text":"#! 
/usr/bin/env python3\n\nimport sys\n\ncolors = {\"red\": '\\033[91m',\n \"yellow\": '\\033[93m',\n \"green\": '\\033[92m',\n \"cyan\": '\\033[0;36m',\n \"blue\": '\\033[94m',\n \"magenta\": '\\033[0;35m',\n \"reset\": '\\033[0m',\n \"bold\": \"\\033[1m\"}\n\ncolor_order = [\"blue\", \"red\", \"green\", \"yellow\", \"cyan\", \"magenta\", \"bold\", \"reset\"]\n\ndef get_color_order():\n return color_order\n\nif sys.platform != \"win32\" and sys.stdout.isatty() and not 'idlelib.run' in sys.modules:\n def colored(expression, color=\"reset\"):\n try: color = colors[color]\n except: color = colors[\"reset\"]\n return color + expression + colors[\"reset\"]\nelse:\n def colored(expression, color=\"reset\"):\n return \"%s!%s\" % (color.title(), expression,)\n\ndef uncolored(expression):\n for key, value in colors.items():\n if key != \"reset\":\n expression = expression.replace(value, key.title() + \"!\")\n else:\n expression = expression.replace(value, \"\")\n return expression\n","sub_path":"clip_rpg/termcolor.py","file_name":"termcolor.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"526604381","text":"from django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django import forms\nfrom hackathon.models import UserProfile\n\n\n\n\n\n#class UserForm(forms.ModelForm):\n# password = forms.CharField(widget=forms.PasswordInput())\n#\n# class Meta:\n# model = User\n# fields = ('username', 'email', 'password')\n\n\nclass SignupForm(forms.ModelForm):\n #wsUser = forms.CharField(max_length=40, label='WS Username')\n #wsPwd = forms.CharField(max_length=40, label='WS Password')\n def __init__(self, *args, **kwargs):\n super(SignupForm, self).__init__(*args, **kwargs)\n \n \n def signup(self, request, user):\n wsData = UserProfile()\n wsData.user = user\n \n wsData.zipcode = self.cleaned_data['zipcode']\n wsData.age = self.cleaned_data['age']\n wsData.currently_employed = self.cleaned_data['currently_employed']\n wsData.paid_job_before = self.cleaned_data['paid_job_before']\n wsData.education_level = self.cleaned_data['education_level']\n \n #wsData.wsUser = self.cleaned_data['wsUser']\n #wsData.wsPwd = self.cleaned_data['wsPwd']\n \n wsData.save()\n class Meta:\n model = UserProfile\n fields = ('zipcode', 'age', 'currently_employed', 'paid_job_before', 'education_level' )\n #fields = ('wsUser', 'wsPwd',)\n \n \n\n","sub_path":"hackathon_starter/hackathon/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"307020337","text":"import pytest\nfrom selenium import webdriver\nimport unittest\nimport os\nimport sys\nimport pytest\nimport time\n\nclass FunctionalTests(unittest.TestCase):\n\n\tdef setUp(self):\n\t\toptions = webdriver.ChromeOptions()\n\t\toptions.add_argument('--no-sandbox')\n\t\tself.driver = webdriver.Chrome(os.path.join(os.environ[\"ChromeWebDriver\"], 'chromedriver.exe'), chrome_options=options)\n\t\tself.driver.implicitly_wait(300)\n\n\tdef test_selenium(self):\n\t\twebAppUrl = pytest.config.getoption('webAppUrl')\n\t\tstart_timestamp = time.time()\n\t\tend_timestamp = start_timestamp + 60*10\t\t\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tresponse = self.driver.get(webAppUrl)\n\t\t\t\ttitle = self.driver.title\n\t\t\t\tself.assertIn(\"Home Page - Python Bottle Application\", title)\n\t\t\t\tbreak\n\t\t\texcept Exception as 
e:\n\t\t\t\tcurrent_timestamp = time.time()\n\t\t\t\tif(current_timestamp > end_timestamp):\n\t\t\t\t\tprint('\"##vso[task.logissue type=error;]Test test_selenium failed with error: ' + str(e))\n\t\t\t\t\traise\n\t\t\t\ttime.sleep(5)\n\n\tdef tearDown(self):\n\t\ttry:\n\t\t\tself.driver.quit()\n\t\texcept Exception as e:\n\t\t\tprint('tearDown.Error occurred while trying to close the selenium chrome driver: ' + str(e))\n","sub_path":"python/bottle/webappWithTests/Tests/functional_tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"119569115","text":"from django.conf.urls import url\n\nfrom nutsy.profiles.views import (GithubAuthHook, ListCreateOrganizations, CreateInvitations,\n RetrieveUpdateAuthenticatedUser, Unsubscribe)\n\n\nurlpatterns = [\n url(r'^users/me/$', RetrieveUpdateAuthenticatedUser.as_view(), name='auth_user_detail'),\n url(r'^organizations/$', ListCreateOrganizations.as_view(), name='organizations_list_create'),\n url(r'^invitations/$', CreateInvitations.as_view(), name='invitations_create'),\n url(r'^hooks/github/$', GithubAuthHook.as_view(), name='github_auth_hook'),\n url(r'^mail/unsubscribe/$', Unsubscribe.as_view(), name='user_mail_unsubscribe')\n]\n\n\n\n","sub_path":"nutsy/profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"444038069","text":"#!/usr/bin/env python\n\nfrom gi.repository import Gtk, Clutter, GtkClutter, GLib, GObject\n\ntry:\n from .srtsrc import *\nexcept ValueError:\n from srtsrc import *\n\ndef on_button_clicked(button, src):\n src.reset()\n return True\n\ndef on_stage_button_press(stage, event, src):\n x, y = stage.get_size()\n az = event.x / x * 360\n el = (y - event.y) / y * 90\n try:\n src.cmd_coor = [az, el]\n except ValueError:\n pass\n return True\n\ndef main():\n src = SrtSrc()\n src.connect('error', print)\n # this can take sometime.\n if not src.conn():\n return\n src.reset()\n GtkClutter.init([])\n window = Gtk.Window()\n vbox = Gtk.VBox(False, 6)\n window.add(vbox)\n vbox.show()\n button = Gtk.Button.new_with_label(\"Reset\")\n vbox.pack_end(button, False, False, 0)\n button.show()\n clutter_widget = GtkClutter.Embed()\n vbox.pack_start(clutter_widget, True, True, 0)\n clutter_widget.show()\n clutter_widget.set_size_request(200, 200)\n stage = clutter_widget.get_stage()\n color = Clutter.Color()\n color.from_string('#000000ff')\n stage.set_color(color)\n stage.show()\n button.connect('clicked', on_button_clicked, src)\n window.connect('hide', Gtk.main_quit, None)\n stage.connect('button-press-event', on_stage_button_press, src)\n window.show()\n Gtk.main()\n src.disconn()\n\nif __name__ == '__main__':\n main()\n","sub_path":"21cm/srtctrl/srtgui.py","file_name":"srtgui.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"329683854","text":"'''\nInput: a List of integers\nReturns: a List of integers\n'''\n# def product_of_all_other_numbers(arr):\n# # Your code here\n# current_index = 0\n# new_array = []\n# # Start counter loop\n# while current_index < len(arr):\n# # Counter for inside loop\n# inside_index = 0\n# result = 1\n# # loop over array for each item\n# while inside_index < len(arr):\n# # If the inside index is the same amount of times as the current 
index\n#             if inside_index == current_index:\n#                 # Skip the element at the current index, then increment the inside counter by 1\n#                 inside_index += 1\n#             else:\n#                 # Multiply the element into the result, then increment the inside counter by 1\n#                 result = result * arr[inside_index]\n#                 inside_index += 1\n#         # Append result to new array\n#         new_array.append(result)\n#         # Increment index for outer loop by 1\n#         current_index += 1\n#     return new_array\n\n# Optimized\ndef product_of_all_other_numbers(arr):\n    new_array = [1]\n    length = len(arr)\n\n    # Run forwards through the list multiplying by the cumulative product of the elements before\n    running_product = 1\n    for i in arr[:-1]: \n        running_product *= i\n        new_array.append(running_product)\n\n    # Run backwards through the list multiplying by the cumulative product of the elements after\n    running_product = 1\n    for i in range(length-1, 0, -1):\n        running_product *= arr[i]\n        new_array[i - 1] *= running_product\n    \n    return new_array\n\n\nif __name__ == '__main__':\n    # Use the main function to test your implementation\n    # arr = [1, 2, 3, 4, 5]\n    arr = [2, 6, 9, 8, 2, 2, 9, 10, 7, 4, 7, 1, 9, 5, 9, 1, 8, 1, 8, 6, 2, 6, 4, 8, 9, 5, 4, 9, 10, 3, 9, 1, 9, 2, 6, 8, 5, 5, 4, 7, 7, 5, 8, 1, 6, 5, 1, 7, 7, 8]\n\n    print(f\"Output of product_of_all_other_numbers: {product_of_all_other_numbers(arr)}\")\n","sub_path":"product_of_all_other_numbers/product_of_all_other_numbers.py","file_name":"product_of_all_other_numbers.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"594406442","text":"'''\n713. Subarray Product Less Than K\nUser Accepted: 761\nUser Tried: 1063\nTotal Accepted: 785\nTotal Submissions: 2431\nDifficulty: Medium\nYou are given an array of positive integers nums.\n\nCount and print the number of (contiguous) subarrays where the product of all the elements in the subarray is less than k.\n\nExample 1:\nInput: nums = [10, 5, 2, 6], k = 100\nOutput: 8\nExplanation: The 8 subarrays that have product less than 100 are: [10], [5], [2], [6], [10, 5], [5, 2], [2, 6], [5, 2, 6].\nNote that [10, 5, 2] is not included, as its product of 100 is not strictly less than k.\nNote:\n\n0 < nums.length <= 50000.\n0 < nums[i] < 1000.\n0 <= k < 10^6.\n\n74 / 84 test cases passed. Will raise TLE on some cases;\nhowever, the same solution in C++ is accepted.\n'''\n\nclass Solution:\n    '''\n    Count and print the number of (contiguous) subarrays where the product of all the elements\n    in the subarray is less than k.\n    '''\n    def numSubarrayProductLessThanK(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: int\n        \"\"\"\n        N = len(nums)\n        dp, cnt = [], 0\n        for n in nums:\n            dp2 = []\n            if n < k:\n                cnt += 1\n                dp2.append(n)\n            for m in dp:\n                t = m * n\n                if t < k:\n                    cnt += 1\n                    dp2.append(t)\n            dp = dp2\n        return cnt\n","sub_path":"LeetCodeContests/55/count_subarray_product_less_then_k.py","file_name":"count_subarray_product_less_then_k.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"139736185","text":"import pytest\nfrom unittest.mock import create_autospec\n\nfrom django_swagger_utils.drf_server.exceptions import NotFound\n\nfrom essentials_kit_management.interactors.storages.user_storage_interface \\\n    import UserStorageInterface\nfrom essentials_kit_management.interactors.presenters.user_presenter_interface \\\n    import
UserPresenterInterface\n\nfrom essentials_kit_management.interactors. \\\n create_transaction_request_interactor import \\\n CreateTransactionRequestInteractor\n\nfrom essentials_kit_management.interactors.storages.dtos import \\\n TransactionRequestDto\n# i need to add test cases for it\n\ndef test_create_transaction_request_interactor():\n\n # Arrange\n user_id = 1\n amount_paid=10001\n transaction_id = 1234567890\n transaction_dict = {\n \"amount_paid\": amount_paid,\n \"transaction_type\": \"PAYTM\",\n \"transaction_id\": transaction_id,\n \"transaction_screenshot\": \"screenshot/photo.png\"\n }\n expected_transaction_request_dto = TransactionRequestDto(\n amount_paid=10001,\n transaction_type=\"PAYTM\",\n transaction_id=transaction_id,\n transaction_screenshot=\"screenshot/photo.png\"\n )\n\n user_storage = create_autospec(UserStorageInterface)\n user_presenter = create_autospec(UserPresenterInterface)\n interactor = CreateTransactionRequestInteractor(\n user_storage=user_storage,\n user_presenter=user_presenter)\n user_storage.is_transaction_id_exists.return_value = False\n\n # Act\n interactor.create_transaction_request(user_id=user_id,\n transaction_dict=transaction_dict)\n \n\n # Assert\n user_storage.is_valid_payment.assert_called_once_with(amount=amount_paid)\n user_storage.is_transaction_id_exists.assert_called_once_with(\n transaction_id=transaction_id)\n user_storage.create_transaction_request_db.assert_called_once()\n\n\ndef test_create_transaction_request_interactor_with_invalid_payment_raise_exception(\n ):\n\n # Arrange\n user_id = 1\n invalid_payment = -10\n transaction_dict = {\n \"amount_paid\": invalid_payment,\n \"transaction_type\": \"PAYTM\",\n \"transaction_id\": 1234567890,\n \"transaction_screenshot\": \"screenshot/photo.png\"\n }\n\n user_storage = create_autospec(UserStorageInterface)\n user_presenter = create_autospec(UserPresenterInterface)\n interactor = CreateTransactionRequestInteractor(\n user_storage=user_storage,\n user_presenter=user_presenter)\n user_storage.is_valid_payment.return_value = False\n user_presenter.raise_invalid_payment_exception.side_effect = NotFound\n\n # Act\n with pytest.raises(NotFound):\n interactor.create_transaction_request(\n user_id=user_id,\n transaction_dict=transaction_dict)\n\n\ndef test_create_transaction_request_interactor_with_existed_transaction_id_raise_exception(\n ):\n\n # Arrange\n user_id = 1\n invalid_payment = 10\n transaction_dict = {\n \"amount_paid\": invalid_payment,\n \"transaction_type\": \"PAYTM\",\n \"transaction_id\": 1234567890,\n \"transaction_screenshot\": \"screenshot/photo.png\"\n }\n\n user_storage = create_autospec(UserStorageInterface)\n user_presenter = create_autospec(UserPresenterInterface)\n interactor = CreateTransactionRequestInteractor(\n user_storage=user_storage,\n user_presenter=user_presenter)\n\n user_storage.is_transaction_id_exists.return_value = True\n user_presenter.raise_transaction_id_already_exist_exception.side_effect = \\\n NotFound\n\n # Act\n with pytest.raises(NotFound):\n interactor.create_transaction_request(\n user_id=user_id,\n transaction_dict=transaction_dict)\n","sub_path":"essentials_kit_management/tests/interactors/test_create_transaction_request_interactor.py","file_name":"test_create_transaction_request_interactor.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"476104906","text":"#!/usr/bin/env python\nimport msgflo\nimport logging\nimport requests\n\nlog = 
logging.getLogger(__name__)\n\nclass blockchain(msgflo.Participant):\n def __init__(self, role):\n d = {\n 'component': 'eva/blockchain',\n 'label': '',\n 'icon': 'link',\n 'inports': [\n { 'id': 'in', 'type': 'int' },\n { 'id': 'name', 'type': 'string' }\n ],\n 'outports': [\n { 'id': 'history', 'type': 'object' },\n ],\n \n }\n self.name = 'temp'\n msgflo.Participant.__init__(self, d, role)\n\n def process(self, inport, msg):\n if inport == 'name':\n self.name = msg.data\n self.ack(msg)\n return\n \n if inport == 'in':\n url = 'https://5560be7a10ba4ceea786ecb2b4ade5aa-vp0.us.blockchain.ibm.com:5002/registrar'\n resp = requests.post(url, json={\n \t\t\"enrollId\": \"admin\",\n\t\t\"enrollSecret\": \"f9c649a1ef\"\n\t })\n if 'OK' in resp.json().keys():\n blockchain_data = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"invoke\",\n \"params\": {\n \"type\": 1,\n \"chaincodeID\": {\n \"name\": \"3aeb9793d67968f966f2b093c361c70cdbf7a2813a02f7a5da344386580d3b519899b73003b335c587e3d016d44b54eb7d8030bddddbc3e9abf05db81c20eaef\"\n },\n \"ctorMsg\": {\n \"function\": \"write\",\n \"args\": [\n self.name, str(msg.data)\n ]\n },\n \"secureContext\": \"admin\"\n },\n \"id\": 3\n }\n url2 = 'https://5560be7a10ba4ceea786ecb2b4ade5aa-vp0.us.blockchain.ibm.com:5002/chaincode'\n resp2 = requests.post(url2, json=blockchain_data)\n self.send('history', resp2.json())\n self.ack(msg)\n\nif __name__ == '__main__':\n msgflo.main(blockchain)\n","sub_path":"components/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"245337220","text":"#!/bin/python3\n\n#\n# Url: https://www.hackerrank.com/challenges/python-arithmetic-operators/problem\n#\n# Title: Arithmetic Operators\n#\n\n\nif __name__ == '__main__':\n a = int(input())\n b = int(input())\n\n soma = a + b\n diferenca = a - b\n multiplic = a * b\n\n print(soma)\n print(diferenca) \n print(multiplic)","sub_path":"python-arithmetic-operators/python-arithmetic-operators.py","file_name":"python-arithmetic-operators.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"447378970","text":"import numpy as np\n\nimport random as rand\n\n\ndef get_surrounding(field, row, col, value):\n \"\"\"Finds how many mines are in surrounding squares of a certain coordinate\"\"\"\n count = 0\n # checks surrounding squares\n for i in range(row - 1, row + 2):\n for k in range(col - 1, col + 2):\n # checks if the current coordinate is on the field and is the value\n if 0 <= i < field.shape[0] and 0 <= k < field.shape[1] and field[i, k] == value:\n count += 1\n return count\n\n\ndef generate_num_field(field):\n \"\"\"Generates an array of the number of surrounding mines\"\"\"\n num_field = np.zeros(field.shape, dtype=int)\n for i in range(field.shape[0]):\n for k in range(field.shape[1]):\n num_field[i, k] = get_surrounding(field, i, k, 1)\n return num_field\n\n\ndef generate_minefield(dimension, num_mines, starting): # dimension and starting are [row, col], ADD STARTING\n \"\"\"Generates an array where 0 = empty and 1 = bomb\"\"\"\n\n field = np.zeros(dimension, dtype=int) # generates empty field\n # squares_left accounts for squares surrounding the starting point that must be mine free\n squares_left = int(dimension[0]*dimension[1]) - get_surrounding(field, starting[0], starting[1], 0)\n\n # places mines randomly, iterating through every square\n for row in 
range(dimension[0]):\n for col in range(dimension[1]):\n if num_mines == 0: # when all mines have been placed\n break\n if abs(row - starting[0]) <= 1 and abs(col - starting[1]) <= 1: # no mines surrounding the starting point\n pass\n elif rand.randint(1, squares_left) <= num_mines: # num_mine/squares_left % chance of mine\n field[row, col] = 1\n num_mines -= 1\n squares_left -= 1\n return field\n\n\ndef generate_user_field(dimension):\n \"\"\"Generates the field the user sees, ? = hidden squares, ! = flagged, 0-9 = num surrounding mines\"\"\"\n return np.array([[\"?\" for i in range(dimension[1])] for k in range(dimension[0])])\n\n\ndef flood_reveal(user_field, num_field, row, col):\n \"\"\"Reveals empty squares on player's choice row col by using recursive flood fill\"\"\"\n # if the square has already been revealed\n if user_field[row, col] != \"?\":\n return\n # if there are mines surrounding the square and it's not adjacent to an already revealed empty square\n elif num_field[row, col] != 0 and get_surrounding(user_field, row, col, \"0\") == 0:\n return\n else:\n # gets the number for surrounding mines from num_field\n user_field[row, col] = str(num_field[row, col])\n # if the coordinates are in bound, then recurse in 4 directions (up, down, left, right)\n if row != np.shape(user_field)[0] - 1:\n flood_reveal(user_field, num_field, row + 1, col)\n if row != 0:\n flood_reveal(user_field, num_field, row - 1, col)\n if col != np.shape(user_field)[1] - 1:\n flood_reveal(user_field, num_field, row, col + 1)\n if col != 0:\n flood_reveal(user_field, num_field, row, col - 1)\n return\n\n\ndef user_input_coords(string):\n \"\"\"Converts user coordinate input from string to list\"\"\"\n return list(map(int, input(string).split()))\n\n\ndef print_array(array):\n \"\"\"Prints 2d array without all the quotes and brackets\"\"\"\n # prints top x axis\n print(\" \", end=\"\")\n for i in range(array.shape[1]):\n if i < 10: # accounts for how one/two digit numbers take up different spaces\n print(\"\\033[94m\" + str(i) + \"\\033[0m\", end=\" \") # axis are blue :)\n else:\n print(\"\\033[94m\" + str(i) + \"\\033[0m\", end=\" \")\n print()\n # prints array and left y axis\n for i in range(array.shape[0]):\n if i < 10: # accounts for how one/two digit numbers take up different spaces\n print(\"\\033[94m\" + str(i) + \"\\033[0m\", end=\" \")\n else:\n print(\"\\033[94m\" + str(i) + \"\\033[0m\", end=\" \")\n for k in array[i]:\n if k == \"?\":\n print(\"\\033[91m\" + k + \"\\033[0m\", end=\" \")\n elif k == \"!\":\n print(\"\\033[93m\" + k + \"\\033[0m\", end=\" \")\n elif k != \"0\":\n print(\"\\033[32m\" + k + \"\\033[0m\", end=\" \")\n else:\n print(k, end=\" \")\n print()\n\n\ndef main():\n # set up all the variables\n difficulty = input(\"\"\"Welcome to minesweeper, choose the difficulty (b for beginner, m for medium, h for hard).\nBeginner is 10x10 with 10 mines, medium is 16x16 with 40 mines, hard is 30x16 with 99 mines.\"\"\")\n diff_list = [[\"b\", \"m\", \"h\"], [[10, 10], [16, 16], [30, 16]], [10, 40, 99]]\n dimension = diff_list[1][diff_list[0].index(difficulty)]\n num_mines = diff_list[2][diff_list[0].index(difficulty)]\n flags_left = num_mines\n\n # creates the field the user sees with hidden spots\n user_field = generate_user_field(dimension)\n print_array(user_field)\n\n # gets the starting point\n starting = user_input_coords(\"Choose a starting point (eg. 
type 0 12 to get 1st row 13th column)\")\n\n # fields\n minefield = generate_minefield(dimension, num_mines, starting)\n num_field = generate_num_field(minefield)\n\n flood_reveal(user_field, num_field, starting[0], starting[1])\n print_array(user_field)\n\n while True:\n choose_square = user_input_coords(\"Choose a square to flag or reveal\")\n row = choose_square[0]\n col = choose_square[1]\n\n square_decision = input(\"Type f to flag, c to remove flag, or r to reveal\")\n\n if np.count_nonzero(user_field == \"?\") + np.count_nonzero(user_field == \"!\") == num_mines: # if you win\n user_field[user_field == \"!\"] = \"*\"\n user_field[user_field == \"?\"] = \"*\"\n print_array(user_field)\n print(\"Good job, you won!!\")\n break\n\n if square_decision == \"r\":\n if minefield[row, col] == 1: # if you reveal a mine, you're bad and you lost\n user_field[row, col] = \"*\"\n print_array(user_field)\n print(\"Oops you uncovered a mine, you're bad AND you lost, try again.\")\n break\n\n elif num_field[row, col] > 0: # if you reveal a square with surrounding mines, no flood_reveal\n user_field[row, col] = num_field[row, col]\n print_array(user_field)\n\n elif num_field[row, col] == 0:\n flood_reveal(user_field, num_field, row, col)\n print_array(user_field)\n\n elif square_decision == \"f\":\n user_field[row, col] = \"!\"\n print_array(user_field)\n flags_left -= 1\n print(\"Flags left:\", flags_left)\n\n elif square_decision == \"c\":\n user_field[row, col] = \"?\"\n print_array(user_field)\n flags_left += 1\n print(\"Flags left:\", flags_left)\n\n\nmain()\n","sub_path":"comp/j_wu_old.py","file_name":"j_wu_old.py","file_ext":"py","file_size_in_byte":6896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"212212824","text":"\"\"\"\nLabels display text to users\n\"\"\"\n\nfrom kivy.uix.label import Label\nfrom kivy.properties import StringProperty, BooleanProperty, ColorProperty\n\n__all__ = [\n 'CupertinoLabel'\n]\n\n\nclass CupertinoLabel(Label):\n \"\"\"\n iOS style Label\n\n .. image:: ../_static/label.png\n \"\"\"\n\n text = StringProperty(' ')\n \"\"\"\n A :class:`~kivy.properties.StringProperty` defining the text of\n :class:`~kivycupertino.uix.label.CupertinoLabel`\n \"\"\"\n\n font_name = StringProperty('San Francisco')\n \"\"\"\n A :class:`~kivy.properties.StringProperty` defining the font of\n :class:`~kivycupertino.uix.label.CupertinoLabel`. 
To comply with iOS standard, use\n    `San Francisco` or `New York`\n    \"\"\"\n\n    bold = BooleanProperty(False)\n    \"\"\"\n    A :class:`~kivy.properties.BooleanProperty` defining if\n    :class:`~kivycupertino.uix.label.CupertinoLabel` is bold\n    \"\"\"\n\n    italic = BooleanProperty(False)\n    \"\"\"\n    A :class:`~kivy.properties.BooleanProperty` defining if\n    :class:`~kivycupertino.uix.label.CupertinoLabel` is italic\n    \"\"\"\n\n    color = ColorProperty([0, 0, 0, 1])\n    \"\"\"\n    A :class:`~kivy.properties.ColorProperty` that defines the text color of\n    :class:`~kivycupertino.uix.label.CupertinoLabel`\n    \"\"\"\n","sub_path":"kivycupertino/uix/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"399134519","text":"import numpy as np\nimport pandas as pd\nimport math\nimport random\nimport time\nfrom scipy.spatial import distance\n\n# Define a class to represent points and their assignment to a cluster\nclass DataPoint:\n    def __init__(self, p):\n        self.value = p[:]\n    \n    def set_value(self, p):\n        self.value = p\n    \n    def get_value(self):\n        return self.value\n    \n    def set_cluster(self, cluster):\n        self.cluster = cluster\n    \n    def get_cluster(self):\n        return self.cluster\n\ndef initialize_dataset(data, data_set, data_len):\n    for i in range(data_len):\n        point = DataPoint(data_set[i])\n        point.set_cluster(None)\n        data.append(point)\n    return\n\ndef initialize_centroids(centroids, sampling_method, num_clusters, data_set, data_len):\n    # print(\"Centroids initialized at:\")\n    \n    for c in range(num_clusters):\n        if(sampling_method == 0):\n            which = random.randint(0, data_len-1)\n        elif(sampling_method == 1):\n            which = c\n        else:\n            which = data_len-1-c\n        \n        centroids.append(list(data_set[which]))\n    # print the chosen centroids\n    # print(centroids) \n    return \n\ndef update_clusters(centroids, num_clusters, data, data_len, larger_distance):\n    changed = False\n    \n    for i in range(data_len):\n        minDistance = larger_distance\n        currentCluster = 0\n        \n        for j in range(num_clusters):\n            dist = distance.euclidean(data[i].get_value(), centroids[j])\n            if(dist < minDistance):\n                minDistance = dist\n                currentCluster = j\n        \n        if(data[i].get_cluster() is None or data[i].get_cluster() != currentCluster):\n            data[i].set_cluster(currentCluster)\n            changed = True\n    \n    return changed\n\ndef update_centroids(centroids, num_clusters, data, data_set, data_len): \n    # print(\"The new centroids are:\")\n\n    for j in range(num_clusters):\n        means = [0] * data_set.shape[1]\n        \n        clusterSize = 0\n        for k in range(data_len):\n            if(data[k].get_cluster() == j):\n                p = data[k].get_value()\n                for i in range(data_set.shape[1]):\n                    means[i] += p[i]\n                clusterSize += 1\n\n        if(clusterSize > 0):\n            for i in range(data_set.shape[1]):\n                centroids[j][i] = means[i] / clusterSize\n\n    # print(centroids) \n    return \n\ndef k_means(num_clusters, sampling_method, data_set, data_len, larger_distance):\n    LARGER_DISTANCE = larger_distance\n    # Read the data from file\n    DATA_SET = data_set\n    # Size of the data set\n    DATA_LEN = data_len\n    # --------------------------\n    # Create the data set\n    data = []\n    initialize_dataset(data, DATA_SET, DATA_LEN)\n    # 1 - Define the number of clusters\n    NUM_CLUSTERS = num_clusters\n    # 2 - Define the sampling method; 0 = random, 1 = head, 2 = tail\n    SAMPLING_METHOD = sampling_method\n    centroids = []\n    initialize_centroids(centroids, SAMPLING_METHOD, NUM_CLUSTERS, DATA_SET, DATA_LEN)\n    # 3 - Assign each point of the data
set to the cluster where the\n    # distance from the point to the centroid is smallest.\n    # Update the clusters\n    KEEP_WALKING = update_clusters(centroids, NUM_CLUSTERS, data, DATA_LEN, LARGER_DISTANCE) \n    # 4 - Compute the centroids from the points in each cluster.\n    # Update the centroids\n    update_centroids(centroids, NUM_CLUSTERS, data, DATA_SET, DATA_LEN)\n    # 5 - Repeat steps 3 and 4 until there are no changes in the clusters.\n    while(KEEP_WALKING):\n        KEEP_WALKING = update_clusters(centroids, NUM_CLUSTERS, data, DATA_LEN, LARGER_DISTANCE) \n        if (KEEP_WALKING):\n            update_centroids(centroids, NUM_CLUSTERS, data, DATA_SET, DATA_LEN)\n        else: \n            members = [0] * NUM_CLUSTERS\n            for i in range(DATA_LEN):\n                members[data[i].get_cluster()] += 1\n\n            for j in range(NUM_CLUSTERS):\n                print(f\"\\nCluster {j}: {members[j]} members.\")\n                print(np.asarray(centroids[j]))","sub_path":"Proyecto/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"56622902","text":"import requests\nimport json\n\n\nbase_url = 'http://localhost:82/api/'\nrest_session = requests.session()\n\nrest_session.headers = {\n    'Content-Type': 'application/json',\n    'Accept': 'application/json',\n}\nlogin_data = {\n    \"username\": \"admin\",\n    \"password\": \"admin\",\n    \"domain\": \"Global\"\n}\nlogin_json = json.dumps(login_data)\nlogin_response = rest_session.put(\n    url= \"{0}login\".format(base_url),\n    data= login_json,\n    headers=rest_session.headers\n)\nauth_header = {\"Authorization\": \"Basic {}\".format(login_response.text[1:-1])}\nrest_session.headers.update(auth_header)\n\nall_blueprints = json.loads(rest_session.get(\n    url= \"{}v2/blueprints\".format(base_url)\n).text)\nmy_blueprint = [bp for bp in all_blueprints if bp['name'] == 'sandbox_api_exercise'][0]\nblueprint_id = my_blueprint['id']\n# reserve this blueprint\nreserve_body = json.dumps({\n    \"duration\": \"PT23H\",\n    \"name\": \"training Sandbox\",\n})\n\nreserve = rest_session.post(\n    url=\"{base}v2/blueprints/{blueprint_identifier}/start\".format(\n        blueprint_identifier=blueprint_id,\n        base=base_url\n    ),\n    data=reserve_body\n)\n\n# obtain the sandbox id\nsandbox_id = json.loads(reserve.content)['id']\n\n# get sandbox commands\ncommands = json.loads(rest_session.get(\n    url=\"{base}v2/sandboxes/{sandbox_identifier}/commands\".format(\n        sandbox_identifier=sandbox_id,\n        base=base_url\n    )\n).content)\n\n# run command\n\nrun_command = rest_session.post(\n    url=\"{base}v2/sandboxes/{sandbox_id}/commands/{command_name}/start\".format(\n        sandbox_id=sandbox_id,\n        command_name='Run Tests',\n        base=base_url\n    )\n)\n\n# tear down the sandbox\nunreserve = rest_session.post(\n    url=\"{base}v2/sandboxes/{sandbox_id}/stop\".format(\n        sandbox_id=sandbox_id,\n        base=base_url\n    )\n)\n\n\npass","sub_path":"Misc/Sandbox_api.py","file_name":"Sandbox_api.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"440836478","text":"import os\nimport numpy as np\nfrom data_divide import split_train_test\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport random\nfrom k_means import k_mean\nfrom sklearn.model_selection import KFold\n\n# preprocessing\npath = \"C:\\\\Users\\\\Stoner\\\\Desktop\\\\大二下课程\\\\NLP-PPT\\\\text_classification\\\\mini_newsgroups\"\nclass_names = os.listdir(path)\npath = path + \"\\\\\"\n# print(class_names)\ntrain_set = []\ntest_set =
[]\nfor class_name in class_names:\n    file_path = path + class_name\n    file_names = os.listdir(file_path)\n    file_path = file_path + \"\\\\\"\n    files = []\n    for file_name in file_names:\n        with open(file_path + file_name, errors=\"ignore\") as file:\n            A = file.read()\n        files.append(A)\n    files = np.asarray(files)\n    train_set.append(files)\n# print(files[0])\n# print(len(files))\n# print(len(train_set))\n# print(train_set)\n\n# construct the frequency matrix\n# stop words\nstopdir = \"C:\\\\Users\\\\Stoner\\\\Desktop\\\\大二下课程\\\\NLP-PPT\\\\text_classification\\\\english.txt\"\nwith open(stopdir, errors='ignore') as stopdir:\n    stopword = stopdir.read()\nstopword = stopword.splitlines()\nstopword = stopword + ['ain', 'daren', 'hadn', 'mayn', 'mightn', 'mon', 'mustn', 'needn', 'oughtn', 'shan']\n# Merge the training set into an easier-to-handle format\nseq_train = []\n# print(train_set[0][0])\nfor i in range(len(train_set)):\n    for j in range(len(train_set[i])):\n        seq_train.append(train_set[i][j])\n# Build the bag-of-words model\ncv_train = CountVectorizer(stop_words=stopword, lowercase=True, max_features=5000)\ncv_train_fit = cv_train.fit_transform(seq_train)\n# print(len(seq_train))\n# print(cv.vocabulary_)\nfreq_matrix_train = cv_train_fit.toarray()\n# print(freq_matrix_train)\n# print(len(cv_train.get_feature_names()))\n\n\nnum_center = 20\ncenter_ind = []\ncenter = []\nk_value = 2000\n# for i in range(num_center):\n#     center_ind.append(random.randint(0, k_value * num_center))\n#     center.append(freq_matrix_train[i])\nfor i in range(num_center):\n    center.append(freq_matrix_train[100*i+20])\n\n# k-means\nfreq_matrix_train = np.asarray(freq_matrix_train)\n# print(np.shape(freq_matrix_train))\nk_max = k_mean(freq_matrix_train, center, num_center, k_value)\n# print(np.shape(k_max))\n# k_max = np.zeros((num_center, 2, k_value))\n# for k in range(num_center):\n#     for j in range(len(freq_matrix_train)):\n#         distance = freq_matrix_train[j] * center[k]\n#         distance = np.sum(distance)\n#         # print('distance: ', distance)\n#         # print(np.argmin(k_max[0]))\n#         if distance > np.min(k_max[k][0]):\n#             # print('yes', np.argmin(k_max[0]))\n#             # print('distance: ', distance, 'j: ', j)\n#             # print('k_max: ', k_max)\n#             k_max[k, 1, np.argmin(k_max[0])] = j  # depends on the size of the training set\n#             k_max[k, 0, np.argmin(k_max[0])] = distance\n\nnum_iteration = 20\nfor k in range(num_iteration-1):\n    for i in range(num_center):\n        center[i] = np.zeros(np.shape(center[i]))  # the use of shape here may be wrong\n        for j in range(k_value):\n            center[i] = center[i] + freq_matrix_train[int(k_max[i][1][j])]\n        center[i] = center[i] / k_value\n    k_max = k_mean(freq_matrix_train, center, num_center, k_value)\n    print(k)\ncorrect_rate = np.zeros(num_center)\nfor i in range(num_center):\n    majority = np.zeros((1, num_center))\n    for j in range(num_center):\n        for k in range(k_value):\n            if int(k_max[i][1][k] / k_value) == j:\n                majority[0, j] = majority[0, j] + 1\n    correct_rate[i] = np.max(majority)\n\nprint('Correct Rate: ', correct_rate/k_value)\n","sub_path":"clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"651627343","text":"##### Settings #####\n# Imports\nimport re\nimport csv\nimport urllib.parse\n\n# Opening Files\ndef openFiles():\n    readername = \"../input/Testing.csv\"\n    writername = \"../extracted/testing/wordcount.csv\"\n    readerfile = open(readername, encoding=\"ISO-8859-1\")\n    writerfile = open(writername, \"w+\")\n    \n    return readerfile, writerfile\n\n# Closing Files\ndef closeFiles(readerfile, writerfile):\n    
readerfile.close()\n writerfile.close()\n\n# Creating CSV Builders\ndef createBuilders(readerfile, writerfile):\n readercsv = csv.reader(readerfile, delimiter=',')\n writercsv = csv.writer(writerfile)\n \n # Ignore readerfile's header\n next(readercsv)\n return readercsv, writercsv\n\n# Obtain position, text and rating\ndef obtainTrainingValues(row):\n text = row[0]\n rating = row[1]\n\n return text, rating\ndef obtainTestingValues(row):\n text = row[1]\n rating = \"?\"\n\n return text, rating\n\n# Create the header of the csv result file\ndef createHeader(writercsv):\n writercsv.writerow(\n [\n \"index\",\n \"rating\",\n \"wordcount\"\n ]\n )\n\n# Create each row of the csv result file\ndef createRow(writercsv, index, rating, wordcount):\n writercsv.writerow(\n [\n index,\n rating,\n wordcount\n ]\n )\n\n\n##### Logic #####\n# Get files and builders\nreaderfile, writerfile = openFiles()\nreadercsv, writercsv = createBuilders(readerfile, writerfile)\n\n# Write the header\ncreateHeader(writercsv)\n\n# Run for every line in the readerfile\nindex = 1\nfor row in readercsv:\n # Obtain the values from each row\n text, rating = obtainTestingValues(row)\n\n # Parse for usage in word counter\n parsed = urllib.parse.quote(text)\n\n # Replace all weird characters %XX with a separator\n parsed = re.sub('%[0-9a-zA-Z][0-9a-zA-Z]|_', ',', parsed)\n\n # Split into words\n wordcount = 0\n words = parsed.split(',')\n for word in words:\n if word:\n wordcount += 1\n\n # Write the obtained value\n createRow(writercsv, index, rating, wordcount)\n\n # Keep track of the index\n index += 1\n \n# Close files\ncloseFiles(readerfile, writerfile)","sub_path":"wordcount.py","file_name":"wordcount.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"180335305","text":"from collections import defaultdict\nfrom typing import List\n\nimport config\n\n\nOUTLET = 0\nLOWEST = 1\nHIGHEST = 3\n\n\ndef calculate_tree_diffs(joltages: List[int]) -> None:\n \"\"\"Calculate tree containing ways to chain adapters.\n\n Args:\n joltages (List[int]): the joltages of the adapters on hand\n\n \"\"\"\n # Add the starting (outlet) joltage.\n joltages.append(OUTLET)\n\n branches = defaultdict(int)\n\n # Initialize the branches for the first joltages.\n branches[OUTLET] = 1\n branches[1] = 1\n\n for joltage in joltages[1:]:\n for diff in range(LOWEST, HIGHEST + 1):\n prev = joltage - diff\n branches[joltage] += branches[prev]\n\n return max(branches.values())\n\n\ndef main() -> None:\n \"\"\"Check joltages of the adapters from the outlet to device.\"\"\"\n test_answer = 8\n file = config.TestFile(test_answer, to_type=int, sort=True)\n test = calculate_tree_diffs(file.contents)\n file.test(test)\n\n test_answer = 19208\n file = config.TestFile(\n test_answer, path=\"another_test_input.txt\", to_type=int, sort=True)\n file.write_to_file()\n test = calculate_tree_diffs(file.contents)\n file.test(test)\n\n file = config.File(to_type=int, sort=True)\n result = calculate_tree_diffs(file.contents)\n config.LOGGER.info(result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"2020/10/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"511989291","text":"\"\"\"\nAIOGitHubAPI: Repository Traffic\n\nhttps://docs.github.com/en/rest/reference/repos#traffic\n\"\"\"\nfrom datetime import datetime\n\nfrom aiogithubapi.objects.base import 
AIOGitHubAPIBaseClient\nfrom aiogithubapi.objects.repos.traffic.clones import AIOGitHubAPIReposTrafficClones\nfrom aiogithubapi.objects.repos.traffic.pageviews import (\n AIOGitHubAPIReposTrafficPageviews,\n)\n\n\nclass AIOGitHubAPIRepositoryTraffic(AIOGitHubAPIBaseClient):\n \"\"\"Repository Release GitHub API implementation.\"\"\"\n\n @property\n def full_name(self) -> None:\n return self.attributes.get(\"full_name\")\n\n async def get_views(self):\n _endpoint = f\"/repos/{self.full_name}/traffic/views\"\n response = await self.client.get(endpoint=_endpoint)\n return AIOGitHubAPIReposTrafficPageviews(response)\n\n async def get_clones(self) -> None:\n _endpoint = f\"/repos/{self.full_name}/traffic/clones\"\n response = await self.client.get(endpoint=_endpoint)\n return AIOGitHubAPIReposTrafficClones(response)\n","sub_path":"aiogithubapi/objects/repository/traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"611936906","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\nfrom collections import deque\r\n\r\n\r\nclass Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.left = self.right = None\r\n\r\n def set_links(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n\r\ndef serialize_tree(root, output):\r\n if not root:\r\n output.append('\\0')\r\n return\r\n output.append(root.data)\r\n serialize_tree(root.left, output)\r\n serialize_tree(root.right, output)\r\n\r\n\r\ndef check_subtree1(root1, root2):\r\n s1, s2 = deque(), deque()\r\n serialize_tree(root1, s1)\r\n serialize_tree(root2, s2)\r\n return ''.join(s2) in ''.join(s1)\r\n\r\n\r\ndef is_subtree(root1, root2):\r\n if not root1 or not root2:\r\n return root1 == root2\r\n if root1.data != root2.data:\r\n return False\r\n return is_subtree(root1.left, root2.left) and is_subtree(root1.right, root2.right)\r\n\r\n\r\ndef check_subtree2(root1, root2):\r\n if not root1 or not root2:\r\n return False\r\n\r\n if root1.data == root2.data:\r\n if is_subtree(root1, root2):\r\n return True\r\n\r\n return check_subtree2(root1.left, root2) or check_subtree2(root1.right, root2)\r\n\r\n\r\nif __name__ == '__main__':\r\n t1_nodes = [\r\n Node('a'), # 0\r\n Node('b'), # 1\r\n Node('c'), # 2\r\n Node('d'), # 3\r\n Node('e'), # 4\r\n Node('g'), # 5\r\n Node('h'), # 6\r\n Node('o'), # 7\r\n Node('p'), # 8\r\n Node('f'), # 9\r\n Node('m'), # 10\r\n Node('k'), # 11\r\n Node('e'), # 12\r\n Node('n'), # 13\r\n Node('m'), # 14\r\n Node('l'), # 15\r\n Node('e'), # 16\r\n Node('f'), # 17\r\n Node('m'), # 18\r\n Node('f') # 19\r\n ]\r\n \r\n t1_nodes[0] .set_links(t1_nodes[1], t1_nodes[2])\r\n t1_nodes[1] .set_links(t1_nodes[3], t1_nodes[4])\r\n t1_nodes[2] .set_links(t1_nodes[5], t1_nodes[6])\r\n t1_nodes[3] .set_links(t1_nodes[7], t1_nodes[8])\r\n t1_nodes[4] .set_links(t1_nodes[9], t1_nodes[10])\r\n t1_nodes[6] .set_links(None, t1_nodes[11])\r\n t1_nodes[7] .set_links(t1_nodes[12], None)\r\n t1_nodes[9] .set_links(t1_nodes[13], None)\r\n t1_nodes[10].set_links(t1_nodes[14], None)\r\n t1_nodes[11].set_links(t1_nodes[15], t1_nodes[16])\r\n t1_nodes[12].set_links(t1_nodes[17], t1_nodes[18])\r\n t1_nodes[16].set_links(t1_nodes[19], None)\r\n\r\n t2_nodes = [\r\n Node('e'), # 0\r\n Node('f'), # 1\r\n Node('m'), # 2\r\n Node('n'), # 3\r\n Node('m') # 4\r\n ]\r\n\r\n t2_nodes[0].set_links(t2_nodes[1], t2_nodes[2])\r\n t2_nodes[1].set_links(t2_nodes[3], None)\r\n 
t2_nodes[2].set_links(t2_nodes[4], None)\r\n\r\n print('Is subtree (1):', check_subtree1(t1_nodes[0], t2_nodes[0]))\r\n print('Is subtree (2):', check_subtree2(t1_nodes[0], t2_nodes[0]))\r\n","sub_path":"ctci/python/ch4_trees_and_graphs/q10_check_subtree.py","file_name":"q10_check_subtree.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"344853142","text":"# coding=utf-8\n\"\"\"Performs face detection in realtime.\n\nBased on code from https://github.com/shanren7/real_time_face_recognition\n\"\"\"\n\n#from contributed.face import *\nfrom contributed import face\nimport argparse\nimport sys\nimport time\nimport cv2\nimport os\nimport glob\nimport facepp\nimport base64\nimport shutil\n\n\n# 初始日期时间\nnow_date = time.strftime('%Y-%m-%d', time.localtime(time.time()))\nnow_hour = time.strftime('%H', time.localtime(time.time()))\n\n# 路径\npath = './biaozhu_dav_20180606/'\n\n# 头视频定义\ndef search_all_files_return_by_time_reversed(path, reverse=True):\n return glob.glob(os.path.join(path, '*.dav'))\n\nIMGS_DIR = './videos/'\nsave_path = './biaozhu_result/'\n\ndef facepp_recog(img_path):\n with open(img_path,\"rb\") as img: \n # b64encode是编码,b64decode是解码 \n image_base64 = base64.b64encode(img.read()) \n #print(image_base64)\n sleep_count=0\n total_count=0\n user_id = \"-2\"\n while (user_id == \"-2\"):\n if sleep_count==5:\n total_count += 1\n sleep_count = 0\n time.sleep(10)\n user_id, confidence = facepp.FaceSearch_v1(image_base64, \"BSC_ST\") \n if (user_id ==\"-1\"):\n user_id, confidence = facepp.FaceSearch_v1(image_base64, \"BSC_ST_1\")\n print(\"UserID: \",user_id)\n sleep_count += 1\n print(\"Loop num: \",str(total_count))\n if total_count==3:\n break;\n\n now_date = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n if user_id == \"-1\":\n shutil.copyfile(img_path,save_path+str(now_date)+\"_Unknown.jpg\") #复制文件\n elif user_id == \"-2\":\n shutil.copyfile(img_path,save_path+str(now_date)+\"_NoFace.jpg\") #复制文件\n else:\n shutil.copyfile(img_path,save_path+user_id+\"_\"+str(now_date)+\".jpg\") #复制文件\n \n \n\ndef main(args):\n\n start_time = time.time()\n face_recognition = face.Recognition()\n\n frame_interval = 25 # Number of frames after which to run face detection\n fps_display_interval = 10 # seconds\n frame_rate = 25\n frame_count = 0\n # 当前最新视频\n VIDEO_STREAM_LIST = search_all_files_return_by_time_reversed(path)\n # 定义录制视频格式 motion-jpeg codec\n sp_typ = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')\n video_count = 0\n \n for VIDEO_STREAM in VIDEO_STREAM_LIST:\n video_count += 1\n print(\"=========================---Now recogniting Video: ---=========================\",str(video_count))\n print(\"================================================================================\",str(video_count))\n video_capture = cv2.VideoCapture(VIDEO_STREAM)\n while True:\n # Capture frame-by-frame\n ret, frame = video_capture.read()\n if frame is None:\n break;\n \n if (frame_count % frame_interval) == 0:\n faces = face_recognition.identify(frame)\n faces_count = len(faces)\n print(\"Faces count: \",str(faces_count))\n if faces is not None:\n for my_face in faces:\n now_date = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n\t \t\t\t # 保存相片\n tmp_path = \"./biaozhu_temp/temp\"+str(now_date)+\".jpg\"\n cv2.imwrite(tmp_path,my_face.image)\n\t \t\t # 这里开始调用face++接口\n facepp_recog(tmp_path)\n frame_count += 1\n # cv2.waitKey(1)\n\ndef parse_arguments(argv):\n parser = 
argparse.ArgumentParser()\n\n parser.add_argument('--debug', action='store_true',\n help='Enable some debug outputs.')\n parser.add_argument('--image', action='store_true',\n help='Process images instead of video.')\n return parser.parse_args(argv)\n\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n","sub_path":"real_time_face_recognition_v5.py","file_name":"real_time_face_recognition_v5.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"171651132","text":"class Humano:\n def __init__(self, nombre, armadura, nivel, ataque, ojos=2, piernas=2, dientes=32, salud=30):\n\n self.nombre = nombre\n self.armadura = armadura\n self.nivel = nivel\n self.ataque = ataque\n self.ojos = ojos\n self.piernas = piernas\n self.dientes = dientes\n self.salud = salud\n\n def atacar(self, orco):\n orco.salud = orco.salud - (self.ataque - orco.armadura)\n return orco.salud\n\n def no_vivo(self, salud):\n if self.salud <=0:\n return True\n else:\n return False\n\n def atributos_humano(self):\n print('Nombre:', str(self.nombre), '\\nArmadura:', str(self.armadura), '\\nNivel:', str(self.nivel), '\\nAtaque:', str(self.ataque),\n '\\nOjos:', str(self.ojos), '\\nPiernas:', str(self.piernas), '\\nDientes:', str(self.dientes), '\\nSalud:', str(self.salud))\n\n","sub_path":"week3_course_python_III/day2_python_VIII/exercises/classes_import/objects/ClaseHumano.py","file_name":"ClaseHumano.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"565981152","text":"import os\r\nimport cv2\r\nimport glob\r\nimport numpy as np\r\nimport subprocess\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef unwarp(img, src, i, hw, M):\r\n h, w = hw\r\n # use cv2.warpPerspective() to warp your image to a top-down view\r\n warped = cv2.warpPerspective(img, M, (w, h), flags=cv2.INTER_LINEAR)\r\n warped = warped[25:360,22:365,:]\r\n # assumes the image is square\r\n grid_inc = warped.shape[0] / 10 \r\n # make grid\r\n for j in range(-1, 11):\r\n cv2.line(warped, (int(j*grid_inc-22), 0), (int(j*grid_inc-22), h), (255, 0, 0, 0.4), thickness=1)\r\n cv2.line(warped, (0, int(j*grid_inc+8)), (w, int(j*grid_inc+8)), (255, 0, 0, 0.4), thickness=1)\r\n\r\n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))\r\n f.subplots_adjust(hspace=.2, wspace=.1)\r\n ax1.imshow(img)\r\n x = [src[0][0], src[2][0], src[3][0], src[1][0], src[0][0]]\r\n y = [src[0][1], src[2][1], src[3][1], src[1][1], src[0][1]]\r\n ax1.plot(x, y, color='blue', alpha=0.5, linewidth=1, solid_capstyle='butt', zorder=2)\r\n ax1.set_ylim([h, 0])\r\n ax1.set_xlim([0, w])\r\n ax1.set_title('Original', fontsize=30)\r\n ax2.imshow(cv2.flip(warped, 1))\r\n ax2.set_title('Homographied', fontsize=30)\r\n #plt.show()\r\n path = \"rectified/%03d.png\" % i\r\n plt.savefig(path)\r\n plt.close()\r\n return warped\r\n\r\n# We will first manually select the source points\r\n# we will select the destination point which will map the source points in\r\n# original image to destination points in unwarped image\r\nsrc = np.float32([(127, 132), # left\r\n (300, 69), # top\r\n (300, 245), # bottom\r\n (470, 133)]) # right\r\n\r\ndst = np.float32([(300, 100),\r\n (100, 100),\r\n (300, 300),\r\n (100, 300)])\r\n\r\ncap = cv2.VideoCapture('test_ground_plane.mp4')\r\ni = 0\r\nsetup = False\r\nwhile cap.isOpened():\r\n ret, im = cap.read()\r\n if not setup:\r\n h, w = im.shape[:2]\r\n M = 
cv2.getPerspectiveTransform(src, dst)\r\n setup = True\r\n if im is None: break\r\n im = im[:,:,::-1]\r\n im = unwarp(im, src, i, (h,w), M)\r\n #im = im[:,::-1,::-1]\r\n # path = \"rectified/%03d.png\" % i\r\n # cv2.imwrite(path, im)\r\n i += 1\r\n\r\nout_fps = \"10\"\r\nin_fps = \"10\"\r\nsubprocess.call([\r\n 'ffmpeg', '-framerate', in_fps, '-i', 'rectified/%03d.png', '-r', out_fps, '-pix_fmt', 'yuv420p',\r\n 'erect_arena_2.mp4'\r\n])\r\n# clean up!\r\nfor file_name in glob.glob(\"rectified/*.png\"):\r\n os.remove(file_name)","sub_path":"rectify_agent_maze_2st.py","file_name":"rectify_agent_maze_2st.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"47313509","text":"# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2022/08/31 11:11\r\n# File: 0946.py\r\n# Desc: \r\n\r\n\r\nclass Solution:\r\n def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:\r\n temp = []\r\n while popped:\r\n if temp and temp[-1] == popped[0]:\r\n temp.pop()\r\n popped.pop(0)\r\n elif pushed:\r\n temp.append(pushed.pop(0))\r\n else: break\r\n return temp == popped and len(temp) <= 1\r\n","sub_path":"Solutions/0946/0946.py","file_name":"0946.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"484356935","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.preprocessing import StandardScaler\nfrom pydiffmap import diffusion_map as dm\n\ndf = pd.read_excel(\"HW_TESLA.xlt\")\nfeatures = df.columns\n\ndf = df.sample(frac=1).reset_index(drop=True)\n\ndata = df.loc[:, features[1:]].values\ntarget = df.loc[:, ['STATIC']].values\n\nx = df.loc[:, features[1:]].values\n#x = StandardScaler().fit_transform(x)\n\nnormalised_data = pd.DataFrame(x, columns=features[1:])\n\nmydmap = dm.DiffusionMap.from_sklearn(\n n_evecs=3, k=70, epsilon=10, alpha=0.1)\n\nstressT = mydmap.fit_transform(normalised_data)\n\nprincipal_data_Df = pd.DataFrame(data=stressT, columns=[\n 'principal component 1',\n 'principal component 2',\n 'principal component 3'])\n\n\nfig = plt.figure()\nax = plt.axes(projection='3d')\n\ntargets = [0, 1]\ncolors = ['g', 'r']\nfor target, color in zip(targets, colors):\n indicesToKeep = df['STATIC'] == target\n ax.scatter3D(principal_data_Df.loc[indicesToKeep, 'principal component 1'],\n principal_data_Df.loc[indicesToKeep, 'principal component 2'],\n principal_data_Df.loc[indicesToKeep, 'principal component 3'],\n c=color, s=40)\n\nplt.legend(['Normal', 'Stress'], prop={'size': 15})\nplt.show()\n","sub_path":"two/diffMapPlot.py","file_name":"diffMapPlot.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"183261025","text":"'''\r\n\r\nFront End Menu Code\r\n\r\nBhaskar Kuchimanchi (17200052)\r\nHarshit Jain (17200167)\r\nMeenal Rewatkar (17200165)\r\n\r\n'''\r\nimport sys\r\nimport numpy as np\r\nimport pandas as pd\r\nimport company_details as company_details\r\nimport design as design\r\nimport webbrowser\r\nimport warnings\r\nimport re\r\n\r\nwarnings.simplefilter(action='ignore', category=FutureWarning) # Dont show User Pandas Warnings\r\nwarnings.simplefilter(action='ignore', category=UserWarning) # Dont show User Pandas Warnings\r\ncompanylist = 
pd.read_csv('companylist.csv', delimiter = \",\")\r\ncompanynames = companylist['Name']\r\n# Header Names: Symbol,Name,LastSale,MarketCap,IPOyear,Sector,industry,Summary Quote\r\n\r\nprint (\"\\n****Welcome To FinInfo Analytics****\\n\\t Select an option\")\r\n\r\ndef mainmenu():\r\n    val = input(\"1. Enter Company Name\\n2. Enter Company Symbol\\n3. List of Symbols\\n4. Exit\\n\\n\")\r\n    getdetails(val)\r\n    return\r\n\r\n# Allows the user to enter a company name or symbol to view its details\r\ndef getdetails(x):\r\n    if x == \"1\":\r\n        name = input(\"Please enter: \")\r\n        for i in companynames:\r\n        \tif (re.search(name, i, re.IGNORECASE)) is not None:\r\n        \t\tgetinfo(i)\r\n    elif x == \"2\":\r\n        symbol = input(\"Please enter Symbol: \").upper()\r\n        getinfo(symbol)\r\n    elif x == \"3\":\r\n        print (companylist[\"Symbol\"],\"\\n\\n\")\r\n        mainmenu()\r\n    elif x == \"4\":\r\n        print (\"\\n************** THANK YOU **************\")\r\n        sys.exit()\r\n    else:\r\n        print(\"\\nInvalid choice, please retry\\n\")\r\n        mainmenu()\r\n\r\n# Fetches each company's details from companydetails.csv\r\ndef getinfo(x):\r\n    companies = (companylist[(companylist == x).any(1)].stack())\r\n\r\n    if len(companies) == 0: # HANDLING ERROR\r\n        print (\"Company Not Found : *Please enter exact name or symbol* \\n\")\r\n        value = input(\"If you want to try more please enter Y or enter N to go back to previous menu: \")\r\n        if value.upper() == \"Y\":\r\n            getdetails(\"2\")\r\n        elif value.upper() == \"N\":\r\n            design.design()\r\n            mainmenu()\r\n        else:\r\n            print(\"\\nWrong choice! Please retry\")\r\n            getdetails(\"2\")\r\n\r\n    elif len(companies)>8:\r\n        print (\"\\n Oops, it doesn't look like a correct symbol - Looking for any one of these?\")\r\n        design.design()\r\n        print(companies[0:],\"\\n\")\r\n        design.design()\r\n        if input(\"\\nIf not in the list? 
Hit 0 to try again OR any key to continue: \") == \"0\":\r\n            main()\r\n        else:\r\n            confirm = input(\"Confirm the Company Symbol by entering it: \").upper() # .upper() handles case-insensitive input\r\n            design.design()\r\n            try:\r\n                x = (np.where(companylist.Symbol == confirm))\r\n                p = int(x[0])\r\n                print (companylist.iloc[p][0:7])\r\n                val = input(\"To visit company's Nasdaq site press Y or to continue press any other key: \")\r\n                if(val.upper() == \"Y\"):\r\n                    webbrowser.open(companylist[7]) # To open the company link for more info\r\n                company_details.getFile(confirm)\r\n                main()\r\n            except:\r\n                print(\"\\nInvalid entry, try again\")\r\n                getdetails(\"2\")\r\n\r\n    else:\r\n        design.design()\r\n        print((companies[0:7]),\"\\n\") # For memory, quote is in companies[8]\r\n        val = input(\"To visit company's Nasdaq site press Y or to continue press any other key: \").upper()\r\n        if(val == \"Y\"):\r\n            print(\"opening url\")\r\n            webbrowser.open(companies[7]) # To open the company link for more info\r\n        pih = 'PIH'\r\n        if(x == pih):\r\n            company_details.main(x)\r\n        else:\r\n            company_details.main(companies[0])\r\n        design.design()\r\n\r\ndef main():\r\n    mainmenu()\r\n\r\nmain()\r\n","sub_path":"stock_menu.py","file_name":"stock_menu.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"566207216","text":"from urllib.request import urlopen as uReq\r\nfrom bs4 import BeautifulSoup as soup\r\n\r\nmy_url = 'https://www.cars.com/for-sale/searchresults.action/?clrId=27125%2C27128%2C27129%2C29637%2C27135%2C27131&mdId=20567&mkId=20081&page=1&perPage=100&rd=99999&searchSource=GN_REFINEMENT&sort=relevance&stkTypId=28881&transTypeId=28112&yrId=20200%2C20145%2C20201%2C27381%2C34923%2C39723%2C47272%2C51683%2C56007%2C58487%2C30031936%2C35797618%2C36362520&zc=30008'\r\nuClient = uReq(my_url)\r\npage_html = uClient.read()\r\nuClient.close()\r\n# html parser\r\npage_soup = soup(page_html, 'html.parser')\r\n# grab each product\r\ncontainers = page_soup.findAll('div',{'class':'shop-srp-listings__inner'})\r\nlen(containers)\r\nfilename = 'car1other_m.csv' \r\n\r\n\r\nf = open(filename, 'w')\r\nheaders = 'car,condition,milage,extra_info\\n'\r\nf.write(headers)\r\nfor container in containers:\r\n    # name of car\r\n    name_of_car = container.findAll('h2',{'class':'listing-row__title'})\r\n    car = name_of_car[0].text.strip()\r\n    print(car)\r\n    # condition of car\r\n    condition_of_car = container.findAll('div',{'class':'listing-row__stocktype'})\r\n    condition = condition_of_car[0].text.strip()\r\n    print(condition)\r\n    # mileage of car\r\n    milage_of_car = container.findAll('span',{'class':'listing-row__mileage'})\r\n    milage = milage_of_car[0].text.strip()\r\n    print(milage)\r\n    car_price = container.findAll('span',{'class':'listing-row__price'})\r\n    price = car_price[0].text.strip()\r\n    price_edit = price.replace('$','')\r\n    price = price_edit.replace(',','')\r\n    print(price)\r\n    car_information = container.findAll('ul', {'class':'listing-row__meta'})\r\n    car_info = car_information[0].text.strip().replace(' ','')\r\n    f.write(car+','+ condition +','+milage.replace(',','')+','+price+'\\n')\r\nf.close()","sub_path":"WebScrapping1.py","file_name":"WebScrapping1.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"391205811","text":"import os\nimport datetime\n\ndef file_date(filename):\n    \n    fp = open(os.path.join(filename), 'w')\n    fp.close()\n    \n    ts =
os.stat(filename).st_mtime\n\n ts_str = str(datetime.datetime.fromtimestamp(ts))\n \n return (\"{}\".format(ts_str[:10])) \n\nprint(file_date(\"newfile.txt\")) \n","sub_path":"scripts/dir/last-modified.py","file_name":"last-modified.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"615044749","text":"#13.- Leer un número entero y determinar la\n#suma de sus dígitos pares.\ndef suma(num):\n sumar = 0\n while True:\n modulo = num % 10\n num = num // 10\n if modulo % 2 == 0:\n sumar = sumar + modulo\n if num < 1:\n break\n\n print(\"La suma de pares es:\", sumar)\n\nnumero = int(input(\"digite numero:\"))\nsuma(numero)\n","sub_path":"ciclos/13_ejercicio.py","file_name":"13_ejercicio.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"201500189","text":"\nimport random\ndef gambling(G,S,N):\n win = 0\n lose = 0\n play ='yes'\n\n while play == 'yes': # if user wants to play then this loop will run\n for i in range(N):\n R = random.randint(0,2)#it will take random values between 0 and 2\n print(R)\n if(R==1):\n G=G+S\n print(G)\n win=win+1\n print('You Won!')\n if win>0:\n break\n else:\n lose=lose+1\n print('Sorry!You lost.')\n play=int(input('You want to play again yes or no?'))#yes=1 No=0\n if win>=0:\n n1=(win/N)*100\n print('percentage of win is:',n1)\n else:\n n2=(lose/N)*100\n print('percentage of lose is:',n2)\n\n","sub_path":"Utilities/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"326399010","text":"#\n#\n#\n\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nfrom octodns.record import Create, Delete, Record\nfrom octodns.provider.azuredns import _AzureRecord, AzureProvider, \\\n _check_endswith_dot, _parse_azure_type\nfrom octodns.zone import Zone\nfrom octodns.provider.base import Plan\n\nfrom azure.mgmt.dns.models import ARecord, AaaaRecord, CnameRecord, MxRecord, \\\n SrvRecord, NsRecord, PtrRecord, TxtRecord, RecordSet, SoaRecord, \\\n Zone as AzureZone\nfrom msrestazure.azure_exceptions import CloudError\n\nfrom unittest import TestCase\nfrom mock import Mock, patch\n\n\nzone = Zone(name='unit.tests.', sub_zones=[])\nocto_records = []\nocto_records.append(Record.new(zone, '', {\n 'ttl': 0,\n 'type': 'A',\n 'values': ['1.2.3.4', '10.10.10.10']}))\nocto_records.append(Record.new(zone, 'a', {\n 'ttl': 1,\n 'type': 'A',\n 'values': ['1.2.3.4', '1.1.1.1']}))\nocto_records.append(Record.new(zone, 'aa', {\n 'ttl': 9001,\n 'type': 'A',\n 'values': ['1.2.4.3']}))\nocto_records.append(Record.new(zone, 'aaa', {\n 'ttl': 2,\n 'type': 'A',\n 'values': ['1.1.1.3']}))\nocto_records.append(Record.new(zone, 'cname', {\n 'ttl': 3,\n 'type': 'CNAME',\n 'value': 'a.unit.tests.'}))\nocto_records.append(Record.new(zone, 'mx1', {\n 'ttl': 3,\n 'type': 'MX',\n 'values': [{\n 'priority': 10,\n 'value': 'mx1.unit.tests.',\n }, {\n 'priority': 20,\n 'value': 'mx2.unit.tests.',\n }]}))\nocto_records.append(Record.new(zone, 'mx2', {\n 'ttl': 3,\n 'type': 'MX',\n 'values': [{\n 'priority': 10,\n 'value': 'mx1.unit.tests.',\n }]}))\nocto_records.append(Record.new(zone, '', {\n 'ttl': 4,\n 'type': 'NS',\n 'values': ['ns1.unit.tests.', 'ns2.unit.tests.']}))\nocto_records.append(Record.new(zone, 'foo', {\n 'ttl': 5,\n 'type': 'NS',\n 'value': 
'ns1.unit.tests.'}))\nocto_records.append(Record.new(zone, '_srv._tcp', {\n 'ttl': 6,\n 'type': 'SRV',\n 'values': [{\n 'priority': 10,\n 'weight': 20,\n 'port': 30,\n 'target': 'foo-1.unit.tests.',\n }, {\n 'priority': 12,\n 'weight': 30,\n 'port': 30,\n 'target': 'foo-2.unit.tests.',\n }]}))\nocto_records.append(Record.new(zone, '_srv2._tcp', {\n 'ttl': 7,\n 'type': 'SRV',\n 'values': [{\n 'priority': 12,\n 'weight': 17,\n 'port': 1,\n 'target': 'srvfoo.unit.tests.',\n }]}))\nocto_records.append(Record.new(zone, 'txt1', {\n 'ttl': 8,\n 'type': 'TXT',\n 'value': 'txt singleton test'}))\nocto_records.append(Record.new(zone, 'txt2', {\n 'ttl': 9,\n 'type': 'TXT',\n 'values': ['txt multiple test', 'txt multiple test 2']}))\n\nazure_records = []\n_base0 = _AzureRecord('TestAzure', octo_records[0])\n_base0.zone_name = 'unit.tests'\n_base0.relative_record_set_name = '@'\n_base0.record_type = 'A'\n_base0.params['ttl'] = 0\n_base0.params['arecords'] = [ARecord('1.2.3.4'), ARecord('10.10.10.10')]\nazure_records.append(_base0)\n\n_base1 = _AzureRecord('TestAzure', octo_records[1])\n_base1.zone_name = 'unit.tests'\n_base1.relative_record_set_name = 'a'\n_base1.record_type = 'A'\n_base1.params['ttl'] = 1\n_base1.params['arecords'] = [ARecord('1.2.3.4'), ARecord('1.1.1.1')]\nazure_records.append(_base1)\n\n_base2 = _AzureRecord('TestAzure', octo_records[2])\n_base2.zone_name = 'unit.tests'\n_base2.relative_record_set_name = 'aa'\n_base2.record_type = 'A'\n_base2.params['ttl'] = 9001\n_base2.params['arecords'] = ARecord('1.2.4.3')\nazure_records.append(_base2)\n\n_base3 = _AzureRecord('TestAzure', octo_records[3])\n_base3.zone_name = 'unit.tests'\n_base3.relative_record_set_name = 'aaa'\n_base3.record_type = 'A'\n_base3.params['ttl'] = 2\n_base3.params['arecords'] = ARecord('1.1.1.3')\nazure_records.append(_base3)\n\n_base4 = _AzureRecord('TestAzure', octo_records[4])\n_base4.zone_name = 'unit.tests'\n_base4.relative_record_set_name = 'cname'\n_base4.record_type = 'CNAME'\n_base4.params['ttl'] = 3\n_base4.params['cname_record'] = CnameRecord('a.unit.tests.')\nazure_records.append(_base4)\n\n_base5 = _AzureRecord('TestAzure', octo_records[5])\n_base5.zone_name = 'unit.tests'\n_base5.relative_record_set_name = 'mx1'\n_base5.record_type = 'MX'\n_base5.params['ttl'] = 3\n_base5.params['mx_records'] = [MxRecord(10, 'mx1.unit.tests.'),\n MxRecord(20, 'mx2.unit.tests.')]\nazure_records.append(_base5)\n\n_base6 = _AzureRecord('TestAzure', octo_records[6])\n_base6.zone_name = 'unit.tests'\n_base6.relative_record_set_name = 'mx2'\n_base6.record_type = 'MX'\n_base6.params['ttl'] = 3\n_base6.params['mx_records'] = [MxRecord(10, 'mx1.unit.tests.')]\nazure_records.append(_base6)\n\n_base7 = _AzureRecord('TestAzure', octo_records[7])\n_base7.zone_name = 'unit.tests'\n_base7.relative_record_set_name = '@'\n_base7.record_type = 'NS'\n_base7.params['ttl'] = 4\n_base7.params['ns_records'] = [NsRecord('ns1.unit.tests.'),\n NsRecord('ns2.unit.tests.')]\nazure_records.append(_base7)\n\n_base8 = _AzureRecord('TestAzure', octo_records[8])\n_base8.zone_name = 'unit.tests'\n_base8.relative_record_set_name = 'foo'\n_base8.record_type = 'NS'\n_base8.params['ttl'] = 5\n_base8.params['ns_records'] = [NsRecord('ns1.unit.tests.')]\nazure_records.append(_base8)\n\n_base9 = _AzureRecord('TestAzure', octo_records[9])\n_base9.zone_name = 'unit.tests'\n_base9.relative_record_set_name = '_srv._tcp'\n_base9.record_type = 'SRV'\n_base9.params['ttl'] = 6\n_base9.params['srv_records'] = [SrvRecord(10, 20, 30, 'foo-1.unit.tests.'),\n 
SrvRecord(12, 30, 30, 'foo-2.unit.tests.')]\nazure_records.append(_base9)\n\n_base10 = _AzureRecord('TestAzure', octo_records[10])\n_base10.zone_name = 'unit.tests'\n_base10.relative_record_set_name = '_srv2._tcp'\n_base10.record_type = 'SRV'\n_base10.params['ttl'] = 7\n_base10.params['srv_records'] = [SrvRecord(12, 17, 1, 'srvfoo.unit.tests.')]\nazure_records.append(_base10)\n\n_base11 = _AzureRecord('TestAzure', octo_records[11])\n_base11.zone_name = 'unit.tests'\n_base11.relative_record_set_name = 'txt1'\n_base11.record_type = 'TXT'\n_base11.params['ttl'] = 8\n_base11.params['txt_records'] = [TxtRecord(['txt singleton test'])]\nazure_records.append(_base11)\n\n_base12 = _AzureRecord('TestAzure', octo_records[12])\n_base12.zone_name = 'unit.tests'\n_base12.relative_record_set_name = 'txt2'\n_base12.record_type = 'TXT'\n_base12.params['ttl'] = 9\n_base12.params['txt_records'] = [TxtRecord(['txt multiple test']),\n TxtRecord(['txt multiple test 2'])]\nazure_records.append(_base12)\n\n\nclass Test_AzureRecord(TestCase):\n def test_azure_record(self):\n assert(len(azure_records) == len(octo_records))\n for i in range(len(azure_records)):\n octo = _AzureRecord('TestAzure', octo_records[i])\n assert(azure_records[i]._equals(octo))\n\n\nclass Test_ParseAzureType(TestCase):\n def test_parse_azure_type(self):\n for expected, test in [['A', 'Microsoft.Network/dnszones/A'],\n ['AAAA', 'Microsoft.Network/dnszones/AAAA'],\n ['NS', 'Microsoft.Network/dnszones/NS'],\n ['MX', 'Microsoft.Network/dnszones/MX']]:\n self.assertEquals(expected, _parse_azure_type(test))\n\n\nclass Test_CheckEndswithDot(TestCase):\n def test_check_endswith_dot(self):\n for expected, test in [['a.', 'a'],\n ['a.', 'a.'],\n ['foo.bar.', 'foo.bar.'],\n ['foo.bar.', 'foo.bar']]:\n self.assertEquals(expected, _check_endswith_dot(test))\n\n\nclass TestAzureDnsProvider(TestCase):\n def _provider(self):\n return self._get_provider('mock_spc', 'mock_dns_client')\n\n @patch('octodns.provider.azuredns.DnsManagementClient')\n @patch('octodns.provider.azuredns.ServicePrincipalCredentials')\n def _get_provider(self, mock_spc, mock_dns_client):\n '''Returns a mock AzureProvider object to use in testing.\n\n :param mock_spc: placeholder\n :type mock_spc: str\n :param mock_dns_client: placeholder\n :type mock_dns_client: str\n\n :type return: AzureProvider\n '''\n return AzureProvider('mock_id', 'mock_client', 'mock_key',\n 'mock_directory', 'mock_sub', 'mock_rg')\n\n def test_populate_records(self):\n provider = self._get_provider()\n\n rs = []\n rs.append(RecordSet(name='a1', ttl=0, type='A',\n arecords=[ARecord('1.1.1.1')]))\n rs.append(RecordSet(name='a2', ttl=1, type='A',\n arecords=[ARecord('1.1.1.1'),\n ARecord('2.2.2.2')]))\n rs.append(RecordSet(name='aaaa1', ttl=2, type='AAAA',\n aaaa_records=[AaaaRecord('1:1ec:1::1')]))\n rs.append(RecordSet(name='aaaa2', ttl=3, type='AAAA',\n aaaa_records=[AaaaRecord('1:1ec:1::1'),\n AaaaRecord('1:1ec:1::2')]))\n rs.append(RecordSet(name='cname1', ttl=4, type='CNAME',\n cname_record=CnameRecord('cname.unit.test.')))\n rs.append(RecordSet(name='cname2', ttl=5, type='CNAME',\n cname_record=None))\n rs.append(RecordSet(name='mx1', ttl=6, type='MX',\n mx_records=[MxRecord(10, 'mx1.unit.test.')]))\n rs.append(RecordSet(name='mx2', ttl=7, type='MX',\n mx_records=[MxRecord(10, 'mx1.unit.test.'),\n MxRecord(11, 'mx2.unit.test.')]))\n rs.append(RecordSet(name='ns1', ttl=8, type='NS',\n ns_records=[NsRecord('ns1.unit.test.')]))\n rs.append(RecordSet(name='ns2', ttl=9, type='NS',\n 
ns_records=[NsRecord('ns1.unit.test.'),\n NsRecord('ns2.unit.test.')]))\n rs.append(RecordSet(name='ptr1', ttl=10, type='PTR',\n ptr_records=[PtrRecord('ptr1.unit.test.')]))\n rs.append(RecordSet(name='ptr2', ttl=11, type='PTR',\n ptr_records=[PtrRecord(None)]))\n rs.append(RecordSet(name='_srv1._tcp', ttl=12, type='SRV',\n srv_records=[SrvRecord(1, 2, 3, '1unit.tests.')]))\n rs.append(RecordSet(name='_srv2._tcp', ttl=13, type='SRV',\n srv_records=[SrvRecord(1, 2, 3, '1unit.tests.'),\n SrvRecord(4, 5, 6, '2unit.tests.')]))\n rs.append(RecordSet(name='txt1', ttl=14, type='TXT',\n txt_records=[TxtRecord('sample text1')]))\n rs.append(RecordSet(name='txt2', ttl=15, type='TXT',\n txt_records=[TxtRecord('sample text1'),\n TxtRecord('sample text2')]))\n rs.append(RecordSet(name='', ttl=16, type='SOA',\n soa_record=[SoaRecord()]))\n\n record_list = provider._dns_client.record_sets.list_by_dns_zone\n record_list.return_value = rs\n\n provider.populate(zone)\n\n self.assertEquals(len(zone.records), 16)\n\n def test_populate_zone(self):\n provider = self._get_provider()\n\n zone_list = provider._dns_client.zones.list_by_resource_group\n zone_list.return_value = [AzureZone(location='global'),\n AzureZone(location='global')]\n\n provider._populate_zones()\n\n self.assertEquals(len(provider._azure_zones), 1)\n\n def test_bad_zone_response(self):\n provider = self._get_provider()\n\n _get = provider._dns_client.zones.get\n _get.side_effect = CloudError(Mock(status=404), 'Azure Error')\n trip = False\n try:\n provider._check_zone('unit.test', create=False)\n except CloudError:\n trip = True\n self.assertEquals(trip, True)\n\n def test_apply(self):\n provider = self._get_provider()\n\n changes = []\n deletes = []\n for i in octo_records:\n changes.append(Create(i))\n deletes.append(Delete(i))\n\n self.assertEquals(13, provider.apply(Plan(None, zone, changes)))\n self.assertEquals(13, provider.apply(Plan(zone, zone, deletes)))\n\n def test_create_zone(self):\n provider = self._get_provider()\n\n changes = []\n for i in octo_records:\n changes.append(Create(i))\n desired = Zone('unit2.test.', [])\n\n err_msg = 'The Resource \\'Microsoft.Network/dnszones/unit2.test\\' '\n err_msg += 'under resource group \\'mock_rg\\' was not found.'\n _get = provider._dns_client.zones.get\n _get.side_effect = CloudError(Mock(status=404), err_msg)\n\n self.assertEquals(13, provider.apply(Plan(None, desired, changes)))\n\n def test_check_zone_no_create(self):\n provider = self._get_provider()\n\n rs = []\n rs.append(RecordSet(name='a1', ttl=0, type='A',\n arecords=[ARecord('1.1.1.1')]))\n rs.append(RecordSet(name='a2', ttl=1, type='A',\n arecords=[ARecord('1.1.1.1'),\n ARecord('2.2.2.2')]))\n\n record_list = provider._dns_client.record_sets.list_by_dns_zone\n record_list.return_value = rs\n\n err_msg = 'The Resource \\'Microsoft.Network/dnszones/unit3.test\\' '\n err_msg += 'under resource group \\'mock_rg\\' was not found.'\n _get = provider._dns_client.zones.get\n _get.side_effect = CloudError(Mock(status=404), err_msg)\n\n provider.populate(Zone('unit3.test.', []))\n\n self.assertEquals(len(zone.records), 0)\n","sub_path":"tests/test_octodns_provider_azuredns.py","file_name":"test_octodns_provider_azuredns.py","file_ext":"py","file_size_in_byte":13715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"456817239","text":"import pandas as pd\nimport sys\nfrom Bio import pairwise2\nfrom Bio.pairwise2 import format_alignment\nimport seaborn as sns\nimport numpy as 
np\nimport matplotlib.pyplot as plt\nimport os\n\ndataset = pd.read_csv(sys.argv[1])\npath_output = sys.argv[2]\n\ncommand = \"mkdir -p \"+path_output+\"alignment_graph_clustering\"\nprint(command)\nos.system(command)\n\npath_output = path_output+\"alignment_graph_clustering/\"\n\ncombination_sequences = []\n\nmatrix_score = []\ndistance_matrix = []\nrows_encoding = []\n\nfor i in range(len(dataset)):\n\trow_score_distance = []\n\n\tprint(\"Process sequence: \", dataset['id_sequence_by_algorithm'][i])\n\n\tfor j in range(len(dataset)):\n\n\t\tif i != j:\n\t\t\t\n\t\t\t#make alignment\n\t\t\talignments = pairwise2.align.globalms(dataset['sequence'][i], dataset['sequence'][j], 2, -1, -.5, -.1, score_only=True)\t\t\t\n\t\t\trow_score_distance.append(alignments)\n\n\t\t\tcombination1 = str(dataset['id_sequence_by_algorithm'][i])+\"-\"+ str(dataset['id_sequence_by_algorithm'][j])\n\t\t\tcombination2 = str(dataset['id_sequence_by_algorithm'][j])+\"-\"+ str(dataset['id_sequence_by_algorithm'][i])\n\n\t\t\tif combination1 not in rows_encoding and combination2 not in rows_encoding:\n\t\t\t\trows_encoding.append(combination1)\n\t\t\t\tdistance_matrix.append(alignments)\n\t\telse:\n\t\t\trow_score_distance.append(100)\n\tmatrix_score.append(row_score_distance)\n\n#export matrix data, create heatmap and export distance matrix\ndataset_export = pd.DataFrame()\ndataset_export['sequences'] = rows_encoding\ndataset_export['score'] = distance_matrix\n\ndataset_export.to_csv(path_output+\"summary_score.csv\", index=False)\n\n# create heatmap\nsns.heatmap(matrix_score, cmap=\"PiYG\")\nplt.savefig(path_output+\"heatmap_score.svg\")","sub_path":"source_code/graph_based_alignment/make_alignment_sequences.py","file_name":"make_alignment_sequences.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"410273257","text":"#Tom Lancaster (c) 2019\n#\n#Program: Simulate rolling dice.\n\n#=========================================\n# Dice Rolling Simulator\n#=========================================\nimport random as rand\nimport sys\n\n#----- Function to validate user enters number -----\ndef inputNumber(in_put):\n while True:\n try:\n userInput = int(input(in_put))\n except ValueError:\n print(\"Not a number! Try again.\")\n continue\n else:\n return userInput\n break \n\n\n#-------- Function to execute program --------\ndef rollDice():\n num_dice = inputNumber((\"How many dice do you want to roll? 
= \"))\n roll_input = \"Y\"\n\n while (roll_input != 'N'): \n roll_input = input(\"\\nRoll?(Y/N) = \")\n roll_input = roll_input.upper()\n\n dice_rolled = [] #List to save results of rolled dice\n dice_result = 0\n \n #User selects to roll dice\n if(roll_input == \"Y\"):\n for dice in range(num_dice):\n single_dice = rand.randint(1,6)\n dice_rolled.append(single_dice)\n dice_result += single_dice\n\n print(\"The dice values are\", end=\" \")\n for i in range(num_dice):\n if(i == num_dice-1):\n print(\"%d\" % dice_rolled[i],end=\" \")\n else:\n print(\"%d,\" % dice_rolled[i],end=\" \")\n\n print(\"for a total of %d\" % dice_result)\n\n #User quits the program\n print(\"Thanks for rolling, homie!\\n\")\n sys.exit(0)\n\n#----- Run Program -----\nrollDice()\n","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"199891386","text":"import nba_api.stats.endpoints.playbyplayv2 as pbp\nimport pandas as pd\nimport time\nimport os\n\ngamelog_path = './data/nba_gamelogs/'\npbp_path = './data/nba_pbp/'\n\ngamelogs = [f for f in os.listdir(gamelog_path) if not f.startswith('.')]\n# gamelogs = './data/nba_gamelogs/nba_gamelogs_2018-19.csv'\n\nfor gl in gamelogs:\n print(gl)\n csv_path = gamelog_path + gl\n gl_df = pd.read_csv(csv_path, dtype='str')\n game_ids = list(gl_df.GAME_ID.unique())\n for g in game_ids:\n time.sleep(2)\n print(g)\n playbyplay = pbp.PlayByPlayV2(game_id=str(g), start_period=0, end_period=0)\n pbp_df = playbyplay.get_data_frames()\n pbp_df = pbp_df[0]\n csv_string = pbp_path + g + '.csv'\n pbp_df.to_csv(csv_string)\n\n","sub_path":"nba_api_scripts/get_pbp.py","file_name":"get_pbp.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"391071026","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import BookItem\nfrom scrapy.linkextractors import LinkExtractor\n\n\nclass BookSpider(scrapy.Spider):\n name = 'book'\n allowed_domains = ['books.toscrape.com']\n start_urls = ['http://books.toscrape.com/']\n\n #书籍列表页面的解析函数\n def parse(self, response):\n #提取书籍页面中每本书的链接\n le = LinkExtractor(restrict_css='article.product_pod h3')\n for link in le.extract_links(response):\n yield scrapy.Request(link.url, callback=self.parse_book)\n \n #提取下一页的链接\n le = LinkExtractor(restrict_css='ul.pager li.next')\n links = le.extract_links(response)\n if links:\n next_url = links[0].url\n yield scrapy.Request (next_url, callback=self.parse)\n \n\n #书籍页面的解析函数\n def parse_book(self,response):\n book = BookItem()\n sel = response.css('div.product_main')\n book['name'] = sel.xpath('./h1/text()').extract_first()\n book['price'] = sel.css('p.price_color::text').extract_first()\n book['review_rating'] = sel.css('p.star-rating::attr(class)').re_first('star-rating ([A-Za-z]+)')\n \n sel = response.css('table.table.table-striped')\n book['upc'] = sel.xpath('(.//tr)[1]/td/text()').extract_first()\n book['stock'] = sel.xpath('(.//tr)[6]/td/text()').re('\\((\\d+) available\\)')\n book['review_num'] = sel.xpath('(.//tr)[last()]/td/text()').extract_first()\n book['url'] = response.url\n yield book","sub_path":"Python_Spyder/bookstores/bookstores/spiders/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"613547952","text":"#!/usr/bin/env python3\n\nimport 
argparse\nimport xml.etree.ElementTree as ET\n\nfrom mesh import*\n\nif __name__ == '__main__':\n mesh = Mesh()\n \n print('Starting main...')\n\n parser = argparse.ArgumentParser(prog=\"Mesh data interpolation script\", description='Reads mesh and control file. Interpolates control defined in cells to mesh nodes.')\n parser.add_argument('mesh', type=str, help='path to the mesh file')\n parser.add_argument('barycenters', type=str, help='path to the barycenter file')\n\n args = parser.parse_args()\n print('[Calculate Barycenters]: Successfully parsed arguments')\n\n mesh.read_mesh_xml(args.mesh)\n mesh.write_barycenters_xml(args.barycenters)\n exit()\n","sub_path":"MOTIONS-toolset/toolset/calculate_barycenters.py","file_name":"calculate_barycenters.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"613367862","text":"import asyncio\r\nimport os\r\nimport signal\r\n\r\nimport libvirtaio\r\n\r\nimport vanir\r\nimport vanir.api\r\nimport vanir.api.admin\r\nimport vanir.api.internal\r\nimport vanir.api.misc\r\nimport vanir.log\r\nimport vanir.utils\r\nimport vanir.vm.vanirvm\r\n\r\ndef sighandler(loop, signame, servers):\r\n print('caught {}, exiting'.format(signame))\r\n for server in servers:\r\n server.close()\r\n loop.stop()\r\n\r\nparser = vanir.tools.VanirArgumentParser(description='vanir OS daemon')\r\nparser.add_argument('--debug', action='store_true', default=False,\r\n help='Enable verbose error logging (all exceptions with full '\r\n 'tracebacks) and also send tracebacks to Admin API clients')\r\n\r\ndef main(args=None):\r\n loop = asyncio.get_event_loop()\r\n libvirtaio.virEventRegisterAsyncIOImpl(loop=loop)\r\n try:\r\n args = parser.parse_args(args)\r\n except:\r\n loop.close()\r\n raise\r\n\r\n args.app.register_event_handlers()\r\n\r\n if args.debug:\r\n vanir.log.enable_debug()\r\n\r\n servers = loop.run_until_complete(vanir.api.create_servers(\r\n vanir.api.admin.VanirAdminAPI,\r\n vanir.api.internal.VanirInternalAPI,\r\n vanir.api.misc.VanirMiscAPI,\r\n app=args.app, debug=args.debug))\r\n\r\n socknames = []\r\n for server in servers:\r\n for sock in server.sockets:\r\n socknames.append(sock.getsockname())\r\n\r\n for signame in ('SIGINT', 'SIGTERM'):\r\n loop.add_signal_handler(getattr(signal, signame),\r\n sighandler, loop, signame, servers)\r\n\r\n vanir.utils.systemd_notify()\r\n # make sure children will not inherit this\r\n os.environ.pop('NOTIFY_SOCKET', None)\r\n\r\n try:\r\n loop.run_forever()\r\n loop.run_until_complete(asyncio.wait([\r\n server.wait_closed() for server in servers]))\r\n for sockname in socknames:\r\n try:\r\n os.unlink(sockname)\r\n except FileNotFoundError:\r\n args.app.log.warning(\r\n 'socket {} got unlinked sometime before shutdown'.format(\r\n sockname))\r\n finally:\r\n loop.close()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"vanir/tools/vanirsd.py","file_name":"vanirsd.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"355447075","text":"## #########################################################\n##\n## Configuration for the production of the ICHEP VBTF ntuple\n## ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n## MC, spring10\n##\n## Nikolaos Rompotis - Imperial College London\n## 22 June 2010\n##\n## #########################################################\nimport FWCore.ParameterSet.Config as cms\n\nprocess = 
cms.Process(\"PAT\")\n\n\n\nprocess.options = cms.untracked.PSet(\n Rethrow = cms.untracked.vstring('ProductNotFound')\n)\n\n#process.MessageLogger = cms.Service(\n# \"MessageLogger\",\n# categories = cms.untracked.vstring('info', 'debug','cout')\n# )\n\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.threshold = cms.untracked.string(\"INFO\")\nprocess.MessageLogger.cerr.FwkSummary = cms.untracked.PSet(\n reportEvery = cms.untracked.int32(1000000),\n limit = cms.untracked.int32(10000000)\n )\nprocess.MessageLogger.cerr.FwkReport = cms.untracked.PSet(\n reportEvery = cms.untracked.int32(100000),\n limit = cms.untracked.int32(10000000)\n )\nprocess.options = cms.untracked.PSet(\n wantSummary = cms.untracked.bool(True)\n )\n\n\n\n\n# source\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n # SOME DATA FILE TO BE PUT HERE\n #'rfio:/castor/cern.ch/user/r/rompotis/DATA_STUDIES/Spring10/sample_WminusToENu-CTEQ66-powheg_Spring10-START3X_V26_AODSIM-v2.root',\n #'file:rfio:/castor/cern.ch/user/r/rompotis/DATA_STUDIES/Spring10/sample_WenuSpring10START3X_V26_S09-v1_AODSIM.root',\n 'rfio:/castor/cern.ch/cms/store/relval/CMSSW_3_9_1/RelValZEE/GEN-SIM-RECO/START39_V3-v1/0062/187296DA-39E4-DF11-A172-003048679296.root'\n )\n)\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )\n\n## Load additional processes\nprocess.load(\"Configuration.StandardSequences.Geometry_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\n## global tags:\nprocess.GlobalTag.globaltag = cms.string('START39_V3::All')\n\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\n\n\n################################################################################################\n### P r e p a r a t i o n o f t h e P A T O b j e c t s f r o m A O D ###\n################################################################################################\n\n## pat sequences to be loaded:\n#process.load(\"PhysicsTools.PFCandProducer.PF2PAT_cff\")\nprocess.load(\"PhysicsTools.PatAlgos.patSequences_cff\")\n#process.load(\"PhysicsTools.PatAlgos.triggerLayer1.triggerProducer_cff\")\n##\n#\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n## MET creation <=== WARNING: YOU MAY WANT TO MODIFY THIS PART OF THE CODE %%%%%%%%%%%%%\n## specify the names of the MET collections that you need here %%%%\n## #%%\n## if you don't specify anything the default MET is the raw Calo MET #%%\nprocess.caloMET = process.patMETs.clone( #%%\n metSource = cms.InputTag(\"met\",\"\",\"RECO\"),\n addTrigMatch = cms.bool(False),\n addMuonCorrections = cms.bool(False),\n addGenMET = cms.bool(False),\n)\nprocess.tcMET = process.patMETs.clone( #%%\n metSource = cms.InputTag(\"tcMet\",\"\",\"RECO\"),\n addTrigMatch = cms.bool(False),\n addMuonCorrections = cms.bool(False),\n addGenMET = cms.bool(False),\n)\nprocess.pfMET = process.patMETs.clone( #%%\n metSource = cms.InputTag(\"pfMet\",\"\",\"RECO\"),\n addTrigMatch = cms.bool(False),\n addMuonCorrections = cms.bool(False),\n addGenMET = cms.bool(False),\n)\n## specify here what you want to have on the plots! 
<===== MET THAT YOU WANT ON THE PLOTS %%%%%%%\nmyMetCollection = 'caloMET'\nmyPfMetCollection = 'pfMET'\nmyTcMetCollection = 'tcMET'\n## modify the sequence of the MET creation: #%%\nprocess.makePatMETs = cms.Sequence(process.caloMET*process.tcMET*process.pfMET)\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n## modify the final pat sequence: keep only electrons + METS (muons are needed for met corrections)\nprocess.load(\"RecoEgamma.EgammaIsolationAlgos.egammaIsolationSequence_cff\")\n#process.patElectronIsolation = cms.Sequence(process.egammaIsolationSequence)\n\nprocess.patElectrons.isoDeposits = cms.PSet()\nprocess.patElectrons.userIsolation = cms.PSet()\nprocess.patElectrons.addElectronID = cms.bool(True)\nprocess.patElectrons.electronIDSources = cms.PSet(\n simpleEleId95relIso= cms.InputTag(\"simpleEleId95relIso\"),\n simpleEleId90relIso= cms.InputTag(\"simpleEleId90relIso\"),\n simpleEleId85relIso= cms.InputTag(\"simpleEleId85relIso\"),\n simpleEleId80relIso= cms.InputTag(\"simpleEleId80relIso\"),\n simpleEleId70relIso= cms.InputTag(\"simpleEleId70relIso\"),\n simpleEleId60relIso= cms.InputTag(\"simpleEleId60relIso\"),\n simpleEleId95cIso= cms.InputTag(\"simpleEleId95cIso\"),\n simpleEleId90cIso= cms.InputTag(\"simpleEleId90cIso\"),\n simpleEleId85cIso= cms.InputTag(\"simpleEleId85cIso\"),\n simpleEleId80cIso= cms.InputTag(\"simpleEleId80cIso\"),\n simpleEleId70cIso= cms.InputTag(\"simpleEleId70cIso\"),\n simpleEleId60cIso= cms.InputTag(\"simpleEleId60cIso\"),\n )\n##\nprocess.patElectrons.addGenMatch = cms.bool(False)\nprocess.patElectrons.embedGenMatch = cms.bool(False)\nprocess.patElectrons.usePV = cms.bool(False)\n##\nprocess.load(\"ElectroWeakAnalysis.WENu.simpleEleIdSequence_cff\")\n#\nprocess.patElectronIDs = cms.Sequence(process.simpleEleIdSequence)\nprocess.makePatElectrons = cms.Sequence(process.patElectronIDs*process.patElectrons)\n# process.makePatMuons may be needed depending on how you calculate the MET\nprocess.makePatCandidates = cms.Sequence(process.makePatElectrons+process.makePatMETs)\nprocess.patDefaultSequence = cms.Sequence(process.makePatCandidates)\n##\n## ################################################################################\n##\n## the filter to select the candidates from the data samples\n##\n##\n## WARNING: you may want to modify this item:\nHLT_process_name = \"HLT\" # REDIGI for the Spring10 production traditional MC / HLT for the powheg samples or data\n# trigger path selection\nHLT_path_name = \"HLT_Photon10_L1R\" #= \"HLT_Ele15_LW_L1R\" #\n# trigger filter name\nHLT_filter_name = \"hltL1NonIsoHLTNonIsoSinglePhotonEt10HcalIsolFilter\"\n#\n\n\nprocess.wenuFilter = cms.EDFilter('WenuCandidateFilter',\n ### the input collections needed:\n electronCollectionTag = cms.untracked.InputTag(\"patElectrons\",\"\",\"PAT\"),\n metCollectionTag = cms.untracked.InputTag(myMetCollection,\"\",\"PAT\"),\n pfMetCollectionTag = cms.untracked.InputTag(myPfMetCollection,\"\",\"PAT\"),\n tcMetCollectionTag = cms.untracked.InputTag(myTcMetCollection,\"\",\"PAT\"),\n triggerCollectionTag = cms.untracked.InputTag(\"TriggerResults\",\"\",HLT_process_name),\n triggerEventTag = cms.untracked.InputTag(\"hltTriggerSummaryAOD\",\"\",HLT_process_name),\n hltpath = cms.untracked.string(HLT_path_name),\n hltpathFilter = cms.untracked.InputTag(HLT_filter_name,\"\",HLT_process_name),\n ebRecHits = cms.untracked.InputTag(\"reducedEcalRecHitsEB\"),\n eeRecHits = cms.untracked.InputTag(\"reducedEcalRecHitsEE\"),\n 
PrimaryVerticesCollection = cms.untracked.InputTag(\"offlinePrimaryVertices\"),\n ### here the preselection is applied\n # fiducial cuts:\n BarrelMaxEta = cms.untracked.double(1.4442),\n EndCapMinEta = cms.untracked.double(1.566),\n EndCapMaxEta = cms.untracked.double(2.5),\n # demand ecal driven electron:\n useEcalDrivenElectrons = cms.untracked.bool(True),\n # demand offline spike cleaning with the Swiss Cross criterion:\n useSpikeRejection = cms.untracked.bool(False),\n spikeCleaningSwissCrossCut = cms.untracked.double(0.95),\n # demand geometrically matched to an HLT object with ET>15GeV\n useTriggerInfo = cms.untracked.bool(False),\n electronMatched2HLT = cms.untracked.bool(True),\n electronMatched2HLT_DR = cms.untracked.double(0.1),\n useHLTObjectETCut = cms.untracked.bool(True),\n hltObjectETCut = cms.untracked.double(15.),\n useExtraTrigger = cms.untracked.bool(False),\n # ET Cut in the SC\n ETCut = cms.untracked.double(20.),\n METCut = cms.untracked.double(0.),\n # reject events with a 2nd electron with ET > 20 that passes the WP95%\n vetoSecondElectronEvents = cms.untracked.bool(False),\n storeSecondElectron = cms.untracked.bool(True),\n ETCut2ndEle = cms.untracked.double(20.),\n vetoSecondElectronIDType = cms.untracked.string(\"simpleEleId95relIso\"),\n vetoSecondElectronIDSign = cms.untracked.string(\"=\"),\n vetoSecondElectronIDValue = cms.untracked.double(7.),\n # Other parameters of the code - leave them as they are\n useValidFirstPXBHit = cms.untracked.bool(False),\n useConversionRejection = cms.untracked.bool(False),\n useExpectedMissingHits = cms.untracked.bool(False),\n maxNumberOfExpectedMissingHits = cms.untracked.int32(1),\n # calculate some new cuts\n calculateValidFirstPXBHit = cms.untracked.bool(True),\n calculateExpectedMissingHits = cms.untracked.bool(True),\n )\n####################################################################################\n##\n## the W selection that you prefer included in another cfg\nfrom ElectroWeakAnalysis.WENu.simpleCutBasedSpring10SelectionBlocks_cfi import *\n\nselection_inverse = cms.PSet (\n deta_EB_inv = cms.untracked.bool(True),\n deta_EE_inv = cms.untracked.bool(True)\n )\n\n####################################################################################\n#\n# we need to store jet information, hence we have to produce the jets:\nprocess.load(\"JetMETCorrections.Configuration.CorrectedJetProducersDefault_cff\")\n#process.load(\"JetMETCorrections.Configuration.CorrectedJetProducers_cff\")\nprocess.jetSequence = cms.Sequence( process.ak5CaloJetsL2L3 )\nprocess.pfjetAK5Sequence = cms.Sequence( process.ak5PFJetsL2L3 )\n\nprocess.ourJetSequence = cms.Sequence( process.jetSequence * process.pfjetAK5Sequence )\n\n\n\n##\n## and the plot creator\nprocess.plotter = cms.EDAnalyzer('WenuPlots',\n # selection in use: wont be used - we have usePrecalcID true later\n selection_80relIso,\n selection_inverse,\n # The selection to be used here:\n usePrecalcID = cms.untracked.bool(True),\n usePrecalcIDType = cms.untracked.string('simpleEleId80relIso'),\n usePrecalcIDSign = cms.untracked.string('='),\n usePrecalcIDValue = cms.untracked.double(7),\n # some extra information on the ntuple production:\n includeJetInformationInNtuples = cms.untracked.bool(True),\n caloJetCollectionTag = cms.untracked.InputTag('ak5CaloJetsL2L3'),\n pfJetCollectionTag = cms.untracked.InputTag('ak5PFJetsL2L3'),\n DRJetFromElectron = cms.untracked.double(0.3),\n #\n wenuCollectionTag = cms.untracked.InputTag(\"wenuFilter\",\"selectedWenuCandidates\",\"PAT\"),\n 
WENU_VBTFselectionFileName = cms.untracked.string(\"WENU_VBTFselection.root\"),\n WENU_VBTFpreseleFileName = cms.untracked.string(\"WENU_VBTFpreselection.root\"),\n DatasetTag = cms.untracked.int32(100),\n storeExtraInformation = cms.untracked.bool(True),\n storeAllSecondElectronVariables = cms.untracked.bool(True),\n )\n#\nprocess.p = cms.Path( process.ourJetSequence*process.patDefaultSequence*process.wenuFilter*process.plotter)\n\n\n","sub_path":"ElectroWeakAnalysis/WENu/test/pat_WenuVBTF_ntuple_forData.py","file_name":"pat_WenuVBTF_ntuple_forData.py","file_ext":"py","file_size_in_byte":13648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"549067680","text":"#!/usr/bin/env python3\n\n\nimport os\nimport sys\nimport time\nimport numpy\nimport datetime\nimport threading\n\nimport rospy\nfrom std_msgs.msg import Float64\nfrom std_msgs.msg import String\n\n\n# --\nname = 'sisiv_logger'\nflag_name = 'sisiv_trigger'\ndata_dir = '/home/amigos/data/experiments/'\nsave_dir = os.path.join(data_dir, name)\n# --\n\n\nsis_list = ['2l', '2r', '3l', '3r',\n '4l', '4r', '5l', '5r',\n '1lu', '1ll', '1ru', '1rl']\n\n\nclass sisiv_logger(object):\n\n def __init__(self):\n self.flag = 0\n self.saveto = ''\n self.trigger = ''\n self.timestamp = 0\n self.sis_vol = [0.] * 12\n self.sis_cur = [0.] * 12\n self.filename_vol = ''\n self.filename_cur = ''\n\n def set_flag(self, req):\n trigger = req.data\n if trigger == '': self.flag = 0\n else:\n if 'lo' in trigger:\n self.timestamp = trigger.replace('-lo', '')\n lo = '-lo'\n else: self.timestamp, lo = trigger, ''\n exp_time = datetime.datetime.fromtimestamp(float(self.timestamp))\n self.ymd = exp_time.strftime('%Y%m%d_')\n self.hms = exp_time.strftime('%H%M%S')\n self.saveto = os.path.join(save_dir, self.ymd + self.hms + lo)\n os.makedirs(self.saveto)\n self.filename_vol = self.saveto + '/sis_vol.txt'\n self.filename_cur = self.saveto + '/sis_cur.txt'\n f_vol = open(self.filename_vol, 'a')\n f_cur = open(self.filename_cur, 'a')\n print(\"FILE OPEN\")\n f_vol.close()\n f_cur.close()\n self.flag = 1\n\n def callback_voltage(self, req, idx):\n if self.flag == 0:\n return\n\n self.sis_vol[idx] = req.data\n return\n\n def callback_current(self, req, idx):\n if self.flag == 0:\n return\n\n self.sis_cur[idx] = req.data\n return\n\n def log(self):\n while not rospy.is_shutdown():\n if self.flag == 0:\n continue\n\n sis_vol = ' '.join(map(str, self.sis_vol)) + '\\n'\n sis_cur = ' '.join(map(str, self.sis_cur)) + '\\n'\n f_vol = open(self.filename_vol, 'a')\n f_cur = open(self.filename_cur, 'a')\n f_vol.write(sis_vol)\n f_cur.write(sis_cur)\n f_vol.close()\n f_cur.close()\n\n time.sleep(1e-2) # 10 msec.\n\n def start_thread(self):\n th = threading.Thread(target=self.log)\n th.setDaemon(True)\n th.start()\n\n\nif __name__ == '__main__':\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n pass\n\n st = sisiv_logger()\n st.start_thread()\n rospy.init_node(name)\n # print('[sisiv_logger] : START SUBSCRIBER ... 
')\n sis_vol_sub_list = [rospy.Subscriber('sis_vol_{}'.format(sis),\n Float64,\n st.callback_voltage,\n callback_args = idx)\n for idx, sis in enumerate(sis_list)]\n sis_cur_sub_list = [rospy.Subscriber('sis_cur_{}'.format(sis),\n Float64,\n st.callback_current,\n callback_args = idx)\n for idx, sis in enumerate(sis_list)]\n flag_sub = rospy.Subscriber(flag_name, String, st.set_flag)\n rospy.spin()\n","sub_path":"scripts/sisiv_logger.py","file_name":"sisiv_logger.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"610940291","text":"import json\n\nclass BagsOfWords:\n bag = None\n bag_reverse = None\n next_token = 0\n max_token = 0\n\n def __init__(self):\n setting = json.load(\n open(\"./setting.json\", \"r\", encoding=\"utf-8\")\n )\n self.bag = {}\n self.bag_reverse = {}\n\n tokens = setting[\"spacial_token\"]\n\n for word in tokens.keys():\n self.addWord(word, id=tokens[word])\n \n self.max_token = setting[\"max_sentence\"]\n\n \n \n def getWord(self, token):\n if token not in self.bag:\n raise TypeError(\"Unknown this token.\")\n\n return self.bag[token]\n\n def addWord(self, word, id=None):\n if id is None:\n while self.next_token in self.bag:\n self.next_token = self.next_token + 1\n\n self.bag_reverse[word] = self.next_token\n self.bag[self.next_token] = word\n self.next_token = self.next_token + 1\n else:\n if id < 0:\n raise ValueError(\"An ID must more than 0!\")\n if id in self.bag:\n raise ValueError(\"There is an ID already!\")\n \n self.bag_reverse[word] = id\n self.bag[id] = word\n \n def getToken(self, word):\n if word not in self.bag_reverse:\n raise KeyError(\"Unknown this word {0}\".format(word))\n return self.bag_reverse[word]\n \n def has(self, word):\n return word in self.bag_reverse\n\n def length(self):\n return len(self.bag)\n\n","sub_path":"src/classes/BagsOfWords.py","file_name":"BagsOfWords.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"68352298","text":"from django.urls import path\nfrom django.conf.urls import url\n\nfrom .djangoapps.sample import views as SampleViews\nfrom .djangoapps.login import views as LoginViews\nfrom .djangoapps.index import views as IndexViews\n\nurlpatterns = [\n path('sample', SampleViews.sample, name='sample'),\n path('login', LoginViews.login, name='login'),\n\n path('', IndexViews.index, name='index'),\n path('api_getProName', IndexViews.api_getProName, name='api_getProName'),\n path('api_getCourse', IndexViews.api_getCourse, name='api_getCourse'),\n path('api_getClass', IndexViews.api_getClass, name='api_getClass'),\n path('api_getProList', IndexViews.api_getProList, name='api_getProList'),\n path('api_getTime', IndexViews.api_getTime, name='api_getTime'),\n path('api_getCheck', IndexViews.api_getCheck, name='api_getCheck'),\n path('api_getWeek', IndexViews.api_getWeek, name='api_getWeek'),\n path('api_getTwoTab', IndexViews.api_getTwoTab, name='api_getTwoTab'),\n path('api_getFourTab', IndexViews.api_getFourTab, name='api_getFourTab'),\n path('api_saveData', IndexViews.api_saveData, name='api_saveData'),\n]\n","sub_path":"backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"79763963","text":"from django.db import models\nfrom . 
import date_time\r\n\r\n\r\nclass Todo(models.Model):\r\n title = models.CharField(max_length=255)\r\n created_time = models.CharField(max_length=255)\r\n finish = models.BooleanField(default=False)\r\n\r\n class Meta:\r\n db_table = 'todo'\r\n\r\n def __str__(self):\r\n return self.title\r\n\r\n @classmethod\r\n def all(cls):\r\n todo_list = Todo.objects.all().order_by('-created_time')\r\n l = [todo.json() for todo in todo_list]\r\n d = dict(todo_list=l)\r\n return d\r\n\r\n @classmethod\r\n def new(cls, form):\r\n d = {\r\n 'title': form.get('title', ''),\r\n 'created_time': date_time(),\r\n }\r\n todo = Todo.objects.create(**d)\r\n return todo\r\n\r\n @classmethod\r\n def update(cls, form):\r\n todo = cls.objects.get(id=form['id'])\r\n todo.title = form.get('title', '')\r\n todo.save()\r\n return todo\r\n\r\n @classmethod\r\n def delete_by_id(cls, id):\r\n todo = cls.objects.get(id=id)\r\n todo.delete()\r\n return todo\r\n\r\n def json(self):\r\n d = {\r\n 'id': self.id,\r\n 'title': self.title,\r\n 'created_time': self.created_time,\r\n }\r\n return d\r\n","sub_path":"backend/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"213756120","text":"import random\nimport argparse\n\n##\n# @brief Create a new graph.\n#\n# @param order The number of nodes in the graph.\n# @param density The density of the graph (p/n).\n# @param minimum The minimum number of sets.\n#\n# @return A tuple of (edge list, list of sets).\ndef create_graph(order, density, minimum):\n nb_set = order\n L = [[i] for i in range(order)] \n vertex = []\n nb_generation = (int)(density * order)\n\n random.seed(None)\n\n i = 0 \n while (nb_set > minimum and i < nb_generation): \n first = random.randrange(nb_set)\n second = random.randrange(nb_set)\n \n # create vertex\n vertex.append((L[first][random.randrange(len(L[first]))],L[second][random.randrange(len(L[second]))]))\n \n # link each set\n if (first != second):\n L[first] += L[second]\n del L[second]\n nb_set -= 1\n\n i += 1\n \n tmp = [(int)(len(l) * (nb_generation - i)/order) for l in L]\n for selection, nb_iteration in enumerate(tmp):\n for _ in range(nb_iteration):\n first = random.randrange(len(L[selection]))\n second = random.randrange(len(L[selection]))\n\n vertex.append((L[selection][first],L[selection][second]))\n\n return (vertex, L)\n \n\ndef write_graph(order, density, minimum, path):\n print(\"Start generating the graph.\")\n vertex, L = create_graph(order, density, minimum)\n print(\"Generated all sets => {}\".format(L))\n\n print(\"Write the dotfile.\")\n \n with open(path, \"w\") as file:\n file.write(\"graph test {\\n\")\n for s, d in vertex:\n file.write(\"\\t{} -- {};\\n\".format(s, d))\n file.write(\"}\")\n\n print(\"end.\")\n\nparser = argparse.ArgumentParser(description='Create a dot file')\nparser.add_argument('order', type=int, help='order of the graph')\nparser.add_argument('density', type=float, help='density of the graph')\nparser.add_argument('minimum', type=int, help='minimum of set in the graph')\nparser.add_argument('path', type=str, help='Path to the output')\n\nargs = parser.parse_args()\n\nprint(\"argument => {} | {} | {} | {}\".format(args.order, args.density, args.minimum, args.path))\nwrite_graph(args.order, args.density, args.minimum, args.path)\n","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"80943706","text":"def merge(nums1, m,
nums2, n):\n \"\"\"\n :type nums1: List[int]\n :type m: int\n :type nums2: List[int]\n :type n: int\n :rtype: None Do not return anything, modify nums1 in-place instead.\n \"\"\"\n i = 0\n x = 0\n nums1_copy = nums1[:m]\n nums1[:] = []\n while i < m and x < n:\n if nums1_copy[i] < nums2[x]:\n nums1.append(nums1_copy[i])\n i += 1\n else:\n nums1.append(nums2[x])\n x += 1\n\n if i < m:\n nums1 += nums1_copy[i:m]\n if x < n:\n nums1 += nums2[x:n]","sub_path":"Week_01/G20200343030393/LeetCode_88_393.py","file_name":"LeetCode_88_393.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"649091503","text":"from hanoi_tower import *\nimport pdb\n\ngame_board = Board()\ngame_running = True\n\ndef print_towers(tower_stack):\n for disk in tower_stack:\n print(disk.size)\n print(\"**\")\n\n# def print_towers(tower):\n# if len(tower) > 0:\n# spaces = len(tower) - 1 \n# for disk in tower:\n# print(spaces * \" \" + disk.size * \"_\" + str(disk.size) + \"_\" * disk.size)\n# spaces -= 1\n# else:\n# print('''\n# 0\n# 0\n# 0\n# 0\n# _____0_____\n# ''')\n # print(len(tower) * \"_\" + \"^\" + \"_\" * len(tower))\n# _1_\n# __2__\n# ___3___\n# ____4____\n# _____5_____\n\ndef setup_game(disk_count=5):\n\n for i in range(3):\n game_board.list_of_towers.append(Tower(i))\n \n\n for i in range(disk_count, 0, -1):\n game_board.list_of_towers[0].stack.append(Disk(i))\n\n# def player_action():\n# from_tower = int(input(\"From tower: \")) - 1\n# to_tower = int(input(\"To Tower: \")) - 1\n\n# def change_place_disk(tower):\n# pass\n\n\ndef main():\n setup_game()\n global game_running\n moves_made = 0\n\n end_list = game_board.list_of_towers[0].stack.copy()\n\n # game_board.list_towers()\n for tower in game_board.list_of_towers:\n print_towers(tower.stack)\n\n while game_running:\n correct = False\n \n while not correct:\n print(\"Pick tower: [1 - 3]\")\n from_tower = int(input(\"From tower: \")) - 1\n\n to_tower = int(input(\"To Tower: \")) - 1\n moves_made += 1\n\n if from_tower <= 2 and from_tower >= 0 and to_tower <= 2 and to_tower >= 0:\n correct = True\n\n if game_board.list_of_towers[to_tower].peek() == []:\n game_board.list_of_towers[to_tower].add_to_tower(game_board.list_of_towers[from_tower].remove_from_tower())\n elif game_board.list_of_towers[to_tower].peek().size < game_board.list_of_towers[from_tower].peek().size:\n print(\"\\n********Wrong move**********\")\n else:\n game_board.list_of_towers[to_tower].add_to_tower(game_board.list_of_towers[from_tower].remove_from_tower())\n\n\n print(\"\\n\")\n # game_board.list_towers()\n for tower in game_board.list_of_towers:\n print_towers(tower.stack)\n\n print(\"**************************************\\n\")\n\n\n if game_board.list_of_towers[2].stack == end_list:\n print(\"Congratulations! 
You won!\")\n print(f\"Total moves: {moves_made}\")\n game_running = False\n \n # for tower in game_board.list_of_towers:\n # print(tower.stack)\n \n \n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"\\nGame closed by user\")","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"160586211","text":"\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport math\nnamelist = [\"LinearRegression\",\"LassoRegression\",\"RandomForest\"]\ndef Lasso_visualizationGraph(name):\n Lasso_data0 = pd.read_csv(os.getcwd() + \"/../test/99_test_result_\"+name+\"_D0.csv\", index_col=0)\n Lasso_data7 = pd.read_csv(os.getcwd() + \"/../test/99_test_result_\"+name+\"_D7.csv\", index_col=0)\n Lasso_data14 = pd.read_csv(os.getcwd() + \"/../test/99_test_result_\"+name+\"_D14.csv\", index_col=0)\n Lasso_data28 = pd.read_csv(os.getcwd() + \"/../test/99_test_result_\"+name+\"_D28.csv\", index_col=0)\n return Lasso_data0, Lasso_data7, Lasso_data14, Lasso_data28\ndef RF_visualizationGraph(name):\n RF_data0 = pd.read_csv(os.getcwd() + \"/../test/99_test_result_\" + name + \"_D0.csv\", index_col=0)\n RF_data7 = pd.read_csv(os.getcwd() + \"/../test/99_test_result_\" + name + \"_D7.csv\", index_col=0)\n RF_data14 = pd.read_csv(os.getcwd() + \"/../test/99_test_result_\" + name + \"_D14.csv\", index_col=0)\n RF_data28 = pd.read_csv(os.getcwd() + \"/../test/99_test_result_\" + name + \"_D28.csv\", index_col=0)\n return RF_data0, RF_data7, RF_data14, RF_data28\n\n# Lasso_data0, Lasso_data7, Lasso_data14, Lasso_data28 =Lasso_visualizationGraph(\"LassoRegression\")\n# RF_data0, RF_data7, RF_data14, RF_data28 = RF_visualizationGraph(\"RandomForest\")\n\ndef compareIndex(lasso, rf, day):\n df = pd.DataFrame(columns=(\"prediction\",\"realValue\",\"accuracy\"))\n avr = 0;\n for i in range(len(lasso)):\n if (lasso.ix[i, 3] > rf.ix[i, 3]):\n df.loc[i] = [lasso.ix[i, 1],lasso.ix[i, 2],lasso.ix[i, 3]]\n else:\n df.loc[i] = [rf.ix[i, 1],rf.ix[i, 2],rf.ix[i, 3]]\n avr += df.ix[i, 2]\n avr /= len(lasso)\n df.to_csv(os.getcwd() + \"/../test/d\"+str(day)+\".csv\", encoding=\"utf-8\")\n return df, avr\n\n\n# df0, avr0 = compareIndex(Lasso_data0, RF_data0, 0)\n# df7, avr7 = compareIndex(Lasso_data7, RF_data7, 7)\n# df14, avr14 = compareIndex(Lasso_data14, RF_data14, 14)\n# df28, avr28 = compareIndex(Lasso_data28, RF_data28, 28)\n#\n# print(df0)\n# print(\"-\"*80)\n# print(df7)\n# print(\"-\"*80)\n# print(df14)\n# print(\"-\"*80)\n# print(df28)\n# print(\"-\"*80)\n# print(avr0)\n# print(avr7)\n# print(avr14)\n# print(avr28)\n\n\ndata0 = pd.read_csv(os.getcwd() + \"/../test/d0.csv\", index_col=0)\ndata7 = pd.read_csv(os.getcwd() + \"/../test/d7.csv\", index_col=0)\ndata14 = pd.read_csv(os.getcwd() + \"/../test/d14.csv\", index_col=0)\ndata28 = pd.read_csv(os.getcwd() + \"/../test/d28.csv\", index_col=0)\n\ndef drawPlt(data0, data7, data14, data28, index):\n real_value = [data0[\"realValue\"][index],\n data0[\"prediction\"][index],\n data7[\"prediction\"][index],\n data14[\"prediction\"][index],\n data28[\"prediction\"][index]]\n prediction_model = [1, 2, 3, 4, 5]\n plt.figure(figsize=(10, 8))\n red_patch = mpatches.Patch(color=\"red\", label=\"Final Audience\")\n blue_patch = mpatches.Patch(color=\"green\", label=\"Prediction Value\")\n\n plt.legend(handles=[red_patch, blue_patch], loc='upper center', bbox_to_anchor=(0.5, 
1), ncol=3)\n width = 0.35\n barlist = plt.bar(prediction_model, real_value, width, alpha = 0.9)\n barlist[0].set_color('r')\n for i in range(1,len(barlist)):\n barlist[i].set_color('g')\n\n plt.ylim(min(real_value) - min(real_value) / 3, max(real_value) + min(real_value) / 4)\n\n plt.xlim(0, 6)\n plt.xlabel('Model', position=(1, 0), fontsize=15, verticalalignment='center', horizontalalignment='center')\n plt.ylabel('Audience', position=(0, 1.02), fontsize=15, verticalalignment='bottom', horizontalalignment='left',\n rotation='horizontal')\n plt.yticks([], fontsize=12)\n plt.title('Prediction for Movie Audience', fontsize=20, verticalalignment='bottom', position=(0.5, 1.02))\n plt.xticks(prediction_model, (\"Actual_Value\", \"D0\", \"D7\", \"D14\", \"D28\"), fontsize=12)\n\n for a, b in zip(prediction_model, real_value):\n a -= 0.25\n val = int(b)\n b += (b / 100)\n plt.text(a, b, str(val))\n\n fig = plt.gcf()\n fig.savefig(os.getcwd()+'/../test/graph/plt_'+str(index)+'.png')\n plt.close()\n\nALL = len(data0)\nfor j in range(0,ALL):\n drawPlt(data0, data7, data14, data28, j)\n\n\n# date = 0\n# data = pd.read_csv(os.getcwd() + \"/../data/99_output_LassoRegression_D\" + date + \".csv\", index_col=0)\n# index = 0\n# real_data= pd.read_csv(os.getcwd() + \"/../data/99_output_LassoRegression_D0.csv\", index_col=0)\n\n\n\n\n\n\n\n\n\n\n\n\n# plt.show()\n\n\n","sub_path":"pythonSrc/source/22_visualization_graph.py","file_name":"22_visualization_graph.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"206310250","text":"\n\n#class header\nclass _ORIGINAL():\n\tdef __init__(self,): \n\t\tself.name = \"ORIGINAL\"\n\t\tself.definitions = [u'the first one made and not a copy: ', u'a piece of work by a famous artist or designer and not a copy by someone else: ', u'If you read something in the original, you read it in the language in which it was first written.']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_original.py","file_name":"_original.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"55052378","text":"from itertools import combinations\nimport timeit\ndef list_equal(one,two):\n\tfor i in two:\n\t\tif i not in one: return False\n\treturn True\n\ndef check(array,value):\n\tfor item in array:\n\t\tif list_equal(value,item): return False\n\treturn True\n\na = [i for i in range(1000)]\nres = []\nb=timeit.timeit('[res.append(list(i)) for i in combinations(a,3) if sum(i)==0 and check(res,i)]',globals=globals(), number=1)\nprint(b)\n\n\"\"\"\noutput:\n\"\"\"\n","sub_path":"python/yo.py","file_name":"yo.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"305602911","text":"from django.urls import path,include\r\n\r\nfrom .
import views\n\napp_name = 'apps'\nurlpatterns = [\n # Item\n path('', views.ItemIndexView.as_view(), name='index'),\n path('items/', views.ItemIndexView.as_view(), name='index'),\n path('items//', views.ItemDetailView.as_view(), name='detail'),\n path('items/new/', views.item_create, name='item_new'),\n # ItemPurchaseHistory\n path('items//purchase/', views.item_purchase, name='item_purchase'),\n]","sub_path":"proj/apps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"644796694","text":"__author__ = 'Deedasmi'\nfrom calculator import calc\n\ndef main():\n '''\n The main program loop\n '''\n print(\"Welcome to the Calculator\")\n while True:\n print(\"Please input an equation\")\n try:\n eq = take_input()\n if eq: #If returned an equation\n print_ans(calc.calculate(eq))\n except(UserWarning, SyntaxWarning) as e:\n print(e)\n except ZeroDivisionError:\n print(\"Cannot divide by Zero, numbnuts\")\n except KeyboardInterrupt:\n print(\"Exiting\")\n exit()\n\ndef take_input():\n '''\n Grabs user input split into a list\n :return: List if ready for math or None to skip math\n '''\n eq = \"\"\n while not eq.strip():\n eq = input()\n if eq == \"Done\" or eq == \"done\":\n exit(0)\n elif \"=\" in eq: #Handle User defined variables (and possibly functions) separately\n print(calc.handle_user_defined_input(eq))\n return None\n return eq\n\n\ndef print_ans(ans):\n '''\n Format answer to be printed\n :param ans: The answer\n '''\n if ans.is_integer():\n print(int(ans))\n else:\n print(round(ans, 4))\n\nif __name__ == \"__main__\":\n main()","sub_path":"calculator/calc_view.py","file_name":"calc_view.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"365097838","text":"from conans import ConanFile, CMake, tools\nimport os\nimport shutil\n\n\nclass OpenCVConan(ConanFile):\n name = \"opencv\"\n version = \"3.4.2\"\n license = \"LGPL\"\n homepage = \"https://github.com/opencv/opencv\"\n url = \"https://github.com/conan-community/conan-opencv.git\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": [True, False],\n \"fPIC\": [True, False],\n \"contrib\": [True, False],\n \"jpeg\": [True, False],\n \"tiff\": [True, False],\n \"webp\": [True, False],\n \"png\": [True, False],\n \"jasper\": [True, False],\n \"gtk\": [None, 2, 3]}\n default_options = \"shared=False\",\\\n \"fPIC=True\",\\\n \"contrib=False\",\\\n \"jpeg=True\",\\\n \"tiff=True\",\\\n \"webp=True\",\\\n \"png=True\",\\\n \"jasper=True\",\\\n \"gtk=3\"\n exports_sources = [\"CMakeLists.txt\"]\n generators = \"cmake\"\n description = \"OpenCV (Open Source Computer Vision Library) is an open source computer vision and machine \" \\\n \"learning software library.\"\n source_subfolder = \"source_subfolder\"\n build_subfolder = \"build_subfolder\"\n short_paths = True\n\n def source(self):\n tools.get(\"https://github.com/opencv/opencv/archive/%s.zip\" % self.version)\n os.rename('opencv-%s' % self.version, self.source_subfolder)\n\n # https://github.com/opencv/opencv/issues/8010\n if str(self.settings.compiler) == 'clang' and str(self.settings.compiler.version) == '3.9':\n tools.replace_in_file(os.path.join(self.source_subfolder, 'modules', 'imgproc', 'CMakeLists.txt'),\n 'ocv_define_module(imgproc opencv_core WRAP java python js)',\n 'ocv_define_module(imgproc opencv_core WRAP java python 
js)\\n'\n 'set_source_files_properties(${CMAKE_CURRENT_LIST_DIR}/src/imgwarp.cpp PROPERTIES COMPILE_FLAGS \"-O0\")')\n shutil.rmtree(os.path.join(self.source_subfolder, '3rdparty'))\n\n def config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n if self.settings.os != 'Linux':\n del self.options.gtk\n\n def system_requirements(self):\n if self.settings.os == 'Linux' and tools.os_info.is_linux:\n if tools.os_info.with_apt:\n installer = tools.SystemPackageTool()\n arch_suffix = ''\n if self.settings.arch == 'x86':\n arch_suffix = ':i386'\n elif self.settings.arch == 'x86_64':\n arch_suffix = ':amd64'\n packages = []\n if self.options.gtk == 2:\n packages.append('libgtk2.0-dev%s' % arch_suffix)\n elif self.options.gtk == 3:\n packages.append('libgtk-3-dev%s' % arch_suffix)\n for package in packages:\n installer.install(package)\n\n def requirements(self):\n self.requires.add('zlib/1.2.11@conan/stable')\n if self.options.jpeg:\n # NOTE : use the same libjpeg implementation as jasper uses\n # otherwise, jpeg_create_decompress will fail on version check\n # self.requires.add('libjpeg-turbo/1.5.2@bincrafters/stable')\n self.requires.add('libjpeg/9b@bincrafters/stable')\n if self.options.tiff:\n self.requires.add('libtiff/4.0.9@bincrafters/stable')\n if self.options.webp:\n self.requires.add('libwebp/1.0.0@bincrafters/stable')\n if self.options.png:\n self.requires.add('libpng/1.6.34@bincrafters/stable')\n if self.options.jasper:\n self.requires.add('jasper/2.0.14@conan/stable')\n\n def configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions['BUILD_EXAMPLES'] = False\n cmake.definitions['BUILD_DOCS'] = False\n cmake.definitions['BUILD_TESTS'] = False\n cmake.definitions['BUILD_PERF_TEST'] = False\n cmake.definitions['WITH_IPP'] = False\n cmake.definitions['BUILD_opencv_apps'] = False\n cmake.definitions['BUILD_opencv_java'] = False\n\n if self.settings.compiler == 'Visual Studio':\n cmake.definitions['BUILD_WITH_STATIC_CRT'] = 'MT' in str(self.settings.compiler.runtime)\n if self.settings.os != 'Windows':\n cmake.definitions['CMAKE_POSITION_INDEPENDENT_CODE'] = self.options.fPIC\n cmake.definitions['ENABLE_PIC'] = self.options.fPIC\n\n # 3rd-party\n\n # disable builds for all 3rd-party components, use libraries from conan only\n cmake.definitions['BUILD_ZLIB'] = False\n cmake.definitions['BUILD_TIFF'] = False\n cmake.definitions['BUILD_JASPER'] = False\n cmake.definitions['BUILD_JPEG'] = False\n cmake.definitions['BUILD_PNG'] = False\n cmake.definitions['BUILD_OPENEXR'] = False\n cmake.definitions['BUILD_WEBP'] = False\n cmake.definitions['BUILD_TBB'] = False\n cmake.definitions['BUILD_IPP_IW'] = False\n cmake.definitions['BUILD_ITT'] = False\n cmake.definitions['BUILD_JPEG_TURBO_DISABLE'] = True\n\n cmake.definitions['WITH_JPEG'] = self.options.jpeg\n cmake.definitions['WITH_TIFF'] = self.options.tiff\n cmake.definitions['WITH_WEBP'] = self.options.webp\n cmake.definitions['WITH_PNG'] = self.options.png\n cmake.definitions['WITH_JASPER'] = self.options.jasper\n cmake.definitions['WITH_OPENEXR'] = False\n cmake.definitions['WITH_PROTOBUF'] = False\n cmake.definitions['WITH_FFMPEG'] = False\n\n # system libraries\n if self.settings.os == 'Linux':\n cmake.definitions['WITH_GTK'] = self.options.gtk is not None\n cmake.definitions['WITH_GTK_2_X'] = self.options.gtk == 2\n\n cmake.configure(build_folder=self.build_subfolder)\n return cmake\n\n def build(self):\n cmake = self.configure_cmake()\n cmake.build()\n\n opencv_libs = [\"stitching\",\n \"superres\",\n 
\"videostab\",\n \"photo\",\n \"objdetect\",\n \"video\",\n \"ml\",\n \"calib3d\",\n \"features2d\",\n \"imgcodecs\",\n \"videoio\",\n \"highgui\",\n \"imgproc\",\n \"flann\",\n \"core\"]\n\n def package(self):\n cmake = self.configure_cmake()\n cmake.install()\n\n def add_libraries_from_pc(self, library):\n pkg_config = tools.PkgConfig(library)\n libs = [lib[2:] for lib in pkg_config.libs_only_l] # cut -l prefix\n lib_paths = [lib[2:] for lib in pkg_config.libs_only_L] # cut -L prefix\n self.cpp_info.libs.extend(libs)\n self.cpp_info.libdirs.extend(lib_paths)\n self.cpp_info.sharedlinkflags.extend(pkg_config.libs_only_other)\n self.cpp_info.exelinkflags.extend(pkg_config.libs_only_other)\n\n def package_info(self):\n suffix = 'd' if self.settings.build_type == 'Debug' and self.settings.compiler == 'Visual Studio' else ''\n version = self.version.replace(\".\", \"\") if self.settings.os == \"Windows\" else \"\"\n for lib in self.opencv_libs:\n self.cpp_info.libs.append(\"opencv_%s%s%s\" % (lib, version, suffix))\n\n if self.settings.compiler == 'Visual Studio':\n arch = {'x86': 'x86',\n 'x86_64': 'x64'}.get(str(self.settings.arch))\n vc = 'vc%s' % str(self.settings.compiler.version)\n bindir = os.path.join(self.package_folder, arch, vc, 'bin')\n libdir = os.path.join(self.package_folder, arch, vc, 'lib' if self.options.shared else 'staticlib')\n self.cpp_info.bindirs.append(bindir)\n self.cpp_info.libdirs.append(libdir)\n\n if self.settings.os == \"Linux\":\n self.cpp_info.libs.extend([\n \"pthread\",\n \"m\",\n \"dl\"])\n if self.options.gtk == 2:\n self.add_libraries_from_pc('gtk+-2.0')\n elif self.options.gtk == 3:\n self.add_libraries_from_pc('gtk+-3.0')\n elif self.settings.os == 'Macos':\n for framework in ['OpenCL',\n 'Accelerate']:\n self.cpp_info.exelinkflags.append('-framework %s' % framework)\n self.cpp_info.sharedlinkflags = self.cpp_info.exelinkflags\n","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"416335587","text":"import json\nimport pandas as pd\nfrom collections import OrderedDict\nfrom io import open\n\n\nclass SchemedTable:\n \"\"\"\n A wrapper class for a Frictionless Data table schema. 
These schemas are\n used by our validator extension to check the data upon upload.\n \"\"\"\n\n def __init__(self, fpath):\n self.fname = fpath.split('/')[-1][:-5] # fName w/o path or extension\n with open(fpath, encoding='utf-8') as read_file:\n self.schema = json.load(read_file)\n\n def create_template(self, info=True):\n \"\"\"\n Creates a template dataframe from the JSON schema.\n \"\"\"\n\n # Reindex foreign key information so it is more easily accessible\n data = OrderedDict()\n foreign_keys = {v['fields']: v for v in self.schema.get('foreignKeys', [])}\n\n # Create the csv template column by column\n for f in self.schema['fields']:\n # We'll build a column of information about the data field\n info_values = []\n if info:\n # Pop enum out of constraints as we treat this seperately\n enum = f.get('constraints', {}).pop('enum', [])\n\n # State basic field constraints\n info_values = [\"\", \"--conditions--\", \"type: \"+str(f['type'])]\n for k, v in f.get('constraints', {}).iteritems():\n info_values += [str(k)+\": \"+str(v)]\n\n # List possible values if enum field\n if enum:\n info_values += [\"\", \"--restricted values--\"] + enum\n\n # Detail any foreign references\n if f['name'] in foreign_keys.keys():\n info_values += [\n \"\",\n \"--foreign key--\",\n \"field: \" + str(foreign_keys[f['name']]['reference']['fields']),\n \"resource: \" + str(foreign_keys[f['name']]['reference']['resource']),\n ]\n\n # Specify whether the field contributes to a key of some sort\n if f['name'] == self.schema.get('primaryKey', \"\"):\n info_values += [\"\", \"--primary key--\"]\n elif f['name'] in self.schema.get('primaryKey', []):\n info_values += [\"\", \"--composite key--\"]\n\n # Insert sample data, or else the conditions assembled above\n data[f['name']] = [f['name']]+map(str, f.get(\n 'example_values',\n info_values\n ))\n\n template = pd.DataFrame.from_dict(data, orient='index').transpose()\n\n # Transpose the data if the schema says so\n if self.schema.get('transpose'):\n template = template.transpose()\n\n return template\n\n def create_csv_template(self, fname=None, directory=\".\"):\n \"\"\"\n Creates a csv template from the GoodTables schema.\n \"\"\"\n if fname is None:\n fname = self.fname + \"_template.csv\"\n template = self.create_template()\n file = open(directory+\"/\"+fname, \"w\")\n file.write(unicode(template.to_csv(header=False, index=False)))\n file.close()\n\n def create_table(self, spectrum_file):\n \"\"\"\n This function should be overriden by sub-classes. It should create\n a table from the schema and populate it with data from a Spectrum File.\n \"\"\"\n return self.create_template(info=False).iloc[1:]\n\n def create_csv_table(self, spectrum_file, fname=None, directory='.'):\n \"\"\"\n Creates a csv table from the GoodTables schema.\n \"\"\"\n if fname is None:\n fname = self.fname + \"_\" + spectrum_file.country + \".csv\"\n table = self.create_table(spectrum_file)\n file = open(directory+\"/\"+fname, \"w\")\n csv_table = table.to_csv(header=True, index=False, encoding='utf-8')\n file.write(unicode(csv_table, encoding='utf-8'))\n file.close()\n","sub_path":"adx_lib/schemed_table.py","file_name":"schemed_table.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"326775938","text":"#!/usr/bin/python\n\n# shutdownButton.py #\n# Created by: Iain Brearton #\n# Shuts down RPi when GPIO pin 4 goes HIGH. #\n# Uses external pull-up resistor. 
#\n# #\n\nbuttonPin = 4\nledPin = 17\n\nimport os, subprocess\nimport time\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(buttonPin,GPIO.IN)\nGPIO.setup(ledPin,GPIO.OUT)\n\nprev_input = 0\nwhile True:\n input = GPIO.input(buttonPin)\n if ((not prev_input) and input):\n GPIO.output(ledPin, GPIO.HIGH)\n os.system(\"sudo halt\")\n prev_input = input\n #slight pause to debounce\n time.sleep(0.05)\n\n","sub_path":"utilities/shutdownButton.py","file_name":"shutdownButton.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"280273129","text":"#!/usr/bin/env python3\n\nimport sys\nfrom Populator import Populator\n\nif (len(sys.argv) < 2):\n print (\"Provide file with IDs!\")\n sys.exit(1)\n\nfilename = sys.argv[1]\n\nuser_ids = [x.strip() for x in open(filename, \"r\").readlines()]\n\np = Populator()\n\nfor user_id in user_ids:\n message = \"{} {}\".format(user_id, 0)\n p.publish_work(message)\n","sub_path":"new/populate_work.py","file_name":"populate_work.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"50696156","text":"import turtle\n\nwn = turtle.Screen()\nwn.title(\"PingPong KarolinyQ\")\nwn.bgcolor(\"misty rose\")\nwn.setup(width=800, height=600)\nwn.tracer(0)\n\n# Wynik\nwynik_a = 0\nwynik_b = 0\n\n# Paletka A\npaletka_a = turtle.Turtle()\npaletka_a.speed(0)\npaletka_a.shape(\"square\")\npaletka_a.color(\"hot pink\")\npaletka_a.shapesize(stretch_wid=5,stretch_len=1)\npaletka_a.penup()\npaletka_a.goto(-350, 0)\n\n\n\n# Paletka B\npaletka_b = turtle.Turtle()\npaletka_b.speed(0)\npaletka_b.shape(\"square\")\npaletka_b.color(\"hot pink\")\npaletka_b.shapesize(stretch_wid=5,stretch_len=1)\npaletka_b.penup()\npaletka_b.goto(350, 0)\n\n\n# Pilka\npilka = turtle.Turtle()\npilka.speed(0)\npilka.shape(\"square\")\npilka.color(\"dim gray\")\npilka.penup()\npilka.goto(0, 0)\npilka.dx = 0.25\npilka.dy = 0.25\n\n# Dlugopis\npen= turtle.Turtle()\npen.speed(0)\npen.color(\"dim gray\")\npen.penup()\npen.hideturtle()\npen.goto(0,260)\npen.write(\"Gracz A: 0 Gracz B: 0\", align=\"center\",font=(\"Courier\", 24, \"bold\"))\n\n# Funkcje\ndef paletka_a_gora():\n y = paletka_a.ycor()\n y += 20\n paletka_a.sety(y)\n\ndef paletka_a_dol():\n y = paletka_a.ycor()\n y -= 20\n paletka_a.sety(y)\n\ndef paletka_b_gora():\n y = paletka_b.ycor()\n y += 20\n paletka_b.sety(y)\n\ndef paletka_b_dol():\n y = paletka_b.ycor()\n y -= 20\n paletka_b.sety(y)\n\n\n# Przyciski na klawiaturze\nwn.listen()\nwn.onkeypress(paletka_a_gora, \"w\")\nwn.onkeypress(paletka_a_dol, \"s\")\nwn.onkeypress(paletka_b_gora, \"Up\")\nwn.onkeypress(paletka_b_dol, \"Down\")\n\n\n# Petla glownej gry\nwhile True:\n wn.update()\n\n # Ruch pilki\n pilka.setx(pilka.xcor() + pilka.dx)\n pilka.sety(pilka.ycor() + pilka.dy)\n\n # Sprawdzanie granicy\n if pilka.ycor() > 290:\n pilka.sety(290)\n pilka.dy *= -1\n\n if pilka.ycor() < -290:\n pilka.sety(-290)\n pilka.dy *= -1\n\n if pilka.xcor() > 390:\n pilka.goto(0,0)\n pilka.dx *= -1\n wynik_a += 1\n pen.clear()\n pen.write(\"Gracz A: {} Gracz B: {}\".format(wynik_a, wynik_b), align=\"center\", font=(\"Courier\", 24, \"bold\"))\n\n if pilka.xcor() < -390:\n pilka.goto(0,0)\n pilka.dx *= -1\n wynik_b += 1\n pen.clear()\n pen.write(\"Gracz A: {} Gracz B: {}\".format(wynik_a, wynik_b), align=\"center\", font=(\"Courier\", 24, \"bold\"))\n\n # Spotkanie pilki i paletki\n if (pilka.xcor() > 340 and 
pilka.xcor() < 350) and (pilka.ycor() < paletka_b.ycor() + 40 and pilka.ycor() > paletka_b.ycor() -40):\n pilka.setx(340)\n pilka.dx *= -1\n\n if (pilka.xcor() < -340 and pilka.xcor() < -350) and (pilka.ycor() < paletka_a.ycor() + 40 and pilka.ycor() > paletka_a.ycor() -40):\n pilka.setx(-340)\n pilka.dx *= -1","sub_path":"PingPong.py","file_name":"PingPong.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"594239394","text":"import copy\nfrom datetime import datetime\nimport json\nimport os\nimport re\n\nfrom flask import abort\nimport requests\n\nfrom application import app\nfrom jsonschema.validators import validator_for\n\n\ncollection_details = {\n \"local-land-charges\": {\"filename\": \"llc-api.json\",\n \"definition_name\": \"Local-Land-Charge\",\n \"primary\": \"local-land-charge\"},\n \"information-locations\": {\"filename\": \"llc-api.json\",\n \"definition_name\": \"Information-Location\",\n \"primary\": \"id\"},\n \"originating-authorities\": {\"filename\": \"llc-api.json\",\n \"definition_name\": \"Originating-Authority\",\n \"primary\": \"id\"},\n \"statutory-provisions\": {\"filename\": \"llc-api.json\",\n \"definition_name\": \"Statutory-Provision\",\n \"primary\": \"id\"}\n}\n\nSEARCH_DEFINITION = \"Search-Criteria\"\n\n\ndef _format_error_messages(error, collection):\n error_message = error.message\n\n # Format error message for empty string regex to be more user friendly\n if \" does not match '\\\\\\\\S+'\" in error.message:\n error_message = \"must not be blank\"\n\n # For primary key validation remove start/end of line regex characters from error message for clarity\n if collection_details[collection]['primary'] in error.path:\n error_message = re.sub('\\^(.*)\\$', '\\\\1', error.message)\n\n # Get element names of erroring fields if required\n path = []\n for element in error.path:\n if isinstance(element, str):\n path.append(element)\n\n element_id = \".\".join(path)\n if element_id:\n element_id = \"'{}'\".format(element_id)\n\n return \" \".join(list(filter(None, [element_id, error_message])))\n\n\ndef validate_helper(json_to_validate, collection, request_method, primary_id, search):\n errors = []\n\n compensation_charge = None\n if collection == \"local-land-charges\" and not search:\n compensation_charge = validate_statutory_provisions(errors, json_to_validate)\n if 'statutory-provisions' in json_to_validate:\n validate_unique_array(errors, json_to_validate['statutory-provisions'], \"statutory provision\")\n if 'further-information' in json_to_validate:\n fi_array = []\n for item in json_to_validate['further-information']:\n fi_array.append(item['information-location'])\n validate_unique_array(errors, fi_array, \"further-information\")\n\n validator = _create_llc_validator(collection, request_method, primary_id, search, compensation_charge)\n error_list = sorted(validator.iter_errors(json_to_validate),\n key=str, reverse=True)\n\n start = len(errors) + 1\n for count, error in enumerate(error_list, start=start):\n errors.append(\"Problem %s: %s\" % (count, _format_error_messages(error, collection)))\n\n if collection == 'local-land-charges' and not search:\n validate_date(errors, json_to_validate)\n\n return errors\n\n\ndef validate_statutory_provisions(errors, json_to_validate):\n # json must contain statutory-provisions and/or instrument\n compensation_charge = None\n if \"statutory-provisions\" in json_to_validate and 
len(json_to_validate['statutory-provisions']) > 0:\n provisions = json_to_validate['statutory-provisions']\n for provision in provisions:\n provision = str(provision)\n result = process_get_request(\"statutory-provisions\", provision)\n if result[1] == 200:\n provision_json = json.loads(result[0])\n if \"Land Compensation Act 1973 s.8(4)\".lower() in provision_json['text'].lower():\n compensation_charge = \"Land-Compensation-Charge-S8\"\n break\n elif \"Land Compensation Act 1973 s.52(8)\".lower() in provision_json['text'].lower():\n compensation_charge = \"Land-Compensation-Charge-S52\"\n break\n\n if 'local-land-charge' in json_to_validate:\n if 'archived' in provision_json and provision_json['archived']:\n record = requests.get(app.config['LLC_REGISTER_URL'] + \"/local-land-charges/{}\".format(json_to_validate['local-land-charge']))\n historic_provs = record.json()['statutory-provisions']\n for n in range(len(historic_provs)):\n historic_provs[n] = historic_provs[n]['id']\n app.logger.info(historic_provs)\n if provision_json['id'] not in historic_provs:\n errors.append(\"This statutory provision is historic and cannot be supplied: {}.\".format(provision_json['id']))\n elif 'archived' in provision_json and provision_json['archived']:\n errors.append(\"This statutory provision is historic and cannot be supplied: {}.\".format(provision_json['id']))\n else:\n error_message = \"Could not find record in the database.\"\n errors.append(\"Problem %s: %s\" % (len(errors) + 1, error_message))\n\n elif \"instrument\" not in json_to_validate:\n error_message = \"At least one of 'statutory-provisions' or 'instrument' must be supplied.\"\n errors.append(\"Problem %s: %s\" % (len(errors) + 1, error_message))\n return compensation_charge\n\n\ndef validate_unique_array(errors, array, array_name):\n s = set()\n for item in array:\n if item in s:\n errors.append(\"Duplicate value found in {}: {}\".format(array_name, str(item)))\n else:\n s.add(item)\n\n\ndef validate_date(errors, json_to_validate):\n\n dates = [\"creation-date\", \"expiration-date\"]\n for date in dates:\n try:\n if date in json_to_validate:\n datetime.strptime(json_to_validate[date], \"%Y-%m-%d\")\n except ValueError:\n error_message = \"'%s' \" % date + \"is an invalid date\"\n errors.append(\"Problem %s: %s\" % (len(errors) + 1, error_message))\n\n\ndef get_swagger_file(collection):\n return load_json_file(os.getcwd() + \"/application/schema/%s\" % collection_details[collection]['filename'])\n\n\ndef load_json_schema(compensation_charge, collection, search):\n swagger = get_swagger_file(collection)\n\n definitions = swagger[\"definitions\"]\n if search:\n record_definition = definitions[SEARCH_DEFINITION]\n else:\n record_definition = {}\n if collection == \"local-land-charges\":\n if compensation_charge:\n definition_name = compensation_charge\n else:\n definition_name = collection_details[collection]['definition_name']\n\n record_definition[\"properties\"] = {**definitions['Base-Charge']['properties'], **definitions[definition_name]['allOf'][1]['properties']}\n record_definition[\"required\"] = definitions['Base-Charge']['required'] + definitions[definition_name]['allOf'][1]['required']\n else:\n record_definition = definitions[collection_details[collection]['definition_name']]\n\n record = {\n \"definitions\": definitions,\n \"properties\": record_definition[\"properties\"],\n \"required\": record_definition[\"required\"],\n \"type\": \"object\",\n \"additionalProperties\": False\n }\n\n return record\n\n\ndef 
_create_llc_validator(collection, request_method, primary_id, search, compensation_charge):\n schema = copy.deepcopy(load_json_schema(compensation_charge, collection, search))\n\n if not search:\n if request_method == 'PUT':\n # If it's a PUT request consider it an update. This requires the primary ID value to\n # be specified in the JSON. This must match the vale provided in the URL endpoint so\n # dynamically alter the schema to make the field mandatory and use regex to make sure\n # the values match.\n schema['properties'][collection_details[collection]['primary']] = {\n \"type\": \"integer\", \"minimum\": primary_id, \"maximum\": primary_id\n }\n schema['required'].append(collection_details[collection]['primary'])\n if request_method == 'POST':\n # TODO: Put list of read-only fields in register_details?\n schema['properties'].pop('registration-date', None)\n\n validator = validator_for(schema)\n validator.check_schema(schema)\n return validator(schema)\n\n\ndef load_json_file(file_path):\n with open(file_path, 'rt') as file:\n json_data = json.load(file)\n\n return json_data\n\n\ndef validate_json(collection, request_json, request_method, primary_id=None, search=False):\n if collection in collection_details:\n errors = validate_helper(request_json, collection, request_method, primary_id, search)\n return_value = {\"errors\": errors}\n else:\n return_value = {\"errors\": ['invalid collection']}\n return return_value\n\n\ndef create_object(collection, request_json):\n if collection in collection_details:\n try:\n register_url = (app.config['LLC_REGISTER_URL'] + \"/\" + collection)\n response = requests.post(register_url, json=request_json)\n response.raise_for_status()\n app.logger.info(\"Processed POST request for {}\".format(collection))\n return_value = (response.text, response.status_code, {\"Content-Type\": \"application/json\"})\n except requests.HTTPError as e:\n app.logger.error(\"Error in POST to register backend for {}. 
{}\".format(collection, e.response.text))\n if e.response.text.startswith(\"= 1:\n page = int(page)\n\n else:\n page = 1\n\n if limit and re.match(r'^[0-9]+$', str(limit)) and int(limit) >= 1:\n limit = int(limit)\n\n else:\n limit = ApiConfig.query_row_limit\n\n if invoice and invoice not in ['0','1'] or delivery and delivery not in ['0','1']:\n error = Exception('Valor incorreto![55]')\n\n BusinessExceptionLog(request,model_login,\n message=error,\n trace=traceback.format_exc())\n\n return JsonResponse({'message': str(error)}, status=400)\n\n if invoice == '0':\n invoice = False\n\n elif invoice == '1':\n invoice = True\n\n if delivery == '0':\n delivery = False\n\n elif delivery == '1':\n delivery = True\n\n try:\n model_address = ModelAddress.objects.filter()\n\n if person_id:\n model_address = model_address.filter(person_id=person_id)\n\n if city:\n model_address = model_address.filter(city__contains=city)\n\n if state:\n model_address = model_address.filter(state__contains=state)\n\n if number:\n model_address = model_address.filter(number__contains=number)\n\n if complement:\n model_address = model_address.filter(complement__contains=complement)\n\n if invoice:\n model_address = model_address.filter(invoice=invoice)\n\n if delivery:\n model_address = model_address.filter(delivery=delivery)\n\n except Exception as error:\n BusinessExceptionLog(request,model_login,\n message=error,\n trace=traceback.format_exc())\n\n return JsonResponse({'message': 'Registros de endereço não encontrado![79]'}, status=400)\n\n paginator = Paginator(model_address, limit)\n\n try:\n address = paginator.page(page)\n address_total = model_address.count()\n address_has_next = address.has_next()\n address_has_previous = address.has_previous()\n\n address_data = address.object_list\n address_data = list(address_data.values(\n 'address_id','person_id','state','city','number',\n 'complement','invoice','delivery'))\n\n except Exception as error:\n BusinessExceptionLog(request,model_login,\n message=error,\n trace=traceback.format_exc())\n\n return JsonResponse({'message': 'Nenhum registro encontrado![80]'}, status=400)\n\n result = {\n 'total': address_total,\n 'limit': limit,\n 'count': paginator.count,\n 'num_pages': paginator.num_pages,\n 'has_next': address_has_next,\n 'has_previous': address_has_previous,\n 'data': address_data,\n }\n\n return JsonResponse(result,status=200)\n\n @csrf_exempt\n @transaction.atomic\n @method_decorator(BusinessDecoratorAuth(profile=('root','director',)))\n def post(self,request,model_login,*args,**kwargs):\n try:\n model_address = ModelAddress.objects.create(request,model_login)\n\n except Exception as error:\n BusinessExceptionLog(request,model_login,\n message=error,\n trace=traceback.format_exc())\n\n return JsonResponse({'message': str(error)}, status=400)\n\n result = {\n 'address_id': model_address.address_id,\n 'person_id': model_address.person.person_id,\n 'state': model_address.state,\n 'city': model_address.city,\n 'number': model_address.number,\n 'complement': model_address.complement,\n 'invoice': model_address.invoice,\n 'delivery': model_address.delivery,\n }\n\n return JsonResponse(result,status=200)\n\n @csrf_exempt\n @transaction.atomic\n @method_decorator(BusinessDecoratorAuth(profile=('root','director',)))\n def put(self,request,model_login,*args,**kwargs):\n try:\n model_address = ModelAddress.objects.update(request,model_login)\n\n except Exception as error:\n BusinessExceptionLog(request,model_login,\n message=error,\n trace=traceback.format_exc())\n\n 
return JsonResponse({'message': str(error)}, status=400)\n\n result = {\n 'address_id': model_address.address_id,\n 'person_id': model_address.person.person_id,\n 'state': model_address.state,\n 'city': model_address.city,\n 'number': model_address.number,\n 'complement': model_address.complement,\n 'invoice': model_address.invoice,\n 'delivery': model_address.delivery,\n }\n\n return JsonResponse(result,status=200)\n\n @csrf_exempt\n @transaction.atomic\n @method_decorator(BusinessDecoratorAuth(profile=('root','director',)))\n def delete(self,request,model_login,*args,**kwargs):\n try:\n model_address = ModelAddress.objects.delete(request,model_login)\n\n except Exception as error:\n BusinessExceptionLog(request,model_login,\n message=error,\n trace=traceback.format_exc())\n\n return JsonResponse({'message': str(error)}, status=400)\n\n result = {\n 'result': True\n }\n\n return JsonResponse(result,status=200)\n","sub_path":"api/Controller/v1/PersonAddress.py","file_name":"PersonAddress.py","file_ext":"py","file_size_in_byte":7881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"541211607","text":"from common import *\n\nDEBUG = False\n\n# S3 and storages settings\n# Fill these in if you want S3 storage enabled\nDEFAULT_FILE_STORAGE = ''\nAWS_ACCESS_KEY_ID = ''\nAWS_SECRET_ACCESS_KEY = ''\nAWS_STORAGE_BUCKET_NAME = ''\nAWS_LOCATION = '' # Subdirectory within your bucket. Works with boto.\n\nfrom S3 import CallingFormat\nAWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN\n\n# Fill these in\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'rainmaker',\n 'PORT': '',\n 'HOST': '',\n 'USER': '',\n 'PASSWORD': ''\n }\n}\n\n# Static (Fill this in too)\nSTATIC_URL = ''\n\nADMIN_MEDIA_PREFIX = ''\n\n# Caching (Fill this in too)\nCACHE_MIDDLEWARE_SECONDS = 90 * 60 # 90 minutes\n\nCACHES = {\n 'default': {\n 'BACKEND': ''\n }\n}\n","sub_path":"rainmaker/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"270726653","text":"import logging\n\nfrom shared import small_roots\n\n\ndef integer_bivariate(f, e, M, m, t, Y, Z, roots_method=\"groebner\"):\n \"\"\"\n Computes small integer roots of a bivariate polynomial.\n More information: Blomer J., May A., \"New Partial Key Exposure Attacks on RSA\" (Section 6)\n :param f: the polynomial\n :param e: the parameter e\n :param M: the parameter M\n :param m: the parameter m\n :param t: the parameter t\n :param Y: an approximate bound on the y roots\n :param Z: an approximate bound on the z roots\n :param roots_method: the method to use to find roots (default: \"groebner\")\n :return: a generator generating small roots (tuples of y and z roots) of the polynomial\n \"\"\"\n pr = f.parent()\n y, z = pr.gens()\n\n logging.debug(\"Generating shifts...\")\n\n shifts = []\n monomials = set()\n for i in range(m + 1):\n for j in range(i + 1):\n g = y ** j * (e * M) ** i * f ** (m - i)\n shifts.append(g)\n monomials.update(g.monomials())\n\n for j in range(1, t + 1):\n h = z ** j * (e * M) ** i * f ** (m - i)\n shifts.append(h)\n monomials.update(h.monomials())\n\n L = small_roots.fill_lattice(shifts, monomials, [Y, Z])\n L = small_roots.reduce(L)\n polynomials = small_roots.reconstruct_polynomials(L, monomials, [Y, Z])\n for roots in small_roots.find_roots(f, 
polynomials, pr, method=roots_method):\n yield roots[y], roots[z]\n","sub_path":"shared/small_roots/blomer_may.py","file_name":"blomer_may.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"47777001","text":"# -*- coding: utf-8 -*-\n\nimport tkinter as tk\nimport reader\nfrom PIL import Image, ImageTk\nimport pl\nimport graph as graphe\nimport time\n\nfolder = [\"débutant\", \"intermédiaire\", \"avancé\", \"expert\", \"test\"]\n\norigin = (107,157)\ndiff = (150,150)\n\n#class Jam(tk.Frame):\n# def __init__(self, fenetre, file, difficulty, verbose, **kwargs):\n# vehicules = computeVehicules(copy.deepcopy(map))\n# jam = generateImage(vehicules)\n# canvas = tk.Canvas(fenetre,width=350, height=200)\n# canvas.create_image(0, 0, anchor=tk.NW, image=jam)\n# canvas.pack()\n\ndef computeVehicules(map):\n #extrait la liste des véhicules depuis la map\n width, height = map[0]\n matrice = map[1]\n print(str(width) + \" : \" + str(height))\n print(matrice)\n indexes = []\n vehicules = []\n for i in range(height):\n for j in range(width):\n cur = matrice[i][j]\n if (cur != \"0\"):\n if (cur == \"g\"):\n vehicules.append(Vehicule(0, False, False, True, origin[0] + j*diff[0], origin[1] + i*diff[1]))\n matrice[i][j+1] = \"0\"\n else:\n if cur[0]==\"t\":\n index = -int(cur[1:])\n else:\n index = int(cur[1:])\n if index not in indexes:\n indexes.append(index)\n isVertical = False\n if ((j == width-1) or (cur != matrice[i][j+1])):\n isVertical = True\n vehicules.append(Vehicule(abs(index), isVertical, cur[0]==\"t\", False, origin[0] + j*diff[0], origin[1] + i*diff[1]))\n return vehicules\ndef generateImage(vehicules):\n plateau = Image.open(\"images/plateau.png\")\n for vehicule in vehicules:\n plateau.paste(vehicule.image, (vehicule.x, vehicule.y))\n plateau.save(\"image.png\")\n return ImageTk.PhotoImage(plateau)\n\nclass Vehicule:\n def __init__(self, number, isVertical, isTruck, isGoal, x, y):\n #number est l'indice du vehicule, x,y indiquent la case la plus en haut a gauche\n self.x = x\n self.y = y\n\n type = \"car\"\n if isTruck:\n type = \"truck\"\n orientation = \"Horizontal\"\n if isVertical:\n orientation = \"Vertical\"\n imageName = type + str(number) + orientation + \".png\"\n if isGoal:\n imageName = \"goal.png\"\n self.image = Image.open(\"images/\" + imageName)\n\nclass Menu(tk.Frame):\n def __init__(self, fenetre, **kwargs):\n tk.Frame.__init__(self, fenetre, width=1000, height=1000, **kwargs)\n self.pack(fill=tk.BOTH)\n\n self.titre = tk.Label(self, text=\"Rush Hour\")\n self.titre.pack()\n\n #Les boutons radios pour la difficulté\n self.l1 = tk.Label(self, text=\"Difficulté :\")\n self.l1.pack()\n\n self.difficulte = tk.StringVar()\n self.difficulteDebutant = tk.Radiobutton(self, text=\"Débutant\", variable=self.difficulte, value=\"0\", indicatoron=0)\n self.difficulteInter = tk.Radiobutton(self, text=\"Intermédiaire\", variable=self.difficulte, value=\"1\", indicatoron=0)\n self.difficulteAvance = tk.Radiobutton(self, text=\"Avancé\", variable=self.difficulte, value=\"2\", indicatoron=0)\n self.difficulteExpert = tk.Radiobutton(self, text=\"Expert\", variable=self.difficulte, value=\"3\", indicatoron=0)\n self.difficulteDebutant.pack()\n self.difficulteInter.pack()\n self.difficulteAvance.pack()\n self.difficulteExpert.pack()\n self.difficulteDebutant.select()\n\n #La liste de niveaux\n self.l2 = tk.Label(self, text = \"Niveau :\")\n self.l2.pack()\n\n self.niveau = tk.Listbox(self)\n 
self.niveau.pack()\n for i in range(1,11):\n self.niveau.insert(tk.END, str(i))\n self.niveau.selection_set(0)\n\n\n #Le bouton radio \"RHC\"\n self.isRHC = tk.StringVar()\n self.rhc = tk.Radiobutton(self, text=\"RHC\", variable = self.isRHC, value = \"1\", indicatoron=0)\n self.rhm = tk.Radiobutton(self, text=\"RHM\", variable = self.isRHC, value = \"0\", indicatoron=0)\n self.rhc.pack()\n self.rhm.pack()\n self.rhc.select()\n\n #Le bouton radio \"méthode\"\n self.l3 = tk.Label(self, text = \"Méthode : \")\n self.l3.pack()\n self.isGraphe = tk.StringVar()\n self.graphe = tk.Radiobutton(self, text=\"Graphe\", variable = self.isGraphe, value = \"1\", indicatoron=0)\n self.pl = tk.Radiobutton(self, text=\"PL\", variable = self.isGraphe, value = \"0\", indicatoron=0)\n self.graphe.pack()\n self.pl.pack()\n self.graphe.select()\n\n #La checkbox \"verbose\"\n self.verbose = True\n self.verb = tk.Checkbutton(self, text = \"Verbose\", variable = self.verbose, onvalue = True, offvalue = False)\n self.verb.pack()\n\n #Le bouton \"ouvrir\"\n self.jouer = tk.Button(self, text=\"Ouvrir\", command = self.ouvrir)\n self.jouer.pack()\n\n def ouvrir(self):\n #Ce qui se passe quand l'utilisateur appuie sur \"Ouvrir\"\n diff = int(self.difficulte.get())\n niv = self.niveau.curselection()[0] +1\n isGraphe = int(self.isGraphe.get())\n isRHC = int(self.isRHC.get())\n levelToImport = \"puzzles/\" + folder[diff] + \"/jam\" + str(diff * 10 + niv) + \".txt\"\n\n if (isGraphe == 1):\n graphe.maxTime = 120\n\n plateau = graphe.Plateau(*graphe.Init.readFile(levelToImport))\n start = time.clock()\n try:\n result = graphe.GraphSolver.getSolution(plateau, isRHC, 120)\n except graphe.OutOfTimeError as err:\n print(\"At least >= \"+ str(err.extra[0]))\n elapsedTime = time.clock() - start\n print(elapsedTime)\n else:\n map = reader.read(levelToImport)\n pl.solve(map, diff, self.verbose, self.isRHC)\n\n# fenetreJam = None#tk.Tk()\n# jam = Jam(fenetreJam, levelToImport, diff, self.verbose)\n# jam.mainloop()\n\n\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":6061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"574492420","text":"import serial\nimport time\nimport pygame\nimport pygame.camera\nfrom pygame.locals import * \n\nArduinoSerial=serial.Serial(\"/dev/ttyACM1\", 9600)\nwhile True:\n\t\n\tarduiOp=str(ArduinoSerial.readline())\n\tarduiOp=arduiOp.strip(\"\\n\")\n\tarduiOp=int(arduiOp)\n\t\n\ttry:\n\t\tpygame.init()\n\t\tpygame.camera.init()\n\t\tif arduiOp:\n\t\t\tprint (\"clicked\")\n\t\t\t\n\t\t\tcam = pygame.camera.Camera(\"/dev/video1\",(640,480))\n\t\t\tcam.start()\n\t\t\timg = cam.get_image()\n\t\t\tpygame.image.save(img,\"filename.jpg\")\n\texcept:\n\t\tpass\n\t\n","sub_path":"pythonArduino.py","file_name":"pythonArduino.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"88133334","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.conf import settings\nfrom .models import BillingProfile\nfrom addresses.forms import AddressForm\nfrom accounts.models import GuestEmail\nfrom orders.models import Order, OrderConfirmation\nfrom cart.models import Cart\nfrom .Checksum import generate_checksum, verify_checksum\nfrom products.models import Product\nfrom addresses.models import Address\nimport sys\nimport razorpay\nclient = 
razorpay.Client(auth=(\"rzp_test_1VsUvdyX4Wq6fC\", \"EfgxLpKdaROMlaNfn1Ot45nT\"))\n# rzp_live_vTFPJKKdWndqOM 0lPkXJif7P6kCIfSv1MNXeQ8\norder_id = None\ncart_Id = None\nGLOBAL_Entry = None\ndef razor_pay(request,id=None,*args, **kwargs):\n\tif request.method == 'GET':\n\t\tshipping_address_id = request.session.get(\"shipping_address_id\", None)\n\t\tprint(\"Session:\",shipping_address_id)\n\t\tcart_obj, cart_created = Cart.objects.new_or_get(request)\n\t\torder_obj = None\n\t\tbilling_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)\n\t\torder_obj, order_obj_created= Order.objects.new_or_get(billing_profile, cart_obj)\n\t\tglobal order_id, cart_Id, GLOBAL_Entry\n\t\torder_id=order_obj\n\t\tcart_Id=cart_obj\n\t\torder_amount = int(100 * cart_obj.total)\n\t\tprint(order_amount)\n\t\torder_currency = 'INR'\n\t\torder_receipt = 'order_rcptid_11'\n\t\tresponse = client.order.create(dict(amount=order_amount, currency=order_currency, receipt=order_receipt, payment_capture='1'))\n\t\torder = response['id']\n\t\torder_status = response['status']\n\t\torder_obj.shipping_address = Address.objects.get(id=shipping_address_id)\n\t\tprint(\"CHeck:\",shipping_address_id)\n\t\tshipping_address=order_obj.shipping_address.get_address()\n\t\torder_obj.address=shipping_address\n\t\torder_obj.save()\n\t\tprint(\"shipAdrr:\",shipping_address)\n\t\tdel request.session[\"shipping_address_id\"]\n\t\t\n\t\t\n\t\tif order_status=='created':\n\t\t\tcontext={\n\t\t\t\t\"Order_total\":order_amount,\n\t\t\t\t\"Billing_address\":billing_profile.email,\n\t\t\t\t\"Order_id\": order_obj,\n\t\t\t\t\"order_id\":order,\n\t\t\t\t'cart':cart_obj,\n\t\t\t\t'shipping_address':shipping_address,\n\t\t\t}\n\t\t\treturn render(request, 'billing/confirm_order.html', context)\n\treturn HttpResponse('
Error in create order function
')\n\n@csrf_exempt\ndef payment_status(request):\n\t# global cart_Id \n\t# print(\"Cart:\",cart_Id)\n\t# cart_obj, cart_created = Cart.objects.new_or_get(request)\n\t# order_id = None\n\t# billing_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)\n\t\n\torder_obj = request.POST['order_id']\n\tcart = request.POST['cart_id']\n\tprint(\"Cart2:\",cart)\n\t\n\t# cart_id = Cart.objects.get(id=cart_Id)\n\torder_id= Order.objects.get(order_id=order_obj)\t\n\tcart_id = Cart.objects.get(id=cart)\n\tprint(\"Cart3:\",cart_id)\n\tresponse = request.POST\n\tprint(response)\n\tparams_dict = {\n 'razorpay_payment_id' : response['razorpay_payment_id'],\n 'razorpay_order_id' : response['razorpay_order_id'],\n 'razorpay_signature' : response['razorpay_signature']\n }\n\ttry:\n\t\tstatus = client.utility.verify_payment_signature(params_dict)\n\t\tprint(\"status:\",status)\n\t\tprint(\"Order:\",order_id)\n\t\torder_id.status = \"paid\"\n\t\tcart_id.isordered=True\n\t\tcart_id.save()\n\t\tobj = OrderConfirmation.objects.create(billing_profile = order_id.billing_profile,order_id=order_id, email=order_id.billing_profile.email)\n\t\t# obj.send_order_confirmation()\n\t\tprint(\"Orderpaid:\",order_id)\n\t\torder_id.save()\n\t\tprint(\"4\")\n\t\tcontext={\n\t\t\t'status': 'Payment Successful',\n\t\t\t'cart_id':cart_id\n\t\t\t}\n\n\t\treturn render(request, 'billing/order_summary.html', context)\n\n\t\n\texcept:\n\t\tprint(\"Oops!\", sys.exc_info()[0], \"occurred.\")\n\t\tcontext={\n\t\t'status': 'Payment Failure!',\n\t\t'cart_id':cart_id\n\t\t}\n\t\treturn render(request, 'billing/order_summary.html',context)\n\ndef cash_on_delivery(request):\n\tcart_obj, cart_created = Cart.objects.new_or_get(request)\n\torder_id = None\n\tbilling_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)\n\torder_id, order_obj_created= Order.objects.new_or_get(billing_profile, cart_obj)\n\tcart_obj.isordered=True\n\tcart_obj.save()\n\torder_id.cash_on_delivery=True\n\torder_id.status = \"COD\"\n\torder_id.save()\n\tobj = OrderConfirmation.objects.create(billing_profile = order_id.billing_profile,order_id=order_id, email=order_id.billing_profile.email)\n\t# obj.send_order_confirmation()\n\tdel request.session['cart_id']\n\trequest.session['cart_items'] = 0\n\tcontext = {\n\t\t'status': 'Payment Successful'\n\t}\n\treturn render(request,'billing/order_summary.html',context)\n# merchant_key = settings.PAYTM_SECRET_KEY\n\n# order_id = None\n# cart_items=None\n# req = None\n# cart_id = None\n# GLOBAL_Entry = None\n# Create your views here.\n# def initiate_payment(request):\n# \tif request.method == 'GET':\n# \t\tcart_obj, cart_created = Cart.objects.new_or_get(request)\n# \t\torder_obj = None\n# \t\tbilling_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)\n# \t\torder_obj, order_obj_created= Order.objects.new_or_get(billing_profile, cart_obj)\n# \t\tglobal order_id, cart_id, cart_items, req, GLOBAL_Entry\n# \t\torder_id=order_obj\n# \t\tcart_id=cart_obj\n# \t\trequest.session['cart_items'] = 0\n# \t\tdel request.session['cart_id']\n# \t\tprint(\"Order:\",order_obj)\n# \t\tprint(\"cart_obj\",cart_obj)\n# \t\t# print(order_obj,cart_obj.total,billing_profile.email)\n# \t\tparam_dict = {\n# \t\t'MID': settings.PAYTM_MERCHANT_ID,\n# \t\t'ORDER_ID': str(order_obj),\n# \t\t'CUST_ID': str(billing_profile.email),\n# \t\t'TXN_AMOUNT': str(cart_obj.total),\n# \t\t'CHANNEL_ID': settings.PAYTM_CHANNEL_ID,\t\n# \t\t'WEBSITE': settings.PAYTM_WEBSITE,\n# \t\t# ('EMAIL', 
request.user.email),\n# \t\t# ('MOBILE_N0', '9911223388'),\n# \t\t'INDUSTRY_TYPE_ID': settings.PAYTM_INDUSTRY_TYPE_ID,\n# \t\t'CALLBACK_URL': 'https://enduro2020.herokuapp.com/billing/callback/',\n# \t\t# ('PAYMENT_MODE_ONLY', 'NO'),\n# \t\t}\n\t\t\n# \t\tparam_dict['CHECKSUMHASH'] = generate_checksum(param_dict, merchant_key)\n# \t\treturn render(request,\"billing/paytm.html\",{'param_dict': param_dict})\n\n# @csrf_exempt\n# def callback(request):\n\t\n# \tform=request.POST\n# \tresponse_dict={}\n# \tfor i in form.keys():\n# \t\tresponse_dict[i] = form[i]\n# \t\tprint(\"response:\",i,response_dict[i])\n# \t\tif i == 'CHECKSUMHASH':\n# \t\t\tchecksum = form[i]\n\t\n# \tverify = verify_checksum(response_dict, merchant_key ,checksum)\n\t\n# \tif verify:\n# \t\tif response_dict['RESPCODE'] == '01':\n\t\t\n# \t\t\tcart_obj,cart_created = Cart.objects.new_or_get(request)\n# \t\t\tbilling_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)\n\t\t\n# \t\t\tprint('Order Successful')\n# \t\t\tglobal order_id\n# \t\t\torder_obj=order_id\n# \t\t\torder_obj.mark_paid()\n# \t\t\tobj = OrderConfirmation.objects.create(billing_profile = order_obj.billing_profile,order_id=order_obj,email=order_obj.billing_profile.email)\n# \t\t\tobj.send_order_confirmation()\n# \t\t\torder_obj.save()\n# \t\t\tglobal cart_id\n# \t\t\tcart_id=cart_id\n# \t\telse:\n# \t\t\tprint('Order was not successful because' + response_dict['RESPMSG'])\n# \treturn render(request, 'carts/checkout-done.html', {'response': response_dict})\n\n\n","sub_path":"billing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"226946445","text":"#\n# Author: Henrique Pereira Coutada Miranda\n# Run a GW calculation using yambo\n#\nfrom __future__ import print_function\nfrom yambopy import *\nfrom qepy import *\nimport argparse\n\n#parse options\nparser = argparse.ArgumentParser(description='Test the yambopy script.')\nparser.add_argument('-dg' ,'--doublegrid', action=\"store_true\", help='Use double grid')\nargs = parser.parse_args()\n\nyambo = \"yambo\"\n\nif not os.path.isdir('database'):\n os.mkdir('database')\n\n#check if the nscf cycle is present\nif os.path.isdir('nscf/mos2.save'):\n print('nscf calculation found!')\nelse:\n print('nscf calculation not found!')\n exit()\n\n#check if the SAVE folder is present\nif not os.path.isdir('database/SAVE'):\n print('preparing yambo database')\n os.system('cd nscf/mos2.save; p2y > p2y.log')\n os.system('cd nscf/mos2.save; yambo > yambo.log')\n os.system('mv nscf/mos2.save/SAVE database')\n\n#check if the SAVE folder is present\nif not os.path.isdir('database_double/SAVE'):\n print('preparing yambo database')\n os.system('cd nscf_double/mos2.save; p2y > p2y.log')\n os.system('cd nscf_double/mos2.save; yambo > yambo.log')\n os.system('mv nscf_double/mos2.save/SAVE database_double')\n\nif not os.path.isdir('bse'):\n os.mkdir('bse')\n os.system('cp -r database/SAVE bse')\n\n#initialize the double grid\nif args.doublegrid:\n print(\"creating double grid\")\n f = open('bse/ypp.in','w')\n f.write(\"\"\"kpts_map\n %DbGd_DB1_paths\n \"../database_double\"\n %\"\"\")\n f.close()\n os.system('cd bse; ypp')\n\n#create the yambo input file\ny = YamboIn('yambo -b -o b -k sex -y d -V all',folder='bse')\n\ny['FFTGvecs'] = [15,'Ry']\ny['NGsBlkXs'] = [1,'Ry']\ny['BndsRnXs'] = [[1,40],'']\ny['BSEBands'] = [[8,11],'']\ny['BEnSteps'] = [500,'']\ny['BEnRange'] = 
[[0.0,6.0],'eV']\n\ny.arguments.append('WRbsWF')\ny.write('bse/yambo_run.in')\n\nprint('running yambo')\nos.system('cd bse; %s -F yambo_run.in -J yambo'%yambo)\n\n#pack in a json file\ny = YamboOut('bse')\ny.pack()\n","sub_path":"tutorial/mos2/bse_mos2.py","file_name":"bse_mos2.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"63230832","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.6-i386/egg/pylibtracer/utils.py\n# Compiled at: 2009-11-29 08:32:28\n__doc__ = '\\nutils.py\\n\\nCreated by Olli Wang (olliwang@ollix.com) on 2009-11-26.\\nCopyright (c) 2009 Ollix. All rights reserved.\\n'\nimport os, sys, py_compile\n__all__ = [\n 'import_module', 'iter_dir_filenames', 'splitext', 'compile_py']\n\nclass UselessStd:\n\n def write(self, message):\n pass\n\n\ndef import_module(module_path):\n \"\"\"Import a module by specified module path\"\"\"\n savestdout, savestderr = sys.stdout, sys.stderr\n (sys.stdout, sys.stderr) = [UselessStd()] * 2\n try:\n if sys.platform.startswith('java'):\n import java\n while True:\n try:\n module = __import__(module_path)\n except java.lang.ExceptionInInitializerError:\n continue\n else:\n break\n\n else:\n module = __import__(module_path)\n finally:\n sys.stdout, sys.stderr = savestdout, savestderr\n\n if repr(module).startswith(' motor not connected'],\n \n [ 21, 'time for a valve travel from open to closed or vice versa'],\n [ 22, '0 regulate normal; 1 if valve kept open; 2 if closed;'],\n \n [ 23, 'testMode;'],\n [ 24, 'anz. schnellerer Zyklen f. Test; zaehlt auf 0 runter'],\n [ 25, 'Gueltigkeit der Vorlauf Temp. 
von Zentrale; seriell empfangen']\n]\n\n# *** values for initialisation of one parameter set of a controller\n \nindex = [ 0, 1, 2 ]\nnames = [ 'active', 'tvs', 'tvw']\nunits = [ 'd', 'degC', 'degC']\nvalTyp = [ 'uint8_t','float','float']\nvalFst = [ '%d','%5.1f','%5.1f']\nvalMin = [ 0, 25.0, 32.0]\nvalDef = [ 1, 30.0, 35.0] # default values\nvalMax = [ 1, 45.0, 45.0]\n\nindex += [ 3, 4, 5, 6, 7 ]\nnames += [ 'dts', 'dtw', 'dtS', 'dtSZu', 'dtWAuf']\nunits += [ 'sec', 'sec', 'days', 'sec', 'sec']\nvalTyp += [ 'float','float', 'float', 'float', 'float']\nvalFst += [ '%5.1f','%5.1f', '%5.1f', '%5.1f', '%5.1f']\nvalMin += [ 20.0, 20.0, 2.0, 0.0, 0.0]\nvalDef += [ 300.0, 300.0, 7.0, 6.0, 4.0]\nvalMax += [ 3600.0, 600.0, 14.0, 10.0, 10.0]\n\nindex += [ 8 ]\nnames += ['dtInst']\nunits += [ 'sec']\nvalTyp += [ 'float']\nvalFst += [ '%5.1f']\nvalMin += [ 120.0 ]\nvalDef += [ 300.0 ]\nvalMax += [ 600.0]\n\nindex += [ 9, 10, 11, 12 ]\nnames += [ 'tv0', 'tr0', 'tv1', 'tr1']\nunits += [ 'degC', 'degC', 'degC', 'degC']\nvalTyp += ['float','float','float','float']\nvalFst += ['%5.1f','%5.1f','%5.1f','%5.1f']\nvalMin += [ 35.0, 30.0, 65.0, 45.0 ]\nvalDef += [ 40.0, 32.0, 75.0, 47.0 ]\nvalMax += [ 60.0, 48.0, 80.0, 55.0 ]\n\nindex += [ 13, 14, 15, 16 ]\nnames += ['dtLed', 'ttol', 'pFZu', 'pFAuf']\nunits += [ 'sec', 'degC', 'd', 'd']\nvalTyp += ['float','float','float', 'float']\nvalFst += ['%5.1f','%5.1f','%5.2f', '%5.2f']\nvalMin += [ 0.3, 0.5, 0.00, 0.00]\nvalDef += [ 1.0, 2.5, 0.02, 0.01]\nvalMax += [ 3.0, 5.0, 2.00, 2.00]\n\nindex += [ 17, 18, 19, 20 ]\nnames += ['dtMMn','dtMMx','IStop', 'IMn']\nunits += [ 'sec', 'sec', 'mA', 'mA']\nvalTyp += ['float','float','float','float']\nvalFst += ['%5.1f','%5.1f','%5.1f','%5.1f']\nvalMin += [ 0.1, 20.0, 30.0, 3.0 ]\nvalDef += [ 0.1, 30.0, 50.0, 5.0 ]\nvalMax += [ 1.0, 100.0, 80.0, 10.0 ]\n\nindex += [ 21, 22 ]\nnames += ['dtVoc', 'vFix']\nunits += [ 'sec', 'd']\nvalTyp += ['float', 'uint8_t']\nvalFst += ['%5.1f', '%d']\nvalMin += [ 20.0, 0 ]\nvalDef += [ 23.0, 0 ]\nvalMax += [ 90.0, 1 ]\n\nindex += [ 23, 24, 25 ]\nnames += [ 'test', 'fast','tvzTValid']\nunits += [ 'd', 'd', 'degC']\nvalTyp += ['uint8_t', 'uint8_t', 'float']\nvalFst += [ '%d', '%d', '%5.1f']\nvalMin += [ 0, 0 , 120.0 ]\nvalDef += [ 0, 0 , 600.0 ]\nvalMax += [ 1, 255 , 600.0 ]\n\n","sub_path":"HZRR_203/Software/RPi/Zentrale/Z1Schrau/PYTHONUSB/HZ-RR-010vorlage/LA8_Z1_monitor2020-11-06_0938/param_11d.py","file_name":"param_11d.py","file_ext":"py","file_size_in_byte":4533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"58608868","text":"import logging\n\ndef wipe_table(connection, table):\n cursor = connection.cursor()\n sql = \"DELETE FROM {}\".format(table)\n cursor.execute(sql)\n logging.info('wiped table: {}'.format(table))\n\n sql = \"ALTER TABLE {} AUTO_INCREMENT = 1\".format(table)\n cursor.execute(sql)\n logging.info('reset AUTO_INCREMENT to 1 on table: {}'.format(table))\n\n connection.commit()\n cursor.close()\n\n\ndef get_last_id(connection, table):\n last_id = 0\n cursor = connection.cursor()\n sql = \"SELECT id FROM {} ORDER BY id DESC LIMIT 1\".format(table)\n\n result = cursor.execute(sql)\n for result in cursor:\n last_id = result[0]\n\n logging.info('last id: {}'.format(last_id))\n cursor.close()\n return last_id\n\ndef get_count_local_tracks(connection):\n cursor = connection.cursor()\n\n sql = \"SELECT value FROM metadata WHERE name=%(name)s\"\n val = {'name': 'count_local_tracks'}\n\n cursor.execute(sql, val)\n for result in 
cursor:\n count_local_tracks = result[0]\n\n logging.info('count of local tracks: {}'.format(count_local_tracks))\n cursor.close()\n return count_local_tracks\n\ndef increment_count_local_tracks(connection):\n cursor = connection.cursor()\n\n sql = \"UPDATE metadata SET value = value+1 WHERE name=%(name)s\"\n val = {'name': 'count_local_tracks'}\n\n cursor.execute(sql, val)\n connection.commit()\n\n cursor.close()\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"397346239","text":"import time\n\nfrom keras import backend as K\nfrom keras import regularizers\nfrom keras.models import Sequential, Model, load_model\nfrom keras.layers.merge import _Merge\nfrom keras.layers import *\nfrom keras.optimizers import RMSprop, Adam\n\nfrom functools import partial\nfrom discrimination import *\n\n\n# this is following Improved WGAN\nclass RandomWeightedAverage(_Merge):\n def _merge_function(self, inputs):\n weights = K.random_uniform((64, 1, 1,1))\n return (weights * inputs[0]) + ((1 - weights) * inputs[1])\n\nclass lstm_cond_gan(object):\n \"\"\" This defines the GAN for training stock market simulator\n Arguments:\n orderLength: integer, length of one order (all features of order such as price, quantity, time diff(2), types, best bid/ask(4))\n historyLength: integer, number of past orders used as history\n noiseLength: integer, length of noise vector\n lstm_out_length: integer, length of embedding vector of history\n mini_batch_size: integer, number of orders generated each time\n data_path: string, path of training dataset\n batch_size: integer, number of orders within one batch\n \"\"\"\n def __init__(self, orderLength=9, historyLength=20,\\\n noiseLength=100, lstm_out_length=9, mini_batch_size=1,\\\n data_path=None, batch_size=64):\n self.orderLength = orderLength\n self.historyLength = historyLength\n self.noiseLength = noiseLength\n self.lstm_out_length = lstm_out_length\n self.mini_batch_size = mini_batch_size\n self.data_path = data_path\n self.batch_size = batch_size\n self.model = None\n self.build()\n\n\t# this is following Improved WGAN\n def gradient_penalty_loss(self,y_true, y_pred, averaged_samples, \\\n gradient_penalty_weight):\n gradients = K.gradients(y_pred, averaged_samples)[0]\n # compute the euclidean norm by squaring ...\n gradients_sqr = K.square(gradients)\n # ... summing over the rows ...\n gradients_sqr_sum = K.sum(gradients_sqr,\n axis=np.arange(1, len(gradients_sqr.shape)))\n # ...
and sqrt\n gradient_l2_norm = K.sqrt(gradients_sqr_sum)\n # compute lambda * (1 - ||grad||)^2 still for each single sample\n gradient_penalty = gradient_penalty_weight * K.square(1 - gradient_l2_norm)\n # return the mean as loss over all the batch samples\n return K.mean(gradient_penalty)\n\n def w_loss(self,y_true,y_pred):\n return K.mean(y_true*y_pred)\n\n def build(self):\n # build models\n\n if self.model:\n return self.model\n\n ##################### Input for both Generator and Critic ############################################\n # history orders of shape (self.historyLength, self.orderLength)\n history = Input(shape=(self.historyLength, self.orderLength), \\\n name='history_full')\n\n # noise input of shape (self.noiseLength)\n noise_input_1 = Input(shape=(self.noiseLength,), name='noise_input_1')\n\n # Real order of shape((self.mini_batch_size,self.orderLength)\n truth_input = Input(shape=(self.mini_batch_size,self.orderLength,1),name='truth_input')\n\n\n # lstm at Generator to extract history orders features\n lstm_output = LSTM(self.lstm_out_length)(history)\n\n # lstm at Critic to extract history orders features\n lstm_output_h = LSTM(self.lstm_out_length,name='lstm_critic')(history)\n\n # concatenate history features with noise\n gen_input = Concatenate(axis=-1)([lstm_output,noise_input_1])\n\n ####################### Generator ######################################\n # Input: gen_input, shape(self.noiseLength+self.lstm_out_length + 1)\n # Output: gen_output_1, shape(self.mini_batch_size,self.orderLength - 4)\n dropout = 0.5\n G_1 = Sequential(name='generator_1')\n G_1.add(Dense((self.orderLength-4)*self.mini_batch_size*100, \\\n input_dim=self.noiseLength+self.lstm_out_length))\n G_1.add(BatchNormalization())\n G_1.add(Activation('relu'))\n G_1.add(Reshape((int(self.mini_batch_size), int(self.orderLength - 4), 100)))\n G_1.add(UpSampling2D())\n G_1.add(Dropout(dropout))\n G_1.add(UpSampling2D())\n G_1.add(Conv2DTranspose(32, 32, padding='same'))\n G_1.add(BatchNormalization())\n G_1.add(Activation('relu'))\n G_1.add(Conv2DTranspose(16,32 , padding='same'))\n G_1.add(BatchNormalization())\n G_1.add(Activation('relu'))\n G_1.add(Conv2DTranspose(8, 32, padding='same'))\n G_1.add(BatchNormalization())\n G_1.add(Activation('relu'))\n G_1.add(MaxPooling2D((2,2)))\n G_1.add(Conv2DTranspose(1, 32, padding='same'))\n G_1.add(Activation('tanh'))\n G_1.add(MaxPooling2D((2,2)))\n\n gen_output_1 = G_1(gen_input)\n\n #CDA network(train offline)\n #Input: cda_input, shape(self.mini_batch_size, 8)\n #Output: gen_output_2, shape(self.mini_batch_size, 4)\n G_2 = Sequential(name='orderbook_gen')\n G_2.add(Dense(256*3,input_dim=8))\n G_2.add(BatchNormalization())\n G_2.add(Activation('relu'))\n G_2.add(Reshape((16, 16, 3)))\n G_2.add(Conv2D(128,(3,3),padding='same'))\n G_2.add(BatchNormalization())\n G_2.add(Activation('relu'))\n G_2.add(Conv2D(64, (3,3),padding='same'))\n G_2.add(BatchNormalization())\n G_2.add(Activation('relu'))\n G_2.add(Conv2D(32,(3,3),padding='same'))\n G_2.add(BatchNormalization())\n G_2.add(Activation('relu'))\n G_2.add(Flatten())\n G_2.add(Dense(4))\n\n # extract the last best bid/ask from history as the history of CDA\n orderbook_history = Lambda(lambda x: x[:,-1,5:], output_shape=(4,))(history)\n\t\t# gen_output_1 is output of generator\n gen_output_reshaped = Reshape((self.orderLength-4,))(gen_output_1)\n\t\t# remove time as it is not needed for CDA network\n gen_output_without_time = Lambda(lambda x: x[:,1:], output_shape=(4,))(gen_output_reshaped)\n cda_input = 
Concatenate(axis=1)([gen_output_without_time,orderbook_history])\n gen_output_2 = G_2(cda_input)\n\n #Output of Generator, shape(self.mini_batch_size, self.orderLength) concatentated with output\n\t\t# of the CDA network to get final output\n gen_output = Concatenate(axis=2)([gen_output_1,\\\n Reshape((self.mini_batch_size, 4, 1))(gen_output_2)])\n\n ##################### Critic ###########################################\n # Input of Critic, merge history_input, lstm_output_h and gen_output/truth_input\n discriminator_input_fake = (Concatenate(axis=2)\\\n ([Reshape((1, self.lstm_out_length,1))(lstm_output_h), gen_output]))\n discriminator_input_truth = Concatenate(axis=2)\\\n ([Reshape((1, self.lstm_out_length,1))(lstm_output_h), truth_input])\n #random-weighted average of real and generated samples - following Improved WGAN work\n averaged_samples = RandomWeightedAverage()\\\n ([discriminator_input_fake, discriminator_input_truth])\n\n #Critic\n #Input: discriminator_input_fake/discriminator_input_truth\n #Ouput: score\n D = Sequential(name='discriminator')\n D.add(Conv2D(512,(3,3),padding='same', input_shape=(self.mini_batch_size, \\\n self.orderLength+self.lstm_out_length,1)))\n D.add(Activation('relu'))\n D.add(Conv2D(256, (3,3),padding='same'))\n D.add(Activation('relu'))\n D.add(Conv2D(128,(3,3),padding='same'))\n D.add(Activation('relu'))\n D.add(Flatten())\n D.add(Dense(1))\n #self.D = D\n\n discriminator_output_fake = D(discriminator_input_fake)\n discriminator_output_truth = D(discriminator_input_truth)\n averaged_samples_output = D(averaged_samples)\n\n #Def gradient penalty loss\n partial_gp_loss = partial(self.gradient_penalty_loss,\n averaged_samples=averaged_samples,\n gradient_penalty_weight=1)\n partial_gp_loss.__name__ = 'gradient_penalty'\n\n ########################### Model Definition ##########################\n # Generator model\n # Input: [history_input,history,noise_input_1]\n # Output: gen_output\n self.gen = Model(inputs=[history,noise_input_1], outputs= gen_output)\n #Model Truth:\n self.model_truth = Model(inputs=[history,noise_input_1,truth_input],\\\n outputs= [discriminator_output_fake,discriminator_output_truth,averaged_samples_output])\n #Model Fake:\n self.model_fake = Model(inputs=[history,noise_input_1],\\\n outputs= discriminator_output_fake)\n #Optimizer\n optimizer = Adam(0.0001, beta_1=0.5, beta_2=0.9)\n\n #Compile Models\n #Generator\n self.gen.compile(optimizer=optimizer, loss='binary_crossentropy')\n self.gen.summary()\n #Model Truth - Generator is not trainable here\n for layer in self.model_truth.layers:\n layer.trainable = False\n self.model_truth.get_layer(name='discriminator').trainable = True\n self.model_truth.get_layer(name='lstm_critic').trainable = True\n self.model_truth.compile(optimizer=optimizer, \\\n loss=[self.w_loss,self.w_loss,partial_gp_loss])\n #Model Fake - critic is not trainable here\n for layer in self.model_fake.layers:\n layer.trainable = True\n self.model_fake.get_layer(name='discriminator').trainable = False\n self.model_fake.get_layer(name='lstm_critic').trainable = False\n self.model_fake.compile(optimizer=optimizer, loss=self.w_loss)\n #print summary\n self.model_fake.summary()\n self.model_truth.summary()\n\n\t# gnr_path = path to save generator model\n def fit(self, train_steps=300001, batch_size=64, gnr_path='gnr'):\n #import data\n data = np.load(self.data_path, mmap_mode='r')\n\n\n for i in range(train_steps):\n\t\t\t# postive_y and negative_y go ultimately into the loss functions\n positive_y = 
np.ones((batch_size, 1), dtype=np.float32)\n negative_y = -positive_y\n\t\t\t# dummy_y goes as y_true in gradient_penalty_loss\n dummy_y = np.zeros((batch_size, 1), dtype=np.float32)\n noise = np.random.uniform(-1, 1 , size=[data.shape[0],batch_size, self.noiseLength])\n\n for j in range(100): # critic trained 100 times\n #Get one sample from index\n idx = np.random.randint(0, data.shape[0])\n # Get Noise\n noise_1 = noise[idx]\n\n ### Prepare Data\n #Normalization\n orderStreams_train = self.normalize(np.squeeze(data[idx].copy()))\n #History time\n #history = orderStreams_train[:,self.historyLength-1,0:1]\n #History\n history_full = orderStreams_train[:,:self.historyLength,1:]\n #Real orders\n truth = np.expand_dims(orderStreams_train[:,self.historyLength:,1:],-1)\n\n #Train Critic\n d_loss = self.model_truth.train_on_batch([history_full,noise_1,\\\n truth], [negative_y,positive_y,dummy_y])\n\n #Train Generator\n a_loss = self.model_fake.train_on_batch([history_full,noise_1], positive_y)\n\n #Logging\n log_mesg = \"%d: [D_fake loss: %f,D_truth loss: %f] \" % (i, d_loss[0],d_loss[1])\n log_mesg = \"%s [A loss: %f]\" % (log_mesg, a_loss)\n with open('log_goog_no_time.txt','a') as f:\n f.write(log_mesg+'\\n')\n f.close()\n if i % 1000 == 0:\n self.gen.save(gnr_path+'_'+str(i))\n\n def denormalize(self, normArray):\n def denormalize_one_dim(data,maxV=1, minV=0, high=1, low=-1):\n return ((((data - high) * (maxV - minV))/(high - low)) + maxV)\n\n Array = normArray.copy()\n # 10 dims: [inter-arrival time;buy/sell;cancel/not cancel/;price;\n # quantity;best bid price;best ask price;best bid quantity;\n # best ask quantity]\n # MinMax Values for different dataset\n #New_rep\n maxV = [16500,1,1,942,150,942,942,3000,3000]\n minV = [0,0,0,916,0,916,916,1,1]\n # GooG\n #maxV = [205,9,9,9,1,1,942,620]\n #minV = [4,0,0,0,0,0,916,0]\n # Syn32\n #maxV = [63,1,1,1,1,1,1,2,2]\n #minV = [0,0,0,-1,-1,-1,-1,1,1]\n # Syn64\n #maxV = [3.8,9,9,9,1,1,1,2]\n #minV = [3.5,0,0,0,0,0,-1,0]\n #PN\n #maxV = [np.log(233448 + 1),1,1,13,300,13,13,40000,40000]\n #minV = [0,0,0,6,0,6,6,1,1]\n #maxV = [233448,1,1,13,300,13,13,40000,40000]\n #minV = [0,0,0,6,0,6,6,1,1]\n #PN\n #maxV = [46.5,9,9,9,1,1,12.68,635]\n #minV = [6.54,0,0,0,0,0,6.41,0.8]\n\n for i in range(Array.shape[2]):\n Array[:,:,i] = denormalize_one_dim(normArray[:,:,i],maxV=maxV[i],minV=minV[i])\n\n return Array\n\n def normalize(self, normArray):\n def normalize_one_dim(array, maxV=1, minV=0, high=1, low=-1):\n for i in range(array.shape[0]):\n for j in range(array.shape[1]):\n if array[i,j] < minV:\n array[i,j] = minV\n if array[i,j] > maxV:\n array[i,j] = maxV\n return (high - (((high - low) * (maxV - array)) / (maxV - minV)))\n\n Array = normArray.copy()\n\n #MinMax Values for different dataset\n # GooG\n maxV = [23,16500,1,1,942,150,942,942,3000,3000]\n minV = [0,0,0,0,916,0,916,916,1,1]\n # Syn32\n #maxV = [1,63,1,1,1,1,1,1,2,2]\n #minV = [0,0,0,0,-1,-1,-1,-1,1,1]\n # Syn64\n #maxV = [3.8,9,9,9,1,1,1,2]\n #minV = [3.5,0,0,0,0,0,-1,0]\n #PN\n #maxV = [23,np.log(233448 + 1),1,1,13,300,13,13,40000,40000]\n #minV = [0,0,0,0,6,0,6,6,1,1]\n #maxV = [1,233448,1,1,13,300,13,13,40000,40000]\n #minV = [0,0,0,0,6,0,6,6,1,1]\n #PN\n #maxV = [46.5,9,9,9,1,1,12.68,635]\n #minV = [6.54,0,0,0,0,0,6.41,0.8]\n\n for i in range(Array.shape[2]):\n Array[:,:,i] = normalize_one_dim(normArray[:,:,i],maxV=maxV[i],minV=minV[i])\n return Array\n\n\t# length is the maximum number of orders outputted in 1 run\n def 
predict(self,save_path='predict_goog_no_time.npy',length=600000,step_size=1,num_runs=1):\n\n #Load Data\n data = np.load(self.data_path, mmap_mode='r')\n #Load Generator\n gen = load_model('gnr_no_time_30000')\n\n #Allocate space for generated orders\n generated_orders = np.zeros((num_runs, length*step_size+self.historyLength,10))\n\n\n for j in range(num_runs):\n #Get seeds from real data\n da = data[0,0:1,:,:,0].copy()\n orderStreams_train = self.normalize(da)\n history_full = orderStreams_train[:,:self.historyLength,1:]\n generated_orders[j,:self.historyLength,1:] = self.denormalize(history_full)\n\n for i in range(length):\n noise_1 = np.random.uniform(-1,1,size=[1, self.noiseLength])\n orderStreams = self.denormalize(np.squeeze(gen.predict([history_full,noise_1]),-1))\n\n generated_orders[j,self.historyLength+ i * step_size : self.historyLength+(i+1)*step_size,1:]\\\n = orderStreams\n r = generated_orders[j:j+1,self.historyLength + i*step_size - 1,0] + orderStreams[0,:,:1]\n generated_orders[j,self.historyLength + i * step_size : self.historyLength+(i+1)*step_size,0] = r\n\n\t\t\t\t# 11.5 corresponds to 23 time slots\n history = (np.floor(generated_orders[j:j+1,self.historyLength+ (i+1)*step_size -1,0]/1000000) - 11.5)/11.5\n history_full = self.normalize(generated_orders[j:j+1,(i+1)*step_size : self.historyLength+ (i+1)*step_size,:].copy())[:,:,1:]\n\n #stop after generating one day long stream\n if history > 1:\n break\n\n #print some stats\n if(i % 1000 == 0 ):\n print(str(j)+' runs ' + str(i)+' steps')\n # print(interval,history*11.5 + 11.5)\n #save generated orders\n np.save(save_path,generated_orders)\n","sub_path":"experiments/train_no_time.py","file_name":"train_no_time.py","file_ext":"py","file_size_in_byte":16456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"469130757","text":"class node:\n\n def __init__(self,data):\n self.data=data\n self.next=None\n\nclass ll_start:\n\n def __init__(self):\n self.head=None\n\n def print_list(self):\n temp = self.head\n if self.head==None:\n print('The list is empty')\n while (temp):\n print(temp.data)\n temp=temp.next\n\ndef add_front():\n value=input(\"Enter the head:\")\n if first.head==None:\n first.head=node(value)\n return\n temp=first.head\n first.head=node(value)\n first.head.next=temp\n\n\ndef add_mid():\n value=input('Enter data of new node:')\n prev=input('Enter the value of previous node:')\n\n loop = first.head\n while loop:\n if loop.data == prev:\n break\n else:\n loop=loop.next\n else:\n print('No such previous node exists!!')\n return\n middle=node(value)\n middle.next=loop.next\n loop.next=middle\n \ndef add_end():\n value=input(\"Enter the data of last node:\")\n ending=node(value)\n if first.head.next==None:\n first.head.next=ending\n return\n loop = first.head\n while loop.next:\n loop=loop.next\n loop.next=ending\n\ndef remove_front():\n value = first.head.data\n if first.head.next == None:\n first.head=None\n else:\n first.head = first.head.next\n print(f\"Removing from top of list with value: {value}\")\n\ndef remove_mid():\n value=input('Enter the value of node u want to remove:')\n prev = first.head \n if prev.data==value:\n remove_front()\n return\n loop=prev.next\n # walk the list; the while-else only fires if the value was never found\n while loop:\n if loop.data==value:\n break\n else:\n loop=loop.next\n prev=prev.next\n else:\n print('No such node exists!!')\n return\n prev.next=loop.next\n print(f\"Removed the node with value: {value}\")\n\n\ndef remove_end():\n if first.head.next==None:\n first.head=None\n return\n prev=first.head\n loop=prev.next\n while (loop.next):\n loop=loop.next\n prev=prev.next\n prev.next=None\n print(f\"Removed the node with value: {loop.data}\")\n\nif __name__=='__main__':\n first=ll_start()\n add_front()\n while True:\n y=input(\"Do you wish to add another node(y/n):\")\n # tuple membership (not substring tests) so empty input is not accepted\n if y in ('y', 'Y'):\n listing=True\n break\n elif y in ('n', 'N'):\n listing=False\n break\n else:\n print('Invalid input')\n while listing:\n print(\"Where u want to insert node\")\n choice=input(\"front,mid or end(f/m/e):\")\n \n if choice in ('f', 'F'):\n add_front()\n elif choice in ('m', 'M'):\n add_mid()\n elif choice in ('e', 'E'):\n add_end()\n else:\n print('!! Invalid input !!')\n continue \n \n while True:\n x= input('Want to enter more?(y/n)')\n if x=='n' or x=='N':\n listing=False\n break\n elif x in ['y','Y']:\n break\n else: \n print(\"!!Invalid input(y/n)!!\")\n\n \n print(\"The Linked list:\")\n first.print_list()\n\n while True:\n y=input(\"Do you wish to remove any node(y/n):\")\n if y in ('y', 'Y'):\n removing=True\n break\n elif y in ('n', 'N'):\n removing=False\n break\n else:\n print('Invalid input')\n while removing:\n print(\"Where u want to delete node from\")\n choice=input(\"front,mid or end (f/m/e):\")\n \n if choice in ('f', 'F'):\n remove_front()\n elif choice in ('m', 'M'):\n remove_mid()\n elif choice in ('e', 'E'):\n remove_end()\n else:\n print('!! Invalid input !!')\n continue \n \n print(\"The Linked list:\")\n first.print_list()\n \n if first.head==None:\n break \n \n while True:\n x= input('Want to remove more?(y/n)')\n if x=='n' or x=='N':\n removing=False\n break\n elif x in ['y','Y']:\n break\n else: \n print(\"!!Invalid input(y/n)!!\")\n \n\n ","sub_path":"Structres/linked_lists.py","file_name":"linked_lists.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"206938497","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 9 02:35:47 2019\n\n@author: User\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport os\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.linear_model import LogisticRegression\n\nfolder = r'C:\Users\pene\Google Drive\Stony Brook\Thesis\Prof. 
Djuric\\Working papers\\Finance\\Gaussian Processes\\GP Bankruptcy\\UCI_data'\nfile = os.path.join(folder,'default of credit card clients.xls')\nn_sel = 10000\n\ndata = pd.read_excel(file,'Data',skiprows=1)\ny_all = data['default payment next month'].values\nX_all = data[['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE', 'PAY_0','PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT1','BILL_AMT2', 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6','PAY_AMT1', 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5','PAY_AMT6']].values\n\nid_train = np.random.choice(data.shape[0],n_sel,replace=False)\nid_test = np.setdiff1d(np.arange(data.shape[0]),id_train)\nX = X_all[id_train,:]\ny = y_all[id_train]\n\nkernel = 1.0 * RBF(1.0)\ngpc = GaussianProcessClassifier(kernel=kernel,random_state=0).fit(X, y)\ngpc.score(X_all[id_test,:], y_all[id_test])\n\n\nclf = LogisticRegression(random_state=0, solver='lbfgs',multi_class='ovr').fit(X, y)\nclf.score(X_all[id_test,:], y_all[id_test])","sub_path":"bkcy_1.py","file_name":"bkcy_1.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"257481869","text":"# Code to graph Van der Waals energy based on distance between atoms\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom LJ12_6 import LJ\n\n# Create an array for the varying distance between atoms\nr = np.arange(0.01, 10., 0.01)\n\n# For each value of r, plug it into LJ and create an array V of these values\nV = []\nfor i in range(len(r)):\n V.append(LJ(r[i]))\n\n\n# Create a graph with r on the x-axis and V on the y-axis\nplt.plot(r,V,'k')\n\n# Label the axes\nplt.xlabel('r', size='x-large')\nplt.ylabel('VDW energy')\n\n# Set where grid lines will be on the graph\nx_ticks = np.arange(0,11,1)\ny_ticks = np.arange(-2,11,1)\nplt.xticks(x_ticks)\nplt.yticks(y_ticks)\n\n# Define the range of the x-axis and y-axis, then overlay the grid\nplt.axis([0,10,-2, 10])\nplt.grid(True)\n\n# Place axes at x = 0 and y = 0 rather than at bottom and left\nplt.gca().spines['left'].set_position('zero')\nplt.gca().spines['bottom'].set_position('zero')\n\nplt.savefig('VDWenergy_vs_distance.png')\nplt.show()\n","sub_path":"assignment0/energyVSdistance_graph.py","file_name":"energyVSdistance_graph.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"264541794","text":"cartelera = [# (month, country, movie_title, year_filmed, [sala1, sala2, ...])\n ('julio' , 'FRANCIA' , 'Melo' , 1986, ['sala3', 'sala1']),\n ('enero' , 'CHILE' , 'Gloria' , 2013, ['sala1', 'sala2']),\n ('marzo' , 'ALEMANIA', 'Tiempo de Canibales', 2014, ['sala1', 'sala2']),\n ('marzo' , 'ALEMANIA', 'Soul Kitchen' , 2009, ['sala3', 'sala4']),\n ('febrero', 'FRANCIA' , 'El muelle' , 1962, ['sala1', 'sala3']),\n ('febrero', 'FRANCIA' , 'La dama de honor' , 2004, ['sala1', 'sala4']),\n ('abril' , 'RUSIA' , 'Padre del soldado' , 1964, ['sala3', 'sala2', 'sala4']),\n ('mayo' , 'MEXICO' , 'Cumbres' , 2013, ['sala3', 'sala2']),\n ('junio' , 'BELGICA' , 'Rondo' , 2012, ['sala4', 'sala2'])]\n\ns = input()\ndicc = {}\nlis = []\n\nfor x in cartelera:\n if s in x[4]:\n dicc[x[0]] = []\n\nfor x in cartelera:\n if s in x[4]:\n dicc[x[0]].append(x[2])\n\nprint(dicc)","sub_path":"Certamen2/cineton-2.py","file_name":"cineton-2.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"326165461","text":"import Utils\n\nfrom EncoderStructural import EncoderStructural\nfrom EncoderCellContent import EncoderCellContent\nfrom DecoderStructural import DecoderStructural\nfrom DecoderCellContent import DecoderCellContent\n\nfrom BatchingMechanism import BatchingMechanism\nfrom CheckPoint import CheckPoint\nfrom TrainStep import train_step\nfrom ValStep import val_step\n\nimport torch\nfrom time import perf_counter, time\nimport numpy as np\n\nfrom FixedEncoder import FixedEncoder\nimport PIL\n\n\nclass Model:\n \"\"\"Combined class for encoder, structural decoder and cell decoder.\"\"\"\n\n def __init__(self,\n relative_path,\n model_tag,\n in_channels=512,\n out_channels_structural=16,\n out_channels_cell_content=16,\n structural_embedding_size=16,\n structural_hidden_size=256,\n structural_attention_size=256,\n cell_content_embedding_size=80,\n cell_content_hidden_size=512,\n cell_content_attention_size=256,\n maxT_structure = 2000,\n structural_encoder_conv = False,\n cell_content_encoder_conv = False):\n\n # set device\n # sets device for model and PyTorch tensors\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # set model tag to be used to identify checkpoint files\n self.model_tag = model_tag\n\n # set up path of dataset\n Utils.DatasetPath.set_relative_path(relative_path)\n\n # load dictionaries\n structural_token2integer, structural_integer2token = Utils.load_structural_vocabularies()\n cell_content_token2integer, cell_content_integer2token = Utils.load_cell_content_vocabularies()\n self.structural_token2integer = structural_token2integer\n self.structural_integer2token = structural_integer2token\n self.cell_content_token2integer = cell_content_token2integer\n self.cell_content_integer2token = cell_content_integer2token\n # initialize structural_encoder\n encoder_structural = EncoderStructural(\n in_channels, out_channels_structural,conv = structural_encoder_conv )\n encoder_structural_optimizer = torch.optim.Adam(params=filter(\n lambda p: p.requires_grad, encoder_structural.parameters()))\n self.encoder_structural = encoder_structural.to(self.device)\n self.encoder_structural_optimizer = encoder_structural_optimizer\n\n # initialize cell_encoder\n encoder_cell_content = EncoderCellContent(\n in_channels, out_channels_cell_content, conv = cell_content_encoder_conv)\n encoder_cell_content_optimizer = torch.optim.Adam(params=filter(\n lambda p: p.requires_grad, encoder_cell_content.parameters()))\n self.encoder_cell_content = encoder_cell_content.to(self.device)\n self.encoder_cell_content_optimizer = encoder_cell_content_optimizer\n\n # set up the decoder for structural tokens\n decoder_structural = DecoderStructural(structural_token2integer, structural_embedding_size,\n out_channels_structural, structural_hidden_size, structural_attention_size)\n decoder_structural_optimizer = torch.optim.Adam(params=filter(\n lambda p: p.requires_grad, decoder_structural.parameters()))\n self.decoder_structural = decoder_structural.to(self.device)\n self.decoder_structural_optimizer = decoder_structural_optimizer\n\n # set up the decoder for cell content tokens\n decoder_cell_content = DecoderCellContent(cell_content_token2integer, cell_content_embedding_size,\n out_channels_cell_content, structural_hidden_size, cell_content_hidden_size, cell_content_attention_size)\n decoder_cell_content_optimizer = torch.optim.Adam(params=filter(\n lambda p: p.requires_grad, decoder_cell_content.parameters()))\n self.decoder_cell_content = 
decoder_cell_content.to(self.device)\n self.decoder_cell_content_optimizer = decoder_cell_content_optimizer\n\n def load_checkpoint(self, file_path=\"checkpoint.pth.tar\"):\n loader = CheckPoint.load_checkpoint(file_path)\n self.encoder_structural.load_state_dict(loader['encoder_structural'])\n self.encoder_cell_content.load_state_dict(\n loader['encoder_cell_content'])\n self.decoder_structural.load_state_dict(loader['decoder_structural'])\n self.decoder_cell_content.load_state_dict(\n loader['decoder_cell_content'])\n self.encoder_structural_optimizer.load_state_dict(\n loader['encoder_structural_optimizer'])\n self.encoder_cell_content_optimizer.load_state_dict(\n loader['encoder_cell_content_optimizer'])\n self.decoder_structural_optimizer.load_state_dict(\n loader['decoder_structural_optimizer'])\n self.decoder_cell_content_optimizer.load_state_dict(\n loader['decoder_cell_content_optimizer'])\n\n def set_eval(self):\n '''Change to evaluation state.'''\n self.decoder_structural = self.decoder_structural.eval()\n self.decoder_cell_content = self.decoder_cell_content.eval()\n self.encoder_structural = self.encoder_structural.eval()\n self.encoder_cell_content = self.encoder_cell_content.eval()\n\n def set_train(self):\n ''' Change to training state'''\n self.decoder_structural = self.decoder_structural.train()\n self.decoder_cell_content = self.decoder_cell_content.train()\n self.encoder_structural = self.encoder_structural.train()\n self.encoder_cell_content = self.encoder_cell_content.train()\n\n def predict(self, file_path, maxT=2000):\n ''' Only works for a single example.'''\n self.set_eval()\n # instantiate the fixed CNN encoder\n # with ResNet-18, the features map will be (features_map_size * features_map_size, 512)\n features_map_size = 12\n fixedEncoder = FixedEncoder('ResNet18', features_map_size)\n\n # open image\n image = PIL.Image.open(file_path)\n # preprocess image\n preprocessed_images = [fixedEncoder.preprocess(image)]\n preprocessed_image = torch.stack(preprocessed_images)\n\n # run through ResNet-18\n features_map = fixedEncoder.encode(preprocessed_image)\n features_map_float32 = features_map.astype(np.float32)\n features_map_tensor = torch.from_numpy(features_map_float32)\n\n # permute axes of features map in the same way as during training\n features_map_tensor = features_map_tensor.permute(0, 2, 1)\n\n # reshape to correct dimensions\n features_map_input = torch.reshape(\n features_map_tensor, (1, 512, features_map_size, features_map_size))\n\n # pass through encoders\n encoded_structural_features_map = self.encoder_structural.forward(\n features_map_input)\n predictions, storage, pred_triggers, structure_attention_weights = self.decoder_structural.predict(\n encoded_structural_features_map, structural_target=None, store_attention=True, maxT = maxT)\n encoded_cell_content_features_map = self.encoder_cell_content.forward(\n features_map_input)\n predictions_cell, cell_attention_weights = self.decoder_cell_content.predict(\n encoded_cell_content_features_map, storage, cell_content_target=None, store_attention=True)\n\n predicted_struc_tokens = [\n self.structural_integer2token[p.item()] for p in predictions[0]]\n\n predicted_cell_tokens = []\n for n, l in enumerate(predictions_cell[0]): #\n predicted_cell_tokens.append([])\n for cell_pred in l:\n predicted_cell_tokens[n].append(self.cell_content_integer2token[cell_pred.item()])\n\n return predicted_struc_tokens, predicted_cell_tokens, structure_attention_weights, cell_attention_weights\n\n def train(self,\n 
gauth=None,\n checkpoint_temp_id=None,\n epochs=1,\n lambdas=[1],\n lrs=[0.001],\n number_examples=100,\n number_examples_val=100,\n batch_size=10,\n batch_size_val=10,\n storage_size=1000,\n val=None,\n maxT_val = 2000,\n alpha_c_struc = 0.0,\n alpha_c_cell_content = 0.0,\n test_link =None):\n\n if not test_link:\n test_link = epochs*[None]\n\n assert epochs == len(lambdas) == len(lrs) == len(val) ==len(test_link), \"number of epoch, learning rates, lambdas, val and test_link are inconsistent\"\n\n # instantiate the batching object\n batching = BatchingMechanism(\n dataset_split='train', number_examples=number_examples, batch_size=batch_size, storage_size=storage_size)\n\n # initialise the object\n # here the object works out how many storages and how many examples from every storage are needed\n batching.initialise()\n\n if val:\n batching_val = BatchingMechanism(\n dataset_split='dev', number_examples=number_examples_val, batch_size=batch_size_val, storage_size=storage_size)\n batching_val.initialise()\n\n # instantiate checkpoint\n checkpoint = CheckPoint(\n self.model_tag, gauth=gauth, checkpoint_temp_id=checkpoint_temp_id)\n\n # then reinitialize so we haven't used up batch\n batching.initialise()\n losses_s = []\n losses_s_val = []\n losses_cc = []\n losses_cc_val = []\n losses_total = []\n losses_total_val = []\n for epoch in range(epochs):\n print(epoch)\n # change model to training\n self.set_train()\n\n t1_start = perf_counter()\n\n # reset total loss across epoch\n total_loss_s = 0\n total_loss_cc = 0\n total_loss = 0\n total_loss_s_val = 0\n total_loss_cc_val = 0\n total_loss_val = 0\n # create random batches of examples\n # these \"batches\" are the just information needed to retrieve the actual tensors\n # batch = (storage number, [list of indices within the storage])\n batches = batching.build_batches(randomise=True)\n\n LAMBDA = lambdas[epoch]\n\n # update learning rates manually for each epoch\n lr = lrs[epoch]\n for g in self.decoder_structural_optimizer.param_groups:\n g['lr'] = lr\n for g in self.decoder_cell_content_optimizer.param_groups:\n g['lr'] = lr\n for g in self.encoder_structural_optimizer.param_groups:\n g['lr'] = lr\n for g in self.encoder_cell_content_optimizer.param_groups:\n g['lr'] = lr\n\n # batch looping for training\n for batch in batches:\n\n # call 'get_batch' to actually load the tensors from file\n features_maps, structural_tokens, triggers, cells_content_tokens = batching.get_batch(\n batch)\n # test greedy\n structural_tokens = structural_tokens # [:, 0:50]\n triggers = triggers # [:,0:50]\n# assert 0\n #####\n # send to training function for forward pass, backpropagation and weight updates\n\n predictions, loss_s, predictions_cell, loss_cc, loss = train_step(\n features_maps, structural_tokens, triggers, cells_content_tokens, self, LAMBDA=LAMBDA, alpha_c_struc=alpha_c_struc, alpha_c_cell_content = alpha_c_cell_content, test_link =test_link[epoch])\n\n # apply logsoftmax\n log_p = torch.nn.LogSoftmax(dim=2)(predictions)\n\n # greedy decoder to check prediction WITH teacher forcing\n _, predict_id = torch.max(log_p, dim=2)\n if abs(1.0-LAMBDA)>0.001:\n log_p_cell = torch.nn.LogSoftmax(dim=2)(predictions_cell)\n _, predict_id_cell = torch.max(log_p_cell, dim=2)\n\n total_loss_s += loss_s\n total_loss += loss\n if loss_cc:\n total_loss_cc += loss_cc\n\n losses_total.append( total_loss)\n losses_s.append(total_loss_s)\n losses_cc.append(total_loss_cc)\n\n\n# total_loss_s /= len(batches)\n print(\"Ground truth, structural:\")\n 
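# Added comment (assumption-free reading of the code below): the prediction is\n # produced WITH teacher forcing, i.e. ground-truth tokens are fed back at every\n # step, so the printed accuracy typically overstates free-running decoding.\n 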
print([self.structural_integer2token[p.item()]\n for p in structural_tokens[0].detach().numpy()])\n print(\"Prediction WITH teacher forcing (1 example):\")\n print([self.structural_integer2token[p.item()]\n for p in predict_id[:, 0].detach().numpy()])\n print(\"Accuracy WITH teacher forcing (1 example):\")\n\n print(np.sum(structural_tokens[0,:].detach().numpy() == predict_id.detach(\n ).numpy()[:, 0])/structural_tokens[0].detach().numpy().shape[0])\n # for name, param in self.decoder_cell_content.named_parameters():\n # if param.requires_grad:\n # print(name)#, param.data)\n # for name, param in self.decoder_structural.named_parameters():\n # if param.requires_grad:\n # print(name)#, param.data)\n# print(self.decoder_cell_content.cell_content_attention.attention_encoded_features_map.bias)\n\n if val and abs(LAMBDA-1.0)>0.001:\n print(\"Ground truth, cells:\")\n print([self.cell_content_integer2token[p.item()]\n for p in cells_content_tokens[0][1].detach().numpy()])\n print(\"Prediction WITH teacher forcing (1 example):\")\n print([self.cell_content_integer2token[p.item()]\n for p in predict_id_cell[:, 1].detach().numpy()])\n print(\"Accuracy WITH teacher forcing (1 example):\")\n print(cells_content_tokens.shape, predict_id_cell.shape)\n\n print(np.sum(cells_content_tokens[0,:,:].detach().numpy().T == predict_id_cell.detach(\n ).numpy())/cells_content_tokens[0,:,:].detach().numpy().size)\n\n\n checkpoint.save_checkpoint(epoch, self.encoder_structural, self.encoder_cell_content, self.decoder_structural, self.decoder_cell_content,\n self.encoder_structural_optimizer, self.encoder_cell_content_optimizer, self.decoder_structural_optimizer, self.decoder_cell_content_optimizer, total_loss, total_loss_s, total_loss_cc)\n\n checkpoint.archive_checkpoint()\n\n if gauth:\n checkpoint.copy_checkpoint()\n\n # batch loop for validation\n if val:\n if val[epoch]:\n with torch.no_grad():\n # change state of encoders and decoders to .eval\n self.set_eval()\n\n batches_val = batching_val.build_batches(randomise=False)\n\n # batch looping for validation\n for batch in batches_val:\n # call 'get_batch' to actually load the tensors from file\n features_maps_val, structural_tokens_val, triggers_val, cells_content_tokens_val = batching_val.get_batch(batch)\n predictions_val, loss_s_val, predictions_cell_val, loss_cc_val, loss_val = val_step(\n features_maps_val, structural_tokens_val, triggers_val, cells_content_tokens_val, self, LAMBDA, maxT_val = maxT_val)\n total_loss_s_val += loss_s_val\n total_loss_val += loss_val\n if loss_cc_val:\n total_loss_cc_val += loss_cc_val\n losses_total_val.append( total_loss_val)\n losses_s_val.append(total_loss_s_val)\n losses_cc_val.append(total_loss_cc_val)\n\n print(\"-------------Validation loss:---------------\")\n print(\"-- structural decoder:---\")\n print(\"Truth (1 example)\")\n print([self.structural_integer2token[p.item()]\n for p in structural_tokens_val[0]])\n print(\"Prediction (1 example)\")\n print([self.structural_integer2token[p.item()]\n for p in predictions_val[0]])\n if abs(LAMBDA-1.0) > 0.000000001:\n print(\"-- cell decoder:---\")\n print(\"Truth\")\n print([self.cell_content_integer2token[p.item()]\n for p in cells_content_tokens_val[0][0]])\n print(\"Prediction\")\n if len(predictions_cell_val[0])>0:\n print([self.cell_content_integer2token[p.item()]\n for p in predictions_cell_val[0][0]])\n ######################\n\n t1_stop = perf_counter()\n print(\"----------------------\")\n print('epoch: %d \\tLAMBDA: %.2f\\tlr:%.5f\\ttime: %.2f' %\n (epoch, 
LAMBDA, lr, t1_stop-t1_start))\n print('Total loss: %.5f' % total_loss)\n print('Struct. decod. loss: %.5f' % total_loss_s)\n print(\"Cell dec. loss:\", total_loss_cc)\n if val:\n if val[epoch]:\n print(\"Total val. loss: %.5f\" %total_loss_val)\n print('Validation struct. decod. loss: %.5f'%total_loss_s_val)\n if abs(1-LAMBDA) > 0.0000001:\n# print(val)\n# # quit()\n print('Validation cell decoder. loss: %.5f'% loss_cc_val)\n print('time for 100k examples:', \"%.2f hours\" %\n ((t1_stop-t1_start)/number_examples*100000/3600))\n return losses_total , losses_s, losses_cc, losses_total_val, losses_s_val, losses_cc_val\n","sub_path":"BaseModel_pytorch/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":18265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"254302074","text":"import csv\nfrom pathlib import Path\nfrom typing import List, Dict, OrderedDict, Tuple\n\nfrom . import system_data_dir\n\n\ndef arrange_tag_type(tag: str) -> str:\n return tag.replace('\\n', ' ')\n\n\ndef standardize_empty_column(tag: str) -> str:\n return tag.replace('‐', '').replace('-', '')\n\n\ndef arrange_split_point(listed_tag_attr_pair: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n return [(tag_type, tag.replace(', ', ',')) for tag_type, tag in listed_tag_attr_pair]\n\n\ndef select_column(record: OrderedDict, id_idx: int, tags_range: Tuple[int, int])\\\n -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:\n\n listed_tag_attr_pair = list(record.items())\n return listed_tag_attr_pair[id_idx], arrange_split_point(listed_tag_attr_pair[tags_range[0]:tags_range[1]+1])\n\n\ndef read_csv(raw_news_tag_csv: Path, id_idx: int, tags_range: Tuple[int, int]) \\\n -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:\n\n with raw_news_tag_csv.open(mode='r') as tc:\n tc = csv.DictReader(tc, delimiter=',')\n for row in tc:\n yield select_column(row, id_idx, tags_range)\n\n\ndef split_multi_tags(listed_tag_attr_pair: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n divided_multi_tags_dict = {\n i: [(tag_type, split_tag) for split_tag in tag.split(',') if split_tag != '']\n for i, (tag_type, tag) in enumerate(listed_tag_attr_pair) if len(tag.split(',')) > 1\n for split_tag in tag.split(',')\n }\n\n pushed_idx_by_insert = 0\n if len(divided_multi_tags_dict) > 0:\n for idx, (insert_start_idx, multi_tag_list) in enumerate(divided_multi_tags_dict.items()):\n listed_tag_attr_pair.pop(insert_start_idx + pushed_idx_by_insert)\n\n for i in range(len(multi_tag_list)):\n\n insert_point = insert_start_idx + i\n if idx != 0:\n insert_point = insert_start_idx + pushed_idx_by_insert\n\n listed_tag_attr_pair.insert(insert_point, multi_tag_list[i])\n\n if i != len(multi_tag_list) - 1:\n pushed_idx_by_insert += 1\n\n return listed_tag_attr_pair\n\n\ndef build_news_tag(raw_news_tag_csv: Path, reformed_csv_name: str, id_idx: int, tags_range: Tuple[int, int]) -> None:\n reformed_csv = system_data_dir / reformed_csv_name\n header = ['news_id', 'tag_type', 'tag']\n\n with open(reformed_csv, mode='w', newline='') as rc:\n rc_dict_writer = csv.DictWriter(rc, fieldnames=header)\n rc_dict_writer.writeheader()\n\n for news_id, tags in read_csv(raw_news_tag_csv, id_idx, tags_range):\n tags = split_multi_tags(tags)\n for tag in tags:\n rc_dict_writer.writerow({\n 'news_id': news_id[1],\n 'tag_type': arrange_tag_type(tag[0]),\n 'tag': standardize_empty_column(tag[1])\n })\n\n\nif __name__ == '__main__':\n raw_tag_csv = system_data_dir / 'editorial_part_tag_project.csv'\n\n # 
for news_id, tags in read_csv(raw_tag_csv):\n # print(tags)\n # for tag in tags:\n # print(standardize_empty_column(tag[1]))\n # print('\\n')\n\n # split_multi_tags(listed_dict_tags)\n\n build_news_tag(raw_tag_csv, 'reformed_tag_csv.csv', 18, (22, 32))\n","sub_path":"AWS_note/src/pj_tag/reform_csv.py","file_name":"reform_csv.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"261173172","text":"import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport re\n\n# \"raw_steamspy_year_2017.txt\" is the raw source from the page \"view-source:https://steamspy.com/year/2017\"\n# I DID A FIND/REPLACE ON to GET RID OF ANY BLANK NAMES OF GAMES OR DEVS (CHANGED TO XXXXXX) #### YOU SHOULD CODE THIS SO YOU DONT HAVE TO DO IT MANUALLY\n## YOU SHOULD ALSO INCLUDE SOME CODE TO REMOVE THE RECORDS WHERE YOU REPLACED BLANK ENTRIES (\"XXXXXX\") BEFORE WRITING TO FILE\n\n## ALSO SHOULD DO A REPLACE \"N/A\" AND \"Free\" WITH ZEROS IN PRICE_DECIMAL COLUMN BEFORE WRITING TO FILE\n\n\nwith open('steamspy_year_2017_games_raw.txt', encoding=\"utf8\") as f:\n read_data = f.read()\n page_content = BeautifulSoup(read_data, \"html.parser\")\n\n\n# PARSE THE HTML TO GET THE GAME NAMES\nlist_of_names=[]\n\n# test_string = ''\n# print(re.findall('(?<=data-order=\")(.*)(?=\">)', test_string))\n\nlist_of_tds = page_content.find_all(\"td\")\n\nfor td in list_of_tds:\n list_of_names.append(re.findall('(?<=data-order=\")(.*)(?=\">)', str(td)))\n\ngood_names = []\n\nfor name in list_of_names:\n if len(name) > 0:\n good_names.append(name)\n\n#print(len(good_names))\n\n# PARSE THE HTML TO GET THE APP ID\nlist_of_ids = []\n\n# test_string = '/app/434460'\n# print(re.findall(\"(?<=app\\/)(.*)\", test_string))\n\nlist_of_links = page_content.find_all(\"a\", href=True)\n\nfor lnk in list_of_links:\n list_of_ids.append(re.findall(\"(?<=app\\/)(.*)\", str(lnk['href'])))\n\n# print(list_of_ids)\n\ngood_ids =[]\n\nfor i in list_of_ids:\n if len(i) > 0:\n good_ids.append(i)\n\n\n# GET RELEASE DATES\n\nlist_of_rel_tds = page_content.find_all(\"td\", {\"class\":\"treleasedate\"})\n\nlist_of_dates = []\n\nfor td in list_of_rel_tds:\n list_of_dates.append(re.findall('(?<=data-order=\")(.*)(?=\">)', str(td)))\n\n#GET PRICES \n\nlist_of_price_tds = page_content.find_all(\"td\", {\"class\":\"tprice\"})\n\nlist_of_prices = []\n\nfor td in list_of_price_tds:\n list_of_prices.append(re.findall('(?<=data-order=\")(.*)(?=\">)', str(td)))\n\n\n\n#GET PRICES WITH DECIMALS AND $\n\nlist_of_price_dec_tds = page_content.find_all(\"td\", {\"class\":\"tprice\"})\n\nlist_of_prices_dec = []\n\nfor td in list_of_price_dec_tds:\n \n list_of_prices_dec.append(re.findall('(?<=\">)(.*)(?=<)', str(td)))\n\n# GET user_score_meta_score\n#N/A (N/A/62%)\n\nlist_of_score_tds = page_content.find_all(\"td\", {\"class\":\"tuserscore\"})\n\nlist_of_scores = []\n\nfor td in list_of_score_tds:\n \n list_of_scores.append(re.findall('(?<=\">)(.*)(?=<)', str(td)))\n\n\n\n# GET OWNERS\n\n# BE CAREFUL B/C FOR SOME REASON THE <td> TAG DISAPPEARS\n\n# test_string = '100,000 .. 200,000'\n# print(re.findall('(?=td data-order=\"\d)(.*)(?=<)',test_string))\n\n#200,000 .. 
500,000\nlist_of_owners = []\n\nlist_of_no_class_tds = page_content.find_all(\"td\",{'class': None})\n\nfor td in list_of_no_class_tds[2::5]:\n list_of_owners.append(re.findall(r'(?<=data-order=\")(.*)(?=\">)', str(td)))\n\n# print(list_of_owners[0:20])\n# print(len(list_of_owners))\n\n\n\n# print(list_of_owners_clean[0:10])\n# print(len(list_of_owners_clean))\n\n# GET PLAYTIME\n\nlist_of_ptimes = []\nlist_of_ptime_tds = page_content.find_all(\"td\",{\"class\": \"tplaytime\"})\n\nfor td in list_of_ptime_tds:\n \n list_of_ptimes.append(re.findall('(?<=\">)(.*)(?=<)', str(td)))\n\n# print(list_of_ptimes[0:10])\n# print(len(list_of_ptimes))\n\n\n# GET DEVELOPER \n\nlist_of_blank_tds = []\nlist_of_tds = page_content.find_all(\"td\",{\"class\": None})\n\nd1=[]\nd2=[]\n\nfor td in list_of_tds[3::5]:\n d1.append(re.findall(r'(?<=data-order=\")(.*)(?=\">)', str(td)))\n\nfor td in list_of_tds[4::5]:\n d2.append(re.findall(r'(?<=data-order=\")(.*)(?=\">)', str(td)))\n\n\n# CHECK LENGTHS \n# print(len(good_names))\n# print(len(good_ids))\n# print(len(list_of_dates))\n# print(len(list_of_prices))\n# print(len(list_of_prices_dec))\n# print(len(list_of_owners)) #######\n# print(len(list_of_scores))\n# print(len(list_of_ptimes))\n# print(len(d1))\n# print(len(d2))\n\n\n\n\n# PREPARE ROWS FOR DF\nall_rows=[]\n\nfor i in range(len(good_ids)):\n current_row = [i, good_names[i][0], good_ids[i][0], list_of_dates[i][0], list_of_prices[i][0], list_of_prices_dec[i][0],\n list_of_scores[i][0], list_of_owners[i][0], list_of_ptimes[i][0], d1[i][0], d1[i][0], d2[i][0], d2[i][0]]\n all_rows.append(current_row)\n\n# CREATE DF AND WRITE TO FILE\n# colnames: app_num,app_name,app_id,release_date,price,price_decimal,user_score_meta_score,owners,playtime_median,developer,developer2,publisher,publisher2\n\ndf = pd.DataFrame(all_rows, columns = ['app_num', 'app_name', 'app_id','release_date','price','price_decimal',\n 'user_score_meta_score','owners','playtime_median','developer','developer2', 'publisher', 'publisher2'])\ndf.to_csv(\"steamspy_2017_games_clean.csv\", index=False)\n\n\n\n################# UNUSED CODE\n\n# THIS WAS FOR THE NON_MEMBERS PAGE WHERE OWNERS DATA IS GIVEN IN A RANGE\n# for td in list_of_no_class_tds:\n# list_of_owners.append(re.findall('(?<=data-order=\"\\d)(.*)(?=td)', str(td)))\n\n# print(list_of_owners[0:20])\n\n# list_of_owners_clean = []\n\n# for x in list_of_owners:\n# if len(x) >0:\n# list_of_owners_clean.append(x)\n\n# print(list_of_owners_clean[0:20])\n\n# list_of_owners_clean2 = []\n\n# for x in list_of_owners_clean:\n# # print(x)\n# if \"..\" in x[0] and \",\" in x[0]:\n# list_of_owners_clean2.append(x)\n\n# print(list_of_owners_clean2[0:20])","sub_path":"step0_get_game_ids_2017.py","file_name":"step0_get_game_ids_2017.py","file_ext":"py","file_size_in_byte":5494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"23605615","text":"from pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\nfrom pandas.testing import assert_series_equal\n\nfrom estimagic.optimization.process_constraints import process_constraints\nfrom estimagic.optimization.reparametrize import _increasing_to_internal\nfrom estimagic.optimization.reparametrize import _probability_to_internal\nfrom estimagic.optimization.reparametrize import _sum_to_internal\nfrom estimagic.optimization.reparametrize import reparametrize_from_internal\nfrom estimagic.optimization.reparametrize import 
reparametrize_to_internal\n\nfix_path = Path(__file__).resolve().parent / \"fixtures\" / \"reparametrize_fixtures.csv\"\nparams_fixture = pd.read_csv(fix_path)\nparams_fixture.set_index([\"category\", \"subcategory\", \"name\"], inplace=True)\nfor col in [\"lower\", \"internal_lower\"]:\n params_fixture[col].fillna(-np.inf, inplace=True)\nfor col in [\"upper\", \"internal_upper\"]:\n params_fixture[col].fillna(np.inf, inplace=True)\n\nexternal = []\ninternal = []\n\nfor i in range(3):\n ext = params_fixture.copy(deep=True)\n ext.rename(columns={\"value{}\".format(i): \"value\"}, inplace=True)\n external.append(ext)\n\n int_ = params_fixture.copy(deep=True)\n int_.rename(columns={\"internal_value{}\".format(i): \"value\"}, inplace=True)\n int_.dropna(subset=[\"value\"], inplace=True)\n int_.drop(columns=[\"lower\", \"upper\"], inplace=True)\n int_.rename(\n columns={\"internal_lower\": \"lower\", \"internal_upper\": \"upper\"}, inplace=True\n )\n internal.append(int_)\n\n\ndef constraints(params):\n constr = [\n {\"loc\": (\"c\", \"c2\"), \"type\": \"probability\"},\n {\n \"loc\": [(\"a\", \"a\", \"0\"), (\"a\", \"a\", \"2\"), (\"a\", \"a\", \"4\")],\n \"type\": \"fixed\",\n \"value\": [0.1, 0.3, 0.5],\n },\n {\"loc\": (\"e\", \"off\"), \"type\": \"fixed\", \"value\": 0},\n {\"loc\": \"d\", \"type\": \"increasing\"},\n {\"loc\": \"e\", \"type\": \"covariance\"},\n {\"loc\": \"f\", \"type\": \"covariance\"},\n {\"loc\": \"g\", \"type\": \"sum\", \"value\": 5},\n {\"loc\": \"h\", \"type\": \"equality\"},\n {\"loc\": \"i\", \"type\": \"equality\"},\n {\"query\": 'subcategory == \"j1\" | subcategory == \"i1\"', \"type\": \"equality\"},\n {\"loc\": \"k\", \"type\": \"sdcorr\"},\n {\"loc\": \"l\", \"type\": \"covariance\"},\n {\"locs\": [\"f\", \"l\"], \"type\": \"pairwise_equality\"},\n {\"loc\": \"m\", \"type\": \"covariance\"},\n {\"loc\": (\"m\", \"diag\", \"a\"), \"type\": \"fixed\", \"value\": 4.0},\n ]\n constr = process_constraints(constr, params)\n return constr\n\n\ninternal_categories = list(\"abcdefghikm\")\nexternal_categories = internal_categories + [\"j1\", \"j2\", \"l\"]\n\nto_test = []\nfor ext, int_ in zip(external, internal):\n for category in internal_categories:\n to_test.append((ext, int_, category))\n\n\n@pytest.mark.parametrize(\"params, expected_internal, category\", to_test)\ndef test_reparametrize_to_internal(params, expected_internal, category):\n constr = constraints(params)\n cols = [\"value\", \"lower\", \"upper\"]\n\n calculated = reparametrize_to_internal(params, constr, None)\n assert_frame_equal(\n calculated.loc[category, cols], expected_internal.loc[category, cols]\n )\n\n\nto_test = []\nfor int_, ext in zip(internal, external):\n for category in external_categories:\n to_test.append((int_, ext, category))\n\n\n@pytest.mark.parametrize(\"internal, expected_external, category\", to_test)\ndef test_reparametrize_from_internal(internal, expected_external, category):\n constr = constraints(expected_external)\n\n calculated = reparametrize_from_internal(internal, constr, expected_external, None)[\n \"value\"\n ]\n assert_series_equal(calculated[category], expected_external.loc[category, \"value\"])\n\n\ndef test_invalid_sum():\n df = pd.DataFrame(data=[[1], [2], [2.9]], columns=[\"value\"])\n df[\"lower\"] = np.nan\n df[\"upper\"] = np.nan\n df[\"_fixed\"] = False\n with pytest.raises(AssertionError):\n _sum_to_internal(df, 6)\n\n\ndef test_invalid_probability():\n df = pd.DataFrame(data=[[0.1], [0.2], [0.72]], columns=[\"value\"])\n df[\"lower\"] = np.nan\n 
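# the values 0.1 + 0.2 + 0.72 sum to 1.02 (> 1), so the probability\n # constraint check below is expected to raise an AssertionError\n 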
df[\"upper\"] = np.nan\n df[\"_fixed\"] = False\n with pytest.raises(AssertionError):\n _probability_to_internal(df)\n\n\ndef test_invalid_bound_for_increasing():\n df = pd.DataFrame(data=[[1], [2], [2.9]], columns=[\"value\"])\n df[\"lower\"] = [-np.inf, 1, -np.inf]\n df[\"upper\"] = np.nan\n df[\"_fixed\"] = False\n with pytest.warns(UserWarning):\n _increasing_to_internal(df)\n\n\ndef test_only_first_bounded_incresing():\n df = pd.DataFrame(data=[[1], [2], [2.9]], columns=[\"value\"])\n df[\"lower\"] = [1, -np.inf, -np.inf]\n df[\"upper\"] = np.nan\n df[\"_fixed\"] = False\n calculated = _increasing_to_internal(df)\n expected = calculated.copy(deep=True)\n expected[\"lower\"] = [1.0, 0, 0]\n expected[\"value\"] = [1, 1, 0.9]\n pd.testing.assert_frame_equal(\n calculated[[\"value\", \"lower\"]], expected[[\"value\", \"lower\"]]\n )\n\n\ndef test_all_bounds_same_increasing():\n df = pd.DataFrame(data=[[1], [2], [2.9]], columns=[\"value\"])\n df[\"lower\"] = [1.0, 1, 1]\n df[\"upper\"] = np.nan\n df[\"_fixed\"] = False\n calculated = _increasing_to_internal(df)\n expected = calculated.copy(deep=True)\n expected[\"lower\"] = [1.0, 0, 0]\n expected[\"value\"] = [1, 1, 0.9]\n pd.testing.assert_frame_equal(\n calculated[[\"value\", \"lower\"]], expected[[\"value\", \"lower\"]]\n )\n","sub_path":"estimagic/tests/optimization/test_reparametrize.py","file_name":"test_reparametrize.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"377591184","text":"from flask import Flask\nimport time\n\napp = Flask(__name__)\n@app.route('/')\ndef index():\n time.sleep(3)\n return 'Hello'\nif __name__ == '__main__':\n app.run(threaded=True)\n # run() 方法加了一个参数 threaded,这表明 Flask 启动了多线程模式","sub_path":"Learn-python/python编程/13-asyncio并发编程/flask_test.py","file_name":"flask_test.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"130310890","text":"import socket\n\ntarget_host = \"127.0.0.1\"\ntarget_port = 80\n\n# create the socket\n# AF_INET means we're using IPv4\n# SOCK_DGRAM means we're using an UDP client\nclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# send the data to the address\nclient.sendto(bytes(\"AAABBBCCC\", \"utf-8\"), (target_host, target_port))\n\n# print the received data\ndata, addre = client.recvfrom(4096)\nprint(data)","sub_path":"_black_hat_python/_chapter_1/udp_client.py","file_name":"udp_client.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"531367999","text":"#coding:utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport ast\nimport numpy as np\nimport os\nimport time\n\nimport paddle\nimport paddle.fluid as fluid\nimport paddlehub as hub\n\n# yapf: disable\nparser = argparse.ArgumentParser(__doc__)\nparser.add_argument(\"--checkpoint_dir\", type=str, default=None, help=\"Directory to model checkpoint\")\nparser.add_argument(\"--use_gpu\", type=ast.literal_eval, default=True, help=\"Whether use GPU for finetuning, input should be True or False\")\nparser.add_argument(\"--batch_size\", type=int, default=1, help=\"Total examples' number in batch when the program predicts.\")\nargs = parser.parse_args()\n# yapf: enable.\n\nif __name__ == '__main__':\n # loading Paddlehub senta pretrained model\n 
module = hub.Module(name=\"senta_bilstm\")\n inputs, outputs, program = module.context(trainable=True)\n\n # Download dataset and use LACClassifyReader to read dataset\n dataset = hub.dataset.ChnSentiCorp()\n reader = hub.reader.LACClassifyReader(\n dataset=dataset, vocab_path=module.get_vocab_path())\n\n sent_feature = outputs[\"sentence_feature\"]\n\n # Setup feed list for data feeder\n # Must feed all the tensors that senta's module needs\n feed_list = [inputs[\"words\"].name]\n\n # Setup running config for PaddleHub Finetune API\n config = hub.RunConfig(\n use_data_parallel=False,\n use_cuda=args.use_gpu,\n batch_size=args.batch_size,\n checkpoint_dir=args.checkpoint_dir,\n strategy=hub.AdamWeightDecayStrategy())\n\n # Define a classification finetune task by PaddleHub's API\n cls_task = hub.TextClassifierTask(\n data_reader=reader,\n feature=sent_feature,\n feed_list=feed_list,\n num_classes=dataset.num_labels,\n config=config)\n\n # Data to be predicted\n data = [\"这家餐厅很好吃\", \"这部电影真的很差劲\"]\n\n print(cls_task.predict(data=data, return_result=True))\n","sub_path":"program/demo/senta/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"28167726","text":"import copy\nimport numpy as np\n\nfrom ATARI.MCTS import get_module_logger\n\nmylogger = get_module_logger(__name__)\n\n\n\nclass GameState(object):\n \"\"\"\n Represents a Tic Tac Toe game.\n The state consists of a 3x3 game board with each position occupied by:\n ' ' (empty square)\n 'X' (X mark)\n 'O' (O mark)\n as well as the following terminal states:\n X won\n O won\n Tie\n \"\"\"\n def __init__(self):\n # Begin with an empty game board\n\n self.board = np.array([\" \"] * 10)\n self.NUM_MOVES = 9\n\n # GameState needs to be hashable so that it can be used as a unique graph\n # node in NetworkX\n def __key(self):\n return self.__str__()\n\n def __eq__(x, y):\n return x.__key() == y.__key()\n\n def __hash__(self):\n return hash(self.__key())\n\n def __str__(self):\n \"\"\"\n Returns a string that is a visual representation of the game\n state. Can be used to print the current game state of a game:\n print(game.state)\n will print a game board:\n ~X~\n O~~\n ~~X\n \"\"\"\n output = ''\n for r in range(10)[1:]:\n contents = self.board[r]\n if r % 3 == 0:\n output += '{}\\n'.format(contents)\n else:\n output += '{}'.format(contents)\n\n output = output.replace(' ', '~')\n\n return output\n\n def turn(self):\n \"\"\"\n Returns the player whose turn it is: 'X' or 'O'\n \"\"\"\n num_X = 0\n num_O = 0\n for r in range(10)[1:]:\n if self.board[r] == 'X':\n num_X += 1\n elif self.board[r] == 'O':\n num_O += 1\n\n if num_X != num_O:\n return 'X'\n else:\n return 'O'\n\n def move(self, r):\n \"\"\"\n Places a marker at board position r. The marker placed is\n determined by whose turn it is, either 'X' or 'O'.\n \"\"\"\n if self.board[r] != \" \":\n print(\"position %d is already occupied.. please use another.\" % r)\n return\n\n #print('Move: {} moves to ({})'.format(self.turn(), r))\n self.board[r] = self.turn()\n #print('{}'.format(self))\n\n def legal_moves(self):\n \"\"\"\n Returns a list of the legal actions from the current state,\n where an action is the placement of a marker 'X' or 'O' on a board\n position, represented as an integer board index (1-9), for example:\n [2, 5]\n would indicate that positions 2 and 5 are available to\n place a marker on. 
If the game is in a terminal state, returns an\n empty list.\n \"\"\"\n # Check if terminal state\n if self.winner() is not None:\n return []\n\n possible_moves = []\n for row in range(10)[1:]:\n if self.board[row] == ' ':\n possible_moves.append(row)\n\n return possible_moves\n\n\n def transition_function(self, r):\n \"\"\"\n Applies the specified action to the current state and returns the new\n state that would result. Can be used to simulate the effect of\n different actions. The action is applied to the player whose turn\n it currently is.\n :return: The resulting new state that would occur\n \"\"\"\n # Verify that the specified action is legal\n assert (r) in self.legal_moves()\n\n # First, make a copy of the current state\n new_state = copy.deepcopy(self)\n\n # Then, apply the action to produce the new state\n new_state.move(r)\n\n return new_state\n\n def winner(self):\n \"\"\"\n Checks if the game state is a terminal state.\n :return: If it is not, returns None; if it is, returns 'X' or 'O'\n indicating who is the winner; if it is a tie, returns 'Tie'\n \"\"\"\n for player in ['X', 'O']:\n # Check for winning vertical lines\n for cols in [ [1,4,7], [2,5,8], [3,6,9] ]:\n accum = 0\n for col in cols:\n if self.board[col] == player:\n accum += 1\n if accum == 3:\n return player\n\n # Check for winning horizontal lines\n for rows in [ [1,2,3], [4,5,6], [7,8,9] ] :\n accum = 0\n for row in rows:\n if self.board[row] == player:\n accum += 1\n if accum == 3:\n return player\n\n # Check for winning diagonal lines (there are 2 possibilities)\n option1 = [self.board[1],\n self.board[5],\n self.board[9]]\n option2 = [self.board[3],\n self.board[5],\n self.board[7]]\n if all(marker == player for marker in option1) \\\n or all(marker == player for marker in option2):\n return player\n\n # Check for ties, defined as a board arrangement in which there are no\n # open board positions left and there are no winners (note that the\n # tie is not being detected ahead of time, as could potentially be\n # done)\n accum = 0\n for row in range(10)[1:]:\n if self.board[row] == ' ':\n accum += 1\n if accum == 0:\n return 'Tie'\n\n return None\n\n\ndef main():\n\n gs = GameState()\n\n for i in range(10)[1:]:\n gs.move(i)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"MCTS/GameState3.py","file_name":"GameState3.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"47260768","text":"import DataStream\nfrom pymongo import MongoClient\n\n#script to save a bunch of GDAX data base\n\nclient = MongoClient()\ndb = client.profitdb\n\ndata = DataStream.getHistoricalData(3*24*60*60,'ETH-USD')\n\nresponse = db.exch_data.insert_many(data)\nprint('Posts: {0}'.format(response.inserted_ids))\n\t\n\n","sub_path":"SaveHistoricalData.py","file_name":"SaveHistoricalData.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"421421741","text":"from sklearn.linear_model import LogisticRegression\nimport numpy as np\nimport pandas as pd\nfrom nltk.tokenize import word_tokenize\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing import text\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\n\ngossipcop_data = pd.read_csv('.\\data\\gossipcop_content_no_ignore.tsv', sep='\\t')\npolitifact_data = 
pd.read_csv('.\\data\\politifact_content_no_ignore.tsv',sep = '\\t')\n\n\ndef tfidf(data_set,data_set1):\n sequence = []\n for content in data_set['content']:\n sequence.append(content)\n\n for content in data_set1['content']:\n sequence.append(content)\n\n vectorizer = TfidfVectorizer()\n X = vectorizer.fit_transform(sequence)\n feature_name = vectorizer.get_feature_names()\n print(X)\n print(X.shape)\n\n return X, feature_name\n\n\ndef train(data,name):\n train_feature = data\n print('feature',train_feature.shape)\n\n train_labels = np.concatenate((np.zeros(5816), np.ones(415)))\n print('lables',len(train_labels))\n\n x_train, x_test, y_train, y_test = train_test_split(train_feature, train_labels, test_size=0.33, random_state=10)\n\n classifier = LogisticRegression(C=0.1, solver='sag')\n classifier.fit(x_train,y_train)\n coefs = classifier.coef_[0]\n print('---coef----',classifier.coef_)\n top_three = np.argpartition(coefs, -3)[:500]\n feature_name = []\n for i in top_three:\n feature_name.append(name[i])\n print('name',feature_name)\n predicted = classifier.predict(x_test)\n print('accuracy_score: %0.5f' % (metrics.accuracy_score(y_test, predicted)))\n\nif __name__ == \"__main__\":\n data_set, feature_name = tfidf(gossipcop_data,politifact_data)\n print('----',data_set)\n train(data_set,feature_name)","sub_path":"logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"493447813","text":"\"\"\"\ntabletool.py\n\nA bunch of functions that help handle stellar data stored as\nastropy table.\n\"\"\"\n\nimport numpy as np\nfrom astropy.table import Table\nfrom astropy.units.core import UnitConversionError\nimport string\n\nfrom . import coordinate\nfrom . import transform\n\ndef load(filename, **kwargs):\n \"\"\"Cause I'm too lazy to import Astropy.table.Table in terminal\"\"\"\n return Table.read(filename, **kwargs)\n\ndef read(filename, **kwargs):\n \"\"\"Cause I'm too lazy to import Astropy.table.Table in terminal\"\"\"\n return load(filename, **kwargs)\n\n\ndef get_historical_cart_colnames():\n \"\"\"\n Colnames look like X, Y, Z...\n dX, dY, dZ\n c_XY, c_CU\n\n (as opposed to modern colnames:\n X, Y, Z...\n X_error, Y_error ...\n X_Y_corr,\n \"\"\"\n main_colnames = 'XYZUVW'\n error_colnames = ['d'+el for el in main_colnames]\n corr_colnames = []\n for i, colname1 in enumerate(main_colnames):\n for colname2 in main_colnames[i+1:]:\n corr_colnames.append('c_{}{}'.format(colname1, colname2))\n return main_colnames, error_colnames, corr_colnames\n\n\ndef get_colnames(main_colnames=None, error_colnames=None, corr_colnames=None,\n cartesian=True):\n \"\"\"\n Utility function for generating standard column names\n\n Parameters\n ----------\n main_colnames: [6] str array_like {None}\n The column names of the measurements. If left as None then\n if `cartesian` is true:\n ['X', 'Y', 'Z', 'U', 'V', 'W']\n if `cartesian` is false:\n ['ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity']\n error_colnames: [6] str array_like {None}\n The column names of the measurements. If left as None then\n we try to infer the names by appending '_error' to the main\n column names.\n corr_colnames: [15] str array_like {None}\n The column names of the correlations between the errors of\n each measurement pair. 
If left as None we try to infer the\n names by pairing each measurement and appending '_corr', e.g.:\n 'X_Y_corr'.\n\n Notes\n -----\n If all column names are provided as argument, this function does\n nothing.\n\n The default format for column names for errors and correlations is,\n e.g.:\n X_error, Y_error, ...\n X_Y_corr, X_Z_corr, X_U_corr, X_V_corr, X_W_corr, Y_Z_corr, ...\n The correlations are listed in the same way one would read the upper\n triangle of the correlation matrix, where the rows (and columns) of\n the matrix are in the same order as `main_colnames`.\n \"\"\"\n if main_colnames is None:\n if cartesian:\n # main_colnames = [el for el in 'XYZUVW']\n main_colnames = ['X', 'Y', 'Z', 'U', 'V', 'W']\n else: # provide astrometric column names\n main_colnames = [\n 'ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity',\n ]\n if error_colnames is None:\n error_colnames = [el+'_error' for el in main_colnames]\n if corr_colnames is None:\n corr_colnames = []\n for i, colname1 in enumerate(main_colnames):\n for colname2 in main_colnames[i + 1:]:\n corr_colnames.append('{}_{}_corr'.format(\n colname1, colname2\n ))\n return main_colnames, error_colnames, corr_colnames\n\n\ndef build_data_dict_from_table(table, main_colnames=None, error_colnames=None,\n corr_colnames=None, cartesian=True,\n historical=False, only_means=False,\n get_background_overlaps=True,\n background_colname=None,\n return_table_ixs=False):\n \"\"\"\n Use data in table columns to construct arrays of means and covariance\n matrices.\n\n Parameters\n ----------\n table: astropy table -or- string\n The table (or path to table) which holds the required data\n main_colnames: [6] string array_like\n Set of column names of the main measurements\n e.g. ['ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity']\n would be the input for the default format of Gaia data\n error_colnames: [6] string array_like {None}\n Set of column names for the errors. If left as None will be\n generated by appending '_error' to each of the main_colnames\n corr_colnames: [15] string array_like {None}\n Set of column names for the pairwise correlations between each\n of the six main measurements. If left as None will be generated\n by joining each pair of main_colnames with an underscore then\n appending '_corr'.\n It is assumed that the correlation column names are given in\n a certain order based on input order of main_colnames.\n e.g. ['ra_dec_corr', 'ra_parallax_corr', ... 'ra_radial_velocity_corr',\n 'dec_parallax_corr', ... 'dec_radial_velocity_corr',\n 'parallax_pmra_corr' ... etc]\n cartesian: bool {True}\n Set to false if trying to build astrometric data\n historical: bool {True}\n Set to True if data set is from historical uses of chronostar. This\n function will then look for different column names by default\n only_means: bool {True}\n Set to True if only after the means of the data. This will save\n time by not building covariance matrices\n get_background_overlaps: bool {True}\n Set to True if after background overlaps too\n background_colname: str {None}\n Set which column name to use for background overlaps. If left as\n None, uses 'background_log_overlap' as default.\n return_table_ixs: boolean {False}\n If set, returns a mapping taking the indices of elements in dictionary\n to rows from original table. This is useful when table rows have\n been skipped due to missing data.\n Convert data to row indices for table assignment e.g. 
recording of\n membership to `comp_A` thusly:\n >>> my_table['comp_A'][table_ixs] = final_memb[:,0]\n\n Or to extract gaia ids of comp A members:\n >>> my_table['gaia_dr2'][table_ixs][np.where(final_memb[:,0]>0.5)]\n\n where `final_memb` is a [nstars, ncomps] array recording membership\n probabilities.\n\n Returns\n -------\n means: [n,6] float array_like\n Array of the mean measurements\n covs: [n,6,6] float array_like\n Array of the covariance matrix for each of the `n` measured objects\n \n NICH_HONS;\n age_probs: [n,64] float array_like\n\tArray representing the age pdf generated by honpy_one_setup {TODO rename}\n\t on an axis of log(years) 5->11.4\n Comment by Marusa: it is actually a dictionary that is returned.\n \"\"\"\n # Tidy up input\n if isinstance(table, str):\n table = Table.read(table)\n if historical:\n main_colnames, error_colnames, corr_colnames =\\\n get_historical_cart_colnames()\n else:\n main_colnames, error_colnames, corr_colnames = get_colnames(\n main_colnames=main_colnames, error_colnames=error_colnames,\n corr_colnames=corr_colnames, cartesian=cartesian\n )\n\n # Generate means\n if table.masked:\n raise UserWarning('Table is masked! Replace or remove problem columns')\n means = np.vstack([table[col] for col in main_colnames]).T\n if only_means:\n return means\n results_dict = {'means':means}\n\n \n #TODO age parameter from .pars file\n age_parameter=True\n if age_parameter:\n from . import honpy_one_setup as hp\n gmag = table['mag_g']\n bprp = table['bprp_col']\n #initializing with the pdf of the first star\n pdf = hp.g_kernal_den(bprp[0],gmag[0])\n pdfs= pdf\n for i in range(1,len(gmag)):\n pdf = hp.g_kernal_den(bprp[i],gmag[i])\n pdfs= np.vstack([pdfs,pdf])\n results_dict['age_probs']=pdfs\n #except:\n\t # print('NICH_HONS; failure at tabletool.py line 195-207') \n \n\n # Generate covariance matrices\n nstars = len(table)\n standard_devs = np.vstack([table[col] for col in error_colnames]).T\n\n # Detect mismatch in units and scale standard_devs appropriately\n # If units can't be converted\n for ix, (main_colname, error_colname) in\\\n enumerate(zip(main_colnames, error_colnames)):\n if table[main_colname].unit != table[error_colname].unit:\n try:\n scale_factor =\\\n table[error_colname].unit.to(table[main_colname].unit)\n standard_devs[:,ix] *= scale_factor\n except UnitConversionError:\n print(main_colname, error_colname)\n raise UserWarning('Units are not convertible between '\n 'measurements and errors. Are you sure '\n 'you provided column names in a consistent '\n 'ordering?')\n except AttributeError:\n # Units haven't been provided. Which is allowed but discouraged\n pass\n\n # Initialise an array of 6x6 identity matrices\n covs = np.array(nstars * [np.eye(6)])\n\n # Then turn into correlation matrices by incorporating correlation columns\n indices = np.triu_indices(6,1) # the indices of the upper right\n # triangle, excluding main diagonal\n for ix in range(len(corr_colnames)):\n try:\n fst_ix = indices[0][ix]\n snd_ix = indices[1][ix]\n covs[:, fst_ix, snd_ix] = table[corr_colnames[ix]]\n covs[:, snd_ix, fst_ix] = table[corr_colnames[ix]]\n except KeyError: # Correlations are allowed to be missing\n pass\n\n # Now multiply through the standard deviations along both axes\n # First along each column\n # We use einstein notation here such that 'ijk,ij->ijk' means\n # multiply the 'ijk'th element from covs by the 'ij'th element from\n # standard_devs. 
More thoroughly: for the i'th covariance matrix,\n # and the i'th 6D standard deviation vector, multiply the j'th row\n # by the j'th std\n covs = np.einsum('ijk,ij->ijk', covs, standard_devs) # the rows\n covs = np.einsum('ijk,ik->ijk', covs, standard_devs) # the columns\n results_dict['covs'] = covs\n\n # Checks for any nans in the means or covariances\n bad_mean_mask = np.any(np.isnan(means), axis=1)\n bad_cov_mask = np.any(np.isnan(covs), axis=(1,2))\n\n good_row_mask = np.logical_not(np.logical_or(bad_mean_mask, bad_cov_mask))\n \n # Notify what stars have been excluded!\n nexcluded = np.sum(np.logical_not(good_row_mask))\n if nexcluded>0:\n print('%d stars MASKED OUT!'%nexcluded)\n print(np.where(np.logical_not(good_row_mask)))\n print(table[np.logical_not(good_row_mask)])\n #TODO age parameter from .pars file\n if age_parameter:\n results_dict = {\n 'means':means[good_row_mask],\n 'covs':covs[good_row_mask],\n #NICH_HONS;\n 'age_probs':pdfs[good_row_mask]\n }\n else:\n results_dict = {\n 'means':means[good_row_mask],\n 'covs':covs[good_row_mask],\n }\n\n # Insert background overlaps\n if get_background_overlaps:\n if background_colname is None:\n background_colname = 'background_log_overlap'\n if background_colname in table.colnames:\n results_dict['bg_lnols'] = np.array(table[background_colname])[good_row_mask]\n\n if return_table_ixs:\n return results_dict, np.where(good_row_mask)\n else:\n return results_dict\n\ndef construct_an_astropy_table_with_gaia_ids_and_membership_probabilities(table, \n memb_probs, comps, output_filename, get_background_overlaps=True, stellar_id_colname=None, overwrite_fits=False):\n \"\"\"\n MZ 2020 - 04 - 16\n Create an astropy table with Gaia DR2 ids and membership probabilities\n for all components, including background.\n \n This should NOT append to the original table because the number of\n components is increasing each iteration.\n\n Parameters\n ----------\n table: astropy table -or- string\n The table (or path to table) which holds the required data\n get_background_overlaps: bool {True}\n Set to True if after background overlaps too\n \n Returns\n -------\n None\n \n \"\"\"\n\n # Read table\n if isinstance(table, str):\n table = Table.read(table)\n ids = table[stellar_id_colname]\n tab = Table((ids,), names=(stellar_id_colname,))\n\n # compnames\n # TODO: This should be generated once in the component class!!\n ncomps = len(comps)\n if ncomps>26:\n print('*** number of components>26, cannot name them properly with letters.')\n abc=string.ascii_uppercase\n compnames = [abc[i] for i in range(ncomps)]\n\n # Membership\n for i, c in enumerate(compnames):\n tab['membership%s'%c.replace('comp', '')] = memb_probs[:,i]\n\n #~ todo='background_log_overlap'\n if get_background_overlaps:\n tab['membership_bg'] = memb_probs[:,-1]\n\n print(tab)\n tab.write(output_filename, format='fits', overwrite=overwrite_fits)\n\n # TODO: add the number of components to the file, and a timestamp or random number so nothing gets overwritten.\n\n\ndef append_cart_cols_to_table(table, main_colnames=None, error_colnames=None,\n corr_colnames=None):\n \"\"\"\n Insert empty placeholder columns for cartesian values\n\n Parameters\n ----------\n table: astropy.table.Table object\n Modifies table in place by appending empty columns for cartesian\n values. 
Default values in column are `np.nan`.\n main_colnames: str {None}\n See `get_colnames` for how the default column names are generated.\n\n\n Returns\n -------\n None\n \"\"\"\n # Tidy input\n if isinstance(table, str):\n table = Table.read(table)\n main_colnames, error_colnames, corr_colnames =\\\n get_colnames(main_colnames, error_colnames, corr_colnames,\n cartesian=True)\n\n # Set up order of column names in table\n cart_colnames = []\n for measure, error in zip(main_colnames, error_colnames):\n cart_colnames.append(measure)\n cart_colnames.append(error)\n for corr in corr_colnames:\n cart_colnames.append(corr)\n\n # Insert blank columns (default value 'np.nan') with appropriate units\n nrows = len(table)\n empty_col = np.array(nrows * [np.nan])\n units = 6*['pc'] + 6*['km/s'] + 15*[None]\n for col_name, unit in zip(cart_colnames, units):\n table[col_name] = empty_col\n table[col_name].unit = unit\n\n\ndef convert_astro2cart(astr_mean, astr_cov):\n \"\"\"\n Convert astrometry data (mean and covariance) into cartesian\n coordinates, centred on the local standard of rest (Schoenrich 2010).\n\n Parameters\n ----------\n astr_mean: [6] float array_like\n The central estimate of a star's astrometry values. Provided in\n the order:\n ra [deg]\n dec [deg]\n parallax [mas]\n pmra*cos(dec) [mas/yr]\n pmdec [mas/yr]\n radial velocity [km/s]\n astr_cov: [6,6] float array_like\n The covariance matrix of the measurements with columns (and rows)\n in same order as `astr_mean`.\n\n Returns\n -------\n xyzuvw_mean: [6] float array_like\n The cartesian mean (XYZUVW)\n xyzuvw_cov: [6,6] float array_like\n The cartesian covariance matrix\n \"\"\"\n xyzuvw_mean = coordinate.convert_astrometry2lsrxyzuvw(astr_mean)\n xyzuvw_cov = transform.transform_covmatrix(\n cov=astr_cov, trans_func=coordinate.convert_astrometry2lsrxyzuvw,\n loc=astr_mean\n )\n\n return xyzuvw_mean, xyzuvw_cov\n\n\ndef insert_data_into_row(row, mean, cov, main_colnames=None, error_colnames=None,\n corr_colnames=None, cartesian=True):\n \"\"\"\n Insert data, error and correlations into a single row\n\n Given the mean and covariance matrix, we derive the standard\n deviations in each dimension as well as each pair-wise correlation,\n which are then inserted into the row (as per the provided column names).\n\n The columns must already exist!\n\n Parameters\n ----------\n row: astropy table row\n The row in which the data will be inserted, with required columns\n already existing\n mean: [6] float array\n The mean of data\n cov: [6,6] float array\n The covariance matrix of data\n \"\"\"\n\n main_colnames, error_colnames, corr_colnames = get_colnames(\n main_colnames, error_colnames, corr_colnames, cartesian=cartesian\n )\n # Insert mean data\n for ix, main_colname in enumerate(main_colnames):\n row[main_colname] = mean[ix]\n\n # Insert errors\n standard_devs = np.sqrt(np.diagonal(cov))\n for ix, error_colname in enumerate(error_colnames):\n row[error_colname] = standard_devs[ix]\n\n # Build correlation matrix by dividing through by stdevs in both axes\n corr_matrix = cov / standard_devs / standard_devs.reshape(6, 1)\n\n # Insert correlations\n indices = np.triu_indices(6,1) # the indices of the upper right\n # triangle, excluding main diagonal\n for ix in range(len(corr_colnames)):\n try:\n fst_ix = indices[0][ix]\n snd_ix = indices[1][ix]\n row[corr_colnames[ix]] = corr_matrix[fst_ix, snd_ix]\n except KeyError:\n # It's fine if some correlation columns are missing\n pass\n\n\ndef insert_column(table, col_data, col_name, filename=''):\n \"\"\"\n Little helper to insert column data\n\n Parameters\n ----------\n table: 
astropy table\n the table in which the new column will be inserted\n col_data: array_like\n An array of the column data. Must be same length as table\n (we don't check this)\n col_name: str\n The name of the new column\n filename: str {''}\n If not empty, save the new table to file\n\n Returns\n -------\n table: astropy table\n The same table, with the modification.\n \"\"\"\n table[col_name] = col_data\n if filename != '':\n # TODO work out proper way to write tables that is consistent across python\n # Table.write(table, filename, overwrite=True, format='ascii') # TC\n table.write(filename, overwrite=True)\n return table\n\n\ndef convert_table_astro2cart(table, return_table=False, write_table=False,\n astr_main_colnames=None,\n astr_error_colnames=None,\n astr_corr_colnames=None,\n cart_main_colnames=None,\n cart_error_colnames=None,\n cart_corr_colnames=None,\n filename=''):\n \"\"\"\n Use this function to convert astrometry data to cartesian data.\n\n Parameters\n ----------\n table: astropy table (or string)\n The table with astrometry data (and radial velocities), either\n with column names consistent with defaults, or provided as input.\n If column names aren't specified we assume the measurements\n have column names:\n ['ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity']\n With the error column names:\n ['ra_error', 'dec_error', ... ]\n And correlation column names:\n ['ra_dec_corr', 'ra_parallax_corr', 'ra_pmra_corr' ... ,\n 'dec_parallax_corr', 'dec_pmra_corr' ... ,\n 'parallax_pmra_corr', ... ,\n ... ]\n return_table: bool {False}\n Whether to return the converted table\n write_table: bool {False}\n Whether to write the converted table to filename. It is not\n sufficient to simply supply a filename to write as we do not\n want to risk overwriting someone's table (even though we simply\n extend with new columns).\n main_colnames: [6] string array_like\n Set of column names of the main measurements\n e.g. ['ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity']\n would be the input for the default format of Gaia data\n error_colnames: [6] string array_like {None}\n Set of column names for the errors. If left as None will be\n generated by appending '_error' to each of the main_colnames\n corr_colnames: [15] string array_like {None}\n Set of column names for the pairwise correlations between each\n of the six main measurements. If left as None will be generated\n by joining each pair of main_colnames with an underscore then\n appending '_corr'.\n It is assumed that the correlation column names are given in\n a certain order based on input order of main_colnames.\n e.g. ['ra_dec_corr', 'ra_parallax_corr', ... 'ra_radial_velocity_corr',\n 'dec_parallax_corr', ... 'dec_radial_velocity_corr',\n 'parallax_pmra_corr' ... 
etc]\n filename: str {''}\n Save filename for storing the resulting table\n\n Returns\n -------\n res: astropy table\n If `return_table` flag is set, will return the resulting\n astropy table\n \"\"\"\n if isinstance(table, str):\n if filename and not write_table:\n raise UserWarning('Specify how to handle result, I won\'t '\n 'overwrite without explicit permission.')\n filename = table\n table = Table.read(table)\n\n # Get astrometric column names\n astr_main_colnames, astr_error_colnames, astr_corr_colnames =\\\n get_colnames(main_colnames=astr_main_colnames,\n error_colnames=astr_error_colnames,\n corr_colnames=astr_corr_colnames,\n cartesian=False)\n\n data = build_data_dict_from_table(table,\n astr_main_colnames,\n astr_error_colnames,\n astr_corr_colnames)\n\n # Establish what column names are used\n cart_main_colnames, cart_error_colnames, cart_corr_colnames = \\\n get_colnames(cart_main_colnames,\n cart_error_colnames,\n cart_corr_colnames,\n cartesian=True)\n\n # if cartesian columns don't exist, then insert them\n if cart_corr_colnames[0] not in table.keys():\n append_cart_cols_to_table(table,\n cart_main_colnames,\n cart_error_colnames,\n cart_corr_colnames)\n\n # Iteratively transform data to cartesian coordinates, storing as we go\n for row, astr_mean, astr_cov in zip(table, data['means'], data['covs']):\n cart_mean, cart_cov = convert_astro2cart(astr_mean, astr_cov)\n insert_data_into_row(row, cart_mean, cart_cov,\n main_colnames=cart_main_colnames,\n error_colnames=cart_error_colnames,\n corr_colnames=cart_corr_colnames\n )\n\n # Save data\n if filename and write_table:\n table.write(filename, overwrite=True)\n\n if return_table:\n return table\n\n","sub_path":"chronostar/tabletool.py","file_name":"tabletool.py","file_ext":"py","file_size_in_byte":23148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"479557470","text":"from functools import cached_property\n\nimport paramiko\nfrom retrying import retry\n\nfrom shell_tests.configs import HostWithUserConfig\nfrom shell_tests.handlers.abc_remote_file_handler import AbcRemoteFileHandler\nfrom shell_tests.helpers.logger import logger\n\n\nclass ScpError(Exception):\n \"\"\"Base Error.\"\"\"\n\n\nclass ScpFileNotFoundError(ScpError):\n \"\"\"File not found.\"\"\"\n\n def __init__(self, file_name: str):\n self.file_name = file_name\n\n def __str__(self):\n return f\"File not found - {self.file_name}\"\n\n\ndef _retry_on_file_not_found(exception: Exception) -> bool:\n return isinstance(exception, ScpFileNotFoundError)\n\n\nclass SCPHandler(AbcRemoteFileHandler):\n RETRY_STOP_MAX_ATTEMPT_NUM = 10\n RETRY_WAIT_FIXED = 3000\n IS_RETRY_FUNC = _retry_on_file_not_found\n\n def __init__(self, conf: HostWithUserConfig):\n super().__init__(conf)\n self.conf = conf\n\n @cached_property\n def session(self):\n transport = paramiko.Transport(self.conf.netloc)\n logger.info(\"Connecting to SCP\")\n transport.connect(None, self.conf.user, self.conf.password)\n return paramiko.SFTPClient.from_transport(transport)\n\n @retry(\n stop_max_attempt_number=RETRY_STOP_MAX_ATTEMPT_NUM,\n wait_fixed=RETRY_WAIT_FIXED,\n retry_on_exception=IS_RETRY_FUNC,\n )\n def _read_file(self, file_path: str) -> bytes:\n logger.info(f\"Reading file {file_path} from SCP\")\n try:\n resp = self.session.open(file_path)\n data = resp.read()\n except FileNotFoundError:\n raise ScpFileNotFoundError(file_path)\n except Exception as e:\n if \"No such file\" in str(e):\n raise ScpFileNotFoundError(file_path)\n raise e\n 
return data\n\n @retry(\n stop_max_attempt_number=RETRY_STOP_MAX_ATTEMPT_NUM,\n wait_fixed=RETRY_WAIT_FIXED,\n retry_on_exception=IS_RETRY_FUNC,\n )\n def _delete_file(self, file_path: str):\n logger.info(f\"Deleting file {file_path}\")\n try:\n self.session.remove(file_path)\n except FileNotFoundError:\n raise ScpFileNotFoundError(file_path)\n except Exception as e:\n if \"No such file\" in str(e):\n raise ScpFileNotFoundError(file_path)\n raise e\n","sub_path":"shell_tests/handlers/scp_handler.py","file_name":"scp_handler.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"565598103","text":"import os\nimport socket\nimport sys\nimport urllib.parse\n\nimport time\n\nimport select\n\nfrom Shared.LogObject import LogObject\nfrom Shared.Logger import Logger, LogVerbosity\nfrom Shared.Settings import Settings\nfrom Shared.Threading import CustomThread\nfrom Shared.Util import current_time\n\n\nclass StreamListener(LogObject):\n\n wait_for_data = 0.1\n\n mime_mapping = {\n \".mp4\": \"video/mp4\",\n \".avi\": \"video/x-msvideo\",\n \".mkv\": \"video/mp4\",\n \".srt\": \"json\"\n }\n\n def __init__(self, name, port, arg=None):\n super().__init__(arg, name)\n\n self.name = name\n self.torrent = arg\n\n self.port = port\n self.thread = None\n self.chunk_length = Settings.get_int(\"max_chunk_size\")\n self.server = StreamServer(self.name, port, self.handle_request)\n\n self.sockets_writing_data = []\n\n self.running = False\n self.bytes_send = 0\n self.id = 0\n\n def start_listening(self):\n self.thread = CustomThread(self.server.start, \"Listener: \" + self.name)\n self.running = True\n self.thread.start()\n\n def handle_request(self, socket):\n Logger().write(LogVerbosity.Info, self.name + \" new request\")\n\n # Read headers\n total_message = self.read_headers(socket)\n if total_message is None:\n return\n\n header = HttpHeader.from_string(total_message)\n if header.path == \"/torrent\":\n # Handle torrent stream request\n self.handle_torrent_request(socket, header)\n elif header.path.startswith(\"/file\"):\n # Handle file stream request\n self.handle_file_request(socket, header)\n else:\n # Unknown request\n Logger().write(LogVerbosity.Info, self.name + \" streamListener received unknown request: \" + header.path)\n socket.close()\n\n def read_headers(self, socket):\n try:\n total_message = b''\n while not total_message.endswith(b'\\r\\n\\r\\n'):\n rec = socket.recv(1024)\n if len(rec) == 0:\n break\n total_message += rec\n time.sleep(0)\n except (socket.timeout, ConnectionRefusedError, ConnectionAbortedError, ConnectionResetError, OSError):\n socket.close()\n Logger().write(LogVerbosity.Info, self.name + \" error reading http header\")\n return\n\n if not total_message.endswith(b'\\r\\n\\r\\n'):\n socket.close()\n Logger().write(LogVerbosity.Info, self.name + \" invalid http header, closing\")\n return\n return total_message\n\n def handle_file_request(self, socket, header):\n file_path = header.path[6:]\n if sys.platform == \"linux\" or sys.platform == \"linux2\":\n file_path = \"/\" + file_path\n\n Logger().write(LogVerbosity.Debug, self.name + \" file request for \" + file_path)\n\n if not os.path.exists(file_path):\n file_path = urllib.parse.unquote_plus(file_path)\n if not os.path.exists(file_path):\n Logger().write(LogVerbosity.Info, self.name + \" file not found: \" + file_path)\n self.write_header(socket, \"404 Not Found\")\n socket.close()\n return\n\n read_file = ReadFile(file_path)\n 
read_file.open()\n\n if header.range_end == 0 or header.range_end == -1:\n header.range_end = read_file.size - 1\n\n if header.range is None:\n Logger().write(LogVerbosity.Debug, self.name + ' request without range')\n self.write_header_with_content(socket, \"200 OK\", 0, header.range_end, read_file.size, file_path)\n self.write_data(socket, header.range_start, header.range_end - header.range_start + 1, read_file.get_bytes)\n else:\n Logger().write(LogVerbosity.Debug, self.name + ' request with range')\n self.write_header_with_content(socket, \"206 Partial Content\", header.range_start, header.range_end, read_file.size, file_path)\n self.write_data(socket, header.range_start, header.range_end - header.range_start + 1, read_file.get_bytes)\n read_file.close()\n\n def handle_torrent_request(self, socket, header):\n if not self.torrent or not self.running:\n socket.close()\n Logger().write(LogVerbosity.Debug, self.name + \" stopping connection because there is no more torrent\")\n return\n\n if header.range_end == 0 or header.range_end == -1:\n header.range_end = self.torrent.media_file.length - 1\n\n if header.range:\n range_start = header.range_start\n if range_start == self.torrent.media_file.length:\n Logger().write(LogVerbosity.Debug, \"Request for content length 0, cant process\")\n self.write_header(socket, \"416 Requested range not satisfiable\")\n socket.close()\n return\n\n if header.range is None:\n Logger().write(LogVerbosity.Debug, self.name + ' request without range')\n success = self.write_header_with_content(socket, \"200 OK\", 0, header.range_end, self.torrent.media_file.length,\n self.torrent.media_file.path)\n\n if not success:\n return\n\n self.write_data(socket, header.range_start, header.range_end - header.range_start + 1,\n self.torrent.get_data)\n else:\n Logger().write(LogVerbosity.Debug, self.name + ' request with range')\n success = self.write_header_with_content(socket, \"206 Partial Content\", header.range_start, header.range_end,\n self.torrent.media_file.length, self.torrent.media_file.path)\n\n if not success:\n return\n\n self.write_data(socket, header.range_start, header.range_end - header.range_start + 1,\n self.torrent.get_data)\n\n def write_header(self, socket, status):\n response_header = HttpHeader()\n response_header.status_code = status\n\n Logger().write(LogVerbosity.Info, self.name + \" return header: \" + response_header.to_string())\n\n try:\n socket.send(response_header.to_string().encode())\n return True\n except (ConnectionAbortedError, ConnectionResetError, OSError):\n Logger().write(LogVerbosity.Info, \"Connection closed 2 during sending of response header\")\n socket.close()\n return False\n\n def write_header_with_content(self, socket, status, start, end, length, path):\n response_header = HttpHeader()\n Logger().write(LogVerbosity.Debug, self.name + \" stream requested: \" + str(start) + \"-\" + str(end))\n\n response_header.status_code = status\n response_header.content_length = end - start + 1\n response_header.set_range(start, end, length)\n filename, file_extension = os.path.splitext(path.lower())\n if file_extension not in StreamListener.mime_mapping:\n Logger().write(LogVerbosity.Info, self.name + \" unknown video type: \" + str(file_extension) + \", defaulting to mp4\")\n response_header.mime_type = StreamListener.mime_mapping[\".mp4\"]\n else:\n response_header.mime_type = StreamListener.mime_mapping[file_extension]\n\n Logger().write(LogVerbosity.Info, self.name + \" return header: \" + response_header.to_string())\n\n try:\n 
socket.send(response_header.to_string().encode())\n return True\n except (ConnectionAbortedError, ConnectionResetError, OSError):\n Logger().write(LogVerbosity.Info, \"Connection closed 2 during sending of response header\")\n socket.close()\n return False\n\n def write_data(self, socket, requested_byte, length, data_delegate):\n written = 0\n Logger().write(LogVerbosity.Info, self.name + \" write data: \" + str(requested_byte) + \", length \" + str(length))\n id = self.id\n self.id += 1\n data_writer = SocketWritingData(self, id, socket, requested_byte, requested_byte + length, current_time())\n self.sockets_writing_data.append(data_writer)\n if len(self.sockets_writing_data) > 1:\n Logger().write(LogVerbosity.Debug, \"Multiple data writers:\")\n for writer in self.sockets_writing_data:\n Logger().write(LogVerbosity.Debug, \" \" + str(writer))\n\n while written < length:\n part_length = min(length - written, self.chunk_length)\n if not self.running:\n Logger().write(LogVerbosity.Debug, self.name + ' writer ' + str(data_writer.id) + \" canceling retrieving data because we are no longer running 1\")\n data_writer.close()\n self.sockets_writing_data.remove(data_writer)\n return\n\n if data_writer.stop:\n Logger().write(LogVerbosity.Debug, self.name + ' writer ' + str(data_writer.id) + \" canceling because we're seeking and expecting a new request\")\n data_writer.close()\n self.sockets_writing_data.remove(data_writer)\n return\n\n if not self.wait_writable(data_writer, socket):\n Logger().write(LogVerbosity.Debug, self.name + ' writer ' + str(data_writer.id) + \" closed\")\n self.sockets_writing_data.remove(data_writer)\n return\n\n data = data_delegate(requested_byte + written, part_length)\n if not self.running:\n Logger().write(LogVerbosity.Debug, self.name + ' writer ' + str(data_writer.id) + \" canceling retrieved data because we are no longer running 2\")\n data_writer.close()\n self.sockets_writing_data.remove(data_writer)\n return\n\n if data is None:\n time.sleep(self.wait_for_data)\n continue\n\n Logger().write(LogVerbosity.Info, self.name + ' writer ' + str(data_writer.id) + ' data retrieved: ' + str(requested_byte + written) + \" - \" + str(requested_byte + written + part_length))\n send = 0\n try:\n while send < len(data):\n this_send = data[send: send + 50000]\n data_length = len(this_send)\n socket.sendall(this_send)\n written += data_length\n send += data_length\n self.bytes_send += data_length\n data_writer.streamed += data_length\n Logger().write(LogVerbosity.All, self.name + ' writer ' + str(data_writer.id) + \" send \" + str(data_length) + \" bytes\")\n time.sleep(0.005) # give other threads some time\n except (ConnectionAbortedError, ConnectionResetError, OSError) as e:\n Logger().write(LogVerbosity.Info, self.name + \" writer \" + str(data_writer.id) + \" connection closed during sending of data: \" + str(e))\n data_writer.close()\n self.sockets_writing_data.remove(data_writer)\n return\n\n Logger().write(LogVerbosity.Info, \"Completed request: \" + str(data_writer))\n data_writer.close()\n self.sockets_writing_data.remove(data_writer)\n\n def wait_writable(self, writer, socket):\n while True:\n if not self.running:\n return False\n\n if writer.stop:\n Logger().write(LogVerbosity.Debug, self.name + \" canceling because we're seeking and expecting a new request\")\n writer.close()\n return False\n\n # check if socket is still open\n readable, writeable, exceptional = select.select([socket], [socket], [socket], 0)\n if len(readable) == 1:\n read = []\n try:\n read = 
socket.recv(1024)\n except Exception as e:\n Logger().write(LogVerbosity.Debug, \"Request socket closed with exception: \" + str(e))\n\n if len(read) == 0:\n Logger().write(LogVerbosity.Info, self.name + \" socket no longer open 3\")\n writer.close()\n return False\n else:\n Logger().write(LogVerbosity.Info, self.name + \" recv received data?? - \" + str(read.decode(\"utf-8\")))\n\n if len(writeable) == 0:\n # not currently writeable, wait for it to become available again\n time.sleep(0.1)\n continue\n\n return True\n\n def stop(self):\n self.running = False\n for writer in self.sockets_writing_data:\n writer.stop = True\n\n self.torrent = None\n if self.server is not None:\n self.server.close()\n Logger().write(LogVerbosity.Info, self.name + \" stopped\")\n\n\nclass StreamServer:\n\n def __init__(self, name, port, client_thread):\n self.port = port\n self.name = name\n self.soc = None\n self.running = False\n self.client_thread = client_thread\n\n def start(self):\n Logger().write(LogVerbosity.Debug, self.name + \" starting listener on port \" + str(self.port))\n self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.running = True\n\n try:\n self.soc.bind((\"\", self.port))\n Logger().write(LogVerbosity.Info, \"StreamServer \"+self.name+\" listening on port \" + str(self.port))\n except (socket.error, OSError) as e:\n Logger().write(LogVerbosity.Info, \"Couldn't start StreamServer \" + self.name + \": \" + str(e))\n return\n\n self.soc.listen(10)\n\n try:\n while True:\n Logger().write(LogVerbosity.Debug, \"StreamServer \"+self.name+\" listening for incoming connection\")\n conn, addr = self.soc.accept()\n if not self.running:\n break\n ip, port = str(addr[0]), str(addr[1])\n Logger().write(LogVerbosity.Debug, 'New connection from ' + ip + ':' + port)\n thread = CustomThread(self.client_thread, \"Stream request\", [conn])\n thread.start()\n except Exception as e:\n Logger().write_error(e, \"Stream server\")\n\n Logger().write(LogVerbosity.Debug, \"StreamServer \"+self.name+\" closing\")\n self.soc.close()\n\n def close(self):\n self.running = False\n if self.soc is not None:\n try:\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((\"127.0.0.1\", self.port))\n except ConnectionRefusedError:\n pass\n\n\nclass ReadFile:\n\n def __init__(self, path):\n self.path = path\n self.size = os.path.getsize(path)\n self.location = 0\n self.file = None\n\n def open(self):\n self.file = open(self.path, 'rb')\n self.location = 0\n\n def get_bytes(self, start, length):\n if self.location != start:\n Logger().write(LogVerbosity.Info, \"Seeking file to \" + str(start))\n self.file.seek(start)\n data = self.file.read(length)\n self.location = start + len(data)\n return data\n\n def close(self):\n self.file.close()\n\n\nclass HttpHeader:\n\n def __init__(self):\n self.host = None\n self.range = None\n self.range_start = 0\n self.range_end = 0\n self.range_total = 0\n self.path = None\n\n self.content_length = 0\n self.mime_type = None\n self.accept_ranges = None\n self.connection = None\n self.status_code = None\n\n @classmethod\n def from_string(cls, header):\n header = header.decode('utf8')\n Logger().write(LogVerbosity.Info, \"Received header: \" + header)\n result = cls()\n split = header.splitlines(False)\n request = split[0].split(\" \")\n result.path = request[1]\n for head in split:\n keyvalue = head.split(': ')\n if len(keyvalue) != 2:\n continue\n\n if keyvalue[0] == \"Host\":\n result.host = keyvalue[1]\n if 
keyvalue[0] == \"Range\":\n result.range = keyvalue[1]\n type_bytes = result.range.split(\"=\")\n start_end = type_bytes[1].split(\"-\")\n result.range_start = int(start_end[0])\n if len(start_end) > 1 and start_end[1] is not \"\":\n result.range_end = int(start_end[1])\n else:\n result.range_end = -1\n\n if keyvalue[0] == \"Content-Length\":\n result.content_length = keyvalue[1]\n\n return result\n\n def set_range(self, start, end, total):\n self.range = \"bytes \" + str(start) + \"-\" + str(end) + \"/\" + str(total)\n\n def to_string(self):\n result = \"\"\n result += \"HTTP/1.1 \" + self.status_code + \"\\r\\n\"\n if self.mime_type:\n result += \"Content-Type: \" + self.mime_type + \"\\r\\n\"\n if self.content_length:\n result += \"Accept-Ranges: bytes\" + \"\\r\\n\"\n result += \"Content-Length: \" + str(self.content_length) + \"\\r\\n\"\n result += \"Content-Range: \" + self.range + \"\\r\\n\" + \"\\r\\n\"\n return result\n\n\nclass SocketWritingData(LogObject):\n\n @property\n def stream_speed(self):\n return self.streamed / ((current_time() - self.connect_time) / 1000)\n\n def __init__(self, parent, id, socket, range_start, range_end, connect_time):\n super().__init__(parent, \"request \" + str(id))\n self.id = id\n self.socket = socket\n self.range_start = range_start\n self.range_end = range_end\n self.connect_time = connect_time\n self.streamed = 0\n self.stop = False\n\n def close(self):\n self.socket.close()\n self.finish()\n\n def __str__(self):\n return \"Id: \"+str(self.id)+\", Range: \" + str(self.range_start) + \"-\" + str(self.range_end) + \" connected at \" + str(self.connect_time) + \", streamed: \" +str(self.streamed)\n\n","sub_path":"src/MediaPlayer/Torrents/Streaming/StreamListener.py","file_name":"StreamListener.py","file_ext":"py","file_size_in_byte":18111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"63969381","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Mesogens with NP | Equilibriums\n\n# ## Temperature: 9.2 | Cluster Run\n\n# ### Date: 21/01/2019 | System P = 2.8, Expected value of $T_c$ : \n\n# In[ ]:\n\n\nfrom __future__ import division\nimport hoomd\nimport hoomd.md\n\n\n# In[ ]:\n\n\n#-----Define relevant variables\np_max = 2.8;\nt_max = 9.2;\ncopies = 1;\nsteps_run = 1e5;\ninit_file = \"T_CM&NP_\" + str(t_max) + \"_P_\" + str(p_max) + \"_ramp.gsd\"\n\n\n# In[ ]:\n\n\n#-----Define a simulation context\n\nhoomd.context.initialize(\"--mode=gpu\");\n\n\n# In[ ]:\n\n\n#-----Extract the configuration of the system and expand the system\n\nsnap = hoomd.data.gsd_snapshot(init_file, frame = -1);\nsnap.replicate(copies,copies,copies);\nsystem = hoomd.init.read_snapshot(snap);\n\n\n# In[ ]:\n\n\n#-----Define each mesogen in the local reference frame of each center of mass\nrigid = hoomd.md.constrain.rigid();\nrigid.set_param('M', \n types = ['A']*8,\n positions = [(-4,0,0),(-3,0,0),(-2,0,0),(-1,0,0),\n (1,0,0),(2,0,0),(3,0,0),(4,0,0)]);\n\n\n# In[ ]:\n\n\n#-----Declare molecules as rigid bodies\nrigid.create_bodies();\n\n\n# In[ ]:\n\n\n#-----Define the potential energy\nnl = hoomd.md.nlist.tree();\nlj = hoomd.md.pair.lj(r_cut = 3.5, nlist = nl);\nlj.set_params(mode = 'shift')\n\n\n# In[ ]:\n\n\n#------Define the interaction\nlj.pair_coeff.set('NP','NP', epsilon = 1.0, sigma = 5.0);\nlj.pair_coeff.set('M', 'M', epsilon = 1.0, sigma = 1.0);\nlj.pair_coeff.set('A', 'A', epsilon = 1.0, sigma = 1.0);\nlj.pair_coeff.set('M', 'A', epsilon = 1.0, sigma = 1.0);\nlj.pair_coeff.set('NP', 'M', epsilon = 1.0, 
sigma = 3.0);\nlj.pair_coeff.set('NP', 'A', epsilon = 1.0, sigma = 3.0);\n\n\n# In[ ]:\n\n\n#------Select a standard integrator\nhoomd.md.integrate.mode_standard(dt = 0.005);\n\n#-----Define some groups and make their union\n\nnanoparticles = hoomd.group.type(name = 'NPs', type = 'NP');\nmesogens = hoomd.group.rigid_center();\ngroupNP_mes = hoomd.group.union(name = 'NP_Mes', a = nanoparticles, b = mesogens);\n\n\n# In[ ]:\n\n\n#-----Integrate using NPT\n\nnpt = hoomd.md.integrate.npt(group = groupNP_mes, kT = t_max, tau = 10.0, tauP = 10.0, P = p_max);\n\n\n# In[ ]:\n\n\n#-----Save data\n\nlog_file = \"T_\" + str(t_max) + \"_P_\" + str(p_max) + \"_equilibrium.log\"\ngsd_file = \"T_\" + str(t_max) + \"_P_\" + str(p_max) + \"_equilibrium.gsd\"\nmeso_gsd_file = \"T_CM_\" + str(t_max) + \"_P_\" + str(p_max) + \"_equilibrium.gsd\"\n\nlog = hoomd.analyze.log(filename = log_file,\n quantities = ['num_particles', \n 'ndof',\n 'translational_ndof',\n 'rotational_ndof',\n 'potential_energy',\n 'kinetic_energy',\n 'translational_kinetic_energy',\n 'rotational_kinetic_energy',\n 'temperature',\n 'pressure',\n 'volume'],\n period = 1e3,\n overwrite = True);\ngsd = hoomd.dump.gsd(gsd_file, period = 1e3, group = hoomd.group.all(), overwrite = True);\nmeso_gsd = hoomd.dump.gsd(meso_gsd_file, period=1e3, group = mesogens, overwrite = True);\n\n\n# In[ ]:\n\n\n#-----Run the simulation\n\nhoomd.run(steps_run)\n\n\n# In[ ]:\n\n\n#-----Get volume and density information.\nsystem.box.get_volume()\n\n\n# In[ ]:\n\n\nsystem.get_metadata()\n\n","sub_path":"Sigma = 2.5/P_28/Equilibriums/Equilibrium_P_92.py","file_name":"Equilibrium_P_92.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"404719985","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUtility functions for main module\nCreated on 10/9/2019\n@author: Anurag\n\"\"\"\n\n# imports\nimport time\n\n# Local imports\nfrom preprocessing.common_vulnerability.nvd_cve import CVEProcessor\nfrom managers.cve_manager import CVEManager\nfrom drivers.basedriver import BaseDriver\n\n\nclass CVEDriver(BaseDriver):\n \"\"\"\n A class gathering logic for the main function\n \"\"\"\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n super().__init__()\n self.logger.info(\"CVE module started\")\n self.manager = CVEManager()\n self.source_objects = (CVEProcessor(),)\n self.is_initial_run = False\n self.total_cve = self.manager.db.get_total_cve()\n if self.total_cve == 0:\n self.logger.info(\"Initial run detected\")\n self.is_initial_run = True\n\n def run(self, dataset=None) -> None:\n \"\"\"\n Logic to run regularly\n :param dataset: response dataframe\n :return: None\n \"\"\"\n log = self.logger\n try:\n if self.is_initial_run:\n log.info(\"{} records found\".format(len(dataset)))\n log.info(\"Database insert started\")\n log.info(\"Querying database\")\n self.manager.insert_db(dataset)\n log.info(\"Records inserted into database successfully\")\n else:\n uniq = self.manager.get_unique(cleaned_df=dataset)\n log.info(\"Total %s new cve found\" % len(uniq))\n log.info(\"Database insert started\")\n self.manager.insert_db(uniq)\n log.info(\"Records inserted into database successfully\")\n except Exception as err:\n log.exception(err, exc_info=False)\n\n def drive(self) -> None:\n \"\"\"\n Main driver code for running operations with all URLs\n :return: None\n \"\"\"\n st = time.time()\n try:\n self.logger.info(\"Getting updates from sources\")\n for source in self.source_objects:\n 
dataset = source.run()\n if dataset.empty:\n continue\n self.run(dataset=dataset)\n # self.run(dataset=self.nvd.get_all_cve())\n except Exception as err:\n self.logger.exception(err, exc_info=False)\n finally:\n # db connection is not closed here. last driver will take care of that.\n self.logger.info(f\"Total {self.manager.db.get_total_cve()} cve records present.\")\n self.logger.info(\"Finished executing cve module in {} seconds\".format(time.time() - st))\n","sub_path":"drivers/cve_driver.py","file_name":"cve_driver.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"409340843","text":"english_train_path = '../input/cikm_english_train_20180516.txt'\nspanish_train_path = '../input/cikm_spanish_train_20180516.txt'\nunlabel_spanish_train_path = '../input/cikm_unlabel_spanish_train_20180516.txt'\ntest_path = '../input/cikm_test_b_20180730.txt'\n\nen_vec_path = '../input/wiki.en.vec'\nes_vec_path = '../input/wiki.es.vec'\n\nembed_size = 300\nmax_features = 5500\nmax_features = 4300 # this is for only es dataset\nmaxlen = 54\n\nes_stop_list = ['no', 'ha', 'con', 'el', 'la', 'ti', 'o', 'te', 'para', '?',\n 'mi', 'lo', 'hay', 'como', 'esta', 'una', 'otra', 'es', 'mis',\n 'los', 'tengo', 'este', 'estoy', 'me', 'eran', 'qué', 'entre',\n 'cuando', 'en', 'de', '.', 'esto', 'que', '¿']\n\nes_5w1h_list = ['cuándo', 'por qué', 'qué', 'como', 'puedo']\n\nen_stop_list = []\nen_5w1h_list = ['what', 'why', 'how', 'when', 'who', 'which']\n\nen = False\n","sub_path":"2018/lightgbm/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"213468584","text":"#Imports\nimport pandas as pd\n#from sklearn.decomposition import PCA\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\n# Global constants:\nTRAIN_PATH = \"../input/processed_data.csv\"\nTEST_PATH = \"../input/processed_test.csv\"\n\n# Functions definition:\n\n\n# Execution:\nif __name__ == \"__main__\":\n # Data preparation\n training = pd.read_csv(TRAIN_PATH)\n test = pd.read_csv(TEST_PATH)\n\n\n data_train = training.drop(['price'],axis = 1)\n data_submit = test.drop(['id'],axis = 1) \n y = training['price']\n \n # PCA\n\n X_train, X_test, y_train, y_test = train_test_split(data_train,y, test_size=0.1)\n\n #Model Training:\n \n l_reg = LinearRegression()\n l_reg.fit(X_train, y_train)\n\n # Applying trained model to our held-out test split:\n y_test_pred = l_reg.predict(X_test)\n # checking the error\n rmse = mean_squared_error(y_test,y_test_pred) ** 0.5 # square root of MSE gives the RMSE logged below\n print(rmse)\n\n # Applying model to our submission set:\n y_pred = l_reg.predict(data_submit)\n \n\n\n # Result treatment to be submitted:\n submission = pd.DataFrame({\n 'Id':test['id'],\n 'Price': y_pred\n })\n submission.Price = submission.Price.apply(lambda x: ((x**2)**(1/2)))\n\n \n # Generating output file:\n submission.to_csv('../output/submission.csv',index=False)\n\n # Adding metrics to a log for later comparison of models.\n with open('../output/log.txt',\"a+\") as f: \n f.write(\"RMSE: {} | MODEL: LR | COLUMNS: {} \\n\".format(rmse, len(training.columns))) 
+{"seq_id":"306467426","text":"import random\n\n\nclass CryptoException(Exception):\n pass\n\n\nclass Vigenere:\n STANDARD = 1\n FULL = 2\n AUTO_KEY = 3\n RUNNING_KEY = 4\n EXTENDED = 5\n _DEFAULT_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n _DEFAULT_N = 26\n\n # https://stackoverflow.com/questions/43656104/creation-of-nxn-matrix-sudoku-like\n @staticmethod\n def bitcount(n):\n i = 0\n while n:\n i += 1\n n &= n - 1\n return i\n\n def complete(self, rowset, colset, entries):\n random.seed(self._key)\n n_size = self._DEFAULT_N\n if entries == n_size * n_size:\n return True\n i, j = max(\n ((i, j) for i in range(n_size) for j in range(n_size) if self._s_box[i][j] == 0),\n key=lambda item: (\n self.bitcount(rowset[item[0]] | colset[item[1]])\n )\n )\n\n bits = rowset[i] | colset[j]\n p = [n for n in range(1, n_size + 1) if not (bits >> (n - 1)) & 1]\n random.shuffle(p)\n\n for n in p:\n self._s_box[i][j] = n\n rowset[i] |= 1 << (n - 1)\n colset[j] |= 1 << (n - 1)\n if self.complete(rowset, colset, entries + 1):\n return True\n rowset[i] &= ~(1 << (n - 1))\n colset[j] &= ~(1 << (n - 1))\n\n self._s_box[i][j] = 0\n return False\n\n def _load_key(self):\n key = open(self._filename, \"r\").read()\n return ''.join(filter(str.isalpha, key.upper()))\n\n def __init__(self, variant, key=None, filename=None):\n if (variant < 1) and (variant > 5):\n raise CryptoException(\"Invalid Vigenere variant\")\n self._type = variant\n\n if self._type == Vigenere.RUNNING_KEY:\n if filename is None:\n raise CryptoException(\"Need filename parameter\")\n self._filename = filename\n\n else:\n if key is None:\n raise CryptoException(\"Need key parameter\")\n\n if self._type == Vigenere.EXTENDED:\n if isinstance(key, bytes):\n self._key = key\n else:\n raise CryptoException(\"Invalid key type\")\n else:\n self._key = ''.join(filter(str.isalpha, key.upper()))\n\n if self._type == Vigenere.FULL:\n self._s_box = [[0] * self._DEFAULT_N for _ in range(self._DEFAULT_N)]\n assert self.complete([0] * self._DEFAULT_N, [0] * self._DEFAULT_N, 0)\n\n def _encrypt_standard(self, plaintext: str):\n ct = \"\"\n idx_key = 0\n\n for c in plaintext:\n if c not in Vigenere._DEFAULT_CHARSET:\n ct += c\n continue\n\n k = Vigenere._DEFAULT_CHARSET.index(self._key[idx_key])\n c = Vigenere._DEFAULT_CHARSET.index(c)\n ct += Vigenere._DEFAULT_CHARSET[(c + k) % 26]\n idx_key += 1\n idx_key %= len(self._key)\n\n return ct\n\n def _encrypt_full_key(self, plaintext: str):\n ct = \"\"\n idx_key = 0\n\n for c in plaintext:\n if c not in Vigenere._DEFAULT_CHARSET:\n ct += c\n continue\n\n k = Vigenere._DEFAULT_CHARSET.index(self._key[idx_key])\n plain_pos = Vigenere._DEFAULT_CHARSET.index(c)\n cipher_pos = self._s_box[k][plain_pos] - 1\n ct += Vigenere._DEFAULT_CHARSET[cipher_pos]\n idx_key += 1\n idx_key %= len(self._key)\n\n return ct\n\n def _encrypt_auto_key(self, plaintext: str):\n ct = \"\"\n idx_key = 0\n extend = False\n\n for c in plaintext:\n if c not in Vigenere._DEFAULT_CHARSET:\n ct += c\n continue\n\n if not extend:\n k = Vigenere._DEFAULT_CHARSET.index(self._key[idx_key])\n else:\n while plaintext[idx_key] not in Vigenere._DEFAULT_CHARSET:\n idx_key += 1\n k = Vigenere._DEFAULT_CHARSET.index(plaintext[idx_key])\n\n c = Vigenere._DEFAULT_CHARSET.index(c)\n ct += Vigenere._DEFAULT_CHARSET[(c + k) % 26]\n\n idx_key += 1\n if not extend:\n if idx_key == len(self._key):\n extend = True\n idx_key = 0\n\n return ct\n\n def _encrypt_running_key(self, plaintext: str):\n ct = \"\"\n idx_key = 0\n key = self._load_key()\n\n for c in plaintext:\n if c 
not in Vigenere._DEFAULT_CHARSET:\n ct += c\n continue\n\n k = Vigenere._DEFAULT_CHARSET.index(key[idx_key])\n c = Vigenere._DEFAULT_CHARSET.index(c)\n ct += Vigenere._DEFAULT_CHARSET[(c + k) % 26]\n idx_key += 1\n idx_key %= len(key)\n\n return ct\n\n def _encrypt_extended(self, plaintext: bytes):\n ct = b\"\"\n idx_key = 0\n\n assert isinstance(self._key, bytes)\n\n for c in plaintext:\n k = self._key[idx_key]\n ct += bytes([(c + k) % 256])\n idx_key += 1\n idx_key %= len(self._key)\n\n return ct\n\n def encrypt(self, plaintext):\n if type(plaintext) == str:\n plaintext = plaintext.upper()\n\n if self._type == Vigenere.STANDARD:\n return self._encrypt_standard(plaintext)\n elif self._type == Vigenere.FULL:\n return self._encrypt_full_key(plaintext)\n elif self._type == Vigenere.AUTO_KEY:\n return self._encrypt_auto_key(plaintext)\n elif self._type == Vigenere.RUNNING_KEY:\n return self._encrypt_running_key(plaintext)\n elif self._type == Vigenere.EXTENDED:\n return self._encrypt_extended(plaintext)\n\n def _decrypt_standard(self, ciphertext: str):\n pt = \"\"\n idx_key = 0\n\n for c in ciphertext:\n if c not in Vigenere._DEFAULT_CHARSET:\n pt += c\n continue\n\n k = Vigenere._DEFAULT_CHARSET.index(self._key[idx_key])\n c = Vigenere._DEFAULT_CHARSET.index(c)\n pt += Vigenere._DEFAULT_CHARSET[(c - k) % 26]\n idx_key += 1\n idx_key %= len(self._key)\n\n return pt\n\n def _decrypt_full_key(self, ciphertext: str):\n pt = \"\"\n idx_key = 0\n\n for c in ciphertext:\n if c not in Vigenere._DEFAULT_CHARSET:\n pt += c\n continue\n\n k = Vigenere._DEFAULT_CHARSET.index(self._key[idx_key])\n cipher_pos = Vigenere._DEFAULT_CHARSET.index(c)\n plain_pos = self._s_box[k].index(cipher_pos + 1)\n pt += Vigenere._DEFAULT_CHARSET[plain_pos]\n idx_key += 1\n idx_key %= len(self._key)\n\n return pt\n\n def _decrypt_auto_key(self, ciphertext: str):\n pt = \"\"\n idx_key = 0\n extend = False\n\n for c in ciphertext:\n if c not in Vigenere._DEFAULT_CHARSET:\n pt += c\n continue\n\n if not extend:\n k = Vigenere._DEFAULT_CHARSET.index(self._key[idx_key])\n else:\n while pt[idx_key] not in Vigenere._DEFAULT_CHARSET:\n idx_key += 1\n k = Vigenere._DEFAULT_CHARSET.index(pt[idx_key])\n\n c = Vigenere._DEFAULT_CHARSET.index(c)\n pt += Vigenere._DEFAULT_CHARSET[(c - k) % 26]\n\n idx_key += 1\n if not extend:\n if idx_key == len(self._key):\n extend = True\n idx_key = 0\n\n return pt\n\n def _decrypt_running_key(self, ciphertext: str):\n pt = \"\"\n idx_key = 0\n key = self._load_key()\n\n for c in ciphertext:\n if c not in Vigenere._DEFAULT_CHARSET:\n pt += c\n continue\n\n k = Vigenere._DEFAULT_CHARSET.index(key[idx_key])\n c = Vigenere._DEFAULT_CHARSET.index(c)\n pt += Vigenere._DEFAULT_CHARSET[(c - k) % 26]\n idx_key += 1\n idx_key %= len(key)\n\n return pt\n\n def _decrypt_extended(self, ciphertext: bytes):\n pt = b\"\"\n idx_key = 0\n\n assert isinstance(self._key, bytes)\n\n for c in ciphertext:\n k = self._key[idx_key]\n pt += bytes([(c - k) % 256])\n idx_key += 1\n idx_key %= len(self._key)\n\n return pt\n\n def decrypt(self, ciphertext):\n if self._type == Vigenere.STANDARD:\n return self._decrypt_standard(ciphertext)\n elif self._type == Vigenere.FULL:\n return self._decrypt_full_key(ciphertext)\n elif self._type == Vigenere.AUTO_KEY:\n return self._decrypt_auto_key(ciphertext)\n elif self._type == Vigenere.RUNNING_KEY:\n return self._decrypt_running_key(ciphertext)\n elif self._type == Vigenere.EXTENDED:\n return 
self._decrypt_extended(ciphertext)\n","sub_path":"stegan/vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":8943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"463097525","text":"import pickle\nimport gzip\nimport cv2\n\n# Third-party libraries\nimport numpy as np\n\ndef load_data():\n f = gzip.open('data/dataset.pkl.gz', 'rb')\n l = pickle.load(f)\n training_data, validation_data, test_data = l\n f.close()\n return (training_data, validation_data, test_data)\n\ndef load_data_wrapper():\n tr_d, va_d, te_d = load_data()\n\n training_inputs = format_data(tr_d[0])\n training_results = format_data(tr_d[1])\n training_data = zip(training_inputs, training_results)\n\n validation_inputs = format_data(va_d[0])\n validation_results = format_data(va_d[1])\n validation_data = zip(validation_inputs, validation_results)\n\n test_inputs = format_data(te_d[0])\n test_results = format_data(te_d[1])\n test_data = zip(test_inputs, test_results)\n\n return (training_data, validation_data, test_data)\n\ndef format_data(i):\n return [np.reshape(x, (90000, 1)) for x in i]\n","sub_path":"dataset_loader.py","file_name":"dataset_loader.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"601108914","text":"import xcffib\nimport struct\nimport six\n_events = {}\n_errors = {}\nclass KeymapNotifyEvent(xcffib.Event):\n def __init__(self, unpacker):\n xcffib.Event.__init__(self, unpacker)\n base = unpacker.offset\n self.keys = xcffib.List(unpacker, \"B\", 31)\n self.bufsize = unpacker.offset - base\n_events[11] = KeymapNotifyEvent\nxcffib._add_ext(key, no_sequenceExtension, _events, _errors)\n","sub_path":"tests/generator/no_sequence.py","file_name":"no_sequence.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"590456424","text":"import hashlib\nimport json\nimport os\nimport time\n\nimport requests\n\nfrom dwarf_debugger.lib import utils\n\n\nclass Git(object):\n CACHE_PATH = '.git_cache'\n DWARF_CACHE = CACHE_PATH + '/dwarf'\n DWARF_COMMITS_CACHE = CACHE_PATH + '/dwarf_commits'\n DWARF_SCRIPTS_CACHE = CACHE_PATH + '/dwarf_scripts'\n FRIDA_CACHE = CACHE_PATH + '/frida'\n\n def __init__(self):\n if not os.path.exists(Git.CACHE_PATH):\n os.mkdir(Git.CACHE_PATH)\n\n def _open_cache(self, path, url, _json=True):\n data = None\n now = time.time()\n if os.path.exists(path):\n with open(path, 'r') as f:\n data = json.load(f)\n last_update = data['updated']\n data = data['data']\n if now - last_update < 60 * 15:\n return data\n if utils.is_connected():\n try:\n r = requests.get(url)\n except:\n return data\n if r is None or r.status_code != 200:\n return data\n if _json:\n try:\n data = r.json()\n except:\n return None\n else:\n data = r.text\n with open(path, 'w') as f:\n f.write(json.dumps({\n 'updated': now,\n 'data': data\n }))\n return data\n\n def get_frida_version(self):\n return self._open_cache(\n Git.FRIDA_CACHE, 'https://api.github.com/repos/frida/frida/releases/latest')\n\n def get_script(self, url):\n return self._open_cache(\n Git.CACHE_PATH + '/' + hashlib.md5(url.encode('utf8')).hexdigest(), url, _json=False)\n\n def get_script_info(self, url):\n return self._open_cache(\n Git.CACHE_PATH + '/' + hashlib.md5(url.encode('utf8')).hexdigest(), 
url)\n","sub_path":"dwarf_debugger/lib/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"47884567","text":"from pygame.locals import *\nfrom os import path\n\nfrom player import *\n\nclass Game:\n def __init__(self):\n # initialize game window, etc\n pygame.init()\n pygame.mixer.init()\n self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption(TITLE)\n self.clock = pygame.time.Clock()\n self.running = True\n self.load_data()\n\n def load_data(self):\n # Easy names to start file directories\n game_folder = path.dirname(__file__)\n img_folder = path.join(game_folder, 'img')\n\n # load all image files\n self.player_icon = pygame.image.load(path.join(img_folder, 'player.png')).convert_alpha()\n\n def new(self):\n # start a new game\n self.all_sprites = pygame.sprite.Group()\n\n player = Player(self, 0, 0)\n\n def run(self):\n # Game Loop\n self.playing = True\n while self.playing:\n self.dt = self.clock.tick(FPS) / 1000\n self.events()\n self.update()\n self.draw()\n\n def update(self):\n # Game Loop - Update\n self.all_sprites.update()\n\n\n def events(self):\n # Game Loop - Events\n for event in pygame.event.get():\n\n # quit event / close window\n if event.type == QUIT:\n if self.playing:\n self.playing = False\n self.running = False\n\n def draw_grid(self):\n for x in range(0, WIDTH, TILESIZE):\n pygame.draw.line(self.screen, WHITE, (x, 0), (x, HEIGHT))\n\n for y in range(0, HEIGHT, TILESIZE):\n pygame.draw.line(self.screen, WHITE, (0, y), (WIDTH, y))\n\n def draw(self):\n # DRAW STUFF\n self.screen.fill(BLACK)\n self.draw_grid()\n self.all_sprites.draw(self.screen)\n\n # display frame\n pygame.display.flip()\n\n def show_start_screen(self):\n # Start Screen / Menu\n pass\n\n def show_end_screen(self):\n # Game Over Screen\n pass\n\n\ngame = Game()\ngame.show_start_screen()\nwhile game.running:\n game.new()\n game.run()\n\n game.show_end_screen()\n\npygame.quit()\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"589441237","text":"import geni.portal as portal\nimport geni.rspec.pg as pg\nimport geni.rspec.igext as IG\n\npc = portal.Context()\nrequest = pc.makeRequestRSpec()\n\ntourDescription = \"A compute node with Docker installed on Ubuntu 18.04\" \ntour = IG.Tour()\ntour.Description(IG.Tour.TEXT,tourDescription)\nrequest.addTour(tour)\n\nnode = request.RawPC(\"head\")\nnode.routable_control_ip = \"true\" \nnode.disk_image = \"urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD\"\nnode.addService(pg.Execute(shell=\"sh\", command=\"sudo bash /local/repository/install_docker.sh\")) \n\npc.printRequestRSpec(request)\n","sub_path":"profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"197277151","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n\n\n# In[3]:\n\n\nseries = pd.read_csv('gaz1.csv', ';', header=0, parse_dates=[0], index_col=[''], squeeze=True)\nseries = series.drop([\"\",\"\",\"