diff --git "a/343.jsonl" "b/343.jsonl"
new file mode 100644
--- /dev/null
+++ "b/343.jsonl"
@@ -0,0 +1,630 @@
+{"seq_id":"624289127","text":"from tqdm import tqdm_notebook as tqdm\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\n\ndef elbow_method(df_, k_initial=1, maxClusters=15, k_iteration=1):\n    # validate the K value for K-means\n    print('Executing Elbow Method:\\n...K from {} to {} with step {}\\n'.format(k_initial, maxClusters, k_iteration))\n    inertia_dic = {}\n    for k in tqdm(range(k_initial, maxClusters, k_iteration)):\n        # fit KMeans for each candidate k and record its inertia\n\n        # print('...testing k: {}'.format(k))\n        inertia_dic[k] = KMeans(n_clusters=k).fit(df_).inertia_\n    return inertia_dic\n\ndef gap_statistic(df_, nrefs=3, maxClusters=15, k_initial=1, k_iteration=1):\n    #### Gap\n    #https://anaconda.org/milesgranger/gap-statistic/notebook\n    \"\"\"\n    Calculates KMeans optimal K using Gap Statistic from Tibshirani, Walther, Hastie\n    Params:\n        df_: ndarray of shape (n_samples, n_features)\n        nrefs: number of sample reference datasets to create\n        maxClusters: Maximum number of clusters to test for\n    Returns: gaps, a dict mapping each tested k to its gap statistic\n    \"\"\"\n    gaps = {}\n    for k in tqdm(range(k_initial, maxClusters, k_iteration)):\n        # Holder for reference dispersion results\n        refDisps = np.zeros(nrefs)\n        # For n references, generate random sample and perform kmeans getting resulting dispersion of each loop\n        for i in range(nrefs):\n            # Create new random reference set\n            randomReference = np.random.random_sample(size=df_.shape)\n            # Fit to it\n            km = KMeans(k)\n            km.fit(randomReference)\n            refDisp = km.inertia_\n            refDisps[i] = refDisp\n        # Fit cluster to original data and create dispersion\n        km = KMeans(k).fit(df_)\n        origDisp = km.inertia_\n        # Calculate gap statistic\n        gap = np.log(np.mean(refDisps)) - np.log(origDisp)\n        # Assign this loop's gap statistic to gaps\n        gaps[k] = gap\n\n    return gaps  # the k with the largest gap statistic is the optimal number of clusters","sub_path":"pymove/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"280020761","text":"import pydash\n\nfrom dashboard.data.utils import values_for_records, QCheck, facility_not_reporting, multiple_orders_score\nfrom dashboard.helpers import *\n\n\nclass BlanksQualityCheck(QCheck):\n test = ORDER_FORM_FREE_OF_GAPS\n combinations = [{NAME: DEFAULT}]\n\n fields = [OPENING_BALANCE,\n QUANTITY_RECEIVED,\n ART_CONSUMPTION,\n LOSES_ADJUSTMENTS,\n ESTIMATED_NUMBER_OF_NEW_ART_PATIENTS]\n\n def for_each_facility(self, data, combination, previous_cycle_data=None):\n result = NOT_REPORTING\n\n values = values_for_records(self.fields, data.get(C_RECORDS, []))\n number_of_consumption_record_blanks = len(pydash.select(\n values, lambda v: v is None))\n\n c_count_ = data.get(C_COUNT, 0)\n a_count_ = data.get(A_COUNT, 0)\n p_count_ = data.get(P_COUNT, 0)\n if c_count_ == 0 and a_count_ == 0 and p_count_ == 0:\n return result\n if c_count_ < 25 or a_count_ < 22 or p_count_ < 7:\n result = NO\n elif number_of_consumption_record_blanks > 2:\n result = NO\n else:\n result = YES\n return result\n\n\nclass WebBasedCheck(QCheck):\n test = WEB_BASED\n combinations = [{NAME: DEFAULT}]\n\n def for_each_facility(self, data, combination, previous_cycle_data=None):\n value = data[WEB_PAPER].strip()\n result = NOT_REPORTING\n if value:\n if value.lower() == WEB.lower():\n result = WEB\n if value.lower() == PAPER.lower():\n result = PAPER\n\n return result\n\n\nclass IsReportingCheck(QCheck):\n test = REPORTING\n combinations = [{NAME: DEFAULT}]\n\n def for_each_facility(self, facility, combination, previous_cycle_data=None):\n return NO if facility_not_reporting(facility) else YES\n\n\nclass MultipleCheck(QCheck):\n test = MULTIPLE_ORDERS\n combinations = [{NAME: DEFAULT}]\n\n def for_each_facility(self, facility, combination, previous_cycle_data=None):\n return multiple_orders_score(facility)\n","sub_path":"dashboard/data/blanks.py","file_name":"blanks.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"273889285","text":"import scrapy\nimport datetime\nfrom scrape_img.items import ScrapeImgItem\n\n\nclass imgSpider(scrapy.Spider):\n name = \"imgSpider\"\n domain = \"http://www.gettyimages.com\"\n\n def start_requests(self):\n yield scrapy.Request(\"http://www.gettyimages.ca/photos/arne-friedrich?family=editorial&phrase=arne%20friedrich&sort=best&excludenudity=true\", \\\n callback=self.parse_page)\n\n\n def parse_page(self, response):\n for href in response.xpath('//a[@class=\"search-result-asset-link\"]/@href').extract():\n yield scrapy.Request(self.domain + href, callback=self.parse_pic)\n\n last_page = response.xpath('//section[@class=\"gallery\"]/@data-is-last-page').extract_first()\n\n if last_page == \"false\":\n next_page = response.xpath('//a[@id=\"next-gallery-page\"]/@href').extract_first()\n yield scrapy.Request(self.domain + next_page, callback=self.parse_page)\n\n\n def parse_pic(self, response):\n id = response.xpath('//div[@class=\"image-container\"]/img/@asset-id').extract_first()\n title = response.xpath('//h1[@class=\"gallery_title active follow-header\"]/span/text()').extract_first()\n description = response.xpath('//meta[@name=\"description\"]/@content').extract_first().decode('utf_8')\n date = response.xpath('//div[@class=\"footer\"]/div[@class=\"credit\"]/span[1]/text()').extract_first()\n url = response.xpath('//div[@class=\"image-container\"]/img/@src').extract_first()\n\n\n #\n date = datetime.datetime.strptime(date, '%B %d, %Y').strftime('%Y_%m_%d')\n\n if id and title and date:\n image_name = date+ \"_\" + title + \"_\" + id\n image_name = image_name.replace('\\\\','_').replace('/','_')\n image_name = 'full/' + image_name[0:200] + '.jpg'\n\n\n #\n item = ScrapeImgItem()\n item['id'] = id\n item['title'] = title\n item['description'] = description\n item['date'] = date\n item['image_urls'] = [url]\n item['image_name'] = image_name\n\n yield item\n","sub_path":"scrape_img/spiders/imgspider.py","file_name":"imgspider.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"197463100","text":"print(\"Exercise 1\")\n\ndays=0\nwell_height = 125\ndaily_distance = 30\nnightly_distance = 20\nsnail_position = 0\n\nwhile snail_position <= well_height:\n    snail_position+=daily_distance-nightly_distance\n    days+=1\nelse:\n    print(\"The snail takes\", days, \"days to get out.\")\n\nprint(\"Bonus 1\")\nsnail_position = 0\nadvance_cm = [30, 21, 33, 77, 44, 45, 23, 45, 12, 34, 55]\ndisplacement = []\n\nfor i in range(len(advance_cm)):\n    displacement.append(advance_cm[i]-nightly_distance)\n    snail_position += advance_cm[i]-nightly_distance\n    if snail_position > well_height:\n        break\nprint(\"The snail takes\", i+1, \"days to get out.\")\n#print(displacement)\nprint(\"The maximum displacement is\",max(displacement))\nprint(\"The minimum displacement is\",min(displacement))\n\naverage = sum(displacement)/len(displacement)\nprint(\"The average progress is\",average)\n\nimport statistics\nprint(\"The standard deviation is\",statistics.stdev(displacement))\n","sub_path":"1.-Python/1.-Snail-and-Well/1-Snail-and-Well_ANSWER.py","file_name":"1-Snail-and-Well_ANSWER.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"558531039","text":"import xlsxwriter\nimport csv\nimport xlrd\n\ndef open_xlsx_file(input_file_path):\n\n xls_file = xlrd.open_workbook(input_file_path)\n sheet = xls_file.sheet_by_index(0)\n xls_dict = {}\n for rownum in range(sheet.nrows):\n xls_row = sheet.row_values(rownum)\n try:\n xls_dict[int(xls_row[0])] = int(xls_row[2])\n except ValueError:\n continue\n return xls_dict\n\ndef open_csv_file(input_file_path):\n\n with open(input_file_path, 'r') as csv_file:\n csv_read = csv.reader(csv_file)\n csv_dict = {}\n for csv_row in csv_read:\n try:\n csv_dict[int(csv_row[1])] = int(csv_row[0])\n except ValueError:\n continue\n\n return csv_dict\n\ndef compare_files(xlsx_file_path, csv_file_path):\n\n xlsx_input_file_path = xlsx_file_path\n csv_input_file_path = csv_file_path\n\n xls_dict = open_xlsx_file(xlsx_input_file_path)\n csv_dict = open_csv_file(csv_input_file_path)\n\n result_dict = {}\n\n for key, csv_value in csv_dict.items():\n if key in xls_dict.keys():\n xls_value = xls_dict[key]\n result_sum = csv_value + xls_value\n if result_sum != 0:\n result_dict[key] = result_sum\n\n return result_dict\n\ndef save_report(output_file_path, xlsx_file_path, csv_file_path):\n\n xlsx_input_file_path = xlsx_file_path\n csv_input_file_path = csv_file_path\n\n result_dict = compare_files(xlsx_input_file_path, csv_input_file_path)\n\n workbook = xlsxwriter.Workbook(output_file_path)\n worksheet = workbook.add_worksheet('Result Sheet')\n worksheet.set_column(0, 0, 15)\n top_format = workbook.add_format({'bold': True, 'align': 'right'})\n worksheet.write(0, 0, 'Account', top_format)\n worksheet.write(0, 1, 'Balance', top_format)\n row = 1\n column = 0\n num_format = workbook.add_format()\n num_format.set_num_format('0')\n for acc, bal in result_dict.items():\n worksheet.write(row, column, acc, num_format)\n worksheet.write(row, column + 1, bal, num_format)\n row = row + 1\n workbook.close()\n\ndef main():\n\n xlsx_input_file_path = 'C://Users//dmitriy.khimich//Downloads//DMS_RPA_Challenge//DMS_RPA_Challenge//Банк выгрузка 2017 Сент.xlsx'\n csv_input_file_path = 'C://Users//dmitriy.khimich//Downloads//DMS_RPA_Challenge//DMS_RPA_Challenge//торговая сеть сент 2017.csv'\n xlsx_output_file_path = 'C://Users//dmitriy.khimich//Downloads//DMS_RPA_Challenge//DMS_RPA_Challenge//example2.xlsx'\n\n open_xlsx_file(xlsx_input_file_path)\n open_csv_file(csv_input_file_path)\n compare_files(xlsx_input_file_path, csv_input_file_path)\n save_report(xlsx_output_file_path, xlsx_input_file_path, csv_input_file_path)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"task_for_DMS_(with func).py","file_name":"task_for_DMS_(with func).py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"561796997","text":"#coding=utf-8\n\n\"\"\"\nCreated on 2020/9/8\n@author: lianxiujuan\n@desc: user group\n\"\"\"\n\nimport pytest\nimport sys\nfrom src.pageobjectAdmin.pageUsergp import *\nfrom DataAdmin.UsergrpData import *\nfrom src.public.common.Login import *\nfrom src.public.common.Select_Item import *\n\n\n\nclass Test_Usergp:\n    def test_usergp_login(self):\n        login_usergp()\n        sleep(1)\n\n    # Add a user group\n    def test_add_usergp(self):\n        log.info(\"Start executing test case %s\" % sys._getframe().f_code.co_name)\n        usergp_add(addcodedata, addnamedata)\n        time.sleep(2)\n        assert new_page_source(addnamedata)\n\n    # Set permissions\n    def test_setauth_usergp(self):\n        log.info(\"Start executing test case %s\" % sys._getframe().f_code.co_name)\n        select_item(addnamedata)\n        usergp_setauth()\n        time.sleep(2)\n        usergp_setauth()\n\n    # Edit a user group\n    def test_edit_usergp(self):\n        log.info(\"Start executing test case %s\" % sys._getframe().f_code.co_name)\n        select_item(addnamedata)\n        usergp_edit(editnamedata)\n        time.sleep(2)\n        assert new_page_source(editnamedata)\n\n    # Delete a user group\n    def test_delete_usergp(self):\n        log.info(\"Start executing test case %s\" % sys._getframe().f_code.co_name)\n        select_item(addnamedata)\n        usergp_delete()\n        time.sleep(2)\n        assert new_page_source(addnamedata) == False\n        new_click(authmg)\n\n\n\n","sub_path":"TestcaseAdmin/Usermg&Group&Authmg/test_Usergpcase.py","file_name":"test_Usergpcase.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"572478598","text":"from sense_hat import SenseHat\nimport time\nimport sys\nimport serial\nimport datetime\nimport os\nfrom urllib import urlencode\nimport urllib2\nsense = SenseHat()\nMEASURMENT_INTERVAL = 2\n\nWEATHER_UPLOAD = True\n\nWU_URL = \"http://weatherstation.wunderground.com/weatherstation/updateweatherstation.php\"\n\nSINGLE_HASH = '#'\n\nHASHES = '####################################'\nSLASH_N = '\\n'\n\n\ndef main():\n\n    global last_temp\n\n    last_minute = datetime.datetime.now().minute\n\n    last_minute -= 1\n    if last_minute == 0: \n        last_minute = 59\n    while 1:\n\n        current_second = datetime.datetime.now().second\n        if (current_second == 0) or ((current_second % 5) == 0):\n            serialMsg = serial.Serial(\"/dev/ttyACM0\", 9600, timeout = 1)\n            rawMsg = serialMsg.readline()\n            message = (rawMsg.decode().strip())\n            calc_temp = message\n            print(calc_temp)\n            temp_f = calc_temp \n            humidity = round(sense.get_humidity(), 0)\n            pressure = round(sense.get_pressure() * 0.0295300, 1)\n            print(\"Temp: %sF, Pressure: %s inHg, Humidity: %s%%\" % (temp_f, pressure, humidity))\n            current_minute = datetime.datetime.now().minute\n            if current_minute:\n                last_minute = current_minute\n                if (current_minute == 0) or ((current_minute % MEASURMENT_INTERVAL) == 0):\n                    now = datetime.datetime.now()\n                    print(\"\\n%d minute mark (%d @ %s)\" % (MEASURMENT_INTERVAL, current_minute, str(now)))\n\n                    if WEATHER_UPLOAD:\n                        print(\"Uploading data to Weather Underground\")\n\n                        weather_data = {\n                            \"action\": \"updateraw\",\n                            \"ID\": wu_station_id,\n                            \"PASSWORD\": wu_station_key,\n                            \"dateutc\": \"now\",\n                            \"tempf\": str(temp_f),\n                            \"humidity\": str(humidity),\n                            \"baromin\": str(pressure),\n                        }\n                        try:\n                            upload_url = WU_URL + \"?\" + urlencode(weather_data)\n                            response = urllib2.urlopen(upload_url)\n                            html = response.read()\n                            print(\"Server response:\", html)\n                            response.close()\n                        except:\n                            print(\"Exception:\", sys.exc_info()[0], SLASH_N)\n                    else:\n                        print(\"Skipping Weather Underground upload\")\n        time.sleep(1)\n    print(\"Leaving main()\")\n\nprint(SLASH_N + HASHES)\nprint(SINGLE_HASH, \"Pi Weather Station \", SINGLE_HASH)\nprint(HASHES)\n\nif (MEASURMENT_INTERVAL is None) or (MEASURMENT_INTERVAL > 60):\n    print(\"The application's 'MEASURMENT_INTERVAL' cannot be empty or greater than 60\")\n    sys.exit(1)\n\nprint(\"\\nInitialising Weather Underground configuration\")\nwu_station_id = \"IDROITWI14\"\nwu_station_key = \"tbrmknbc\"\nif (wu_station_id is None) or (wu_station_key is None):\n    print(\"Missing values from the Weather Underground config\")\n    sys.exit(1)\n\nprint(\"Successfully read Weather Underground config\")\nprint(\"Station ID:\", wu_station_id)\n\n\n\ntry:\n    main()\nexcept KeyboardInterrupt:\n    print(\"\\nExiting application\\n\")\n    sys.exit(0)\n \n \n\n","sub_path":"weather-script-for-wu.py","file_name":"weather-script-for-wu.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"492609734","text":"import os\nimport argparse\nfrom copy import deepcopy\n\nDATA_DIR = '../data'\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--seed\", type=int, default=0)\nparser.add_argument(\"--epochs\", nargs='+', type=int,\n                    default=[10, 10, 10, 10, 10],\n                    help='Epoch number for each task')\nparser.add_argument(\"--batch_size\", type=int, default=8,\n                    help='training batch size')\nparser.add_argument(\"--bert_learning_rate\", type=float, default=3e-5,\n                    help='learning rate for pretrained Bert')\nparser.add_argument(\"--learning_rate\", type=float, default=3e-5,\n                    help='learning rate for Class Classifier')\nparser.add_argument('--gpu', default='0', type=str,\n                    help='id(s) for CUDA_VISIBLE_DEVICES')\nparser.add_argument('--n-labeled', type=int, default=2000,\n                    help='Number of labeled data')\nparser.add_argument('--tasks', nargs='+', type=str,\n                    default=['ag', 'yelp', 'amazon', 'yahoo', 'dbpedia'],\n                    help='Task Sequence')\n\nargs = parser.parse_args()\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\nfrom transformers import AdamW\n\nfrom model import BaseModel\nfrom read_data import compute_class_offsets, prepare_dataloaders\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nargs.device = device\nn_gpu = torch.cuda.device_count()\n\ndataset_classes = {\n    'amazon' : 5,\n    'yelp'   : 5,\n    'yahoo'  : 10,\n    'ag'     : 4,\n    'dbpedia': 14,\n}\n\n\ndef train_step(model, optimizer, cls_CR, x, y):\n    model.train()\n    logits = model(x)\n    loss = cls_CR(logits, y)\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n    return loss\n\n\ndef validation(model, t, validation_loaders):\n    '''\n    Compute the validation accuracy on the first (t + 1) tasks,\n    return the average accuracy over (t + 1) tasks and detailed accuracy\n    on each task.\n    '''\n    model.eval()\n    acc_list = []\n    with torch.no_grad():\n        avg_acc = 0.0\n        for i in range(t + 1):\n            valid_loader = validation_loaders[i]\n            total = 0\n            correct = 0\n            for x, mask, y in valid_loader:\n                x, y = x.to(device), y.to(device)\n                batch_size = x.size(0)\n                logits = model(x)\n                _, pred_cls = logits.max(1)\n                correct += pred_cls.eq(y.view_as(pred_cls)).sum().item()\n                total += batch_size\n            print(\"acc on task {} : {}\".format(i, correct * 100.0 / total))\n            avg_acc += correct * 100.0 / total\n            acc_list.append(correct * 100.0 / total)\n\n    return avg_acc / (t + 1), acc_list\n\n\ndef main():\n    np.random.seed(0)\n    torch.manual_seed(args.seed)\n    if torch.cuda.is_available():\n        torch.cuda.manual_seed(args.seed)\n\n    task_num = len(args.tasks)\n    task_classes = [dataset_classes[task] for task in args.tasks]\n    total_classes, offsets = compute_class_offsets(args.tasks, task_classes)\n    train_loaders, validation_loaders, test_loaders = \\\n        prepare_dataloaders(DATA_DIR, args.tasks, offsets, args.n_labeled,\n                            2000, args.batch_size, 128, 128)\n\n    # Reset random seed by the torch seed\n    np.random.seed(torch.randint(1000, [1]).item())\n\n    model = BaseModel(total_classes).to(args.device)\n    cls_CR = torch.nn.CrossEntropyLoss()\n\n    for task_id in range(task_num):\n        data_loader = train_loaders[task_id]\n        length = len(data_loader)\n\n        optimizer = AdamW(\n            [\n                {\"params\": model.bert.parameters(), \"lr\": args.bert_learning_rate},\n                {\"params\": model.classifier.parameters(), \"lr\": args.learning_rate},\n            ]\n        )\n\n        best_acc = 0\n        best_model = deepcopy(model.state_dict())\n\n        acc_track = []\n\n        for epoch in range(args.epochs[task_id]):\n            iteration = 1\n            for x, mask, y in tqdm(data_loader, total=length, ncols=100):\n                x, y = x.to(device), y.to(device)\n                train_step(model, optimizer, cls_CR, x, y)\n\n                if iteration % 250 == 0:\n                    print(\"----------------Validation-----------------\")\n                    avg_acc, acc_list = validation(model, task_id, validation_loaders)\n                    acc_track.append(acc_list)\n\n                    if avg_acc > best_acc:\n                        print(\"------------------Best Model Till Now------------------------\")\n                        best_acc = avg_acc\n                        best_model = deepcopy(model.state_dict())\n\n                iteration += 1\n\n        if len(acc_track) > 0:\n            print(\"ACC Track: {}\".format(acc_track))\n\n        model.load_state_dict(deepcopy(best_model))\n        print(\"------------------Best Result------------------\")\n        avg_acc, _ = validation(model, task_id, test_loaders)\n        print(\"Best avg acc: {}\".format(avg_acc))\n\n\nif __name__ == '__main__':\n    print(args)\n    main()\n","sub_path":"src/finetune.py","file_name":"finetune.py","file_ext":"py","file_size_in_byte":5085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"571450762","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport sys\r\nfrom config import *\r\nfrom pymongo import MongoClient\r\nfrom data_parse import *\r\nreload(sys)\r\nsys.setdefaultencoding(\"utf-8\")\r\n\r\nclass MongoConnect():\r\n    def __init__(self,mongo_database,mongo_set,mongo_host='172.25.3.215',mongo_port=27017):\r\n        self.mongo_host=mongo_host\r\n        self.mongo_port=mongo_port\r\n        self.mongo_database=mongo_database\r\n        self.mongo_set=mongo_set\r\n        self.mongo_cursor,self.mongo_client=self.connect_mongo()\r\n        self.bulk_mongodb_cursor, self.bulk_mongo_client=self.bulk_connect_mongo()\r\n\r\n    def connect_mongo(self):\r\n        mongo_client=MongoClient(host=self.mongo_host,port=self.mongo_port)\r\n        mongo_db=mongo_client[self.mongo_database]\r\n        mongo_cursor=mongo_db[self.mongo_set]\r\n        return mongo_cursor,mongo_client\r\n\r\n    def bulk_connect_mongo(self):\r\n        bulk_mongo_client=MongoClient(host=self.mongo_host,port=self.mongo_port)\r\n        bulk_mongo_db=bulk_mongo_client[self.mongo_database]\r\n        bulk_mongodb_cursor = bulk_mongo_db[self.mongo_set].initialize_ordered_bulk_op()\r\n        return bulk_mongodb_cursor,bulk_mongo_client\r\n\r\n    def mongo_save_many(self,datas):\r\n        res=self.mongo_cursor.insert_many(datas)\r\n        print(res)\r\n\r\n    def mongo_create_index(self,index_col_name):\r\n        self.mongo_cursor.create_index([(index_col_name,1)])\r\n\r\n    def get_mongo_endtime_starttime(self,sort_col_name,query={}):\r\n        '''\r\n        Establish the pagination-based index entries used for paged lookups\r\n        :param sort_col_name:\r\n        :param query:\r\n        :return:\r\n        '''\r\n        if query=={}:\r\n            datanum = self.mongo_cursor.count()\r\n\r\n            res1 = self.mongo_cursor.find().sort(sort_col_name, 1).limit(1)\r\n            starttimestamp = -1\r\n            for data in res1:\r\n                starttimestamp += data.get(sort_col_name)\r\n\r\n            res2 = self.mongo_cursor.find().sort(sort_col_name, -1).limit(1)\r\n            endtimestamp = 0\r\n            for data in res2:\r\n                endtimestamp += data.get(sort_col_name)\r\n        else:\r\n            datanum = self.mongo_cursor.find(query).count()\r\n\r\n            res1 = self.mongo_cursor.find(query).sort(sort_col_name, 1).limit(1)\r\n            starttimestamp = -1\r\n            for data in res1:\r\n                starttimestamp += data.get(sort_col_name)\r\n\r\n            res2 = self.mongo_cursor.find(query).sort(sort_col_name, -1).limit(1)\r\n            endtimestamp = 0\r\n            for data in res2:\r\n                endtimestamp += data.get(sort_col_name)\r\n        return datanum,starttimestamp,endtimestamp\r\n\r\n    def mongo_update_bulk(self,query_col_name,query_respon_name,datas):\r\n        for data in datas:\r\n            self.bulk_mongodb_cursor.find({query_col_name:data.get(query_respon_name)}).update({'$set':data})\r\n        result=self.bulk_mongodb_cursor.execute()\r\n        print(result)\r\n\r\n    def close_mongo_client(self):\r\n        self.mongo_client.close()\r\n\r\n    def close_bulk_mongo_client(self):\r\n        self.bulk_mongo_client.close()\r\n\r\n\r\nclass MongoFold():\r\n    '''\r\n    Page through MongoDB data and update it; in actual use, subclass and override the specific methods\r\n    '''\r\n    def __init__(self,parse_mongo_db_name,parse_mongo_set_name,save_mongo_db,save_mongo_set,query={}):\r\n        self.mongo_db_name = parse_mongo_db_name\r\n        self.mongo_set_name = parse_mongo_set_name\r\n        self.mongo_connect_model = MongoConnect(mongo_host='172.25.3.215',mongo_port=27017, mongo_database=self.mongo_db_name,\r\n                                                mongo_set=self.mongo_set_name)\r\n        # self.parse_model=DataParse(parse_db_name=self.mongo_db_name,parse_set_name=self.mongo_set_name)\r\n        self.query=query\r\n        self.mongo_save_model = MongoConnect(mongo_database=save_mongo_db, mongo_set=save_mongo_set)\r\n\r\n\r\n    def get_query_params(self,sort_col_name):\r\n        query = self.query\r\n        mongo_connect_model=self.mongo_connect_model\r\n        datanum, starttimestamp, endtimestamp=mongo_connect_model.get_mongo_endtime_starttime(sort_col_name=sort_col_name,query=query)\r\n        return datanum,starttimestamp,endtimestamp\r\n\r\n    def person_fundinfo_parse(self,id):\r\n        pass\r\n\r\n    def fold_find_parse(self,sort_col_name,fold_row_num):\r\n        datanum, starttimestamp, endtimestamp=self.get_query_params(sort_col_name)\r\n        while starttimestamp < endtimestamp:\r\n            for i in range(datanum / fold_row_num + 1):\r\n                res_datas = []\r\n                print(i+1)\r\n                datas = self.mongo_connect_model.mongo_cursor.find({sort_col_name: {'$gt': starttimestamp},\"nationality\": {'$exists': False}},{\"_id\":1,\"updatetime_stamp\":1}).sort(sort_col_name, 1).limit(fold_row_num)\r\n                for data in datas:\r\n                    res_datas.append(self.person_fundinfo_parse(data))\r\n                    timestamp=data.get('updatetime_stamp')\r\n                    starttimestamp=timestamp\r\n                yield res_datas\r\n\r\n    def create_indexes(self,index_col_name):\r\n        self.mongo_save_model.mongo_create_index(index_col_name)\r\n\r\n    def save_data(self,sort_col_name,fold_row_num):\r\n        for datas in self.fold_find_parse(sort_col_name=sort_col_name,fold_row_num=fold_row_num):\r\n            try:\r\n                self.mongo_save_model.mongo_save_many(datas=datas)\r\n            except:\r\n                continue\r\n\r\n    def insert_update_data(self,data,update_mongo_db,update_mongo_set):\r\n        '''\r\n        Insert or update\r\n        :param update_mongo_db:\r\n        :param update_mongo_set:\r\n        :param sort_col_name:\r\n        :return:\r\n        '''\r\n        mongo_update_model = MongoConnect(mongo_database=update_mongo_db, mongo_set=update_mongo_set)\r\n        res=mongo_update_model.mongo_cursor.update({\"_id\": data[\"resource_id\"]},{'$set': data}, upsert=True)\r\n        print(res)\r\n\r\n    def update_data(self,update_mongo_db,update_mongo_set,sort_col_name,fold_row_num):\r\n        for datas in self.fold_find_parse(sort_col_name=sort_col_name,fold_row_num=fold_row_num):\r\n            try:\r\n                mongo_update_model=MongoConnect(mongo_database=update_mongo_db,mongo_set=update_mongo_set)\r\n                mongo_update_model.mongo_update_bulk(query_col_name=\"_id\",query_respon_name='resource_id',datas=datas)\r\n                mongo_update_model.close_bulk_mongo_client()\r\n            except:\r\n                continue\r\n\r\n\r\nif __name__ == '__main__':\r\n    pass","sub_path":"company_data_mapping/util_functions/database_connect.py","file_name":"database_connect.py","file_ext":"py","file_size_in_byte":6461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"155667619","text":"class Student:\n\tdef __init__(self, first_name, last_name):\n\t\tself.name = \"%s %s\" % (first_name, last_name)\n\t\t# per-instance dict, so marks are not shared between Student objects\n\t\tself.courseMarks = {}\n\n\tdef addCourseMark(self, course, mark):\n\t\tself.courseMarks[course] = mark\n\n\tdef average(self):\n\t\ttotal = 0\n\t\tcount = 0\n\t\tfor key, value in self.courseMarks.items():\n\t\t\ttotal += value\n\t\t\tcount += 1\n\t\taverage = total/count\n\t\treturn average\n\nS = Student(\"John\", \"Doe\")\nS.addCourseMark(\"CMPUT410\", 50)\nS.addCourseMark(\"CMPUT391\", 100)\nS.addCourseMark(\"CMPUT400\", 100)\nS.addCourseMark(\"CMPUT495\", 50)\nprint(S.courseMarks)\nprint(S.name)\nprint(S.average())\n","sub_path":"name-family.py","file_name":"name-family.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"216622149","text":"from m5stack import *\nfrom machine import Timer\n\nimport Data.Menu\n\n\nclass Battery:\n\n def __init__(self):\n\n \"\"\"Initialize Battery.\"\"\"\n\n self.vbat = int()\n\n lcd.clear(0xFF8000)\n\n buttonA.wasPressed(callback=self.exit)\n buttonB.wasPressed(callback=lambda: None)\n\n self.show_battery()\n\n self.refresh = Timer(0)\n self.refresh.init(period=10000, mode=Timer.PERIODIC, callback=self.show_battery)\n\n def show_battery(self, t=None):\n\n \"\"\"Convert battery voltage into bars.\"\"\"\n\n self.vbat = self.map_value(axp.getVbatData() * 1.1, 3000, 4100, 0, 6)\n print(axp.getVbatData() * 1.1)\n print(self.vbat)\n\n if axp.getIChargeData() / 2 > 0:\n color = lcd.YELLOW\n elif self.vbat == 1:\n color = lcd.RED\n elif self.vbat == 2:\n color = lcd.ORANGE\n else:\n color = lcd.GREEN\n\n # Battery Icon.\n lcd.fillRect(22, 10, 125, 60, lcd.BLACK)\n lcd.fillRect(12, 30, 10, 20, lcd.BLACK)\n\n # Reset bars.\n lcd.fillRect(127, 15, 15, 50, lcd.BLACK)\n lcd.fillRect(107, 15, 15, 50, lcd.BLACK)\n lcd.fillRect(87, 15, 15, 50, lcd.BLACK)\n lcd.fillRect(67, 15, 15, 50, lcd.BLACK)\n lcd.fillRect(47, 15, 15, 50, lcd.BLACK)\n lcd.fillRect(27, 15, 15, 50, lcd.BLACK)\n\n # Draw bars.\n if self.vbat >= 1:\n lcd.fillRect(127, 15, 15, 50, color)\n if self.vbat >= 2:\n lcd.fillRect(107, 15, 15, 50, color)\n if self.vbat >= 3:\n lcd.fillRect(87, 15, 15, 50, color)\n if self.vbat >= 4:\n lcd.fillRect(67, 15, 15, 50, color)\n if self.vbat >= 5:\n lcd.fillRect(47, 15, 15, 50, color)\n if self.vbat >= 6:\n lcd.fillRect(27, 15, 15, 50, color)\n\n def exit(self):\n\n \"\"\"De-init timer and exit.\"\"\"\n\n self.refresh.deinit()\n\n # Return to menu\n return Data.Menu.Menu()\n\n @staticmethod\n def map_value(x, in_min, in_max, out_min, out_max):\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min\n","sub_path":"Data/Battery.py","file_name":"Battery.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"419901075","text":"from django.urls import path\nfrom .views import StudentStreamListCreateView, StudentStreamRetrieveUpdateView,\\\n    StudentGroupListCreateView, StudentGroupRetrieveUpdateView, \\\n    StudentGroupMembersListCreateView, GroupInStreamListCreateView, StudentListView\nfrom .views import CourseCreateView, CourseRetrieveUpdateView, LessonCreateView, \\\n    LessonRetrieveUpdateView, StudentLessonResultCreateView, \\\n    StudentLessonResultRetrieveUpdateView, CourseListView, \\\n    LessonListView, StudentLessonResultListView\n\nfrom .views import ClassmatesCheckedTaskListCreateView, ClassmatesCheckedTaskRetrieveUpdateDestroyView\nfrom .views import TaskOptionListCreateView, TaskOptionRetrieveUpdateDestroyView\nfrom .views import StudentResultListCreateView, StudentResultRetrieveUpdateDestroyView\nfrom .views import CheckListCreateView, CheckRetrieveUpdateDestroyView\n\nfrom .views import TaskWithTeacherCheckListCreateView, TaskWithTeacherCheckRetrieveUpdateDestroyView\nfrom .views import TaskWithTeacherCheckOptionListCreateView, TaskWithTeacherCheckOptionRetrieveUpdateDestroyView\nfrom .views import TaskWithTeacherCheckResultListCreateView, TaskWithTeacherCheckResultRetrieveUpdateDestroyView\nfrom .views import TaskWithTeacherCheckCheckListCreateView, TaskWithTeacherCheckCheckRetrieveUpdateDestroyView\n\nfrom .views import SectionCreateView, SectionListView, SectionRetrieveView, \\\n    SectionUpdateView, TaskWithTickCreateView, TaskWithTickListView\nfrom .views import TaskWithTickRetrieveView, TaskWithTickUpdateView, \\\n    TaskWithTickOptionCreateView, TaskWithTickOptionListView, \\\n    TaskWithTickOptionRetrieveView, TaskWithTickOptionUpdateView, \\\n    TaskWithTickStudentResultCreateView, TaskWithTickStudentResultRetrieveView, \\\n    TaskWithTickStudentResultUpdateView, TaskWithTickStudentResultListView, \\\n    StatisticsTaskByStudent, StatisticsStudentResults\n\n# task with keyword views\nfrom .views import TaskWithKeywordCreateView, TaskWithKeywordRetrieveView, \\\n    TaskWithKeywordUpdateView, TaskWithKeywordListView, TaskWithKeywordOptionCreateView, \\\n    TaskWithKeywordOptionRetrieveView, TaskWithKeywordOptionUpdateView, \\\n    TaskWithKeywordOptionListView, TaskWithKeywordResultCreateView, \\\n    TaskWithKeywordResultRetrieveView, TaskWithKeywordResultUpdateView, \\\n    TaskWithKeywordResultListView\n\n\napp_name = \"courses_app\"\n\nurlpatterns = [\n    path('students/', StudentListView.as_view()),\n\n    path('streams/', StudentStreamListCreateView.as_view()),\n    path('streams//', StudentStreamRetrieveUpdateView.as_view()),\n    path('streams//groups/', GroupInStreamListCreateView.as_view()),\n\n    path('groups/', StudentGroupListCreateView.as_view()),\n    path('groups//', StudentGroupRetrieveUpdateView.as_view()),\n    path('groups//members/', StudentGroupMembersListCreateView.as_view()),\n\n    path('courses/add/', CourseCreateView.as_view()),\n    path('courses/all/', CourseListView.as_view()),\n    path('courses//', CourseRetrieveUpdateView.as_view()),\n\n    path('classmates/tasks/', ClassmatesCheckedTaskListCreateView.as_view()),\n    path('classmates/tasks//', ClassmatesCheckedTaskRetrieveUpdateDestroyView.as_view()),\n\n    path('classmates/options/', TaskOptionListCreateView.as_view()),\n    path('classmates/options//', TaskOptionRetrieveUpdateDestroyView.as_view()),\n\n    path('classmates/results/', StudentResultListCreateView.as_view()),\n    path('classmates/results//', StudentResultRetrieveUpdateDestroyView.as_view()),\n\n    path('classmates/checks/', CheckListCreateView.as_view()),\n    path('classmates/checks//', CheckRetrieveUpdateDestroyView.as_view()),\n\n    path('teacher/tasks/', TaskWithTeacherCheckListCreateView.as_view()),\n    path('teacher/tasks//', TaskWithTeacherCheckRetrieveUpdateDestroyView.as_view()),\n\n    path('teacher/options/', TaskWithTeacherCheckOptionListCreateView.as_view()),\n    path('teacher/options//', TaskWithTeacherCheckOptionRetrieveUpdateDestroyView.as_view()),\n\n    path('teacher/results/', TaskWithTeacherCheckResultListCreateView.as_view()),\n    path('teacher/results//', TaskWithTeacherCheckResultRetrieveUpdateDestroyView.as_view()),\n\n    path('teacher/checks/', TaskWithTeacherCheckCheckListCreateView.as_view()),\n    path('teacher/checks//', TaskWithTeacherCheckCheckRetrieveUpdateDestroyView.as_view()),\n\n    path('lessons/add/', LessonCreateView.as_view()),\n    path('lessons/all/', LessonListView.as_view()),\n    path('lessons//', LessonRetrieveUpdateView.as_view()),\n\n    path('lessons/results/add/', StudentLessonResultCreateView.as_view()),\n    path('lessons/results/all/', StudentLessonResultListView.as_view()),\n    path('lessons/results//', StudentLessonResultRetrieveUpdateView.as_view()),\n\n    path('courses/sections/add/', SectionCreateView.as_view()),\n    path('courses/sections/all/', SectionListView.as_view()),\n    path('courses/sections//', SectionRetrieveView.as_view()),\n    path('courses/sections/update//', SectionUpdateView.as_view()),\n\n    path('tasks/with_tick/add/', TaskWithTickCreateView.as_view()),\n    path('tasks/with_tick/all/', TaskWithTickListView.as_view()),\n    path('tasks/with_tick//', TaskWithTickRetrieveView.as_view()),\n    path('tasks/with_tick/update//', TaskWithTickUpdateView.as_view()),\n\n    path('tasks/with_tick/options/add/', TaskWithTickOptionCreateView.as_view()),\n    path('tasks/with_tick/options/all/', TaskWithTickOptionListView.as_view()),\n    path('tasks/with_tick/options//', TaskWithTickOptionRetrieveView.as_view()),\n    path('tasks/with_tick/options/update//', TaskWithTickOptionUpdateView.as_view()),\n\n    path('tasks/with_tick/results/add/', TaskWithTickStudentResultCreateView.as_view()),\n    path('tasks/with_tick/results/all/', TaskWithTickStudentResultListView.as_view()),\n    path('tasks/with_tick/results//', TaskWithTickStudentResultRetrieveView.as_view()),\n    path('tasks/with_tick/results/update//', TaskWithTickStudentResultUpdateView.as_view()),\n\n    path('tasks/with_keyword/add/', TaskWithKeywordCreateView.as_view()),\n    path('tasks/with_keyword/all/', TaskWithKeywordListView.as_view()),\n    path('tasks/with_keyword//', TaskWithKeywordRetrieveView.as_view()),\n    path('tasks/with_keyword/update//', TaskWithKeywordUpdateView.as_view()),\n\n    path('tasks/with_keyword/options/add/', TaskWithKeywordOptionCreateView.as_view()),\n    path('tasks/with_keyword/options/all/', TaskWithKeywordOptionListView.as_view()),\n    path('tasks/with_keyword/options//', TaskWithKeywordOptionRetrieveView.as_view()),\n    path('tasks/with_keyword/options/update//', TaskWithKeywordOptionUpdateView.as_view()),\n\n    path('tasks/with_keyword/results/add/', TaskWithKeywordResultCreateView.as_view()),\n    path('tasks/with_keyword/results/all/', TaskWithKeywordResultListView.as_view()),\n    path('tasks/with_keyword/results//', TaskWithKeywordResultRetrieveView.as_view()),\n    path('tasks/with_keyword/results/update//', TaskWithKeywordResultUpdateView.as_view()),\n\n    path('statistics/sections//tasks//results/', StatisticsTaskByStudent.as_view()),\n    path(\n        'statistics/sections//tasks//students//results/',\n        StatisticsStudentResults.as_view(),\n    ),\n]\n","sub_path":"application/courses_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":7410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"611967079","text":"import torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nfrom torch.autograd import Variable\n\n\nimport neural_ner\nfrom neural_ner.util import Initializer\nfrom neural_ner.util import Loader\nfrom neural_ner.modules import CharEncoderCNN\nfrom neural_ner.modules import WordEncoderRNN\nfrom neural_ner.modules import DecoderCRF\n\nclass CNN_BiLSTM_CRF(nn.Module):\n \n def __init__(self, word_vocab_size, word_embedding_dim, word_hidden_dim, char_vocab_size,\n char_embedding_dim, char_out_channels, tag_to_id, cap_input_dim=4 ,\n cap_embedding_dim=0, pretrained=None):\n \n super(CNN_BiLSTM_CRF, self).__init__()\n \n self.word_vocab_size = word_vocab_size\n self.word_embedding_dim = word_embedding_dim\n self.word_hidden_dim = word_hidden_dim\n \n self.char_vocab_size = char_vocab_size\n self.char_embedding_dim = char_embedding_dim\n self.char_out_channels = char_out_channels\n \n self.cap_input_dim = cap_input_dim\n self.cap_embedding_dim = cap_embedding_dim\n \n self.tag_to_ix = tag_to_id\n self.tagset_size = len(tag_to_id)\n \n self.initializer = Initializer()\n self.loader = Loader()\n \n if self.cap_embedding_dim:\n self.cap_embedder = nn.Embedding(self.cap_input_dim, self.cap_embedding_dim)\n self.initializer.init_embedding(self.cap_embedder.weight)\n \n self.char_encoder = CharEncoderCNN(char_vocab_size, char_embedding_dim, char_out_channels, \n kernel_width=3, pad_width=1)\n \n self.initializer.init_embedding(self.char_encoder.embedding.weight)\n \n self.word_encoder = WordEncoderRNN(word_vocab_size, word_embedding_dim ,word_hidden_dim, \n char_out_channels, cap_embedding_dim, input_dropout_p=0.5)\n \n if pretrained is not None:\n self.word_encoder.embedding.weight = nn.Parameter(torch.FloatTensor(pretrained))\n \n self.initializer.init_lstm(self.word_encoder.rnn)\n \n self.decoder = DecoderCRF(word_hidden_dim*2, self.tag_to_ix, input_dropout_p=0.5)\n self.initializer.init_linear(self.decoder.hidden2tag)\n \n def forward(self, words, tags, chars, caps, wordslen, charslen, tagsmask, usecuda=True):\n \n batch_size, max_len = words.size()\n \n cap_features = self.cap_embedder(caps) if self.cap_embedding_dim else None\n \n char_features = self.char_encoder(chars)\n char_features = char_features.view(batch_size, max_len, -1)\n \n word_features = self.word_encoder(words, char_features, cap_features, wordslen)\n \n score = self.decoder(word_features, tags, tagsmask, usecuda=usecuda)\n \n return score\n \n def decode(self, words, chars, caps, wordslen, charslen, tagsmask, usecuda=True,\n score_only = False):\n \n batch_size, max_len = words.size()\n \n cap_features = self.cap_embedder(caps) if self.cap_embedding_dim else None\n \n char_features = self.char_encoder(chars)\n char_features = char_features.view(batch_size, max_len, -1)\n \n word_features = self.word_encoder(words, char_features, cap_features, wordslen)\n \n if score_only:\n score = self.decoder.decode(word_features, tagsmask, usecuda=usecuda, \n score_only = True)\n return score\n score, tag_seq = self.decoder.decode(word_features, tagsmask, usecuda=usecuda)\n return score, tag_seq","sub_path":"neural_ner/models/cnn_bilstm_crf.py","file_name":"cnn_bilstm_crf.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"152467651","text":"s = ['hi', 'hello', 'word']\ndef g(x):\n    return x[::-1]\ns = list(map(g,s))\nprint(s)\n\n# or by lambda\ns = ['hi', 'hello', 'word']\ns = list(map(lambda x: x[::-1], s))\nprint(s)\n\n\n# max of x, y -----------------------------------------\ndef mx(x, y):\n    if x > y:\n        return x\n    else:\n        return y\nprint((mx(8,5)))\n\n# by lambda\nmx = lambda x, y: x if x > y else y\nprint(mx(20, 10))\n\nprint('\\n\\n\\n')\n\n\n# map ---------------------------------------------------\ndef square(lst1):\n    lst2 = []\n    for num in lst1:\n        lst2.append(num**2)\n    return lst2\nprint(square([4,3,2,1]))\n\n# by lambda\nn = [4,3,2,1]\nprint(list(map(lambda x: x**2, n)))\n\n# by list comprehension\nn = [4,3,2,1]\nprint([x**2 for x in n])\n\nprint('\\n\\n\\n')\n\n\n\n# filter -----------------------------------------------\ndef over_two(lst1):\n    lst2 = [x for x in lst1 if x > 2]\n    return lst2\nprint(over_two([4,3,2,1]))\n\n# by lambda\nn = [4,3,2,1]\nprint(list(filter(lambda x: x>2, n)))\n\n# by list comprehension\nn = [4,3,2,1]\nprint([x for x in n if x>2])\n\nprint('\\n\\n\\n')\n\n\n\n\n# reduce -----------------------------------------------\ndef mult(lst1):\n    prod = lst1[0]\n    for i in range(1, len(lst1)):\n        prod *= lst1[i]\n    return prod\nprint(mult([4,3,2,1]))\n\n# by lambda\n# from functools import reduce\n# n = [4,3,2,1]\n# print(reduce(lambda x,y: x*y, n))\n","sub_path":"lambda/lambda_examples.py","file_name":"lambda_examples.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"245675189","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef help():\n\tprint('''\n\t\tUsage:\n\t\t------------\n\t\tretrotransposonNamesFromDomainOrganization.py -features -domains \n\t\t-OR-\n\t\tretrotransposonNamesFromDomainOrganization.py -features \n\n\t\tDescription:\n\t\t------------\n\t\tTakes a two column file as input via -features and a list of comma-separated\n\t\tprotein domains via -domains and outputs feature names (from the first column\n\t\tof the -features file) if the domain organization of the feature is one of those\n\t\tin the -domains file.\n\n\n\n\t\tOptions:\n\t\t------------\n\t\t-features\tA two-column tab-separated file. It's assumed that the first column\n\t\t\t\tis the name of a feature and the second column is a component of that\n\t\t\t\tfeature and that components for a given feature are in order such that\n\t\t\t\ta compenent at row n is before the component at row n+1\n\n\t\t-domains\tLines in this file should be comma separated names of domains.\n\n\t\t-names\t\tPrint out domain structure as comma-separated list for feature names in\n\t\t\t\tthe file specified by -names.\n\t\t\n\t\t''')\n\tsys.exit(0)\n\n\nargs = sys.argv\n\nif len(args) != 5 or '-features' not in args:\n\thelp()\n\nfeatures_fl_pth = args[args.index('-features') + 1]\n\nif '-domains' in args:\n\tdomains_fl_pth = args[args.index('-domains') + 1]\nelif '-names' in args:\n\tnames_fl_pth = args[args.index('-names') + 1]\n\n\nd = {} # this dict will have domain organization for each ltr in the -features file\n\n\nwith open(features_fl_pth) as features_fl:\n\tfor line in features_fl:\n\t\tcontents = line.split()\n\t\tltrname = contents[0]\n\t\tdomain = contents[1]\n\t\tif ltrname in d:\n\t\t\td[ltrname].append(domain)\n\t\telse:\n\t\t\td[ltrname] = [domain]\nif '-domains' in args:\n\tdomain_organization = []\n\twith open(domains_fl_pth) as domains_fl:\n\t\tfor line in domains_fl:\n\t\t\tdomain_organization.append(line.strip().split(','))\n\n\tfor ltr in d:\n\t\tif d[ltr] in domain_organization:\n\t\t\tprint(ltr)\n\nelif '-names' in args:\n\tnames_list = []\n\twith open(names_fl_pth) as names_fl:\n\t\tfor line in names_fl:\n\t\t\tnames_list.append(line.strip())\n\n\tfor ltr in d:\n\t\tif ltr in names_list:\n\t\t\tprint('{0}\\t{1}'.format(ltr, ','.join(d[ltr])))\n","sub_path":"retrotransposonNamesFromDomainOrganization.py","file_name":"retrotransposonNamesFromDomainOrganization.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"539541755","text":"import os\nfrom time import time\nfrom sys import stdout\nimport torch\nimport h5py as h5\nimport numpy as np\nimport torch.nn as nn\nfrom networks.models import Conditional_RNVP_with_image_prior\nfrom networks.losses import Conditional_RNVP_with_image_prior_loss\nfrom networks.optimizers import Adam, LRUpdater\nimport visdom\n\n\nclass AverageMeter(object):\n    def __init__(self):\n        self.reset()\n\n    def reset(self):\n        self.val = 0\n        self.avg = 0\n        self.sum = 0\n        self.count = 0\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\n\ndef save_model(state, model_name):\n    torch.save(state, model_name)\n    print('Model saved to ' + model_name)\n\ndef cnt_params(params):\n    return sum(p.numel() for p in params if p.requires_grad)\n\n\ndef save_point_clouds(batch_i, gt_cloud, gen_cloud, image, len_dataset, **kwargs):\n\n    clouds_fname = '{}_{}_{}_{}_segs_clouds.h5'.format(kwargs['model_name'][:-4],\n                                                       kwargs['cloud_size'],\n                                                       kwargs['sampled_cloud_size'],\n                                                       kwargs['usage_mode'])\n    cloud_fname = os.path.join(kwargs['path2save'], clouds_fname)\n    if not os.path.exists(cloud_fname):\n        clouds_file = h5.File(cloud_fname, 'w')\n        sampled_clouds = clouds_file.create_dataset(\n            'sampled_clouds',\n            shape=(kwargs['N_samples'] * len_dataset, 3,\n                   kwargs['cloud_size']),dtype=np.float32)\n        gt_clouds = clouds_file.create_dataset(\n            'gt_clouds',\n            shape=(kwargs['N_samples'] * len_dataset, 3,\n                   kwargs['cloud_size']),dtype=np.float32)\n        gt_images = clouds_file.create_dataset(\n            'images',\n            shape=(kwargs['N_samples'] * len_dataset, 4, 224, 224), dtype=np.uint8)\n    else:\n        clouds_file = h5.File(cloud_fname, 'a')\n        sampled_clouds = clouds_file['sampled_clouds']\n        gt_clouds = clouds_file['gt_clouds']\n        gt_images = clouds_file['images']\n\n    gen = torch.zeros((kwargs['batch_size'], 3,\n                       kwargs['cloud_size']))\n\n    gen[:, :, :gen_cloud.size(2)] = gen_cloud\n    sampled_clouds[kwargs['batch_size'] * batch_i:kwargs['batch_size'] * batch_i + gen.shape[0]] = gen\n\n    gt_clouds[kwargs['batch_size'] * batch_i:kwargs['batch_size'] * batch_i + gt_cloud.shape[0]] = gt_cloud\n\n    gt_images[kwargs['batch_size'] * batch_i: kwargs['batch_size'] * batch_i + image.shape[0]] = image\n    clouds_file.close()\n\n\n\ndef train_test(iterator, model, loss_func, optimizer, scheduler, epoch, iter, **config):\n    num_workers = config.get('num_workers')\n    model_name = config.get('model_name')\n    train_mode = config.get('usage_mode')\n\n    batch_time = AverageMeter()\n    data_time = AverageMeter()\n\n\n    torch.set_grad_enabled(True)\n\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n    end = time()\n    model.train()\n    for i, data in enumerate(iterator):\n        if iter + i >= len(iterator):\n            break\n        data_time.update(time() - end)\n        scheduler(optimizer, epoch, iter + i)\n\n        input = data[0].to(device)\n        seg = data[1].to(device)\n        image = data[2].to(device)\n        num_seg_classes = data[3][0].to(device)\n\n        segs_batches = []\n        segs_images = []\n        for n in range(1, num_seg_classes + 1):\n            new_image = []\n            new_batch = []\n            min = 2500\n            for t in range(len(input)): # for each batch, create a new batch\n                new_seg = []\n                k = 0\n                for j in range(len(seg[t])):\n                    if seg[t][j] == n:\n                        new_seg.append(input[t][j])\n                        k += 1\n                min = k if k <= min and k > 0 else min\n                if k:\n                    new_seg = torch.cat(new_seg).reshape(-1, 3).unsqueeze(0)\n                    new_seg = torch.transpose(new_seg, 1, 2)\n                    new_batch.append(new_seg)\n                    cur_image = image[t].unsqueeze(0)\n                    new_image.append(cur_image) # get the corresponding image of each part\n\n            if new_batch:\n                for t in range(len(new_batch)):\n                    new_batch[t] = new_batch[t][:, :, :min] # truncate so every cloud in the new batch has the same size\n\n                new_batch = torch.cat(new_batch, dim=0) # for each batch, append its seg: N * 3 * min_seg_num\n                new_image = torch.cat(new_image, dim=0)\n                if len(new_batch) > 1:\n                    segs_batches.append(new_batch) # a list of num_seg_classes tensors, each an N * 3 * min_seg_num point cloud\n                    segs_images.append(new_image) # a list of num_seg_classes tensors, each an N * 3 * size * size image\n\n\n        if not segs_batches:\n            continue\n\n        seg_labels = []\n        loss_for_one_epoch = model(segs_batches, segs_images, seg_labels, train_mode, optimizer, loss_func)\n\n        with torch.no_grad():\n            if torch.isnan(loss_for_one_epoch):\n                print('Loss is NaN! Stopping without updating the net...')\n                exit()\n\n        batch_time.update(time() - end)\n\n        end = time()\n\n        print('[epoch %d] [%d / %d]: loss %f' % (epoch, i, len(iterator), loss_for_one_epoch))\n\n\n        if (iter + i + 1) % (100 * num_workers) == 0:\n            save_model({\n                'epoch': epoch,\n                'iter': iter + i + 1,\n                'model_state': model.state_dict(),\n                'optimizer_state': optimizer.state_dict()\n            }, model_name)\n\n    save_model({\n        'epoch': epoch + 1,\n        'iter': 0,\n        'model_state': model.state_dict(),\n        'optimizer_state': optimizer.state_dict()\n    }, model_name)\n\ndef evaluate_test(iterator, model, optimizer, loss_func, **kwargs):\n    train_mode = kwargs.get('train_mode')\n    saving_mode = kwargs.get('saving_mode')\n    model.eval()\n    torch.set_grad_enabled(False)\n    #vis = visdom.Visdom()\n\n    for i, data in enumerate(iterator):\n        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n        input = data[0].to(device)\n        seg = data[1].to(device)\n        image = data[2].to(device)\n        num_seg_classes = data[3][0].to(device)\n\n\n\n        segs_batches = []\n        seg_labels = []\n\n        # reconstruct one image at a time\n        # first get all the labels that appear in one image\n        segs_images = []\n        seg_labels = []\n        for n in range(1, num_seg_classes + 1):\n            num = 0\n            for j in range(len(seg[0])):\n                if seg[0][j] == n:\n                    num += 1\n            if num:\n                segs_images.append(image)\n                new_batch = torch.zeros((len(input), 3, num))\n                segs_batches.append(new_batch)\n                seg_labels.append(n)\n\n        with torch.no_grad():\n            full_obj, loss = model(segs_batches, image, seg_labels, train_mode, optimizer, loss_func)\n            if torch.isnan(loss):\n                print('Loss is NaN! Stopping without updating the net...')\n                exit()\n\n        if saving_mode:\n            input = torch.transpose(input, 1, 2)\n            save_point_clouds(i, input, full_obj, image, len(iterator), **kwargs)\n        print(\"full_obj: \", full_obj.size())\n        print(\"evaluate: [%d / %d]: loss: %f\" % (i, len(iterator), loss))\n","sub_path":"training_with_segs.py","file_name":"training_with_segs.py","file_ext":"py","file_size_in_byte":7587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"478830190","text":"import torch\nfrom torch.utils.data import Dataset\nfrom torchvision import utils\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport scipy.misc\nimport os\n\nnum_class = 2\nroot_dir = './Tactile/'\ntrain_file = os.path.join(root_dir, 'train.csv')\nval_file = os.path.join(root_dir, 'val.csv')\nmeans = np.array([103.939, 116.779, 123.68]) / 255.\n\n\nclass TactileDataset(Dataset):\n\n def __init__(self, csv_file, phase, n_class=num_class, crop=False, flip_rate=0):\n self.data = pd.read_csv(csv_file)\n self.means = means\n self.n_class = n_class\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n img_name = self.data.iloc[idx, 0]\n img = scipy.misc.imread(img_name, mode='RGB')\n\n label_name = self.data.iloc[idx, 1]\n label = np.load(label_name)\n # convert to tensor\n img = torch.from_numpy(img.copy()).float()\n img = img.transpose(0, 2)\n img = img.transpose(1, 2)\n label = torch.from_numpy(label.copy()).long()\n\n # create one-hot encoding\n h, w = label.size()\n target = torch.zeros(self.n_class, h, w)\n for c in range(self.n_class):\n target[c][label == c] = 1\n\n sample = {'X': img, 'Y': target, 'l': label}\n\n return sample\n\n\ndef show_batch(batch):\n img_batch = batch['X']\n img_batch[:,0,...].add_(means[0])\n img_batch[:,1,...].add_(means[1])\n img_batch[:,2,...].add_(means[2])\n batch_size = len(img_batch)\n\n grid = utils.make_grid(img_batch)\n plt.imshow(grid.numpy()[::-1].transpose((1, 2, 0)))\n\n plt.title('Batch from dataloader')\n\n\ndef imshow(img):\n img = img / 2\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n\n\nif __name__ == '__main__':\n train_data = TactileDataset(csv_file=train_file, phase='train')\n print(train_data.__len__())\n data_loader = torch.utils.data.DataLoader(train_data, batch_size=4, shuffle=True, num_workers=1)\n\n batch_size = 4\n for i in range(batch_size):\n sample = train_data[i]\n print(i, sample['X'].size(), sample['Y'].size())\n\n for i, batch in enumerate(data_loader):\n print(i, batch['X'].size(), batch['Y'].size())\n","sub_path":"utils/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"386515031","text":"# -*- encoding: utf-8 -*-\n# Copyright (C) 2017 TSDV TTEC. All rights reserved.\n\"\"\"\nThis module is used to draw position of elements over input image.\nThe purpose is for easy debugging PHOcrOffice module\n\"\"\"\nimport os\nfrom PIL import Image, ImageDraw, ImageFont\nfrom .base_element import BaseElement\n\n# Define some constant tag of PHOcr office\nOCR_PAGE = \"ocr_page\"\nOCR_TABLE = \"ocr_table_mask\"\nOCR_CAREA = \"ocr_carea\"\nOCR_ROW = \"ocr_row\"\n\nclass ElementDraw(object):\n    \"\"\"\n    Draw box of elements over image\n    \"\"\"\n    blank_layer = \"blank_layer.png\"\n    output_file = \"element_draw.png\"\n    def __init__(self, img_width, img_height, source_image=None):\n        \"\"\"Init\"\"\"\n        self._width = img_width\n        self._height = img_height\n        source_img = \"\"\n        if source_image is None:\n            # Create a blank layer with A4 size\n            img = Image.new('RGB', (self._width, self._height), color='white')\n            img.save(ElementDraw.blank_layer)\n            source_img = ElementDraw.blank_layer\n        else:\n            source_img = source_image\n        self._source_img = Image.open(source_img)\n        self._draw = ImageDraw.Draw(self._source_img)\n        self._font = ImageFont.load_default()\n\n    def __del__(self):\n        \"\"\"Destructor\"\"\"\n        if os.path.exists(ElementDraw.blank_layer):\n            os.remove(ElementDraw.blank_layer)\n\n    def write(self, tree_element):\n        \"\"\"\n        Save the box of element\n        The main api to outside\n        :return:\n        \"\"\"\n        self.write_down_element(tree_element)\n        self._source_img.save(self.output_file, \"PNG\")\n\n    def _draw_text_box(self, textbox_element):\n        \"\"\"\n        Draw boxes related with text attributes element\n        :return:\n        \"\"\"\n        textbox = BaseElement(textbox_element)\n        self._draw_element(textbox)\n\n    def _draw_table(self, table_element):\n        \"\"\"\n        Draw boxes related with table element\n        :return:\n        \"\"\"\n        table = BaseElement(table_element)\n        # Draw table box first\n        self._draw_element(table)\n\n        # Draw rows\n        for row_element in table_element:\n            row = BaseElement(row_element)\n            self._draw_element(row)\n            for col_element in row_element:\n                col = BaseElement(col_element)\n                self._draw_element(col)\n\n    def _draw_element(self, base_element):\n        \"\"\"Draw rectangle of element\"\"\"\n        self._draw.rectangle([(base_element.box.left, base_element.box.top),\n                              (base_element.box.left + base_element.box.width,\n                               base_element.box.top + base_element.box.height)],\n                             outline='red', fill=None)\n\n        # Write text down to photo\n        text_value = base_element.tag + \" \" + base_element.m_id\n\n        # Write table name to upper middle of table\n        if base_element.tag == OCR_TABLE:\n            self._draw.text((base_element.box.left + base_element.box.width / 2,\n                             base_element.box.top - 10),\n                            text_value, font=self._font, fill=255)\n        # Write ocr row to lower of row\n        elif base_element.tag == OCR_ROW:\n            self._draw.text((base_element.box.left,\n                             base_element.box.top + 5),\n                            text_value, font=self._font, fill=255)\n        # Write col and other to middle\n        else:\n            self._draw.text((base_element.box.left,\n                             base_element.box.top + base_element.box.height / 2),\n                            text_value, font=self._font, fill=255)\n\n    def write_down_element(self, tree_element):\n        \"\"\"\n        Write down the image for input element\n        Can use outside\n        \"\"\"\n        element = BaseElement(tree_element)\n        input_tag = element.tag\n        if input_tag == OCR_TABLE:\n            self._draw_table(tree_element)\n        if input_tag == OCR_CAREA:\n            self._draw_text_box(tree_element)\n        if input_tag == OCR_PAGE:\n            for member in tree_element:\n                self.write_down_element(member)\n","sub_path":"Run_PHocr_test/PHOcr_C2404_D3_linux_release/lib/phocroffice/phocr_shared/element_draw.py","file_name":"element_draw.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"208290487","text":"from ipaddress import ip_network, ip_address\nimport csv\nimport time\nimport random\n\nlistOfPrivateIp = []\nfor k in ip_network(\"10.0.0.0/16\"):\n listOfPrivateIp.append(str(k))\n#print(len(listOfPrivateIp))\nlistOfPrivateIp_src = random.sample(listOfPrivateIp, 3500)\nlistOfPrivateIp = list(set(listOfPrivateIp)-set(listOfPrivateIp_src))\nlistOfPrivateIp_dst = random.sample(listOfPrivateIp,1500)\n# listOfPrivateIp_src = listOfPrivateIp[:3500]\n# listOfPrivateIp_dst = listOfPrivateIp[3501:5000]\n\nlistOfPublicIp = []\nfor k in ip_network(\"14.141.56.0/21\"):\n listOfPublicIp.append(str(k))\n#print(len(listOfPublicIp))\nlistOfPublicIp_src = random.sample(listOfPublicIp, 1500)\nlistOfPublicIp = list(set(listOfPublicIp)-set(listOfPublicIp_src))\nlistOfPublicIp_dst = random.sample(listOfPublicIp, 500)\nsrc_ip = listOfPrivateIp_src + listOfPublicIp_src\ndst_ip = listOfPrivateIp_dst + listOfPublicIp_dst\ninfo = 'src_dst_ip_info.csv'\nwith open(info,'a') as infofile:\n writer = csv.writer(infofile)\n writer.writerows([[src_ip],[dst_ip]])\n\n#print(len(src_ip))\n#print(len(dst_ip))\n\nports_name_protocol = [(80, 'http', 'TCP'), (443, 'https', 'TCP'), (123, 'ntp', 'UDP'), (22, 'ssh', 'TCP'),\n (53, 'dns', 'UDP'), (445, 'microsoft-ds', 'TCP'), (389, 'ldap', 'UDP'), (514, 'syslog', 'TCP'),\n (137, 'netbios-ns', 'TCP'), (902, 'ideafarm-door', 'TCP'), (23, 'telnet', 'TCP'), (2049,'nfs','TCP'),\n (88,'kerberos','TCP'), (111,'sunrpc', 'TCP'), (135,'epmap', 'TCP'), (1433, 'ms-sql-server', 'TCP'),\n (3306,'mysql','TCP'), (67,'bootps','UDP'), (161, 'snmp', 'UDP'), (138, 'netbios-dgm', 'UDP'), (1434, 'ms-sql-m', 'UDP'),\n (25,'smtp', 'TCP'), (21, 'ftp', 'TCP'), (179,'bgp','TCP'), (110,'pop3','TCP'), (500,'isakmp', 'UDP'),\n (465,'igmpv3lite','TCP'), (19,'chargen','UDP'), (139,'netbios-ssn', 'TCP')]\ni = 0\nj = 0\nfourTupleForPlanning = []\nstart = time.time()\nwhile (len(fourTupleForPlanning) < 30000000):\n srcip = int(ip_address(src_ip[i]))\n dstip = int(ip_address(dst_ip[j]))\n rand = random.randint(1,100)\n if rand == 1:\n portsSet = random.sample(ports_name_protocol,2)\n elif rand == 2:\n portsSet = random.sample(ports_name_protocol,3)\n elif rand == 3:\n portsSet = random.sample(ports_name_protocol,4)\n else:\n portsSet = random.sample(ports_name_protocol[:10],4)\n for set in portsSet:\n port = set[0]\n portName = set[1]\n protocol = set[2]\n fourTupleForPlanning.append([srcip, dstip, port, portName, protocol])\n if len(fourTupleForPlanning) == 30000000:\n break\n j = j+1\n if (j == 2000):\n i = i + 1\n j = 0\nfilename = 'fourTupleIntIpPlanning.csv'\nwith open (filename, 'a') as file:\n writer = csv.writer(file)\n writer.writerows(fourTupleForPlanning)\nend = time.time()\ntook = round(end-start,2)\nprint(f'inserted into {filename} 30million entries took {took} seconds')\n","sub_path":"fourtupleintip.py","file_name":"fourtupleintip.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"348747971","text":"from graph import Graph\nfrom indexed_priority_queue import PriorityQueue\n\nclass LazyPrim:\n def __init__(self, g):\n self.g = g\n self._mst_vertices = {} #Vertices already in MST\n self._mst_edges = []\n self._pq = PriorityQueue()\n\n #Get vertex labeled '0': this is our starting point\n list_of_v = g.vertices_with_labels()\n s = list_of_v['0']\n\n self._visit(s)\n\n while len(self._pq) > 0:\n (wt, (u, e)) = self._pq.remove_min()\n v = e.other(u)\n if v not in self._mst_vertices:\n self._mst_edges.append(e)\n self._visit(v)\n\n def _visit(self, v):\n \"\"\"Visit a vertex:\n - Include the vertex in MST\n - For each edge from vertex s to the PQ, iff the edge is not already part of the MST\n \"\"\"\n self._mst_vertices[v] = True\n for e in self.g.incident_edges(v):\n if e.other(v) not in self._mst_vertices:\n self._pq.add(float(e.element()), (v, e))\n\n def edges(self):\n return self._mst_edges\n\nif __name__ == '__main__':\n g = Graph(directed=False,file_path = \"prim_ewg.txt\")\n mst = LazyPrim(g)\n mst_edges = mst.edges()\n for e in mst_edges:\n (u, v) = e.endpoints()\n print(\"(\", u.element(), v.element(), \"): \", e.element())\n","sub_path":"graphs/lazy_prim.py","file_name":"lazy_prim.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"600764375","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 20 16:15:16 2018\n\n@author: yangyr\n\"\"\"\n\nimport tensorflow as tf\nimport tensorlayer as tl\nimport numpy as np\n\ndef u_net(x, is_train=False, reuse=False, n_out=1):\n _, nx, ny, nz = x.shape\n nx = int(nx)\n ny = int(ny)\n nz = int(nz)\n with tf.variable_scope(\"u_net\", reuse=reuse):\n tl.layers.set_name_reuse(reuse)\n inputs = tl.layers.InputLayer(x, name='inputs')\n conv1 = tl.layers.Conv2d(inputs, 64, (3, 3), act=tf.nn.relu, name='conv1_1')\n conv1 = tl.layers.Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, name='conv1_2')\n pool1 = tl.layers.MaxPool2d(conv1, (2, 2), name='pool1')\n conv2 = tl.layers.Conv2d(pool1, 128, (3, 3), act=tf.nn.relu, name='conv2_1')\n conv2 = tl.layers.Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, name='conv2_2')\n pool2 = tl.layers.MaxPool2d(conv2, (2, 2), name='pool2')\n conv3 = tl.layers.Conv2d(pool2, 256, (3, 3), act=tf.nn.relu, name='conv3_1')\n conv3 = tl.layers.Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, name='conv3_2')\n pool3 = tl.layers.MaxPool2d(conv3, (2, 2), name='pool3')\n conv4 = tl.layers.Conv2d(pool3, 512, (3, 3), act=tf.nn.relu, name='conv4_1')\n conv4 = tl.layers.Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, name='conv4_2')\n pool4 = tl.layers.MaxPool2d(conv4, (2, 2), name='pool4')\n conv5 = tl.layers.Conv2d(pool4, 1024, (3, 3), act=tf.nn.relu, name='conv5_1')\n conv5 = tl.layers.Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, name='conv5_2')\n\n up4 = tl.layers.DeConv2d(conv5, 512, (3, 3), (nx/8, ny/8), (2, 2), name='deconv4')\n up4 = tl.layers.ConcatLayer([up4, conv4], 3, name='concat4')\n conv4 = tl.layers.Conv2d(up4, 512, (3, 3), act=tf.nn.relu, name='uconv4_1')\n conv4 = tl.layers.Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, name='uconv4_2')\n up3 = tl.layers.DeConv2d(conv4, 256, (3, 3), (nx/4, ny/4), (2, 2), name='deconv3')\n up3 = tl.layers.ConcatLayer([up3, conv3], 3, name='concat3')\n conv3 = tl.layers.Conv2d(up3, 256, (3, 3), act=tf.nn.relu, name='uconv3_1')\n conv3 = tl.layers.Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, name='uconv3_2')\n up2 = tl.layers.DeConv2d(conv3, 128, (3, 3), (nx/2, ny/2), (2, 2), name='deconv2')\n up2 = tl.layers.ConcatLayer([up2, conv2], 3, name='concat2')\n conv2 = tl.layers.Conv2d(up2, 128, (3, 3), act=tf.nn.relu, name='uconv2_1')\n conv2 = tl.layers.Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, name='uconv2_2')\n up1 = tl.layers.DeConv2d(conv2, 64, (3, 3), (nx/1, ny/1), (2, 2), name='deconv1')\n up1 = tl.layers.ConcatLayer([up1, conv1] , 3, name='concat1')\n conv1 = tl.layers.Conv2d(up1, 64, (3, 3), act=tf.nn.relu, name='uconv1_1')\n conv1 = tl.layers.Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, name='uconv1_2')\n conv1 = tl.layers.Conv2d(conv1, n_out, (1, 1), act=tf.nn.sigmoid, name='uconv1')\n return conv1","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"155342412","text":"import random\nimport play_sound\nimport os\nfrom colorama import Fore, Back, Style , init\n\ndef create_cube():\n number_on = False\n #number_on = True\n #\n # 5 top\n # 0 1 2 3 left front right back\n # 4 bottom\n #\n cube = []\n for loop in range(6):\n side = []\n number = 0\n for loop2 in range(3):\n temp = []\n for loop3 in range(3):\n if number_on:\n temp += [str(loop)+str(number)]\n else:\n temp += [loop]\n number +=1\n side += [temp]\n cube += [side]\n return cube\n\ndef string_to_colour(string):\n new_string = \"\"\n\n for loop in range(len(string)):\n if string[loop] == \"0\":\n new_string += Back.RED\n elif string[loop] == \"1\":\n new_string += Back.WHITE\n new_string += Fore.BLACK\n elif string[loop] == \"2\":\n new_string += Back.MAGENTA\n elif string[loop] == \"3\":\n new_string += Back.YELLOW\n new_string += Fore.BLACK\n elif string[loop] == \"4\":\n new_string += Back.BLUE\n elif string[loop] == \"4\":\n new_string += Back.GREEN\n\n\n\n new_string += string[loop]\n new_string += Style.RESET_ALL\n \n\n return new_string\n\ndef print_cube(cube):\n lenght = len(str(cube[0][0]))\n print(\"\")\n print(string_to_colour(\" \"*lenght + str(cube[5][0])))\n print(string_to_colour(\" \"*lenght + str(cube[5][1])))\n print(string_to_colour(\" \"*lenght + str(cube[5][2])))\n \n print(string_to_colour(str(cube[0][0])+str(cube[1][0])+str(cube[2][0])+str(cube[3][0])))\n print(string_to_colour(str(cube[0][1])+str(cube[1][1])+str(cube[2][1])+str(cube[3][1])))\n print(string_to_colour(str(cube[0][2])+str(cube[1][2])+str(cube[2][2])+str(cube[3][2])))\n \n print(string_to_colour(\" \"*lenght + str(cube[4][0])))\n print(string_to_colour(\" \"*lenght + str(cube[4][1])))\n print(string_to_colour(\" \"*lenght + str(cube[4][2])))\n print(\"\")\n print(Style.RESET_ALL)\n return\n\ndef turn_face(cube,times=1):\n new_cube = create_cube()\n\n new_cube[3] = copy_plane(cube[3])\n\n new_cube[0] = copy_plane(cube[0])\n new_cube[0][0][2] = cube[4][0][0]\n new_cube[0][1][2] = cube[4][0][1]\n new_cube[0][2][2] = cube[4][0][2]\n\n new_cube[5] = copy_plane(cube[5])\n new_cube[5][2][0] = cube[0][0][2]\n new_cube[5][2][1] = cube[0][1][2]\n new_cube[5][2][2] = cube[0][2][2]\n\n new_cube[2] = copy_plane(cube[2])\n new_cube[2][0][0] = cube[5][2][0]\n new_cube[2][1][0] = cube[5][2][1]\n new_cube[2][2][0] = cube[5][2][2]\n\n new_cube[4] = copy_plane(cube[4])\n new_cube[4][0][0] = cube[2][0][0]\n new_cube[4][0][1] = cube[2][1][0]\n new_cube[4][0][2] = cube[2][2][0]\n\n\n new_cube[1] = turn_plane_no_side(cube[1])\n \n if times > 1:\n new_cube = turn_face(new_cube,times=times-1)\n\n return new_cube\n\ndef copy_plane(plane):\n new_plane = [[0,0,0],[0,0,0],[0,0,0]]\n for loop in range(3):\n for loop2 in range(3):\n new_plane[loop][loop2] = plane[loop][loop2]\n return new_plane\n\ndef turn_plane_no_side(plane):\n new_plane = [[0,0,0],[0,0,0],[0,0,0]]\n #middle\n new_plane[1][1] = plane[1][1]\n \n #corners\n new_plane[0][0] = plane[2][0]\n new_plane[2][0] = plane[2][2]\n new_plane[0][2] = plane[0][0]\n new_plane[2][2] = plane[0][2]\n \n #edges\n new_plane[0][1] = plane[1][0]\n new_plane[2][1] = plane[1][2]\n new_plane[1][2] = plane[0][1]\n new_plane[1][0] = plane[2][1]\n return new_plane\n\ndef turn_cube_horizontal(cube):\n new_cube = create_cube()\n new_cube[0] = cube[3]\n new_cube[1] = cube[0]\n new_cube[2] = cube[1]\n new_cube[3] = cube[2]\n\n new_cube[4] = turn_plane_no_side(cube[4])\n\n\n cube[5] = turn_plane_no_side(cube[5])\n cube[5] = turn_plane_no_side(cube[5])\n new_cube[5] = 
turn_plane_no_side(cube[5])\n return new_cube\n\ndef turn_cube_vertical(cube):\n new_cube = create_cube()\n new_cube[1] = cube[4]\n new_cube[5] = cube[1]\n\n new_cube[4] = copy_plane(cube[3])\n new_cube[4][0] = cube[3][2]\n new_cube[4][2] = cube[3][0]\n\n new_cube[3] = copy_plane(cube[5])\n new_cube[3][0] = cube[5][2]\n new_cube[3][2] = cube[5][0]\n new_cube[3][1] = cube[5][1]\n \n cube[0] = turn_plane_no_side(cube[0])\n cube[0] = turn_plane_no_side(cube[0])\n new_cube[0] = turn_plane_no_side(cube[0])\n\n new_cube[2] = turn_plane_no_side(cube[2])\n return new_cube\n\ndef turn_cal(cube,face=1,times=1):\n \n times = times%4\n if times == 0:\n return cube\n\n if face == 0:\n cube = turn_cube_horizontal(cube)\n cube = turn_face(cube,times=times)\n cube = turn_cube_horizontal(cube)\n cube = turn_cube_horizontal(cube)\n cube = turn_cube_horizontal(cube)\n elif face == 1:\n cube = turn_face(cube,times=times)\n elif face == 2:\n cube = turn_cube_horizontal(cube)\n cube = turn_cube_horizontal(cube)\n cube = turn_cube_horizontal(cube)\n cube = turn_face(cube,times=times)\n cube = turn_cube_horizontal(cube)\n elif face == 3:\n cube = turn_cube_horizontal(cube)\n cube = turn_cube_horizontal(cube)\n cube = turn_face(cube,times=times)\n cube = turn_cube_horizontal(cube)\n cube = turn_cube_horizontal(cube)\n elif face == 4:\n cube = turn_cube_vertical(cube)\n cube = turn_face(cube,times=times)\n cube = turn_cube_vertical(cube)\n cube = turn_cube_vertical(cube)\n cube = turn_cube_vertical(cube)\n elif face == 5:\n cube = turn_cube_vertical(cube)\n cube = turn_cube_vertical(cube)\n cube = turn_cube_vertical(cube)\n cube = turn_face(cube,times=times)\n cube = turn_cube_vertical(cube)\n\n return cube\n\ndef fittness_cal(cube):\n completed = create_cube()\n fittness = 0\n\n for loop in range(6):\n for loop2 in range(3):\n for loop3 in range(3):\n if cube[loop][loop2][loop3] == completed[loop][loop2][loop3]:\n fittness += 1\n return fittness/(6*3*3)*100\n\ndef random_play(cube,turns):\n for loop in range(turns):\n face = random.randint(0,5)\n times = random.randint(1,3)\n cube = turn_cal(cube,face=face,times=times)\n print(str(face) + \" \" + str(times))\n return cube\n\n\n\n\ninit()\ncube = create_cube()\nplay_sound.sound_setup(\"sounds\\\\your amazing.ogg\")\nprint_cube(cube)\n\nwhile True:\n \n turns = int(input(\"turns to random: \"))\n os.system(\"cls\")\n if turns > 0:\n cube = random_play(cube,turns)\n print_cube(cube)\n fittness = fittness_cal(cube)\n print(fittness)\n \n while True:\n face = int(input(\"face number: \"))\n if face > 5 or face < 0:\n print(\"error bad input for face number!\")\n continue\n times = int(input(\"times: \"))\n \n cube = turn_cal(cube,face=face,times=times)\n \n \n \n print_cube(cube)\n fittness = fittness_cal(cube)\n print(fittness)\n\n if fittness == 100:\n play_sound.play_sound()\n break\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"545225797","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport tensorflow as tf\nfrom collections import deque\nfrom random import randint\nimport os\nimport dqn\nimport random\nimport itertools\nfrom os import path\n\ndf = pd.read_csv('AMZN.csv')\n\ndf2 = df.iloc[[0, -1]]\n\ntotal_rows = df2.index.values[1]\n\nstart = pd.to_numeric(df2['Adj Close']).values.item(0)\nend = pd.to_numeric(df2['Adj Close']).values.item(1)\n\ndef get_velocity(start, end, span) :\n return (end - start) / span\n\ndef get_v(data, span) :\n i = 0\n v = np.zeros(span).tolist()\n\n while i < len(data) - span :\n range = data[i:i+span]\n v.append(get_velocity(range.item(0), range.item(span - 1), span))\n i = i + 1\n\n return v\n\n# velocity = get_velocity(start, end, total_rows)\n\n# 0.0311424636872 => 3 cents a day\n# print velocity\n# print '---'\n\nv5 = get_v(pd.to_numeric(df['Adj Close']).values, 5)\na5 = get_v(np.asarray(v5), 5)\n\ndfV5 = pd.DataFrame(v5)\ndfA5 = pd.DataFrame(a5)\n\nv20 = get_v(pd.to_numeric(df['Adj Close']).values, 20)\na20 = get_v(np.asarray(v20), 20)\n\ndfV20 = pd.DataFrame(v20)\ndfA20 = pd.DataFrame(a20)\n\nma5 = df['Adj Close'].rolling(window=5).mean()\nma20 = df['Adj Close'].rolling(window=20).mean()\n\nema12 = df['Adj Close'].ewm(span=12).mean()\nema26 = df['Adj Close'].ewm(span=26).mean()\nmacd = ema12 - ema26\nsignal = macd.ewm(span=9).mean()\noscillator = macd - signal\n\ndata = pd.concat([df.loc[:, ['Date', 'Adj Close']], ma5, ma20, dfA5, dfA20, macd, signal, oscillator], axis=1)\ndata.columns = ['Date', 'Adj Close', '5MA', '20MA', '5A', '20A', 'MACD', 'SIGNAL', 'OSCILLATOR']\n# print pd.concat([df.loc[:, ['Date', 'Adj Close']], dfA5, dfA20], axis=1)\n\n# print data.tail(360)\n\nnp_data = data.loc[data.shape[0] - 360: data.shape[0] - 356, 'Adj Close':].as_matrix()\n\n# print np_data[4, 0]\n\n# print Decimal(3 * (-19 * -5) / 100.0).quantize(Decimal('.1'), rounding=ROUND_FLOOR)\n# print Decimal(3 * (-19 * -5) / 100.0).quantize(Decimal('.1'), rounding=ROUND_DOWN)\n# print '{0}% {1} - {2}'.format((-19 * -5), round(3 * (-19 * -5) / 100.0, 2), round(round(3 * (-19 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-18 * -5), round(3 * (-18 * -5) / 100.0, 2), round(round(3 * (-18 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-17 * -5), round(3 * (-17 * -5) / 100.0, 2), round(round(3 * (-17 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-16 * -5), round(3 * (-16 * -5) / 100.0, 2), round(round(3 * (-16 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-15 * -5), round(3 * (-15 * -5) / 100.0, 2), round(round(3 * (-15 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-14 * -5), round(3 * (-14 * -5) / 100.0, 2), round(round(3 * (-14 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-11 * -5), round(3 * (-11 * -5) / 100.0, 2), round(round(3 * (-11 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-10 * -5), round(3 * (-10 * -5) / 100.0, 2), round(round(3 * (-10 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-9 * -5), round(3 * (-9 * -5) / 100.0, 2), round(round(3 * (-9 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-8 * -5), round(3 * (-8 * -5) / 100.0, 2), round(round(3 * (-8 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-7 * -5), round(3 * (-7 * -5) / 100.0, 2), round(round(3 * (-7 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-6 * -5), round(3 * (-6 * -5) / 100.0, 2), round(round(3 * (-6 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-5 * -5), round(3 * (-5 * -5) / 
100.0, 2), round(round(3 * (-5 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-4 * -5), round(3 * (-4 * -5) / 100.0, 2), round(round(3 * (-4 * -5) / 100.0, 0)))\n# print '{0}% {1} - {2}'.format((-3 * -5), round(3 * (-3 * -5) / 100.0, 2), round(round(3 * (-3 * -5) / 100.0, 0)))\n\n# print '{0} {1} {2} {3}'.format(np_data[4, 0], 10000 // np_data[4, 0], math.floor((10000 // np_data[4, 0]) * (1 * 5 / 100.0)), 1 * 5)\n# print '{0} {1} {2} {3}'.format(np_data[4, 0], 10000 // np_data[4, 0], math.floor((10000 // np_data[4, 0]) * (2 * 5 / 100.0)), 2 * 5)\n# print '{0} {1} {2} {3}'.format(np_data[4, 0], 10000 // np_data[4, 0], math.floor((10000 // np_data[4, 0]) * (3 * 5 / 100.0)), 3 * 5)\n# print '{0} {1} {2} {3}'.format(np_data[4, 0], 10000 // np_data[4, 0], math.floor((10000 // np_data[4, 0]) * (4 * 5 / 100.0)), 4 * 5)\n# print '{0} {1} {2} {3}'.format(np_data[4, 0], 10000 // np_data[4, 0], math.floor((10000 // np_data[4, 0]) * (5 * 5 / 100.0)), 5 * 5)\n\n\n# print(data.loc[data.shape[0] - 360: data.shape[0] - 356, 'Adj Close':].as_matrix())\n# print(data.loc[data.shape[0] - 359: data.shape[0] - 355, 'Adj Close':].as_matrix())\n#\n# print(np.ravel(data.loc[data.shape[0] - 360: data.shape[0] - 356, 'Adj Close':].as_matrix())[20])\n# print(np.ravel(data.loc[data.shape[0] - 359: data.shape[0] - 355, 'Adj Close':].as_matrix())[20])\n\n# plt.figure()\n# plt.plot(dfA5.tail(30))\n# plt.plot(dfA20.tail(30))\n# plt.grid()\n# plt.show()\n\nprop_count = 8\n# take last 20 screens as input with 8 properties each (price, 5ma, 20ma, 5a, 20a, macd, signal, oscillator)\nnum_screen = 20\ninput_size = num_screen * prop_count + 1\noutput_size = 41\nminibatch_size = 30\n\nstarting_point = 360\n\n# discount factor\ndis = 0.9\n# buffer size\nREPLAY_MEMORY = 50000\n\nmax_episodes = 2000\n# store the previous observations in replay memory\nreplay_buffer = deque()\n\nlast_100_game_reward = deque()\n\ncsv = np.zeros((max_episodes, starting_point - num_screen))\n\ndef get_copy_var_ops(dest_scope_name=\"target\", src_scope_name=\"main\"):\n\n # Copy variables src_scope to dest_scope\n op_holder = []\n\n src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name)\n dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name)\n\n for src_var, dest_var in zip(src_vars, dest_vars):\n op_holder.append(dest_var.assign(src_var.value()))\n\n return op_holder\n\ndef _slice(data, start, span=19):\n return np.ravel(data.loc[data.shape[0] - start: data.shape[0] - (start - span), 'Adj Close':].as_matrix())\n\ndef ddqn_replay_train(mainDQN, targetDQN, train_batch):\n '''\n Double DQN implementation\n :param mainDQN: main DQN\n :param targetDQN: target DQN\n :param train_batch: minibatch for train\n :return: loss\n '''\n x_stack = np.empty(0).reshape(0, mainDQN.input_size)\n y_stack = np.empty(0).reshape(0, mainDQN.output_size)\n\n # Get stored information from the buffer\n for state, action, reward, next_state, done in train_batch:\n Q = mainDQN.predict(state)\n\n # terminal?\n if done:\n Q[0, action] = reward\n else:\n # Double DQN: y = r + gamma * targetDQN(s')[a] where\n # a = argmax(mainDQN(s'))\n Q[0, action] = reward + dis * \\\n targetDQN.predict(next_state)[\n 0, np.argmax(mainDQN.predict(next_state))]\n\n y_stack = np.vstack([y_stack, Q])\n x_stack = np.vstack([x_stack, state])\n\n # Train our network using target and predicted Q values on each episode\n return mainDQN.update(x_stack, y_stack)\n\nwith tf.Session() as sess:\n mainDQN = dqn.DQN(sess, input_size, output_size, 
name=\"main\")\n targetDQN = dqn.DQN(sess, input_size, output_size, name=\"target\")\n tf.global_variables_initializer().run()\n\n # writer = tf.summary.FileWriter(path.abspath(path.join(os.sep, '/Users/seanlee', 'logdir')), sess.graph)\n\n copy_ops = get_copy_var_ops(dest_scope_name=\"target\", src_scope_name=\"main\")\n sess.run(copy_ops)\n\n # e = 0.1\n\n for episode in range(max_episodes):\n e = 1. / ((episode / 10) + 1)\n # print(\"E: {}\".format(e))\n done = False\n step_count = 1\n cash = 10000\n holdings = 0\n state = np.append(_slice(data, starting_point), 0)\n next_state = []\n rewards = []\n\n while not done:\n # del rewards[:]\n if np.random.rand(1) < e:\n # Explore\n # Random action between -20 and 20\n action = randint(-20, 20)\n # action = env.action_space.sample()\n else:\n # Exploit\n # Choose an action greedily from the Q-network\n action = np.argmax(mainDQN.predict(state)) - 20\n\n # Get new state and reward from environment\n # next_state, reward, done, _ = _step(action, step_count)\n next_state = _slice(data, starting_point - step_count)\n price = next_state[prop_count * num_screen - prop_count]\n\n if (action < 0 and holdings < 1) or action == 0:\n # cannot sell while no shares are held. thus do nothing\n reward = cash + (price * holdings)\n # print(\"{} \\t{}% \\tCash: {} \\tHoldings: {} \\tReward: {} \\tsteps: {}\".format(0, action*5, cash, holdings, reward, step_count))\n elif action < 0 and holdings > 0:\n qty = round(round(holdings * (action * -5) / 100.0, 0))\n holdings = holdings - qty\n cash = cash + (price * qty)\n reward = cash + (price * holdings)\n # print(\"{} \\t{}% \\tCash: {} \\tHoldings: {} \\tReward: {} \\tsteps: {}\".format(int(round(qty)), action*5, cash, holdings, reward, step_count))\n elif action > 0:\n qty = math.floor((cash // price) * (action * 5 / 100.0))\n holdings = holdings + qty\n cash = cash - (price * qty)\n reward = cash + (price * holdings)\n # print(\"{} \\t{}% \\tCash: {} \\tHoldings: {} \\tReward: {} \\tsteps: {}\".format(int(round(qty)), action*5, cash, holdings, reward, step_count))\n\n next_state = np.append(next_state, reward)\n\n if starting_point - step_count <= num_screen:\n done = True\n else:\n done = False\n\n csv[episode][step_count - 1] = reward\n # Save the experience to our buffer\n replay_buffer.append((state, action, reward, next_state, done))\n if len(replay_buffer) > REPLAY_MEMORY:\n replay_buffer.popleft()\n\n state = next_state\n step_count += 1\n\n # if e > 0.0001:\n # e -= (0.1 - 0.0001) / max_episodes\n\n # print(\"Episode: {} steps: {}\".format(episode, step_count))\n\n if episode % 10 == 1: # train every 10 episode\n # Get a random batch of experiences.\n for _ in range(50):\n minibatch_start = randint(0, len(replay_buffer) - minibatch_size)\n minibatch = list(itertools.islice(replay_buffer, minibatch_start, minibatch_start + minibatch_size))\n # minibatch = random.sample(replay_buffer, 25)\n loss, _ = ddqn_replay_train(mainDQN, targetDQN, minibatch)\n\n # copy q_net -> target_net\n sess.run(copy_ops)\n\n # print(\"Cash: {} Holdings: {} Price: {}\".format(cash, holdings, next_state[20]))\n # last_100_game_reward.append(cash + (holdings * next_state[20]))\n #\n # if len(last_100_game_reward) > 100:\n # last_100_game_reward.popleft()\n #\n # avg_reward = np.mean(last_100_game_reward)\n #\n # if avg_reward > 199:\n # print(\"Game Cleared in {:f} episodes with avg reward {:f}\".format(episode, avg_reward))\n # break\n\n # pd.DataFrame(csv).to_csv('333.csv')\n\n # Predict on new state\n # print 
mainDQN.predict()\n\n plt.figure()\n plt.plot(csv[:, -1])\n plt.grid()\n plt.show()","sub_path":"examples/agents/amzn.py","file_name":"amzn.py","file_ext":"py","file_size_in_byte":11539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"245903798","text":"from functools import partial\nimport numpy\nimport h5py\nfrom lazyflow.request import Request\nfrom lazyflow.graph import Operator, InputSlot, OutputSlot, OperatorWrapper\nfrom lazyflow.operators import OpCompressedCache, OpVigraLabelVolume, OpFilterLabels, OpSelectLabel, OpMaskedSelect, OpDtypeView\nfrom lazyflow.operators.ioOperators import OpH5WriterBigDataset\nfrom lazyflow.operators.opReorderAxes import OpReorderAxes\n\nfrom lazyflow.utility import PathComponents\nfrom ilastik.utility import bind\nfrom ilastik.applets.splitBodyPostprocessing.opSplitBodyPostprocessing import OpAccumulateFragmentSegmentations, OpMaskedWatershed\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass OpSplitBodySupervoxelExport(Operator):\n\n DatasetInfos = InputSlot(level=1) # Used to extract the other datasets from the segmentation file.\n WorkingDirectory = InputSlot()\n \n RawData = InputSlot() # (Display only)\n InputData = InputSlot() # The membrane probabilities\n RavelerLabels = InputSlot()\n Supervoxels = InputSlot()\n AnnotationBodyIds = InputSlot() # The list of bodies actually edited\n # (Must be connected to ensure that setupOutputs will be \n # called resize the multi-slots when necessary)\n\n # For these multislots, N = number of raveler bodies that were edited\n EditedRavelerBodies = OutputSlot(level=1)\n MaskedSupervoxels = OutputSlot(level=1)\n RelabeledSupervoxels = OutputSlot(level=1)\n FilteredMaskedSupervoxels = OutputSlot(level=1)\n HoleFilledSupervoxels = OutputSlot(level=1)\n \n FinalSupervoxels = OutputSlot()\n SupervoxelMapping = OutputSlot()\n\n # RavelerLabels ------>------------------------------------------------------------------------------------------------------------------------------------------------>------------------------------------------------------------------------------------------------------------------------------------\n # \\ \\ \\\n # \\ Supervoxels -- MaskedSupervoxels \\ \\\n # \\ \\ / \\ \\\n # AnnotationBodyIds ----> opSelectLabel[n] --> opMaskedSelect[n] --> opRelabelMaskedSupervoxels[n] --> opRelabeledMaskedSupervoxelsCaches[n] --> opSmallLabelFilter[n] --> opMaskedWatershed[n] --> opMaskedWatershedCaches[n] --> opRelabelMergedSupervoxels[n] --> opRelabeledMergedSupervoxelsCaches[n] --> opAccumulateFinalImage --> opFinalCache --> FinalSupervoxels\n # \\ / \\ \\\n # ------------------------------------------------------------------------------------------------------------------------------- RelabeledSupervoxels (SupervoxelMapping)\n\n def __init__(self, *args, **kwargs):\n super( OpSplitBodySupervoxelExport, self ).__init__(*args, **kwargs)\n\n # HACK: Be sure that the output slots are resized if the raveler body list changes\n self.AnnotationBodyIds.notifyDirty( bind(self._setupOutputs) )\n\n # Prepare a set of OpSelectLabels for easy access to raveler object masks\n self._opSelectLabel = OperatorWrapper( OpSelectLabel, parent=self, broadcastingSlotNames=['Input'] )\n self._opSelectLabel.Input.connect( self.RavelerLabels )\n self.EditedRavelerBodies.connect( self._opSelectLabel.Output )\n\n # Mask in the body of interest\n self._opMaskedSelect = OperatorWrapper( OpMaskedSelectUint32, parent=self, broadcastingSlotNames=['Input'] )\n self._opMaskedSelect.Input.connect( self.Supervoxels )\n self._opMaskedSelect.Mask.connect( self._opSelectLabel.Output )\n self.MaskedSupervoxels.connect( self._opMaskedSelect.Output ) \n\n # Must run CC before filter, to ensure that discontiguous labels can't avoid the filter.\n 
self._opRelabelMaskedSupervoxels = OperatorWrapper( OpVigraLabelVolume, parent=self )\n self._opRelabelMaskedSupervoxels.Input.connect( self._opMaskedSelect.Output )\n \n self._opRelabeledMaskedSupervoxelCaches = OperatorWrapper( OpCompressedCache, parent=self )\n self._opRelabeledMaskedSupervoxelCaches.Input.connect( self._opRelabelMaskedSupervoxels.Output )\n\n # Filter out the small CC to eliminate tiny pieces of supervoxels that overlap the mask boundaries\n self._opSmallLabelFilter = OperatorWrapper( OpFilterLabels, parent=self, broadcastingSlotNames=['MinLabelSize'] )\n self._opSmallLabelFilter.MinLabelSize.setValue( 10 )\n self._opSmallLabelFilter.Input.connect( self._opRelabeledMaskedSupervoxelCaches.Output )\n\n self._opSmallLabelFilterCaches = OperatorWrapper( OpCompressedCache, parent=self )\n self._opSmallLabelFilterCaches.Input.connect( self._opSmallLabelFilter.Output )\n self.FilteredMaskedSupervoxels.connect( self._opSmallLabelFilterCaches.Output )\n\n # Re-fill the holes left by the filter using region growing (with a mask)\n self._opMaskedWatersheds = OperatorWrapper( OpMaskedWatershed, parent=self )\n self._opMaskedWatersheds.Input.connect( self.InputData )\n self._opMaskedWatersheds.Mask.connect( self._opSelectLabel.Output )\n self._opMaskedWatersheds.Seeds.connect( self._opSmallLabelFilterCaches.Output )\n\n # Cache is necessary because it ensures that the entire volume is used for watershed.\n self._opMaskedWatershedCaches = OperatorWrapper( OpCompressedCache, parent=self )\n self._opMaskedWatershedCaches.Input.connect( self._opMaskedWatersheds.Output )\n self.HoleFilledSupervoxels.connect( self._opMaskedWatershedCaches.Output )\n\n # Relabel the supervoxels in the mask to ensure contiguous supervoxels (after mask) and consecutive labels\n self._opRelabelMergedSupervoxels = OperatorWrapper( OpVigraLabelVolume, parent=self )\n self._opRelabelMergedSupervoxels.Input.connect( self._opMaskedWatershedCaches.Output )\n \n self._opRelabeledMergedSupervoxelCaches = OperatorWrapper( OpCompressedCache, parent=self )\n self._opRelabeledMergedSupervoxelCaches.Input.connect( self._opRelabelMergedSupervoxels.Output )\n self.RelabeledSupervoxels.connect( self._opRelabeledMergedSupervoxelCaches.Output )\n\n self._opAccumulateFinalImage = OpAccumulateFragmentSegmentations( parent=self )\n self._opAccumulateFinalImage.RavelerLabels.connect( self.RavelerLabels )\n self._opAccumulateFinalImage.FragmentSegmentations.connect( self._opRelabeledMergedSupervoxelCaches.Output )\n \n self._opFinalCache = OpCompressedCache( parent=self )\n self._opFinalCache.Input.connect( self._opAccumulateFinalImage.Output )\n self.FinalSupervoxels.connect( self._opFinalCache.Output )\n self.SupervoxelMapping.connect( self._opAccumulateFinalImage.Mapping )\n \n def setupOutputs(self):\n raveler_bodies = self.AnnotationBodyIds.value\n num_bodies = len(raveler_bodies)\n\n # Map raveler body ids to the subslots that need them.\n self._opSelectLabel.SelectedLabel.resize( num_bodies )\n for index, raveler_body_id in enumerate(raveler_bodies):\n self._opSelectLabel.SelectedLabel[index].setValue( raveler_body_id )\n\n def execute(self, slot, subindex, roi, result):\n assert False, \"Can't execute slot {}. 
All slots should be connected to internal operators\".format( slot.name )\n\n def propagateDirty(self, slot, subindex, roi):\n # If anything is dirty, the entire output is dirty\n self.FinalSupervoxels.setDirty()\n\n def exportFinalSupervoxels(self, outputPath, axisorder, progressCallback=None):\n \"\"\"\n Executes the export process within a request.\n The (already-running) request is returned, in case you want to wait for it or monitor its progress.\n \"\"\"\n assert self.FinalSupervoxels.ready(), \"Can't export yet: The final segmentation isn't ready!\"\n\n logger.info(\"Starting Final Segmentation Export...\")\n \n opTranspose = OpReorderAxes( parent=self )\n opTranspose.AxisOrder.setValue( axisorder )\n opTranspose.Input.connect( self.FinalSupervoxels )\n \n f = h5py.File(outputPath, 'w')\n opExporter = OpH5WriterBigDataset(parent=self)\n opExporter.hdf5File.setValue( f )\n opExporter.hdf5Path.setValue( 'stack' )\n opExporter.Image.connect( opTranspose.Output )\n if progressCallback is not None:\n opExporter.progressSignal.subscribe( progressCallback )\n \n req = Request( partial(self._runExporter, opExporter) )\n\n def cleanOps():\n opExporter.cleanUp()\n opTranspose.cleanUp()\n \n def handleFailed( exc, exc_info ):\n cleanOps() \n f.close()\n import traceback\n traceback.print_tb(exc_info[2])\n msg = \"Final Supervoxel export FAILED due to the following error:\\n{}\".format( exc )\n logger.error( msg )\n\n def handleFinished( result ):\n # Generate the mapping transforms dataset\n mapping = self._opAccumulateFinalImage.Mapping.value\n num_labels = mapping.keys()[-1][1]\n transform = numpy.zeros( shape=(num_labels, 2), dtype=numpy.uint32 )\n for (start, stop), body_id in mapping.items():\n for supervoxel_label in range(start, stop):\n transform[supervoxel_label][0] = supervoxel_label\n if body_id == -1:\n # Special case: -1 means \"identity transform\" for this supervoxel\n # (Which is really an untouched raveler body)\n transform[supervoxel_label][1] = supervoxel_label\n else:\n transform[supervoxel_label][1] = body_id\n\n # Save the transform before closing the file\n f.create_dataset('transforms', data=transform)\n\n # Copy all other datasets from the original segmentation file.\n ravelerSegmentationInfo = self.DatasetInfos[2].value\n pathComponents = PathComponents(ravelerSegmentationInfo.filePath, self.WorkingDirectory.value)\n with h5py.File(pathComponents.externalPath, 'r') as originalFile:\n for k,dset in originalFile.items():\n if k not in ['transforms', 'stack']:\n f.copy(dset, k)\n \n try:\n cleanOps()\n logger.info(\"FINISHED Final Supervoxel Export\")\n finally:\n f.close()\n\n def handleCancelled():\n cleanOps()\n f.close()\n logger.info( \"Final Supervoxel export was cancelled!\" )\n\n req.notify_failed( handleFailed )\n req.notify_finished( handleFinished )\n req.notify_cancelled( handleCancelled )\n \n req.submit()\n return req # Returned in case the user wants to cancel it.\n\n def _runExporter(self, opExporter):\n # Trigger the export\n success = opExporter.WriteImage.value\n assert success\n return success\n\nclass OpMaskedSelectUint32(Operator):\n # Upstream watershed is output as signed int32.\n # We must produce uint32 for the label op.\n Input = InputSlot()\n Mask = InputSlot()\n Output = OutputSlot()\n \n def __init__(self, *args, **kwargs):\n super( OpMaskedSelectUint32, self ).__init__( *args, **kwargs )\n \n self._opMaskedSelect = OpMaskedSelect( parent=self )\n self._opMaskedSelect.Input.connect( self.Input )\n self._opMaskedSelect.Mask.connect( self.Mask 
)\n \n self._opConvertDtype = OpDtypeView( parent=self )\n self._opConvertDtype.Input.connect( self._opMaskedSelect.Output )\n self._opConvertDtype.OutputDtype.setValue( numpy.uint32 )\n self.Output.connect( self._opConvertDtype.Output )\n\n def setupOutputs(self):\n pass\n \n def execute(self, slot, subindex, roi, result):\n pass\n\n def propagateDirty(self, slot, subindex, roi):\n pass\n\n\n\n","sub_path":"ilastik/applets/splitBodySupervoxelExport/opSplitBodySupervoxelExport.py","file_name":"opSplitBodySupervoxelExport.py","file_ext":"py","file_size_in_byte":13289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"398890996","text":"\"\"\"\nTest obs package to make sure that the header in output csv files is\ncorrect.\n\"\"\"\nimport os\nimport numpy as np\n\ntry:\n import pymake\nexcept:\n msg = 'Error. Pymake package is not available.\\n'\n msg += 'Try installing using the following command:\\n'\n msg += ' pip install https://github.com/modflowpy/pymake/zipball/master'\n raise Exception(msg)\n\ntry:\n import flopy\nexcept:\n msg = 'Error. FloPy package is not available.\\n'\n msg += 'Try installing using the following command:\\n'\n msg += ' pip install flopy'\n raise Exception(msg)\n\nfrom framework import testing_framework\nfrom simulation import Simulation\n\ncell_dimensions = (300,)\nex = ['gwf_obs02']\nexdirs = []\nfor s in ex:\n exdirs.append(os.path.join('temp', s))\nddir = 'data'\n\nh0, h1 = 1., 0.\nnlay, nrow, ncol = 1, 10, 10\n\ndef get_model(idx, dir):\n nper = 1\n perlen = [5.0]\n nstp = [1]\n tsmult = [1.]\n delr = 1.\n delc = 1.\n top = 1.\n laytyp = 0\n botm = [0.]\n hk = 1.0\n\n nouter, ninner = 100, 300\n hclose, rclose, relax = 1e-6, 1e-6, 1.\n\n tdis_rc = []\n for i in range(nper):\n tdis_rc.append((perlen[i], nstp[i], tsmult[i]))\n\n name = ex[idx]\n\n # build MODFLOW 6 files\n ws = dir\n sim = flopy.mf6.MFSimulation(sim_name=name, version='mf6',\n exe_name='mf6',\n sim_ws=ws)\n # create tdis package\n flopy.mf6.ModflowTdis(sim, time_units='DAYS',\n nper=nper, perioddata=tdis_rc)\n\n # create iterative model solution and register the gwf model with it\n flopy.mf6.ModflowIms(sim, print_option='SUMMARY',\n no_ptcrecord=\"ALL\",\n outer_dvclose=hclose,\n outer_maximum=nouter,\n inner_maximum=ninner,\n inner_dvclose=hclose, rcloserecord=rclose,\n linear_acceleration='CG',\n relaxation_factor=relax,\n )\n\n # create gwf model\n gwfname = name\n gwf = flopy.mf6.MFModel(sim, model_type='gwf6', modelname=gwfname,\n model_nam_file='{}.nam'.format(gwfname))\n gwf.name_file.save_flows = True\n\n flopy.mf6.ModflowGwfdis(gwf, nlay=nlay, nrow=nrow, ncol=ncol,\n delr=delr, delc=delc,\n top=top, botm=botm,\n idomain=np.ones((nlay, nrow, ncol),\n dtype=np.int))\n\n # build list of obs csv files to create\n obsdict = {}\n for i in range(nrow):\n obslst = [('h_{}_{}'.format(i, j), 'head', (0, i, j))\n for j in range(ncol)]\n fname = '{}.{}.obs.csv'.format(name, i)\n obsdict[fname] = obslst\n\n flopy.mf6.ModflowUtlobs(gwf, pname='head_obs',\n digits=20,\n continuous=obsdict)\n\n # initial conditions\n flopy.mf6.ModflowGwfic(gwf, strt=1.)\n\n # node property flow\n flopy.mf6.ModflowGwfnpf(gwf,\n save_specific_discharge=True,\n icelltype=laytyp,\n k=hk,\n k33=hk)\n\n # chd files\n chdlist = [[(0, 0, 0), h0]]\n chdlist += [[(0, nrow - 1, ncol - 1), h1]]\n flopy.mf6.ModflowGwfchd(gwf,\n stress_period_data=chdlist,\n save_flows=False,\n print_flows=True,\n pname='CHD-1')\n\n # output control\n flopy.mf6.ModflowGwfoc(gwf,\n head_filerecord='{}.hds'.format(name),\n printrecord=[('BUDGET', 'LAST'), ('HEAD', 'LAST')],\n saverecord=[('HEAD', 'LAST')],\n )\n\n return sim\n\n\ndef build_models():\n for idx, dir in enumerate(exdirs):\n sim = get_model(idx, dir)\n sim.write_simulation()\n return\n\n\ndef eval_model(sim):\n print('evaluating model observations...')\n name = ex[sim.idxsim]\n headcsv = np.empty((nlay, nrow, ncol), dtype=np.float)\n for i in range(nrow):\n fname = '{}.{}.obs.csv'.format(name, i)\n print('Loading and testing {}'.format(fname))\n fname = os.path.join(sim.simpath, fname)\n rec = np.genfromtxt(fname, names=True, delimiter=',', deletechars='')\n for j in range(ncol):\n 
obsname_true = 'h_{}_{}'.format(i, j).upper()\n obsname_found = rec.dtype.names[j + 1].upper()\n errmsg = 'obsname in {} is incorrect. Looking for \"{}\" but found \"{}\"'\n errmsg = errmsg.format(fname, obsname_true, obsname_found)\n assert obsname_true == obsname_found, errmsg\n headcsv[0, i, :] = np.array(rec.tolist()[1:])\n\n fn = os.path.join(sim.simpath, '{}.hds'.format(name))\n hobj = flopy.utils.HeadFile(fn)\n headbin = hobj.get_data()\n\n assert np.allclose(headcsv, headbin), 'headcsv not equal head from binary file'\n\n return\n\n\n# - No need to change any code below\ndef test_mf6model():\n # initialize testing framework\n test = testing_framework()\n\n # build all of the models\n build_models()\n\n # run the test models\n for idx, dir in enumerate(exdirs):\n yield test.run_mf6, Simulation(dir, exfunc=eval_model, idxsim=idx)\n\n return\n\n\ndef main():\n # initialize testing framework\n test = testing_framework()\n\n # build all of the models\n build_models()\n\n # run the test models\n for idx, dir in enumerate(exdirs):\n sim = Simulation(dir, exfunc=eval_model, idxsim=idx)\n test.run_mf6(sim)\n\n return\n\n\nif __name__ == \"__main__\":\n # print message\n print('standalone run of {}'.format(os.path.basename(__file__)))\n\n # run main routine\n main()\n","sub_path":"Groundwater/mf6/autotest/test_gwf_obs02.py","file_name":"test_gwf_obs02.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"255968483","text":"def solve():\n\tsum_swifts = 0\n\tsum_semaphores = 0\n\n\tcounter = 0\n\n\tfor i in range(n):\n\t\tsum_swifts = sum_swifts + scores_swifts[i]\n\t\tsum_semaphores = sum_semaphores + scores_semaphores[i]\n\n\t\tif sum_swifts == sum_semaphores:\n\t\t\tcounter = i + 1\n\n\treturn counter\n\n\nn = int(input())\nscores_swifts = [int(s) for s in input().split(' ')]\nscores_semaphores = [int(s) for s in input().split(' ')]\n\nsol = solve()\nprint(sol)","sub_path":"2017/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"266591622","text":"\"\"\"\r\nCreate two lists - students and marks\r\nCreate a dictionary from these two lists using dictionary comprehension\r\nUse names as keys and marks as values\r\n\"\"\"\r\n\r\n# lists of keys and values\r\nlstnames = ['Sunil', 'Sachin', 'Rahul', 'Kapil', 'Rohit']\r\nlstmarks = [54, 65, 45, 67, 78]\r\n\r\n# dictionary comprehension\r\nd = {k:v for (k,v) in zip(lstnames, lstmarks)}\r\nprint(d)","sub_path":"Chapter-9-Dictionaries/Examples/3-create-dictionary-from-two-lists.py","file_name":"3-create-dictionary-from-two-lists.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"28702522","text":"# License\n'''\nCode by Rishitha\nApril 29,2020\nReleased under GNU GPL\n'''\n\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nfrom pylab import*\nimport control\nfrom control import tf\nfrom scipy.interpolate import interp1d\n\n#if using termux\nimport subprocess\nimport shlex\n#end if\n\n#Defining the transfer function \ns1 = signal.lti([1778.8], [1,27,207,405]) #Transfer Function = 75(1+0.2s)/s(s^2+16s+100)\n\n#signal.bode takes transfer function as input and returns frequency,magnitude and phase arrays\nw,mag,phase = signal.bode(s1)\nsys = tf([1778.8], [1,27,207,405])\ngm, pm, Wgc, Wpc = control.margin(sys)\nfreq_as_fn_of_w = interp1d(phase, w)\nWgc = freq_as_fn_of_w(-140)\nGm = -20*log10(gm)\nGm = 0\nprint(\"Phase Margin=\",pm) #Phase margin\nprint(\"Gain Margin=\",Gm) #Gain margin\nprint(\"Gain crossover frequency(dB)=\",Wgc) #Gain crossover freq.(dB)\nprint(\"Phase crossover frequency(dB)=\",Wpc) #Phase crossover freq.(dB)\n\nplt.subplot(2,1,1)\nplt.plot(Wgc,0,'o', label='_nolegend_')\nplt.ylabel('Magnitude(deg)')\nplt.text(3.5,-10, '({}, {})'.format(Wgc.round(2),0))\nplt.semilogx(w, mag,'b') \nplt.axhline(y = 0,xmin=0,xmax= 1,color = 'b',linestyle='dashed')\nplt.axvline(x = Wgc, ymin = -140 ,color='k',linestyle='dashed')\nplt.legend(['0 dB line'], loc= 'lower left')\nplt.grid() \n\n\nplt.subplot(2,1,2)\nplt.xlabel('Frequency(rad/s)')\nplt.ylabel('Phase(deg)')\nplt.plot(14.39,-180,'x')\nplt.text(14.39,-180, '({}, {})'.format(14.39,-180))\nplt.semilogx(w,phase, label='_nolegend_') \nplt.plot(Wgc,-140,'o', label='_nolegend_')\nplt.axhline(y = -140,xmin=0,xmax= Wgc,color = 'r',linestyle='dashed')\nplt.axvline(x = Wgc, ymin = -140 ,color='k',linestyle='dashed')\nplt.text(2,-180, '({}, {})'.format(Wgc.round(2),-140))\nplt.legend(['-140 deg line'], loc= 'lower left')\nplt.grid() \n\n\n#if using termux\n\nplt.savefig('./figs/ee18btech11033/ee18btech11033_ver2.pdf')\nplt.savefig('./figs/ee18btech11033/ee18btech11033_ver2.eps')\nsubprocess.run(shlex.split(\"termux-open ./figs/ee18btech11033/ee18btech11033_ver2.pdf\"))\n\n#else \n\n \n#plt.show()\n","sub_path":"codes/ee18btech11033/ee18btech11033_ver2.py","file_name":"ee18btech11033_ver2.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"456534656","text":"# encoding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\nimport os\n\nfrom datetime import date\nfrom urlparse import urlparse\n\nfrom django import template\nfrom django.db.models import Model\nfrom django.conf import settings\nfrom django.core.serializers.json import DjangoJSONEncoder\n\nfrom cmdb.models import PKG_STATUS_CHOICES, PkgStatus, UploadStatus\nfrom autopkg.views import DeployView\nfrom menus.views import get_perms, get_menus_map\n\nregister = template.Library()\nencoder = DjangoJSONEncoder()\n\nPATH_RE = re.compile(r'((&)?page=(\\d+))')\n\n\n@register.filter\ndef json_encode(data):\n if isinstance(data, Model):\n data = {field.name: getattr(data, field.name) for field in data._meta.fields}\n\n return encoder.encode(data)\n\n\n@register.filter\ndef get_pkg_status(key):\n return dict(PKG_STATUS_CHOICES).get(key, \"\")\n\n\n@register.filter\ndef get_label_class(key):\n if key in [PkgStatus.deploy_success, PkgStatus.rollback_success]:\n return \"pass\"\n elif key in [PkgStatus.abort]:\n return \"stop\"\n else:\n return \"nopass\"\n\n\n@register.filter\ndef get_pkg_status_img(status):\n if status in [PkgStatus.deploy_success, PkgStatus.rollback_success]:\n return os.path.join(settings.STATIC_URL, \"kylin/img/pass.png\")\n elif status in DeployView.deploying_status:\n return os.path.join(settings.STATIC_URL, \"public/img/refresh.png\")\n elif status in [PkgStatus.abort]:\n return os.path.join(settings.STATIC_URL, \"kylin/img/stop.png\")\n else:\n return os.path.join(settings.STATIC_URL, \"kylin/img/nopass.png\")\n\n\n@register.filter\ndef status_icon(status):\n if status in [PkgStatus.deploy_success, PkgStatus.rollback_success]:\n return ' '\n elif status in DeployView.deploying_status:\n return ' '\n elif status in [PkgStatus.abort]:\n return ' '\n else:\n return ' '\n\n\n@register.filter\ndef pkg_host_label_class(key):\n if key in [UploadStatus.start_success, UploadStatus.rollback_success, UploadStatus.restart_success]:\n return \"label-success\"\n elif key in [UploadStatus.start_fail, UploadStatus.rollback_fail, UploadStatus.restart_fail]:\n return \"label-danger\"\n else:\n return \"label-default\"\n\n\n@register.filter\ndef get_map_value(key, _map):\n return _map.get(key, \"\")\n\n\n@register.filter\ndef disable_status(pkg):\n if pkg.status not in DeployView.deploying_status:\n return \"disabled\"\n else:\n return \"\"\n\n\n@register.filter\ndef has_perm(request, menu):\n if request.session.get(\"menus\"):\n context = request.session[\"menus\"]\n perms = context.get(\"perm_names\", None)\n else:\n perms = get_perms(request, get_menus_map())\n return (perms is None) or (str(menu.unique_id) in perms)\n\n\n@register.filter\ndef concat_page(path, page):\n urlparts = urlparse(path)\n path = path.replace(\"?{}\".format(urlparts.query), \"\")\n query = urlparts.query\n if not query:\n return \"{path}?page={page}\".format(path=path, page=page)\n query_parts = query.split(\"&\")\n result_query = []\n has_page = False\n for q in query_parts:\n if q.startswith(\"page\"):\n has_page = True\n result_query.append(\"page={}\".format(page))\n else:\n result_query.append(q)\n query_str = \"&\".join(result_query)\n if not has_page:\n query_str = \"{}&page={}\".format(query_str, page)\n return \"{path}?{qs}\".format(path=path, qs=query_str)\n\n\n@register.filter\ndef env_label_class(env):\n if env == \"DEVELOP\":\n return \"cor_blue\"\n else:\n return \"cor_green\"\n\n\n@register.filter\ndef get_log_url(log):\n log_name = log.log_name\n if not 
log_name.endswith(\".tar.gz\"):\n log_name = \"{}.tar.gz\".format(log_name)\n url = settings.LOG_DOWNLOAD_URL.format(\n date=date.today().strftime(\"%Y%m%d\"), artifact_id=log.artifact_id, log_name=log_name)\n return url\n\n\n@register.filter\ndef to_name(directory):\n return directory.replace(\"/\", \"_\")\n","sub_path":"autopkg/templatetags/mytags.py","file_name":"mytags.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"278314915","text":"import os\nimport sys\nimport time\n# Several Polkit functions used to depend pot.py (ALL GLOBAL) #\ndef polerror2a():\n\tprint('Error in function polt2a: Equal - int1=int2')\ndef polt2a(int1,int2):\n\tif(int1 > int2):\n\t\tglobal result\n\t\tresult = 1\n\telif(int1 < int2):\n\t\tglobal result\n\t\tresult = 0\n\telse:\n\t\traise polerror2a()\ndef poltquit():\n\tprint('PoltPy either has an error or is done. Exitting')\n\tsys.exit()\n","sub_path":"libdewstar/polkit.py","file_name":"polkit.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"461539375","text":"#!/usr/bin/python\nimport sys\nimport nwb\nimport numpy as np\nfrom nwb.nwbco import *\n\n\"\"\" \nStore extracellular ephys data\n\n\"\"\"\n\n########################################################################\n# create a new NWB file\n# several settings are specified when doing so. these can be supplied within\n# the NWB constructor or defined in a dict, as in in this example\nsettings = {}\nsettings[\"filename\"] = \"sample_extracellular_spikes.nwb\"\n\n# each file should have a descriptive globally unique identifier \n# that specifies the lab and this experiment session\n# the function nwb.create_identifier() is recommended to use as it takes\n# the string and appends the present date and time\nsettings[\"identifier\"] = nwb.create_identifier(\"extracellular spikes example\")\n\n# indicate that it's OK to overwrite exting file\nsettings[\"overwrite\"] = True\n\n# specify the start time of the experiment. all times in the NWB file\n# are relative to experiment start time\n# if the start time is not specified the present time will be used\nsettings[\"start_time\"] = \"Sat Jul 04 2015 3:14:16\"\n\n# provide one or two sentences that describe the experiment and what\n# data is in the file\nsettings[\"description\"] = \"Test file demonstrating a simple extracellular ephys recording\"\n\n# create the NWB object. this manages the file\nprint(\"Creating \" + settings[\"filename\"])\nneurodata = nwb.NWB(**settings)\n\n########################################################################\n# create two electrical series, one with a single electrode and one with many\n# then create a spike event series\n\n# first create the electrode map\n# example simulated recording is made from two 2-electrode probes named\n# 'p0' and 'p1'. we need to define the locations of the electrodes\n# relative to each probe, and the location of the probes\n# electrode coordinates are in meters and their positions \n# are relative to each other. the location of the probe itself is\n# stored separately. using absolute coordinates here, if they are known, \n# is still OK\nelectrode_map = [[0, 0, 0], [0, 1.5e-6, 0], [0, 0, 0], [0, 3.0e-5, 0]]\nelectrode_group = [ \"p0\", \"p0\", \"p1\", \"p1\" ]\nneurodata.set_metadata(EXTRA_ELECTRODE_MAP, electrode_map)\nneurodata.set_metadata(EXTRA_ELECTRODE_GROUP, electrode_group)\n# set electrode impedances\nneurodata.set_metadata(EXTRA_IMPEDANCE, [ 1e6, 1.1e6, 1.2e6, 1.3e6 ])\n\n# define the placement of each probe\nneurodata.set_metadata(EXTRA_SHANK_LOCATION(\"p0\"), \"CA1, left hemisphere, stereotactic coordinates xx, yy\")\nneurodata.set_metadata(EXTRA_SHANK_LOCATION(\"p1\"), \"CA3, left hemisphere, stereotactic coordinates xx, yy\")\n\n########################################################################\n# the example is of two 2-electrode probes. the electrode data from these\n# probes can be stored individually, grouped as probes (eg, 2-electrode\n# pair) or all stored together. 
these approaches are all exampled here \n\n# create time series with all electrode data stored together\nquad = neurodata.create_timeseries(\"ElectricalSeries\", \"quad\", \"acquisition\")\nquad.set_comment(\"Data corresponds to four electrodes (two probes)\")\nquad.set_data(np.zeros((10000, 4)), resolution=1.2345e-6)\nquad.set_time(np.arange(10000) * 0.0001)\n# indicate that we're recording from the first electrode defined in the\n# above map (electrode numbers start at zero, so electrodes are \n# 0, 1, 2 and 3\nquad.set_value(\"electrode_idx\", [0, 1, 2, 3])\n# finish the time series and write data to disk\nquad.finalize()\n\n########################################################################\n# spikes can be reported by hardware or be detected by software\n# in both cases, they are considered to be processed data and so belong\n# in a processing module\n\n# create the module\nspike_mod = neurodata.create_module(\"my spikes\")\n\n# create an interface that stores the events. here they will be stored\n# with their waveforms, such as would be the input to a spike-sorting\n# algorithm\nspike_iface = spike_mod.create_interface(\"EventWaveform\")\nspike_iface.set_source(\"Data from device FooBar-X1 using dynamic multi-phasic threshold of 5xRMS\")\n\n# the event waveform interface publishes a SpikeEventSeries. make \n# that series\nspike = neurodata.create_timeseries(\"SpikeEventSeries\", \"my waveforms\")\nspike.set_comment(\"Snapshots of spike events pulled from a recording\")\nspike.set_value(\"electrode_idx\", [2, 3]) # probe 'p1'\n# describe the source of the data (may be redundant w/ interface source)\nspike.set_value(\"source\", \"Data from device FooBar-X1 using dynamic multi-phasic threshold of 5xRMS\")\n# make some bogus simulated data\n# this is 20 events all having the same shape and a pseudorandom time\nevt = np.zeros((8,2))\nevt[3][0] = 0.01\nevt[4][0] = -0.005\nevt[3][1] = 0.005\nevt[4][1] = -0.0025\ndata = []\nt = []\nlast = 1.0\nfor i in range(20):\n data.append(evt)\n last = last + (i * 17) % 29\n t.append(last)\n# \nspike.set_time(t)\nspike.set_data(data, resolution=1.2345e-6)\n# if data were stored in another unit such as microvolts, it would be\n# necessary to specify a converstion between that unit and Volts.\n# that would be done using the following:\n#spike.set_data(data, conversion=1.0e-6)\n\n# add the time series to the interface. the interface will manage \n# finalizing the time series\nspike_iface.add_timeseries(spike)\n\n# now close the interface and its parent module\nspike_iface.finalize()\nspike_mod.finalize()\n\n# close file, otherwise it will fail to write properly\nneurodata.close()\n\n","sub_path":"ainwb/examples/extracellular_spikes.py","file_name":"extracellular_spikes.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"513045986","text":"\"\"\"\n@version: 1\n@author: zyb\n@site: \n@software: PyCharm Community Edition\n@file: multipleMatrix.py\n@time: 2017/3/10 10:06\n矩阵乘法运算\n\"\"\"\ndef multipleMatrix(ma1,ma2,n):\n new_ma=[]\n\n for i in range(0,n):\n new_ma.append([])\n for j in range(0,n):\n temp = 0\n for k in range(0,n):\n temp=temp+ma1[i][k]*ma2[k][j]\n new_ma[i].append(temp)\n return new_ma\n\nif __name__==\"__main__\":\n\n ma1=[[1,2],[1,2]]\n ma2=[[2,1],[2,1]]\n print(ma1)\n new_ma=multipleMatrix(ma1,ma2,2)\n print(new_ma)","sub_path":"multipleMatrix.py","file_name":"multipleMatrix.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"394992328","text":"from student import Student\n\n\ns1 = Student('Didas','Mbalanya','TZ')\ns2 = Student('Cynthia','Chepkemoi',)\ns3 = Student('Kennedy','Mutemi','UG')\ns4 = Student('Victor','Muthomi')\ns5 = Student('Paul','Kahohi','NG')\ns6 = Student('john','Ouma','NG')\ns7 = Student('james','john')\ns8 = Student('nevo','nunda')\ns9 = Student('boom','pow')\ns10= Student('brian','john')\n\ns1.attend_class()\ns2.attend_class()\ns3.attend_class()\ns4.attend_class()\ns5.attend_class()","sub_path":"attendFun.py","file_name":"attendFun.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"338619704","text":"import copy\nfrom rl.make_game import is_atari_game\nimport numpy as np\nfrom particle_filtering.pf_uct import PFMCTS\n\n\nclass PFMCTS3(PFMCTS):\n\n def search(self, n_mcts, c, Env, mcts_env, budget, max_depth=200, fixed_depth=True):\n \"\"\" Perform the MCTS search from the root \"\"\"\n env = copy.deepcopy(Env)\n self.create_root(env, budget)\n if self.root.terminal:\n raise (ValueError(\"Can't do tree search from a terminal state\"))\n\n is_atari = is_atari_game(env)\n if is_atari:\n raise NotImplementedError\n while budget > 0:\n state = self.root # reset to root for new trace\n if not is_atari:\n mcts_env = copy.deepcopy(Env) # copy original Env to rollout from\n else:\n raise NotImplementedError\n mcts_env.seed(np.random.randint(1e7))\n st = 0\n source_particle = None\n could_sample = True\n n_particles = 1\n while not state.terminal:\n bias = c * self.gamma ** st / (1 - self.gamma) if self.depth_based_bias else c\n action = state.select(c=bias, variance=self.variance)\n st += 1\n k = np.ceil(self.beta * state.n ** self.alpha)\n previous_particles = n_particles\n n_particles = state.get_n_particles()\n previous_could_sample = could_sample\n could_sample = n_particles >= k or state.root\n if not could_sample and source_particle is None:\n source_particle, budget = state.parent_action.sample_from_parent_state(mcts_env, budget)\n state.add_particle(source_particle)\n if source_particle.terminal:\n break\n if action.child_state is not None:\n # select\n state = action.child_state\n if source_particle is not None:\n source_particle, budget = action.sample_from_particle(source_particle, mcts_env, budget)\n state.add_particle(source_particle)\n if source_particle.terminal:\n break\n elif state.terminal:\n source_particle, budget = state.parent_action.sample_from_parent_state(mcts_env, budget)\n state.add_particle(source_particle)\n else:\n rollout_depth = max_depth if fixed_depth else max_depth - st\n state, budget, source_particle = action.add_child_state(mcts_env, budget, max_depth=rollout_depth,\n source_particle=source_particle,\n depth=st) # expand\n break\n\n # Back-up\n R = state.V\n state.update()\n particle = source_particle\n while state.parent_action is not None: # loop back-up until root is reached\n r = particle.reward\n if not particle.terminal:\n R = r + self.gamma * R\n else:\n R = r\n action = state.parent_action\n action.update(R)\n state = action.parent_state\n state.update()\n particle = particle.parent_particle\n","sub_path":"particle_filtering/pf_uct_3.py","file_name":"pf_uct_3.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"456586616","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time, datetime\nfrom tensorflow.examples.tutorials.mnist import input_data\n# 최신 Windows Laptop에서만 사용할것.CPU Version이 높을때 사용.\n# AVX를 지원하는 CPU는 Giuthub: How to compile tensorflow using SSE4.1, SSE4.2, and AVX. \n# Ubuntu와 MacOS는 지원하지만 Windows는 없었음. 2018-09-29\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# Compuntational Graph Initialization\nfrom tensorflow.python.framework import ops\nops.reset_default_graph()\n\nDATA_DIR = \"/tmp/ML/MNIST_data\"\nmnist = input_data.read_data_sets(DATA_DIR, one_hot=True)\n\n# Define Hyper Parameters\nAlpha_Lr = 0.001 # Learning Rate Alpha\nN_EPISODES = 15\nbatch_size = 100\n\n#########\n# 신경망 모델 구성\n######\n# 기존 모델에서는 입력 값을 28x28 하나의 차원으로 구성하였으나,\n# CNN 모델을 사용하기 위해 2차원 평면과 특성치의 형태를 갖는 구조로 만듭니다.\nX = tf.placeholder(tf.float32, [None, 28, 28, 1])\nY = tf.placeholder(tf.float32, [None, 10])\n# dropout (keep_prob) rate 0.7~0.5 on training, but should be 1 for testing\nkeep_prob = tf.placeholder(tf.float32)\n\n# 각각의 변수와 레이어는 다음과 같은 형태로 구성됩니다.\n# W01_m [3 3 1 32] -> [3 3]: 커널 크기, 1: 입력값 X 의 특성수, 32: 필터 갯수\n# _LAY01_m Conv shape=(?, 28, 28, 32)\n# Pool ->(?, 14, 14, 32)\nW01_m = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))\n# tf.nn.conv2d 를 이용해 한칸씩 움직이는 컨볼루션 레이어를 쉽게 만들 수 있습니다.\n# padding='SAME' 은 커널 슬라이딩시 최외곽에서 한칸 밖으로 더 움직이는 옵션\nCONV_01 = tf.nn.conv2d(X, W01_m, strides=[1, 1, 1, 1], padding='SAME')\nRELU_01 = tf.nn.relu(CONV_01)\n# Pooling 역시 tf.nn.max_pool 을 이용하여 쉽게 구성할 수 있습니다.\nMAX_POOL_01 = tf.nn.max_pool(RELU_01, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n# _LAY01_m = tf.nn.dropout(_LAY01_m, keep_prob)\n\n# _LAY02_m Conv shape=(?, 14, 14, 64)\n# Pool ->(?, 7, 7, 64)\n# W02_m 의 [3, 3, 32, 64] 에서 32 는 _LAY01_m 에서 출력된 W01_m 의 마지막 차원, 필터의 크기 입니다.\nW02_m = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))\nCONV_02 = tf.nn.conv2d(MAX_POOL_01, W02_m, strides=[1, 1, 1, 1], padding='SAME')\nRELU_02 = tf.nn.relu(CONV_02)\nMAX_POOL_02 = tf.nn.max_pool(RELU_02, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n# _LAY02_m = tf.nn.dropout(_LAY02_m, keep_prob)\n\n# FC 레이어: 입력값 7x7x64 -> 출력값 256\n# Full connect를 위해 직전의 Pool 사이즈인 (?, 7, 7, 64) 를 참고하여 차원을 줄여줍니다.\n# Reshape ->(?, 256)\nW03_m = tf.Variable(tf.random_normal([7 * 7 * 64, 256], stddev=0.01))\n_LAY03_m = tf.reshape(MAX_POOL_02, [-1, 7 * 7 * 64])\nFULLY_CONN_03 = tf.matmul(_LAY03_m, W03_m)\nRELU_03 = tf.nn.relu(FULLY_CONN_03)\nDROP_OUT_03 = tf.nn.dropout(RELU_03, keep_prob)\n\n# 최종 출력값 _LAY03_m 에서의 출력 256개를 입력값으로 받아서 0~9 레이블인 10개의 출력값을 만듭니다.\nW04_m = tf.Variable(tf.random_normal([256, 10], stddev=0.01))\nPred_m = tf.matmul(DROP_OUT_03, W04_m)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Pred_m, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate = Alpha_Lr).minimize(cost)\n# 최적화 함수를 RMSPropOptimizer 로 바꿔서 결과를 확인해봅시다.\n# optimizer = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)\n\n#########\n# 신경망 모델 학습\n######\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\nstart_time = time.time()\nprint('Learning Started!')\n\ntotal_batch = int(mnist.train.num_examples / batch_size)\n\nfor episode in range(N_EPISODES):\n total_cost = 0\n\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n # 이미지 데이터를 CNN 모델을 위한 자료형태인 [28 28 1] 의 형태로 재구성합니다.\n batch_xs = batch_xs.reshape(-1, 28, 28, 1)\n\n _, cost_val = sess.run([optimizer, cost],\n feed_dict={X: 
batch_xs,\n Y: batch_ys,\n keep_prob: 0.7})\n total_cost += cost_val\n\n print('episode:', '%05d' % (episode + 1),\n 'Avg. cost =', '{:.5f}'.format(total_cost / total_batch))\n \n elapsed_time = datetime.timedelta(seconds=int(time.time()-start_time))\n print(\"[{}]\".format(elapsed_time))\n\nprint('Optimization Completed!')\n\n#########\n# 결과 확인\n######\nis_correct = tf.equal(tf.argmax(Pred_m, 1), tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\nprint('Accuracy:', sess.run(accuracy,\n feed_dict={X: mnist.test.images.reshape(-1, 28, 28, 1),\n Y: mnist.test.labels,\n keep_prob: 1}))\n\nprint('Total_batch =', total_batch)\n\nelapsed_time = time.time() - start_time\nformatted = datetime.timedelta(seconds=int(elapsed_time))\nprint(\"=== training time elapsed: {}s ===\".format(formatted))\n\n#########\n# 결과 확인 (matplot)\n######\nlabels = sess.run(Pred_m,\n feed_dict={X: mnist.test.images.reshape(-1, 28, 28, 1),\n Y: mnist.test.labels,\n keep_prob: 1})\n\nfig = plt.figure()\nfor i in range(60):\n subplot = fig.add_subplot(4, 15, i + 1)\n subplot.set_xticks([])\n subplot.set_yticks([])\n subplot.set_title('%d' % np.argmax(labels[i]))\n subplot.imshow(mnist.test.images[i].reshape((28, 28)),\n cmap=plt.cm.gray_r)\n\nplt.show()\n\n\n# 세션을 닫습니다.\nsess.close()\n\n\n# Step 10. Tune hyperparameters:\n# Step 11. Deploy/predict new outcomes:\n\n","sub_path":"05_MNIST_CNN_Tensorboard_Save_Restore/01_MNIST_Simple_CNN_01_w_time.py","file_name":"01_MNIST_Simple_CNN_01_w_time.py","file_ext":"py","file_size_in_byte":5871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"96186836","text":"\"\"\"\nCopyright 2013 Rackspace\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom datetime import datetime\n\nfrom cloudcafe.compute.common.constants import Constants\nfrom cloudroast.stacktach.fixtures import StackTachComputeIntegration,\\\n StackTachTestAssertionsFixture\n\n\nclass StackTachDBServerResizeUpConfirmTests(StackTachComputeIntegration,\n StackTachTestAssertionsFixture):\n \"\"\"\n @summary: With Server Resize Up (e.g., from flavor 2 -> 3),\n tests the entries created in StackTach DB.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.create_server()\n cls.resize_server()\n cls.confirm_resize_server()\n cls.audit_period_beginning = \\\n datetime.utcnow().strftime(Constants.DATETIME_0AM_FORMAT)\n\n cls.stacktach_events_for_server(server=cls.confirmed_resized_server)\n cls.event_launch_resize_server = cls.event_launches[1]\n\n def test_launch_entry_on_resize_server_up_response(self):\n \"\"\"\n Verify the Launch parameters are being returned in the\n Server Resize Up response\n \"\"\"\n # There should be 2 launch entries for a resize.\n self.validate_attributes_in_launch_response(num_of_launch_entry=2)\n\n def test_launch_entry_fields_on_create_server(self):\n \"\"\"\n Verify that the first Launch entry will have all expected fields\n after a Server Resize Up\n \"\"\"\n self.validate_launch_entry_field_values(\n server=self.created_server)\n\n def test_launch_entry_fields_on_resize_up(self):\n \"\"\"\n Verify that the second Launch entry will have all expected fields\n after a Server Resize Up\n \"\"\"\n self.validate_launch_entry_field_values(\n server=self.verify_resized_server,\n event_launch_server=self.event_launch_resize_server,\n expected_flavor_ref=self.flavor_ref_alt,\n launched_at=self.launched_at_resized_server)\n\n def test_exist_entry_on_resize_up_server_response(self):\n \"\"\"\n Verify the Exist parameters are correct after a Server Resize Up\n \"\"\"\n self.validate_attributes_in_exist_response()\n\n def test_exists_entry_fields_on_resize_up(self):\n \"\"\"\n Verify that the Exist entry will have all expected fields\n after Server Resize Up\n \"\"\"\n self.validate_exist_entry_field_values(\n server=self.created_server)\n self.validate_exist_entry_audit_period_values(\n expected_audit_period_ending=self.resize_start_time,\n expected_audit_period_beginning=self.audit_period_beginning)\n\n def test_exist_launched_at_field_match_on_resize_up(self):\n \"\"\"\n Verify that the Exists entry launched_at matches the\n Launch entry launched_at for a Server Resize Up\n \"\"\"\n\n self.assertEqual(self.event_launch.launched_at,\n self.event_exist.launched_at,\n self.msg.format(\n \"launched_at\",\n self.event_launch.launched_at,\n self.event_exist.launched_at,\n self.exist_response.reason,\n self.exist_response.content))\n\n def test_no_delete_entry_on_resize_up_server_response(self):\n \"\"\"\n Verify that there is no delete entry after a Server Resize Up\n \"\"\"\n 
self.validate_no_deletes_entry_returned()\n","sub_path":"cloudroast/stacktach/functional/test_server_resize_up_confirm_in_stacktach_db.py","file_name":"test_server_resize_up_confirm_in_stacktach_db.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"121884733","text":"import tornado.websocket\nimport tornado.web\nimport tornado.ioloop\nimport time\nfrom tributary.reactive.input import _gen\n\n\nclass DummyWebSocket(tornado.websocket.WebSocketHandler):\n def open(self):\n print(\"WebSocket opened\")\n i = 0\n x = {y: _gen() for y in ('A', 'B', 'C', 'D')}\n try:\n while i < len(x['A']):\n self.write_message({'A': x['A'][i],\n 'B': x['B'][i],\n 'C': x['C'][i],\n 'D': x['D'][i]})\n i += 1\n time.sleep(.1)\n finally:\n print(\"WebSocket closed\")\n self.close()\n\n def on_message(self, message):\n self.write_message(u\"You said: \" + message)\n\n def on_close(self):\n print(\"WebSocket closed\")\n\n\ndef main():\n app = tornado.web.Application([(r\"/\", DummyWebSocket)])\n app.listen(8899)\n print('listening on %d' % 8899)\n tornado.ioloop.IOLoop.current().start()\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/dummy_ws.py","file_name":"dummy_ws.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"398751667","text":"import pandas as pd\nfrom konlpy import tag\nimport random\nimport pickle\nfrom keras.preprocessing import sequence\nimport numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import losses\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras import models\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\nimport os.path\nfrom flask import Flask, request\n\n\ndef data_load(option):\n csv = pd.read_csv('train_data_add_add.csv')\n titles_csv = csv['title']\n prices_csv = csv['price']\n if option == \"titles\":\n csv = titles_csv\n elif option == 'price':\n csv = prices_csv\n return csv\n\n\ndef words_to_ids(words, word_dict):\n ids = []\n for word in words:\n try:\n ids.append(word_dict.index(word))\n except Exception as e:\n print(e)\n return ids\n\n\nclass RNN:\n\n def __init__(self, model_name):\n self._model_name = model_name\n\n try:\n with open(\"titles_words.bin\", \"rb\") as f:\n self._titles_words = pickle.load(f)\n with open(\"dictionary.bin\", \"rb\") as f:\n self._dictionary = pickle.load(f)\n with open(\"titles_ids.bin\", \"rb\") as f:\n self._titles_ids = pickle.load(f)\n print(\"------------Data 사전을 로드합니다--------------\")\n\n except Exception as e:\n print(\"------------Data 사전이 없으므로 생성합니다----------\")\n okt = tag.Okt()\n words_set = set()\n self._titles_words = []\n count = 1\n for title in data_load(\"titles\"):\n title_pos = okt.pos(title, norm=True)\n words = []\n for word in title_pos:\n words_set.add(word[0])\n words.append(word[0])\n self._titles_words.append(words)\n count += 1\n\n dictionary = list(words_set)\n random.shuffle(dictionary)\n self._dictionary = [0] + dictionary\n\n self._titles_ids = []\n count = 1\n for title in self._titles_words:\n words_id = words_to_ids(title, self._dictionary)\n self._titles_ids.append(words_id)\n count += 1\n with open(\"titles_words.bin\", \"wb\") as f:\n pickle.dump(self._titles_words, f)\n with open(\"dictionary.bin\", \"wb\") as f:\n pickle.dump(self._dictionary, f)\n with open(\"titles_ids.bin\", \"wb\") as f:\n pickle.dump(self._titles_ids, f)\n\n def ids_to_words(self, ids):\n words = []\n for word_id in ids:\n if word_id != 0:\n words.append(self._dictionary[word_id])\n return words\n\n def index_process(self):\n self._max_title_len = max(len(title_ids) for title_ids in self._titles_ids)\n # print(max_title_len)\n titles_ids_np = sequence.pad_sequences(self._titles_ids, maxlen=self._max_title_len, padding='post')\n # print(titles_ids_np)\n self._prices_np = np.array([[price] for price in data_load(\"price\")])\n # print(prices_np)\n\n index = [i for i in range(len(titles_ids_np))]\n random.shuffle(index)\n\n train_len = int(len(index) * 0.9)\n train_index = index[:train_len]\n test_index = index[train_len:]\n\n # print(len(titles_ids_np))\n # print(len(train_index))\n # print(len(test_index))\n\n self._X_train = titles_ids_np[train_index]\n self._X_test = titles_ids_np[test_index]\n\n self._scaler = MinMaxScaler() # StandardScaler()\n self._scaler.fit(self._prices_np)\n y_scaled = self._scaler.transform(self._prices_np)\n\n self._y_train_scaled = y_scaled[train_index]\n self._y_test_scaled = y_scaled[test_index]\n\n # print(prices_np)\n # print(y_scaled)\n\n self._vocab_size = len(self._dictionary)\n\n @staticmethod\n def tokenizer_create(text):\n okt = tag.Okt()\n text_pos = okt.pos(text, norm=True)\n\n words = []\n for word in text_pos:\n 
words.append(word[0])\n\n return words\n\n def sequence_create(self, text_ids):\n sequence_np = sequence.pad_sequences([text_ids], maxlen=self._max_title_len, padding='post')\n return sequence_np\n\n def model_create(self):\n model = keras.Sequential([\n layers.Embedding(self._vocab_size, 64),\n layers.Bidirectional(layers.LSTM(64, return_sequences=True)),\n layers.Bidirectional(layers.LSTM(32)),\n layers.Dense(64, activation='relu'),\n layers.Dropout(0.5),\n layers.Dense(1)\n ])\n model.summary()\n\n model.compile(loss=losses.MeanSquaredError(), optimizer=optimizers.Adam(1e-4), metrics=['mae'])\n history = model.fit(self._X_train, self._y_train_scaled, epochs=30,\n validation_data=(self._X_test, self._y_test_scaled),\n validation_steps=30, verbose=1)\n model.save(self._model_name)\n\n return model\n\n def model_load(self):\n try:\n model = models.load_model(self._model_name)\n print(\"-----------RNN 모델을 로드합니다-------------\")\n return model\n except Exception as e:\n print(e)\n print(\"-----------RNN 모델이 없으므로 학습을 진행합니다-------------\")\n model = self.model_create()\n return model\n\n def plot_graphs(history, metric):\n plt.plot(history.history[metric])\n plt.plot(history.history['val_' + metric], '')\n plt.xlabel(\"Epochs\")\n plt.ylabel(metric)\n plt.legend([metric, 'val_' + metric])\n plt.show()\n\n # plot_graphs(history, 'mae')\n # plot_graphs(history, 'loss')\n\n # price_predictions = model.predict(X_test)\n #\n # y_test_inverse = scaler.inverse_transform(y_test_scaled)\n # price_predictions_inverse = scaler.inverse_transform(price_predictions)\n\n # for i in range(100):\n # print(f\"{i}: {ids_to_words(X_test[i])}\")\n # print(f\"{i}: {y_test_inverse[i]} = {price_predictions_inverse[i]}\")\n # print()\n\n # print(ids_to_words(X_test[5]))\n\n def predict_phone(self, text):\n text_words = self.tokenizer_create(text)\n print(text_words)\n text_ids = words_to_ids(text_words, self._dictionary)\n text_ids_np = self.sequence_create(text_ids)\n model = self.model_load()\n predictions = model.predict(text_ids_np)\n text_predictions_inverse = self._scaler.inverse_transform(predictions)\n # print(f'{text} -> {text_predictions_inverse}')\n return text_predictions_inverse[0][0]\n\n\nrnn = RNN(\"baseline_model_data_add.h5\")\nrnn.index_process()\nfile_1 = 'C:\\\\Users\\\\JEONKYUBIN\\\\Desktop\\\\AI\\\\RNN\\\\data.txt'\nfile_2 = 'C:\\\\Users\\\\JEONKYUBIN\\\\Desktop\\\\AI\\\\RNN\\\\prediction.txt'\n\napp = Flask(__name__)\n\n\n\n\n@app.route('/volt/ai', methods=['GET'])\ndef ai_route() -> None:\n try:\n product_name = request.args.get('product_name')\n price_float: float = rnn.predict_phone(product_name.upper())\n price_int: int = round(int(price_float), -3)\n return {\n 'data': price_int\n }\n except:\n return {\n 'data': 'error'\n }\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n\n# while True:\n# if os.path.isfile(file_1): # if 문 쓰기 (파일 존재 유무 검사)\n# with open(\"data.txt\", 'r', encoding='UTF-8') as f:\n# test = f.read()\n# if not test:\n# # print(\"문자열이 비어있음\")\n# continue\n# text_upped = test.upper()\n# print(test)\n# price_float = rnn.predict_phone(text_upped)\n# price_int = round(int(price_float), -3)\n# price_str = str(price_int)\n# with open(\"prediction.txt\", 'w', encoding='UTF-8') as f:\n# f.write(price_str)\n# print(price_str + \"원\")\n#\n# os.remove(file_1)\n# # data.txt 파일 지우기 추가\n# else:\n# pass\n# # print(\"AI 작동에 문제가 
생겼습니다\")\n","sub_path":"RNN/PredictPrice.py","file_name":"PredictPrice.py","file_ext":"py","file_size_in_byte":8251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"115775353","text":"LETTERS = ['Z','E','R','O','N','T','W','H','F','U','V','S','I','X','G']\nCOUNT_0 = [1,1,1,1,0,0,0,0,0,0,0,0,0,0,0]\nCOUNT_1 = [0,1,0,1,1,0,0,0,0,0,0,0,0,0,0]\nCOUNT_2 = [0,0,0,1,0,1,1,0,0,0,0,0,0,0,0]\nCOUNT_3 = [0,2,1,0,0,1,0,1,0,0,0,0,0,0,0]\nCOUNT_4 = [0,0,1,1,0,0,0,0,1,1,0,0,0,0,0]\nCOUNT_5 = [0,1,0,0,0,0,0,0,1,0,1,0,1,0,0]\nCOUNT_6 = [0,0,0,0,0,0,0,0,0,0,0,1,1,1,0]\nCOUNT_7 = [0,2,0,0,1,0,0,0,0,0,1,1,0,0,0]\nCOUNT_8 = [0,1,0,0,0,1,0,1,0,0,0,0,1,0,1]\nCOUNT_9 = [0,1,0,0,2,0,0,0,0,0,0,0,1,0,0]\n\ndef read(name):\n return [s.split('\\n')[0] for s in open(name,'r').readlines()[1:]]\n \ndef count(s):\n counts = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n for c in s:\n counts[LETTERS.index(c)] += 1\n return counts\n \ndef subtract_counts(c1,c2):\n # subtract c2 from c1\n return [v1-v2 for v1,v2 in zip(c1,c2)]\n \ndef find_number(counts):\n number = []\n \n for zero in range(0,counts[LETTERS.index('Z')]):\n counts = subtract_counts(counts,COUNT_0)\n number.append(0)\n \n for six in range(0,counts[LETTERS.index('X')]):\n counts = subtract_counts(counts,COUNT_6)\n number.append(6)\n \n for eight in range(0,counts[LETTERS.index('G')]):\n counts = subtract_counts(counts,COUNT_8)\n number.append(8)\n \n for seven in range(0,counts[LETTERS.index('S')]):\n counts = subtract_counts(counts,COUNT_7)\n number.append(7)\n \n for seven in range(0,counts[LETTERS.index('V')]):\n counts = subtract_counts(counts,COUNT_5)\n number.append(5)\n \n for nine in range(0,counts[LETTERS.index('I')]):\n counts = subtract_counts(counts,COUNT_9)\n number.append(9)\n \n for four in range(0,counts[LETTERS.index('U')]):\n counts = subtract_counts(counts,COUNT_4)\n number.append(4)\n \n for three in range(0,counts[LETTERS.index('H')]):\n counts = subtract_counts(counts,COUNT_3)\n number.append(3)\n \n for one in range(0,counts[LETTERS.index('E')]):\n counts = subtract_counts(counts,COUNT_1)\n number.append(1)\n \n for two in range(0,counts[LETTERS.index('O')]):\n counts = subtract_counts(counts,COUNT_2)\n number.append(2)\n \n return sorted(number)\n \n\ncases = read('A-large.in')\nnumbers = []\nfor case in cases:\n counts = count(case)\n number = find_number(counts)\n numbers.append(number)\n\noutfile = open('out','w')\nfor i,number in enumerate(numbers):\n outfile.write('Case #%s: %s\\n' % (i+1,''.join([str(n) for n in number])))\noutfile.close()\n ","sub_path":"codes/CodeJamCrawler/16_2_1_neat/16_2_1_ThomasB_code.py","file_name":"16_2_1_ThomasB_code.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"211842175","text":"from tests import DocumentPageTestCase\nfrom document_generator.decorators import IdTitleCollectorFileDecorator\n\n\nclass IdTitleCollectorFileDecoratorTest(DocumentPageTestCase):\n def decorate(self, node):\n decorator = IdTitleCollectorFileDecorator()\n state = decorator.init_state(node)\n decorator.run(node, state)\n return (state, decorator)\n\n def test_no_files(self):\n node = {\n 'files': {},\n 'subnodes': [],\n }\n\n state, decorator = self.decorate(node)\n\n self.assertEmpty(state['id-title-map'])\n\n self.assertEmpty(decorator.get_messages(state))\n\n def test_plain(self):\n node = {\n 'name': 'bar',\n 'files': {\n 'en': {\n 'id': 'id-foo',\n 'key': 'en',\n 'markdown': 'foo\\nbar',\n },\n },\n 'subnodes': [],\n }\n\n state, decorator = self.decorate(node)\n\n self.assertEqual(state['id-title-map']['id-foo']['en'], 'foo')\n self.assertLenIs(state['id-title-map']['id-foo'], 1)\n\n self.assertEmpty(decorator.get_messages(state))\n\n def test_multiple_files(self):\n node = {\n 'name': 'bar',\n 'files': {\n 'en': {\n 'id': 'id-foo',\n 'key': 'en',\n 'markdown': 'foo\\nbar',\n },\n 'de': {\n 'id': 'id-foo',\n 'key': 'de',\n 'markdown': 'quux\\nbar',\n },\n },\n 'subnodes': [],\n }\n\n state, decorator = self.decorate(node)\n\n self.assertEqual(state['id-title-map']['id-foo']['en'], 'foo')\n self.assertEqual(state['id-title-map']['id-foo']['de'], 'quux')\n self.assertLenIs(state['id-title-map']['id-foo'], 2)\n\n self.assertEmpty(decorator.get_messages(state))\n\n def test_empty_lines(self):\n node = {\n 'name': 'bar',\n 'files': {\n 'en': {\n 'id': 'id-foo',\n 'key': 'en',\n 'markdown': '\\n \\nfoo\\nbar',\n },\n },\n 'subnodes': [],\n }\n\n state, decorator = self.decorate(node)\n\n self.assertEqual(state['id-title-map']['id-foo']['en'], 'foo')\n self.assertLenIs(state['id-title-map']['id-foo'], 1)\n\n self.assertEmpty(decorator.get_messages(state))\n\n def test_section_marker_stripping(self):\n node = {\n 'name': 'bar',\n 'files': {\n 'en': {\n 'id': 'id-foo',\n 'key': 'en',\n 'markdown': '## # # foo\\nbar',\n },\n },\n 'subnodes': [],\n }\n\n state, decorator = self.decorate(node)\n\n self.assertEqual(state['id-title-map']['id-foo'],\n {'en': 'foo'})\n\n self.assertEmpty(decorator.get_messages(state))\n\n def test_attribute_stripping(self):\n node = {\n 'name': 'bar',\n 'files': {\n 'en': {\n 'id': 'id-foo',\n 'key': 'en',\n 'markdown': 'foo {=BAR} {: class=quux}\\nbar',\n },\n },\n 'subnodes': [],\n }\n\n state, decorator = self.decorate(node)\n\n self.assertEqual(state['id-title-map']['id-foo'],\n {'en': 'foo {=BAR}'})\n\n self.assertEmpty(decorator.get_messages(state))\n\n def test_anonymous(self):\n node = {\n 'name': 'bar',\n 'files': {\n 'en': {\n 'id': 'id-foo',\n 'key': 'en',\n 'markdown': '',\n },\n },\n 'subnodes': [],\n }\n\n state, decorator = self.decorate(node)\n\n self.assertEqual(state['id-title-map']['id-foo'],\n {'en': '(anonymous)'})\n\n messages = decorator.get_messages(state)\n self.assertEqual(messages[0]['kind'], 'error')\n self.assertIn('title', messages[0]['text'])\n self.assertLenIs(messages, 1)\n\n def test_default(self):\n node = {\n 'name': 'bar',\n 'files': {\n 'default': {\n 'id': 'id-foo',\n 'key': 'en',\n 'is-default': True,\n 'markdown': 'foo'\n },\n },\n 'subnodes': [],\n }\n\n state, decorator = self.decorate(node)\n\n self.assertEqual(state['id-title-map']['id-foo'],\n {'default': 'foo'})\n\n 
self.assertEmpty(decorator.get_messages(state))\n","sub_path":"tests/decorators/test_id_title_collector_file_decorator.py","file_name":"test_id_title_collector_file_decorator.py","file_ext":"py","file_size_in_byte":4796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"9815310","text":"from config import Config\nna = \"1970-01-01 08:00:00\"\n\nopt = Config()\n\ndef divide_data(para):\n if para == 0:\n filename = opt.TRAIN_FILE\n else:\n filename = opt.TEST_FILE\n with open(filename, 'r') as f:\n with open('data/withoutpreselling.csv', 'w') as out1:\n with open('data/preselling.csv', 'w') as out2:\n idx = 0\n for line in f:\n if idx == 0:\n out1.write(line)\n out2.write(line)\n else:\n preselling = line.split('\\t')[9]\n if preselling == '0' or preselling == na:\n out1.write(line)\n else:\n out2.write(line)\n idx+=1\n\n# 0 means train\n# 1 means test\ndivide_data(0)\n","sub_path":"seedcup_tf/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"324204561","text":"#!/usr/bin/python3\n\nfrom which_pyqt import PYQT_VER\nif PYQT_VER == 'PYQT5':\n\tfrom PyQt5.QtCore import QLineF, QPointF\nelif PYQT_VER == 'PYQT4':\n\tfrom PyQt4.QtCore import QLineF, QPointF\nelse:\n\traise Exception('Unsupported Version of PyQt: {}'.format(PYQT_VER))\n\n\n\n\nimport time\nimport numpy as np\nfrom TSPClasses import *\nimport heapq\nimport itertools\nfrom MyClasses import *\nimport HeapQueue\n\n\n\nclass TSPSolver:\n\tdef __init__( self, gui_view ):\n\t\tself._scenario = None\n\n\tdef setupWithScenario( self, scenario ):\n\t\tself._scenario = scenario\n\n\n\t''' \n\t\tThis is the entry point for the default solver\n\t\twhich just finds a valid random tour. Note this could be used to find your\n\t\tinitial BSSF.\n\t\t \n\t\tresults dictionary for GUI that contains three ints: cost of solution, \n\t\ttime spent to find solution, number of permutations tried during search, the \n\t\tsolution found, and three null values for fields not used for this \n\t\talgorithm \n\t'''\n\t\n\tdef defaultRandomTour( self, time_allowance=60.0 ):\n\t\tresults = {}\n\t\tcities = self._scenario.getCities()\n\t\tncities = len(cities)\n\t\tfoundTour = False\n\t\tcount = 0\n\t\tbssf = None\n\t\tstart_time = time.time()\n\t\twhile not foundTour and time.time()-start_time < time_allowance:\n\t\t\t# create a random permutation\n\t\t\tperm = np.random.permutation( ncities )\n\t\t\troute = []\n\t\t\t# Now build the route using the random permutation\n\t\t\tfor i in range( ncities ):\n\t\t\t\troute.append( cities[ perm[i] ] )\n\t\t\tbssf = TSPSolution(route)\n\t\t\tcount += 1\n\t\t\tif bssf.cost < np.inf:\n\t\t\t\t# Found a valid route\n\t\t\t\tfoundTour = True\n\t\tend_time = time.time()\n\t\tresults['cost'] = bssf.cost if foundTour else math.inf\n\t\tresults['time'] = end_time - start_time\n\t\tresults['count'] = count\n\t\tresults['soln'] = bssf\n\t\tresults['max'] = None\n\t\tresults['total'] = None\n\t\tresults['pruned'] = None\n\t\treturn results\n\n\n\t''' \n\t\tThis is the entry point for the greedy solver, which you must implement for \n\t\tthe group project (but it is probably a good idea to just do it for the branch-and\n\t\tbound project as a way to get your feet wet). 
Note this could be used to find your\n\t\tinitial BSSF.\n\t\t \n\t\tresults dictionary for GUI that contains three ints: cost of best solution, \n\t\ttime spent to find best solution, total number of solutions found, the best\n\t\tsolution found, and three null values for fields not used for this \n\t\talgorithm \n\t'''\n\n\tdef greedy( self,time_allowance=60.0, all_solns = False ):\n\t\tresults = {}\n\t\tsolutions = []\n\t\tnsolutions = 0\n\t\tcities = self._scenario.getCities()\n\t\tncities = len(cities)\n\t\tbssf = None\n\t\tstart_time = time.time()\n\t\tfor start_city in cities:\n\t\t\troute_set = set()\n\t\t\troute_set.add(start_city)\n\t\t\troute = [start_city]\n\t\t\tcurrent_city = start_city\n\t\t\twhile len(route) < ncities and time.time()-start_time < time_allowance:\n\t\t\t\tmin_cost = np.inf\n\t\t\t\tnext_city = None\n\t\t\t\tfor c2 in cities:\n\t\t\t\t\tif c2 not in route_set:\n\t\t\t\t\t\tcost_to = current_city.costTo(c2)\n\t\t\t\t\t\tif cost_to < min_cost:\n\t\t\t\t\t\t\tnext_city = c2\n\t\t\t\t\t\t\tmin_cost = cost_to\n\t\t\t\tif next_city is not None:\n\t\t\t\t\troute_set.add(next_city)\n\t\t\t\t\troute.append(next_city)\n\t\t\t\t\tcurrent_city = next_city\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\tif time.time()-start_time < time_allowance and len(route) == ncities:\n\t\t\t\tbssf_candidate = TSPSolution(route)\n\t\t\t\tif bssf_candidate.cost < np.inf:\n\t\t\t\t\tnsolutions += 1\n\t\t\t\t\t#print(f\"Cost of solution starting at {start_city._name} - {bssf_candidate.cost}\")\n\t\t\t\t\tif bssf is None or bssf_candidate.cost < bssf.cost:\n\t\t\t\t\t\tbssf = bssf_candidate\n\t\t\t\t\tif all_solns:\n\t\t\t\t\t\tsolutions.append(bssf_candidate)\n\n\t\tend_time = time.time()\n\t\tif bssf is not None:\n\t\t\tresults['cost'] = bssf.cost\n\t\telse:\n\t\t\tresults['cost'] = math.inf\n\t\tresults['time'] = end_time - start_time\n\t\tresults['count'] = nsolutions\n\t\tif all_solns:\n\t\t\tresults['soln'] = (solutions, bssf)\n\t\telse:\n\t\t\tresults['soln'] = bssf\n\t\tresults['max'] = None\n\t\tresults['total'] = None\n\t\tresults['pruned'] = None\n\t\treturn results\t\t\t\t\n\t\n\n\t''' \n\t\tThis is the entry point for the branch-and-bound algorithm that you will implement\n\t\t \n\t\tresults dictionary for GUI that contains three ints: cost of best solution, \n\t\ttime spent to find best solution, total number solutions found during search (does\n\t\tnot include the initial BSSF), the best solution found, and three more ints: \n\t\tmax queue size, total number of states created, and number of pruned states. \n\t'''\n\t\t\n\tdef branchAndBound( self, time_allowance=60.0 ):\n\t\t# Set bssf to greedy solution\n\t\tstart_time = time.time()\n\t\tcities = self._scenario.getCities()\n\t\tncities = len(cities)\n\t\t#greedy_results = self.defaultRandomTour(time_allowance)\n\t\tgreedy_results = self.greedy(time_allowance)\n\t\tif greedy_results['soln'] is None:\n\t\t\tgreedy_results = self.defaultRandomTour(time_allowance)\n\n\t\tself._priority_queue = MyHeap()\n\t\tself._bssf = greedy_results['soln']\n\t\tself._states_created = 0\n\t\tself._pruned_states = 0\n\t\tself._max_queue_size = 0\n\t\tself._solutions_found = 0\n\n\t\t#initialize rcm\n\t\tstart_rcm = self.init_rcm(cities, ncities)\n\t\t# Start at first city and create nodes for each city not in route\n\t\troute = [cities[0]]\n\t\troute_set = set()\n\t\troute_set.add(cities[0])\n\t\tstart_node = Node(route, route_set, start_rcm)\n\t\tself._priority_queue.insert(start_node)\n\n\t\t# This creates at most, n! 
states but won't ever actually make that many\n\t\t# Because of this, the total time and space complexity is O(n!n^2)\n\t\twhile time.time()-start_time < time_allowance and self._priority_queue.size() > 0:\n\t\t\tnode = self._priority_queue.delete_min()\n\t\t\troute = node.route\n\t\t\troute_set = node.route_set\n\t\t\trcm = node.rcm\n\t\t\tif (rcm.lower_bound >= self._bssf.cost):\n\t\t\t\tself._pruned_states += 1\n\t\t\t\tcontinue\n\n\t\t\t# Runs O(n) times, but the complexity is factored into the while loop\n\t\t\t# The time complexity isn't in the update_rcm function, but is when we make a copy of the matrix\n\t\t\t# So this section is O(n^3)\n\t\t\tfor city in cities:\n\t\t\t\tif city not in route_set:\n\t\t\t\t\tself._states_created += 1\n\t\t\t\t\tnew_route = route[:]\n\t\t\t\t\tnew_route.append(city)\n\t\t\t\t\tnew_route_set = route_set.copy()\n\t\t\t\t\tnew_route_set.add(city)\n\t\t\t\t\tnew_rcm = rcm.copy()\n\t\t\t\t\tself.update_rcm(new_rcm, route[-1]._index, city._index, ncities)\n\t\t\t\t\tif new_rcm.lower_bound >= self._bssf.cost:\n\t\t\t\t\t\tself._pruned_states += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tif len(new_route) == ncities:\n\t\t\t\t\t\t\tbssf_candidate = TSPSolution(new_route)\n\t\t\t\t\t\t\tself._solutions_found += 1\n\t\t\t\t\t\t\tif bssf_candidate.cost < self._bssf.cost:\n\t\t\t\t\t\t\t\tself._bssf = bssf_candidate\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself._pruned_states += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnew_node = Node(new_route, new_route_set, new_rcm)\n\t\t\t\t\t\t\tnew_node.update_priority()\n\t\t\t\t\t\t\tself._priority_queue.insert(new_node)\n\t\t\t\t\t\t\tif self._priority_queue.size() > self._max_queue_size:\n\t\t\t\t\t\t\t\tself._max_queue_size = self._priority_queue.size()\n\t\tend_time = time.time()\n\t\tif time.time()-start_time < time_allowance:\n\t\t\tself._pruned_states += self._priority_queue.size()\n\t\tresults = {}\n\t\tresults['cost'] = self._bssf.cost\n\t\tresults['time'] = end_time - start_time\n\t\tresults['count'] = self._solutions_found\n\t\tresults['soln'] = self._bssf\n\t\tresults['max'] = self._max_queue_size\n\t\tresults['total'] = self._states_created\n\t\tresults['pruned'] = self._pruned_states\n\t\treturn results\t\t\n\n\n\t# city_start and city_end are indices\n\tdef update_rcm(self, rcm, city_start, city_end, ncities):\n\t\t# Add residual cost of edge we ended up taking to lower bound\n\t\trcm.lower_bound += rcm.matrix[city_start][city_end]\n\t\t\n\t\t# replace every value in that node's rows and columns with np.inf\n\t\tfor i in range(ncities):\n\t\t\trcm.matrix[i][city_end] = np.inf\n\t\t\trcm.matrix[city_start][i] = np.inf \n\n\t\t# Turn reverse node into infty\n\t\trcm.matrix[city_end][city_start] = np.inf\n\t\t\n\t\t# reduce so each row and column has a 0 in it (except ones with only infinity)\n\t\tself.reduce_rcm(ncities, rcm)\n\n\n\t# O(n^2) time and space complexity\n\tdef init_rcm(self, cities, ncities):\n\t\trcm = ReducedCostMatrix(ncities)\n\t\t#fill in edges from graph O(n^2)\n\t\tfor i in range(ncities):\n\t\t\tfor j in range(ncities):\n\t\t\t\tif i != j:\n\t\t\t\t\trcm.matrix[i][j] = cities[i].costTo(cities[j])\n\t\t#self.print_matrix(rcm.matrix)\n\t\t# reduce the rcm\n\t\tself.reduce_rcm(ncities, rcm)\n\t\treturn rcm\n\n\t# Constant space complexity, O(n^2) time complexity\n\tdef reduce_rcm(self, ncities, rcm):\n\t\t# For each row, find lowest number and add to lower bound\n\t\t# then replace each node in that row with cost - lowest_cost O(n^2)\n\t\tfor row in range(ncities):\n\t\t\tmin_cost = np.inf\n\t\t\tfor 
column in range(ncities):\n\t\t\t\t\tif rcm.matrix[row][column] < min_cost:\n\t\t\t\t\t\tmin_cost = rcm.matrix[row][column]\n\t\t\t#print(f\"Min cost = {min_cost}\")\n\t\t\tif min_cost != 0 and min_cost != np.inf:\n\t\t\t\t\trcm.lower_bound += min_cost\n\t\t\t\t\tfor column in range(ncities):\n\t\t\t\t\t\trcm.matrix[row][column] -= min_cost\n\t\t# Do the same thing for each column\n\t\tfor column in range(ncities):\n\t\t\tmin_cost = np.inf\n\t\t\tfor row in range(ncities):\n\t\t\t\t\tif rcm.matrix[row][column] < min_cost:\n\t\t\t\t\t\tmin_cost = rcm.matrix[row][column]\n\t\t\tif min_cost != 0 and min_cost != np.inf:\n\t\t\t\t\trcm.lower_bound += min_cost\n\t\t\t\t\tfor row in range(ncities):\n\t\t\t\t\t\trcm.matrix[row][column] -= min_cost\n\n\tdef print_matrix(self, A):\n\t\tprint('\\n'.join([''.join(['{:5} '.format(item) for item in row]) for row in A]))\n\n\n\n\n\n\t''' \n\t\tThis is the entry point for the algorithm you'll write for your group project.\n\t\t \n\t\tresults dictionary for GUI that contains three ints: cost of best solution, \n\t\ttime spent to find best solution, total number of solutions found during search, the \n\t\tbest solution found. You may use the other three field however you like.\n\t\talgorithm \n\t'''\n\t\t\n\tdef fancy( self,time_allowance=60.0 ):\n\t\tself.population_size = 1000\n\t\tself.mating_size = int(self.population_size/2)\n\t\tself.num_mutations = int(self.population_size/4)\n\t\tself.random_sol_time = 10\n\t\tself.greedy_sol_time = 600\n\t\tself.total_solutions = 0\n\t\tself.bssf_updates = 0\n\t\tself.invalid_sols_generated = 0\n\t\tself.num_generations = 0\n\t\tsolution_timeout = 15.0\n\t\tself.last_solution_update = time.time()\n\t\tstart_time = time.time()\t\t\n\t\tself.init_population()\n\t\tself.max_generations_since_update = 5000\n\t\tself.num_generations_since_update = 0\n\t\twhile time.time()-start_time < time_allowance and self.num_generations_since_update < self.max_generations_since_update:\n\t\t\t# Determine Fitness --> Already done because our population is just the solutions\n\t\t\t# Select mating pool\n\t\t\tmating_population = self.select_mates()\n\t\t\t# Breed\n\t\t\tbreeding_order = np.random.permutation(mating_population)\n\t\t\tfor i in range(0, len(breeding_order), 2):\n\t\t\t\tself.breed(breeding_order[i], breeding_order[i+1])\n\t\t\t# Mutate\n\t\t\tfor _ in range(self.num_mutations):\n\t\t\t\tself.mutate(self.population[random.randint(0,len(self.population)-1)])\n\t\t\t# Prune to population size\n\t\t\tself.prune()\n\t\t\tself.num_generations += 1\n\t\t\tself.num_generations_since_update += 1\n\t\tend_time = time.time()\n\n\t\tresults = {}\n\t\tresults['cost'] = self.bssf.cost\n\t\tresults['time'] = end_time - start_time\n\t\tresults['count'] = self.bssf_updates\n\t\tresults['soln'] = self.bssf\n\t\tresults['max'] = self.num_generations\n\t\tresults['total'] = self.total_solutions\n\t\tresults['pruned'] = self.invalid_sols_generated\n\t\t# print(self.bssf_updates)\n\t\treturn results\n\n\n\tdef select_mates(self):\n\t\tpopulation_costs = np.array([1/p.cost for p in self.population])\n\t\tpopulation_distribution = population_costs/np.sum(population_costs)\n\t\treturn np.random.choice(self.population, self.mating_size, p=population_distribution)\n\n\tdef init_population(self):\n\t\t# self.population, bssf = [], self.defaultRandomTour()['soln'] \n\t\tself.population, bssf = self.greedy(time_allowance=self.greedy_sol_time, all_solns=True)['soln']\n\t\tself.bssf = bssf\n\t\tnum_iters = 0\n\t\t# while len(self.population) < 
self.population_size or num_iters < self.population_size*5:\n\t\t# \tsol = self.defaultRandomTour(time_allowance=self.random_sol_time)['soln']\n\t\t# \tself.add_sol(sol)\n\t\t# \tnum_iters += 1\n\t\twhile len(self.population) < self.population_size:\n\t\t\tself.add_sol(self.random())\n\n\tdef mutate(self, sol):\n\t\tidx = random.randint(0, len(sol.route)-2)\n\t\troute = sol.route.copy()\n\t\troute[idx], route[idx+1] = route[idx+1], route[idx]\n\t\tnew_sol = TSPSolution(route)\n\t\tself.add_sol(new_sol)\n\t\t\n\n\tdef add_sol(self, new_sol, keep_inf_prob=.5):\n\t\tself.total_solutions += 1\n\t\tif new_sol.cost < np.inf or random.random() < keep_inf_prob:\n\t\t\tself.population.append(new_sol)\n\t\telif new_sol.cost == np.inf:\n\t\t\tself.invalid_sols_generated += 1\n\t\tif self.bssf is None or new_sol.cost < self.bssf.cost:\n\t\t\t\tself.bssf = new_sol\n\t\t\t\tself.num_generations_since_update = 0\n\t\t\t\tself.last_solution_update = time.time()\n\t\t\t\tself.bssf_updates += 1\n\t\t\n\tdef breed(self, sol1, sol2):\n\t\trange1 = random.randint(0, len(sol1.route)-1)\n\t\trange2 = random.randint(0, len(sol1.route)-1)\n\n\t\tstart_idx = min(range1, range2)\n\t\tend_idx = max(range1, range2)\n\t\tself.add_sol(self.breed_single(sol1, sol2, start_idx, end_idx))\n\t\tself.add_sol(self.breed_single(sol2, sol1, start_idx, end_idx))\n\t\n\n\tdef breed_single(self, sol1, sol2, start_idx, end_idx):\n\t\tcities = set(map(lambda x: x._index, sol1.route[start_idx:end_idx+1]))\n\t\tnew_route = sol1.route.copy()\n\t\tj = 0\n\t\tfor i in range(len(sol1.route)):\n\t\t\t# keep the crossover segment [start_idx, end_idx] from sol1; fill every other position from sol2\n\t\t\tif start_idx <= i <= end_idx:\n\t\t\t\tcontinue\n\t\t\twhile sol2.route[j]._index in cities:\n\t\t\t\tj += 1\n\t\t\tnew_route[i] = sol2.route[j]\n\t\t\tj += 1\n\t\treturn TSPSolution(new_route)\n\n\tdef prune(self):\n\t\tnum_to_prune = len(self.population) - self.population_size\n\t\tif num_to_prune > 0:\n\t\t\tcosts = [p.cost for p in self.population]\n\t\t\tmax_cost = max(filter(lambda x: x < np.inf, costs))\n\t\t\tcosts = [c if c < np.inf else max_cost for c in costs]\n\t\t\tpopulation_costs = np.array(costs)\n\t\t\tpopulation_distribution = population_costs/np.sum(population_costs)\n\t\t\tdelete_routes = np.random.choice(self.population, num_to_prune, p=population_distribution)\n\t\t\tself.population = list(filter(lambda city: city not in delete_routes, self.population))\n\n\tdef random(self):\n\t\tresults = {}\n\t\tcities = self._scenario.getCities()\n\t\tncities = len(cities)\n\t\tperm = np.random.permutation(ncities)\n\t\troute = []\n\t\t# Now build the route using the random permutation\n\t\tfor i in range(ncities):\n\t\t\troute.append(cities[perm[i]])\n\t\tbssf = TSPSolution(route)\n\t\treturn bssf\n","sub_path":"TSPSolver.py","file_name":"TSPSolver.py","file_ext":"py","file_size_in_byte":14238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"472144497","text":"import sys\n\nN = int(sys.stdin.readline())\nK = int(sys.stdin.readline())\nboard = [[0 for _ in range(N+1)] for _ in range(N+1)]\nboard[0][0]=1\n\nr = 0\nc = 0\nfor i in range(K):\n r,c = map(int,sys.stdin.readline().split())\n if r>= 1 and c >=1:\n board[r-1][c-1] = 2\n \n\nL = int(sys.stdin.readline())\n\nsecs= [0 for _ in range(L+1)]\ndirection=[0 for _ in range(L+1)]\n\nfor i in range(L):\n s,dr = sys.stdin.readline().split()\n secs[i] = int(s)\n direction[i] = dr\n\ndx = [0,1,0,-1]\ndy = [-1,0,1,0]\n\nsec = 0\nx = 0\ny = 0\nht = 0 # head turn\ntt = 0 # tail turn\nd = 1\ntail_d = 1\ntail = [0,0]\nwhile(True):\n sec +=1\n next_x = x+ dx[d]\n next_y = y + dy[d]\n \n if next_x < 0 or next_x >=N or next_y < 0 or next_y >=N:\n break\n if board[next_y][next_x] == 1:\n break\n \n x = next_x\n y = next_y\n\n if board[y][x] == 2:\n board[tail[0]][tail[1]] = 1\n board[y][x] = 1\n elif board[y][x] == 0:\n board[y][x] = 1\n board[tail[0]][tail[1]] = 0\n next_t_y = tail[0] + dy[tail_d]\n next_t_x = tail[1] + dx[tail_d]\n\n if (next_t_x < 0 or next_t_x >=N or next_t_y < 0 or next_t_y >=N):\n if tt < L:\n if direction[tt] == 'L':\n tail_d = (tail_d+3)%4\n elif direction[tt] == 'D':\n tail_d = (tail_d+1)%4\n tt += 1\n else:\n break\n tail[0] = tail[0] + dy[tail_d]\n if(tail[0] < 0 or tail[0] >=N):\n break\n tail[1] = tail[1] + dx[tail_d]\n if(tail[1] < 0 or tail[1] >=N):\n break\n board[tail[0]][tail[1]] = 1\n\n if ht < L:\n if sec == secs[ht]:\n if direction[ht] == 'L':\n d = (d+3)%4\n elif direction[ht] == 'D':\n d = (d+1)%4\n ht += 1\n\nprint(sec) \n\n\n","sub_path":"2020_spring/samsungSW/3190.py","file_name":"3190.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"327453173","text":"import time\ndef addUpToV1(n):\n total = 0\n for i in range(n):\n total += (i+1)\n return(total)\n\ndef addUpToV2(n):\n return n * (n+1)/2\n\nn = int(input('Input Value:'))\n\nstart = time.time()\nprint('answer V1:', addUpToV1(n))\nprint('time V1', (time.time()-start)*1000)\n\nstart = time.time()\nprint('answer V2:', addUpToV2(n))\nprint('time V2:', (time.time()-start)*1000)","sub_path":"week3/addUp.py","file_name":"addUp.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"33400351","text":"import random as rand\nimport abc\n\n\nclass List:\n # Initializes an empty linked list.\n def __init__(self):\n self.liste = []\n\n # Returns true if this linked list is empty.\n def isEmpty(self):\n return len(self.liste) == 0\n\n # Returns the number of items in this linked list.\n def size(self):\n return len(self.liste)\n\n # Returns the first item added to this linked list\n def check(self):\n if self.isEmpty():\n raise ValueError(\"linked list underflow\")\n return self.liste[0]\n\n # Removes and returns the first item in the linked list\n def peek(self):\n if self.isEmpty():\n raise ValueError(\"linked list underflow\")\n item = self.liste.pop(0)\n return item\n\n def append(self, item):\n self.liste.append(item)\n\n def prepend(self, item):\n self.liste.insert(0, item)\n\n def accept(self, visitor):\n visitor.visit(self)\n\n\nclass Queue(List):\n\n # Initializes an empty queue.\n def __init__(self, max_size, *args, **kwargs):\n self.max_size = max_size\n super(Queue, self).__init__()\n\n def isFull(self):\n return len(self.liste) == self.max_size\n\n # Adds the item to this queue.\n def enqueue(self, item):\n if self.isFull():\n raise ValueError(\"Queue overflow\")\n self.append(item)\n\n # Removes and returns the item on this queue that was least recently added.\n def dequeue(self):\n try:\n return self.peek()\n except ValueError:\n raise ValueError(\"Queue underflow\")\n\n\nclass Stack(List):\n # Initializes an empty stack.\n def __init__(self, max_size, *args, **kwargs):\n self.max_size = max_size\n super(Stack, self).__init__()\n\n # Returns true if this stack is full.\n def isFull(self):\n return len(self.liste) == self.max_size\n\n # Adds the item to this stack.\n def push(self, item):\n if self.isFull():\n raise ValueError(\"Stack overflow\")\n self.prepend(item)\n\n # Removes and returns the item most recently added to this stack.\n def pop(self):\n try:\n return self.peek()\n except ValueError:\n raise ValueError(\"Stack underflow\")\n\n\nclass AutoAdaptiveStack(Stack):\n\n def __init__(self, max_trials, size_increment, *args, **kwargs):\n self.max_trials = max_trials\n self.size_increment = size_increment\n self.trials = 0\n self.waiting_list = []\n self.waiting_list_size = 1\n super(AutoAdaptiveStack, self).__init__(*args, **kwargs)\n\n def push(self, item):\n try:\n super(AutoAdaptiveStack, self).push(item)\n except ValueError:\n print(\"There is no free space actually :( try later\")\n if len(self.waiting_list) < self.waiting_list_size:\n self.waiting_list.append(item)\n self.trials += 1\n if self.trials == self.max_trials:\n self.max_size += self.size_increment\n for i in range(self.size_increment):\n if len(self.waiting_list) > 0:\n self.push(self.waiting_list.pop(0))\n else:\n break\n self.trials = 0\n\n def pop(self):\n try:\n removed_el = self.peek()\n if len(self.waiting_list) > 0:\n self.push(self.waiting_list.pop(0))\n return removed_el\n except ValueError:\n raise ValueError(\"Stack underflow\")\n\n\nclass AutoAdaptiveQueue(Queue):\n\n def __init__(self, max_trials, size_increment, *args, **kwargs):\n self.max_trials = max_trials\n self.size_increment = size_increment\n self.trials = 0\n self.waiting_list = []\n self.waiting_list_size = 1\n super(AutoAdaptiveQueue, self).__init__(*args, **kwargs)\n\n def enqueue(self, item):\n try:\n super(AutoAdaptiveQueue, self).enqueue(item)\n except ValueError:\n print(\"There is no free space actually :( try later\")\n if len(self.waiting_list) < self.waiting_list_size:\n 
self.waiting_list.append(item)\n self.trials += 1\n if self.trials == self.max_trials:\n self.max_size += self.size_increment\n for i in range(self.size_increment):\n if len(self.waiting_list) > 0:\n self.append(self.waiting_list.pop(0))\n else:\n break\n self.trials = 0\n\n def dequeue(self):\n try:\n removed_el = self.peek()\n if len(self.waiting_list) > 0:\n self.enqueue(self.waiting_list.pop(0))\n return removed_el\n except ValueError:\n raise ValueError(\"Queue underflow\")\n\n\nclass Printer(object, metaclass=abc.ABCMeta):\n def __init__(self, name):\n self.name = name\n\n def visit(self, list_obj):\n if isinstance(list_obj, Stack):\n display_message = \"\\n-------\\n\"\n i = 0\n while i < list_obj.size():\n display_message += ' ' + str(list_obj.liste[i]) + ' '\n display_message += \"\\n-------\\n\"\n i += 1\n elif isinstance(list_obj, Queue):\n display_message = \"\\n|\"\n i = 0\n while i < list_obj.size():\n display_message += str(list_obj.liste[i]) + \"|\"\n i += 1\n display_message += \"\\n\"\n else:\n display_message = \"\\n(\"\n i = 0\n while i < list_obj.size() - 1:\n display_message += str(list_obj.liste[i]) + \",\"\n i += 1\n display_message += str(list_obj.liste[i]) + \")\\n\"\n self.log(display_message)\n\n @abc.abstractmethod\n def log(self, display_message):\n raise NotImplementedError('child objects must define log to create a printer')\n\n\nclass ScreenPrinter(Printer):\n def __init__(self, *args, **kwargs):\n super(ScreenPrinter, self).__init__(*args, **kwargs)\n\n def log(self, display_message):\n print(self.name)\n print(display_message)\n\n\nclass FilePrinter(Printer):\n def __init__(self, file_path, *args, **kwargs):\n self.file_path = file_path\n super(FilePrinter, self).__init__(*args, **kwargs)\n\n def log(self, display_message):\n with open(self.file_path, 'a') as f:\n f.write(self.name)\n f.write(display_message)\n\n\nclass Calculator:\n\n @staticmethod\n def union(first_list, second_list):\n if isinstance(first_list, Queue) and isinstance(second_list, Queue):\n merged_queue = Queue(max_size=first_list.max_size + second_list.max_size)\n if first_list.size() > 0: # check that first_list actually contains elements\n merged_queue.liste = first_list.liste + second_list.liste\n else:\n merged_queue.liste = second_list.liste\n return merged_queue\n elif isinstance(first_list, Stack) and isinstance(second_list, Stack):\n merged_stack = Stack(max_size=first_list.max_size + second_list.max_size)\n if first_list.size() > 0: # check that first_list actually contains elements\n merged_stack.liste = first_list.liste + second_list.liste\n else:\n merged_stack.liste = second_list.liste\n return merged_stack\n # Check whether first_list and second_list are plain linked lists, neither stacks nor queues\n elif isinstance(first_list, List) and not (isinstance(first_list, Stack) or isinstance(first_list, Queue)) \\\n and isinstance(second_list, List) and not (\n isinstance(second_list, Stack) or isinstance(second_list, Queue)):\n merged_list = List()\n first_list_copy = first_list.liste.copy()\n second_list_copy = second_list.liste.copy()\n\n while len(first_list_copy) > 0 and len(second_list_copy) > 0:\n if rand.uniform(0, 1) < 0.5:\n merged_list.append(first_list_copy.pop(0))\n else:\n merged_list.append(second_list_copy.pop(0))\n while len(first_list_copy) > 0:\n merged_list.append(first_list_copy.pop(0))\n while len(second_list_copy) > 0:\n merged_list.append(second_list_copy.pop(0))\n return merged_list\n else:\n raise ValueError('The types of both lists are different')\n","sub_path":"TP6/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"644371052","text":"import unittest\n\nimport config\nfrom app import create_app\nfrom app.models import db, Inventory, Item, User, Shop, Discrepancy\n\n\nclass InventoryTestCase(unittest.TestCase):\n\n def setUp(self):\n self.app = create_app(config.TestConfig)\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n # add test user\n self.user = User(username=\"Test\")\n self.user.set_pwd('12345678')\n db.session.add(self.user)\n # add test items\n for i in range(3):\n db.session.add(Item(name=f'Item-{i}'))\n # add test shop\n self.shop = Shop(name='Test-Shop')\n db.session.add(self.shop)\n db.session.commit()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n def test_basic_inventory(self):\n self.assertIsNone(Inventory.query.first())\n inventory = Inventory(user=self.user, shop=self.shop)\n db.session.add(inventory)\n db.session.commit()\n inventory = Inventory.query.first()\n\n self.assertIsNotNone(inventory)\n self.assertIsNotNone(inventory.created_at)\n self.assertEqual(inventory.user, self.user)\n self.assertEqual(inventory.shop, self.shop)\n\n # add some association objects\n items = Item.query.all()\n for i in range(3):\n inventory.discrepancies.append(Discrepancy(item=items[i], target=3, actual=i))\n db.session.commit()\n\n self.assertEqual(len(inventory.discrepancies), 3)\n self.assertTrue(any([inventory.discrepancies[0].item == x for x in Item.query.all()]))\n\n def test_user_deletion(self):\n # should NOT delete the inventory\n inventory = Inventory(user=self.user, shop=self.shop)\n db.session.add(inventory)\n db.session.commit()\n db.session.delete(self.user)\n db.session.commit()\n inventory = Inventory.query.first()\n self.assertIsNotNone(inventory) # inventory still exists\n self.assertIsNone(inventory.user) # but has no associated user\n\n def test_shop_deletion(self):\n # should delete the inventory\n inventory = Inventory(user=self.user, shop=self.shop)\n db.session.add(inventory)\n db.session.commit()\n db.session.delete(self.shop)\n db.session.commit()\n self.assertIsNone(Inventory.query.first()) # no shop -> no inventory\n\n def test_delete_inventory(self):\n # should delete ALL discrepancies\n inventory = Inventory(user=self.user, shop=self.shop)\n db.session.add(inventory)\n db.session.commit()\n # add some association objects\n items = Item.query.all()\n for i in range(3):\n inventory.discrepancies.append(Discrepancy(item=items[i], target=3, actual=i))\n db.session.commit()\n\n self.assertEqual(len(Discrepancy.query.all()), 3) # there are existing discrepancies\n db.session.delete(inventory)\n db.session.commit()\n # after the associated inventory got deleted they are also gone\n self.assertEqual(len(Discrepancy.query.all()), 0)\n self.assertIsNone(Discrepancy.query.first())\n\n def test_discrepancy(self):\n self.assertIsNone(Inventory.query.first())\n inventory = Inventory(user=self.user, shop=self.shop)\n db.session.add(inventory)\n db.session.commit()\n inventory = Inventory.query.first()\n\n # add some association objects\n items = Item.query.all()\n for i in range(3):\n inventory.discrepancies.append(Discrepancy(item=items[i], target=3, actual=i))\n db.session.commit()\n self.assertEqual(Discrepancy.query.first().inventory, inventory)\n self.assertTrue(any([Discrepancy.query.first().item == x for x in items]))\n\n def test_delete_discrepancy(self):\n # shouldn´t change anything\n self.assertIsNone(Inventory.query.first())\n inventory = Inventory(user=self.user, 
shop=self.shop)\n db.session.add(inventory)\n db.session.commit()\n inventory = Inventory.query.first()\n\n # add some association objects\n items = Item.query.all()\n for i in range(3):\n inventory.discrepancies.append(Discrepancy(item=items[i], target=3, actual=i))\n db.session.commit()\n\n for d in Discrepancy.query.all():\n db.session.delete(d)\n db.session.commit()\n\n self.assertIsNotNone(Inventory.query.first())\n self.assertEqual(len(inventory.discrepancies), 0)\n\n def test_delete_item(self):\n # should delete discrepancy but not inventory\n # shouldn´t change anything\n self.assertIsNone(Inventory.query.first())\n inventory = Inventory(user=self.user, shop=self.shop)\n db.session.add(inventory)\n db.session.commit()\n inventory = Inventory.query.first()\n\n # add a association object\n item = Item.query.first()\n inventory.discrepancies.append(Discrepancy(item=item, target=3, actual=93))\n db.session.commit()\n\n # remove it again\n db.session.delete(Discrepancy.query.first())\n db.session.commit()\n\n # deletes discrepancy\n self.assertIsNone(Discrepancy.query.first())\n # but inventory remains\n self.assertIsNotNone(Inventory.query.first())\n self.assertEqual(len(Inventory.query.first().discrepancies), 0)\n","sub_path":"tests/test_model_inventory.py","file_name":"test_model_inventory.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"424516917","text":"value = 0\nwhile value <= 10:\n print(value)\n value += 1\n\nstart = 100\nwhile start > 0:\n print(\"Start is {}\".format(start))\n start -= 10\n print(\"Start is now {}\".format(start))\n print()\n\nname = \"\"\nwhile len(name) < 2:\n print(\"Your full name\")\n name = input(\"Enter your name: \")\n\nnumber = 0\nwhile number != 3:\n print(\"Type the number 3.\")\n number = int(input(\">_ \"))\nprint(\"You typed the number 3!\")\nprint(\"Good Job!\")\n\nimport random # This only has to happen once!\nnum = 0\ncounter = 0\nwhile num != 6:\n num = random.randint(1, 100)\n counter += 1\n print(\"Our random number is {}\".format(num))\nprint(\"We got the number 6!\")\nprint(\"It took us {} tries to get the number 6\"\n .format(counter))\n","sub_path":"While Loops.py","file_name":"While Loops.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"473896441","text":"from tests.cases.base import TestAuthorized\nfrom tests.pages.pin import PinDetailsPage\nfrom tests.pages.user_details import UserDetailsPage\n\n\nclass Test(TestAuthorized):\n def setUp(self):\n super().setUp()\n self.page = UserDetailsPage(self.driver, \"testTest\")\n\n def test_subscribe(self):\n self.page.form.subscribe()\n try:\n self.assertTrue(\n self.page.form.check_subscription(), \"You have not subscribed to user\"\n )\n except TimeoutError:\n self.fail(\"Cannot find subscription button!\")\n\n def test_unsubscribe(self):\n self.page.form.unsubscribe()\n try:\n self.assertFalse(\n self.page.form.check_subscription(estimated=False),\n \"You have not unsubscribed from user\",\n )\n except TimeoutError:\n self.fail(\"Cannot find subscription button!\")\n\n def test_open_pin(self):\n name, link = self.page.form.open_pin(0)\n page = PinDetailsPage(self.driver, False)\n real_name = page.form.get_title()\n self.assertEqual(real_name, name, \"Names are different\")\n self.assertEqual(self.driver.current_url, link, \"Wrong page opened\")\n","sub_path":"tests/cases/user_test.py","file_name":"user_test.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"234113705","text":"from os import cpu_count\nfrom typing import Optional\n\nimport psutil\nimport pydantic\nfrom pydantic import Field, PositiveInt\n\nfrom gfw_pixetl import get_module_logger\nfrom gfw_pixetl.models.enums import DstFormat\nfrom gfw_pixetl.settings.models import EnvSettings\n\nLOGGER = get_module_logger(__name__)\n\n\nclass Secret:\n \"\"\"Holds a string value that should not be revealed in tracebacks etc.\n\n You should cast the value to `str` at the point it is required.\n \"\"\"\n\n def __init__(self, value: str):\n self._value = value\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return f\"{class_name}('**********')\"\n\n def __str__(self) -> str:\n return self._value\n\n\nclass Globals(EnvSettings):\n\n #####################\n # General\n #####################\n\n default_dst_format = DstFormat.geotiff\n\n #####################\n # Resource management\n ######################\n cores: PositiveInt = Field(cpu_count(), description=\"Number of CPU cores available\")\n num_processes: PositiveInt = Field(\n cpu_count(), description=\"Max number of parallel processes to use\"\n )\n max_mem: PositiveInt = Field(\n psutil.virtual_memory()[1] / 1000000,\n description=\"Max memory available to pixETL\",\n )\n divisor: PositiveInt = Field(\n 4,\n description=\"Fraction of memory per worker to use to compute maximum block size.\"\n \"(ie 4 => size = 25% of available memory)\",\n )\n workers: PositiveInt = Field(\n cpu_count(), description=\"Number of workers to use to execute job.\"\n )\n\n ########################\n # PostgreSQL authentication\n ########################\n db_username: Optional[str] = Field(\n None, env=\"PGUSER\", description=\"PostgreSQL user name\"\n )\n db_password: Optional[Secret] = Field(\n None, env=\"PGPASSWORD\", description=\"PostgreSQL password\"\n )\n db_host: Optional[str] = Field(None, env=\"PGHOST\", description=\"PostgreSQL host\")\n db_port: Optional[int] = Field(None, env=\"PGPORT\", description=\"PostgreSQL port\")\n db_name: Optional[str] = Field(\n None, env=\"PGDATABASE\", description=\"PostgreSQL database name\"\n )\n\n ######################\n # AWS configuration\n ######################\n aws_region: str = Field(\"us-east-1\", description=\"AWS region\")\n aws_batch_job_id: Optional[str] = Field(None, description=\"AWS Batch job ID\")\n aws_job_role_arn: Optional[str] = Field(\n None,\n description=\"ARN of the AWS IAM role which runs the batch job on docker host\",\n )\n aws_gcs_key_secret_arn: Optional[str] = Field(\n None, description=\"ARN of AWS Secret which holds GCS key\"\n )\n\n aws_endpoint_url: Optional[str] = Field(\n None, description=\"Endpoint URL for AWS S3 Server (required for Moto)\"\n )\n\n aws_secretsmanager_url: Optional[str] = Field(\n None,\n description=\"Endpoint URL for AWS Secretsmanager Server (required for Moto)\",\n )\n\n @pydantic.validator(\"db_password\", pre=True, always=True)\n def hide_password(cls, v):\n return Secret(v) or None\n\n @pydantic.root_validator()\n def set_processes_workers(cls, values):\n cores = values.get(\"cores\")\n\n # Don't allow specifying more processes than cores\n num_processes = max(min(cores, values.get(\"num_processes\")), 1)\n\n # Don't allow specifying more workers than processes\n workers = max(min(num_processes, values.get(\"workers\")), 1)\n\n values[\"num_processes\"] = num_processes\n values[\"workers\"] = workers\n\n LOGGER.info(f\"Set num_processes to {num_processes}\")\n LOGGER.info(f\"Set workers to {workers}\")\n\n return 
values\n\n @pydantic.validator(\"max_mem\", pre=True, always=True)\n def set_max_mem(cls, v, *, values, **kwargs):\n max_mem = max(min(psutil.virtual_memory()[1] / 1000000, float(v)), 1)\n LOGGER.info(f\"Set maximum memory to {max_mem} MB\")\n return max_mem\n\n\nGLOBALS = Globals()\n","sub_path":"gfw_pixetl/settings/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"292695102","text":"import cv2\nimport numpy as np\nimport time\nfrom matplotlib import pyplot as plt\n\n# Função histograma\ndef histograma(img):\n vet = np.zeros((256), dtype=np.uint16)\n img = np.uint8(img)\n for l in range(0, altura):\n for c in range(0, largura):\n vet[img[l,c]] += 1\n plt.plot(vet)\n plt.title('Histograma')\n plt.ylabel('Quantidade de pixels')\n plt.xlabel('Intensidade de cor')\n string = str(int(time.time()))\n print(string)\n plt.savefig('processadas/'+nome+'_histograma_'+string)\n axes = plt.gca()\n axes.set_ylim(0,)\n plt.show()\n\n# Altera a intensidade do pixel, para mais ou para menos\ndef alteraBrilho(img, intensidade):\n nova = np.zeros((altura,largura))\n for l in range(0, altura):\n for c in range(0, largura):\n nova[l,c] = max(min(img[l,c]+intensidade,255),0)\n cv2.imshow('Imagem alterada', np.uint8(nova))\n cv2.imwrite('processadas/'+nome+'_altera_brilho_'+str(intensidade)+'.png', np.uint8(nova))\n cv2.waitKey(0)\n return np.uint8(nova)\n\n# Aplica o filtro de média à imagem\ndef media(img):\n nova = np.zeros((altura,largura))\n for l in range(0, altura):\n for c in range(0, largura):\n soma = 0\n interacoes = 0\n for lm in range (-1,2):\n for cm in range (-1,2):\n if(l+lm>=0 and c+cm>=0 and l+lm= 0 and c + cm >= 0 and l + lm < altura and c + cm < largura):\n m[i] = img[l+lm,c+cm]\n else:\n m[i] = 0\n i += 1\n nova[l,c] = np.median(m)\n cv2.imshow('Imagem - Filtro de média', np.uint8(nova))\n cv2.imwrite('processadas/' + nome + '_mediana.png', np.uint8(nova))\n cv2.waitKey(0)\n return np.uint8(nova)\n\n# Aplica filtros na imagem com o uso de máscaras\ndef mascara(img, tipo):\n nova = np.zeros((altura,largura))\n m = np.zeros((3, 3))\n\n if(tipo==\"sobel\"):\n m[0,0] = 1\n m[0, 1] = 0\n m[0, 2] = -1\n m[1, 0] = 2\n m[1, 1] = 0\n m[1, 2] = -2\n m[2, 0] = 1\n m[2, 1] = 0\n m[2, 2] = -1\n elif (tipo == \"prewitt\"):\n m[0, 0] = -1\n m[0, 1] = 0\n m[0, 2] = 1\n m[1, 0] = -1\n m[1, 1] = 0\n m[1, 2] = 1\n m[2, 0] = -1\n m[2, 1] = 0\n m[2, 2] = 1\n elif (tipo == \"laplaciano\"):\n m[0, 0] = 0\n m[0, 1] = -1\n m[0, 2] = 0\n m[1, 0] = -1\n m[1, 1] = 4\n m[1, 2] = -1\n m[2, 0] = 0\n m[2, 1] = -1\n m[2, 2] = 0\n elif (tipo == \"passa-alta\"):\n m[0, 0] = -1\n m[0, 1] = -1\n m[0, 2] = -1\n m[1, 0] = -1\n m[1, 1] = 8\n m[1, 2] = -1\n m[2, 0] = -1\n m[2, 1] = -1\n m[2, 2] = -1\n\n for l in range(0, altura):\n for c in range(0, largura):\n soma = 0\n for lm in range (-1,2):\n for cm in range (-1,2):\n if (l + lm >= 0 and c + cm >= 0 and l + lm < altura and c + cm < largura):\n soma = soma + (int(m[lm+1,cm+1]) * img[l+lm,c+cm])\n else:\n soma = soma + (int(m[lm+1,cm+1]) * img[l,c])\n nova[l,c] = max(min(soma, 255), 0)\n cv2.imshow('Imagem - Filtro mask', np.uint8(nova))\n cv2.imwrite('processadas/' + nome + '_mascara_'+tipo+'.png', nova)\n cv2.waitKey(0)\n return np.uint8(nova)\n\n\n#Definições de arquivo\nnome = 'passaro'\nimagem = cv2.imread('imagens/'+nome+'.jpg')\ncv2.imwrite('processadas/' + nome + '_original.png', imagem)\nimagem = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)\ncv2.imwrite('processadas/' + nome + '_cinza.png', imagem)\n#cv2.imshow('Imagem original', imagem)\n#cv2.waitKey(0)\n\n#Informações de tamanho da imagem\naltura = imagem.shape[0]\nlargura = 
imagem.shape[1]\n\nhistograma(imagem)\nhistograma(mascara(imagem,\"passa-alta\"))\nhistograma(mascara(imagem,\"laplaciano\"))\nhistograma(mascara(imagem,\"sobel\"))\n\n#alteraBrilho(imagem,-90)\n#alteraBrilho(imagem,90)\n#media(imagem)\n#mediana(imagem)\n#mascara(imagem,\"sobel\")\n","sub_path":"atividades.py","file_name":"atividades.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"106906326","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom django.conf.urls import url\n\nfrom . import views\n\nPVAR_CATEGORIES_ID = r\"(?P[0-9]+)\"\nPVAR_ANOTHER_CATEGORIES_ID = r\"(?P[a-z0-9-]+)\"\n\n\nurlpatterns = [\n ##############\n # categories #\n ##############\n url(\n r\"^api/v2/categories_metas/$\",\n views.CategoriesViewSet.as_view(\n {\n \"get\": \"list_metas\",\n }\n ),\n name=\"categories.metas\",\n ),\n url(\n r\"^api/v2/categories/$\",\n views.CategoriesViewSet.as_view(\n {\n \"get\": \"list\",\n \"post\": \"create\",\n }\n ),\n name=\"categories\",\n ),\n url(\n r\"^api/v2/categories/default/$\",\n views.CategoriesViewSet.as_view(\n {\n \"get\": \"get_default\",\n }\n ),\n name=\"categories.default\",\n ),\n url(\n r\"^api/v2/categories/%s/$\" % PVAR_CATEGORIES_ID,\n views.CategoriesViewSet.as_view(\n {\n \"patch\": \"update\",\n \"delete\": \"delete\",\n }\n ),\n name=\"categories.actions\",\n ),\n url(\n r\"^api/v2/categories/%s/switch_order/%s/$\" % (PVAR_CATEGORIES_ID, PVAR_ANOTHER_CATEGORIES_ID),\n views.CategoriesViewSet.as_view(\n {\n \"patch\": \"switch_order\",\n }\n ),\n name=\"categories.switch_order\",\n ),\n url(\n r\"^api/v2/categories/%s/sync/$\" % PVAR_CATEGORIES_ID,\n views.CategoriesSyncViewSet.as_view(\n {\n \"post\": \"sync\",\n }\n ),\n name=\"categories.sync\",\n ),\n url(\n r\"^api/v2/categories/%s/activate/$\" % PVAR_CATEGORIES_ID,\n views.CategoriesSyncViewSet.as_view(\n {\n \"post\": \"activate\",\n }\n ),\n name=\"categories.activate\",\n ),\n url(\n r\"^api/v2/categories/%s/test_connection/$\" % PVAR_CATEGORIES_ID,\n views.CategoriesSyncViewSet.as_view(\n {\n \"post\": \"test_connection\",\n }\n ),\n name=\"categories.test_connection\",\n ),\n url(\n r\"^api/v2/categories/%s/test_fetch_data/$\" % PVAR_CATEGORIES_ID,\n views.CategoriesSyncViewSet.as_view(\n {\n \"post\": \"test_fetch_data\",\n }\n ),\n name=\"categories.test_fetch_data\",\n ),\n url(\n r\"^api/v2/categories/%s/export/$\" % PVAR_CATEGORIES_ID,\n views.CategoriesExportViewSet.as_view(\n {\n \"get\": \"export\",\n }\n ),\n name=\"categories.export\",\n ),\n url(\n r\"^api/v2/categories/%s/export_template/$\" % PVAR_CATEGORIES_ID,\n views.CategoriesExportViewSet.as_view(\n {\n \"get\": \"export_template\",\n }\n ),\n name=\"categories.export_template\",\n ),\n]\n","sub_path":"src/saas/bkuser_shell/categories/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"15866226","text":"\n\n#calss header\nclass _CHIGGER():\n\tdef __init__(self,): \n\t\tself.name = \"CHIGGER\"\n\t\tself.definitions = [u\"a type of flea (= very small jumping insect) that lays eggs beneath an animal's or person's skin\", u\"a type of mite (= very small animal similar to a spider) that lays eggs under an animal's or person's skin causing small red bumps that are very itchy (= you want to scratch them)\"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_chigger.py","file_name":"_chigger.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"521283825","text":"#!/usr/bin/env python\n# (C) 2017 OpenEye Scientific Software Inc. All rights reserved.\n#\n# TERMS FOR USE OF SAMPLE CODE The software below (\"Sample Code\") is\n# provided to current licensees or subscribers of OpenEye products or\n# SaaS offerings (each a \"Customer\").\n# Customer is hereby permitted to use, copy, and modify the Sample Code,\n# subject to these terms. OpenEye claims no rights to Customer's\n# modifications. Modification of Sample Code is at Customer's sole and\n# exclusive risk. Sample Code may require Customer to have a then\n# current license or subscription to the applicable OpenEye offering.\n# THE SAMPLE CODE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED. OPENEYE DISCLAIMS ALL WARRANTIES, INCLUDING, BUT\n# NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NONINFRINGEMENT. In no event shall OpenEye be\n# liable for any damages or liability in connection with the Sample Code\n# or its use.\n\n#############################################################################\n# Modifies the SD data of a set of input molecules by clearing all tags,\n# defining which tags to keep or defining which tags to remove\n#############################################################################\nimport sys\nfrom openeye import oechem\n\n\ndef ClearProps(ifs, ofs):\n for mol in ifs.GetOEGraphMols():\n oechem.OEClearSDData(mol)\n oechem.OEWriteMolecule(ofs, mol)\n\n\ndef KeepProps(proplist, ifs, ofs):\n for mol in ifs.GetOEGraphMols():\n for dp in oechem.OEGetSDDataPairs(mol):\n if dp.GetTag() not in proplist:\n oechem.OEDeleteSDData(mol, dp.GetTag())\n oechem.OEWriteMolecule(ofs, mol)\n\n\ndef RemoveProps(proplist, ifs, ofs):\n for mol in ifs.GetOEGraphMols():\n for tag in proplist:\n oechem.OEDeleteSDData(mol, tag)\n oechem.OEWriteMolecule(ofs, mol)\n\n\ndef ModProps(itf, ifs, ofs):\n proplist = []\n if itf.HasString(\"-keep\"):\n for prop in itf.GetStringList(\"-keep\"):\n proplist.append(prop)\n KeepProps(proplist, ifs, ofs)\n elif itf.HasString(\"-remove\"):\n for prop in itf.GetStringList(\"-remove\"):\n proplist.append(prop)\n RemoveProps(proplist, ifs, ofs)\n elif itf.GetBool(\"-clearAll\"):\n ClearProps(ifs, ofs)\n\n\ndef main(argv=[__name__]):\n itf = oechem.OEInterface(InterfaceData, argv)\n\n haskeep = itf.HasString(\"-keep\")\n hasremove = itf.HasString(\"-remove\")\n hasclear = itf.GetBool(\"-clearAll\")\n\n numoption = 0\n for hasoption in [haskeep, hasremove, hasclear]:\n if hasoption:\n numoption += 1\n\n if numoption != 1:\n oechem.OEThrow.Usage(\"Need to pick one from -keep, -remove, or -clearAll\")\n\n ifs = oechem.oemolistream()\n if not ifs.open(itf.GetString(\"-i\")):\n oechem.OEThrow.Fatal(\"Unable to open %s for reading\" % itf.GetString(\"-i\"))\n if not oechem.OEIsSDDataFormat(ifs.GetFormat()):\n oechem.OEThrow.Fatal(\"Only works for input file formats that support SD data (sdf,oeb,csv)\")\n\n ofs = oechem.oemolostream()\n if not ofs.open(itf.GetString(\"-o\")):\n oechem.OEThrow.Fatal(\"Unable to open %s for writing\" % itf.GetString(\"-o\"))\n if not oechem.OEIsSDDataFormat(ofs.GetFormat()):\n oechem.OEThrow.Fatal(\"Only works for output file formats that support SD data \\\n (sdf,oeb,csv)\")\n\n ModProps(itf, ifs, ofs)\n\n\nInterfaceData = \"\"\"\n!BRIEF [-remove] [-keep] [-clearAll] -i -o \n!PARAMETER -i\n !ALIAS -in\n !TYPE string\n !REQUIRED true\n !BRIEF Input file name\n !END\n!PARAMETER -o\n !ALIAS -out\n !TYPE string\n !REQUIRED true\n !BRIEF Output 
file name\n !END\n!PARAMETER -keep\n !ALIAS -k\n !TYPE string\n !LIST true\n !BRIEF SD tags to be kept\n !END\n!PARAMETER -remove\n !ALIAS -r\n !TYPE string\n !LIST true\n !BRIEF SD tags to be removed\n !END\n!PARAMETER -clearAll\n !ALIAS -c\n !TYPE bool\n !DEFAULT false\n !BRIEF Removes all SD tags\n !END\n!END\n\"\"\"\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","sub_path":"venv/Scripts/sdfmodprops.py","file_name":"sdfmodprops.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"93467514","text":"import io\nimport os\n\n# Imports the Google Cloud client library\nfrom google.cloud import speech\n\n# Instantiates a client\nspeech_client = speech.Client()\n\n# The name of the audio file to transcribe\nfile_name = os.path.join(\n\tos.path.dirname(__file__),\n\t'resources',\n\t'obbatriple.flac')\n\n# Loads the audio into memory\nwith io.open(file_name, 'rb') as audio_file:\n\tcontent = audio_file.read()\n\taudio_sample = speech_client.sample(\n\t\tcontent,\n\t\tsource_uri=None,\n\t\tencoding='FLAC',\n\t\tsample_rate=48000)\n\n# Detects speech in the audio file\nalternatives = speech_client.speech_api.sync_recognize(audio_sample)\n\nfor alternative in alternatives:\n\tprint('Transcript: {}'.format(alternative.transcript))\n","sub_path":"tests-google-cloud-sdk/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"345524166","text":"import json\nimport pandas as pd\nfrom gensim.models import Word2Vec\n\njson_file = 'intents.json'\nwith open('intents.json','r') as f:\n\tdata = json.load(f)\n\ndf = pd.DataFrame(data)\nprint(df)\ndf['patterns'] = df['patterns'].apply(', '.join) \nprint(df)\n\nfrom nltk.corpus import stopwords\nfrom textblob import Word\nimport string\nstop = stopwords.words('english')\ndf['patterns'] = df['patterns'].apply(lambda x:' '.join(x.lower() for x in x.split())) # lower every word\ndf['patterns']= df['patterns'].apply(lambda x: ' '.join(x for x in x.split() if x not in string.punctuation)) # exclude punctuation\ndf['patterns']= df['patterns'].str.replace('[^\\w\\s]','') # replace non-word\ndf['patterns']= df['patterns'].apply(lambda x: ' '.join(x for x in x.split() if not x.isdigit())) # exclude numbers\ndf['patterns'] = df['patterns'].apply(lambda x:' '.join(x for x in x.split() if not x in stop)) # exclude stopwords\ndf['patterns'] = df['patterns'].apply(lambda x: \" \".join([Word(word).lemmatize() for word in x.split()])) # textblob is another library like nltk\n\nbigger_list=[]\nfor i in df['patterns']:\n li = list(i.split(\" \"))\n bigger_list.append(li)\t\nmodel = Word2Vec(bigger_list,min_count=1,size=300,workers=4)\nprint(\"Data format for the overall list:\",bigger_list)\n#custom data is fed to machine for further processing\nmodel = Word2Vec(bigger_list, min_count=1,size=300,workers=4)\n\nmodel.save(\"model.bin\")\ndel model\nmodel = Word2Vec.load('model.bin')\nprint(model)\n\n# Most Similar words checking\nsimilar_words = model.most_similar('thanks')\t\nprint(similar_words)\n\n# Does not match word from words supplied\ndissimlar_words = model.doesnt_match('See you later, thanks for visiting'.split())\nprint(dissimlar_words)\n\n# Finding the similarity between two words\nsimilarity_two_words = model.similarity('please','see')\nprint(\"Please provide the similarity between these two words:\")\nprint(similarity_two_words)\n\nsimilar = model.similar_by_word('kind')\nprint(similar)","sub_path":"complex_word2vec.py","file_name":"complex_word2vec.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"613389506","text":"GET_MEMBERS_FIELDS = {\n \"id\": {},\n \"username\": {},\n \"email\": {},\n \"member_purchases\": {\n \"id\": {},\n \"purchase_date\": {},\n \"amount\": {},\n \"approved\": {}\n },\n \"member_payments\": {\n \"id\": {},\n \"payment_date\": {},\n \"recipient\": {},\n \"amount\": {},\n \"confirmed\": {}\n },\n \"owed_amounts\": {\n \"id\": {},\n \"creditor_id\": {},\n \"living_group_id\": {},\n \"amount\": {}\n },\n \"owed_monthly_expenses\": {\n \"id\": {},\n \"living_group\": {},\n \"amount\": {}\n }\n}\n\nUPDATE_MEMBER_FIELDS = {\n \"id\": {},\n \"username\": {},\n \"email\": {},\n}\n\nGET_LIVING_GROUPS_FIELDS = {\n \"id\": {},\n \"name\": {},\n \"admin_id\": {},\n \"members\": {\n \"id\": {},\n \"username\": {},\n \"email\": {},\n \"member_purchases\": {\n \"id\": {},\n \"purchase_date\": {},\n \"amount\": {},\n \"approved\": {},\n \"living_group\": {}\n },\n \"member_payments\": {\n \"id\": {},\n \"payment_date\": {},\n \"recipient\": {},\n \"living_group\": {},\n \"amount\": {},\n \"confirmed\": {}\n },\n \"owed_amounts\": {\n \"id\": {},\n \"creditor_id\": {},\n \"living_group_id\": {},\n \"amount\": {}\n },\n \"owed_monthly_expenses\": {\n \"id\": {},\n \"living_group\": {},\n \"amount\": {}\n }\n },\n \"monthly_shared_expenses\": {\n \"id\": {},\n \"name\": {},\n \"start_date\": {},\n \"end_date\": {},\n \"amount\": {}\n },\n \"shopping_categories\": {\n \"id\": {},\n \"name\": {}\n },\n \"shopping_lists\": {\n \"id\": {},\n \"name\": {}\n }\n}\n\nUPDATE_LIVING_GROUP_FIELDS = {\n \"id\": {},\n \"name\": {},\n \"admin_id\": {}\n}\n\nCREATE_LIVING_GROUP_FIELDS = {\n \"id\": {},\n \"name\": {},\n \"admin_id\": {},\n \"members\": {\n \"id\": {},\n \"username\": {},\n \"email\": {},\n \"member_purchases\": {\n \"id\": {},\n \"purchase_date\": {},\n \"amount\": {},\n \"approved\": {},\n \"living_group\": {}\n },\n \"member_payments\": {\n \"id\": {},\n \"payment_date\": {},\n \"recipient\": {},\n \"living_group\": {},\n \"amount\": {},\n \"confirmed\": {}\n },\n \"owed_amounts\": {\n \"id\": {},\n \"creditor_id\": {},\n \"living_group_id\": {},\n \"amount\": {}\n },\n \"owed_monthly_expenses\": {\n \"id\": {},\n \"living_group\": {},\n \"amount\": {}\n }\n },\n \"monthly_shared_expenses\": {},\n \"shopping_categories\": {},\n \"shopping_lists\": {}\n}\n\nGET_MONTHLY_SHARED_EXPENSES_FIELDS = {\n \"id\": {},\n \"name\": {},\n \"start_date\": {},\n \"end_date\": {},\n \"amount\": {}\n}\n\nGET_SHOPPING_CATEGORIES_FIELDS = {\n \"id\": {},\n \"name\": {}\n}\n\nGET_MEMBER_PURCHASES_FIELDS = {\n \"id\": {},\n \"purchase_date\": {},\n \"amount\": {},\n \"approved\": {},\n \"living_group\": {}\n}\n\nGET_MEMBER_PAYMENTS_FIELDS = {\n \"id\": {},\n \"payment_date\": {},\n \"recipient\": {},\n \"living_group\": {},\n \"amount\": {},\n \"confirmed\": {}\n}\n\nGET_SHOPPING_LISTS_FIELDS = {\n \"id\": {},\n \"name\": {},\n \"added_date\": {},\n \"list_items\": {\n \"id\": {},\n \"name\": {},\n \"added_date\": {},\n \"member_purchase\": {},\n \"gotten\": {}\n }\n}\n\nUPDATE_SHOPPING_LIST_FIELDS = {\n \"id\": {},\n \"name\": {}\n}\n\nLIST_ITEM_FIELDS = {\n \"id\": {},\n \"name\": {},\n \"added_date\": {},\n \"member_purchase\": {},\n \"gotten\": {}\n}\n\nLOGS_FIELDS = {\n \"id\": {},\n \"date\": {},\n \"log\": {}\n}\n\nNOT_ADMIN = {\n \"code\": 401,\n \"status\": \"Failure\",\n \"message\": \"Cannot perform this action, not admin member.\"\n}\n\nFAILED_ACCESS = {\n \"code\": 401,\n \"status\": \"Failure\",\n \"message\": \"Cannot get/change what does not exist/belong to 
you.\"\n}\n","sub_path":"engine/api/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"131856686","text":"from __future__ import print_function\nimport tensorflow as tf\n\nimport utility as util\nfrom model import config_model\n\nflags = tf.app.flags\nflags.DEFINE_string(\"ps_hosts\",\"192.168.10.200:2222\", \"Comma-separated list of hostname:port pairs\")\nflags.DEFINE_string(\"worker_hosts\", \"192.168.10.155:2222, 192.168.10.181:2222\", \"Comma-separated list of hostname:port pairs\")\nflags.DEFINE_string(\"job_name\", None, \"Job name: worker or ps\")\nflags.DEFINE_integer(\"task_index\", None, \"Worker task index, should be >= 0.\")\nflags.DEFINE_string('train_data', './data/train_32x32.mat', 'File of input data to train')\nflags.DEFINE_integer('iteration_steps', 10000, 'Number of global training steps to perform')\nflags.DEFINE_integer('display_delay', 50, 'Delay of global training steps to print')\nflags.DEFINE_integer('batch_size', 64, 'Size of samples for one iteration')\nflags.DEFINE_string('log_dir', None, 'Directory of log')\nFLAGS = flags.FLAGS\n\ndef main(argv):\n # check flags\n if FLAGS.job_name is None or (FLAGS.job_name != \"ps\" and FLAGS.job_name != \"worker\"):\n raise ValueError(\"Must specify an explicit `job_name`, one of 'ps' and 'worker'\")\n if FLAGS.task_index is None or FLAGS.task_index < 0:\n raise ValueError(\"Must specify an explicit `task_index`\")\n print(\"job name = %s\" % FLAGS.job_name)\n print(\"task index = %d\" % FLAGS.task_index)\n\n # construct the cluster\n ps_spec = map(lambda str: str.strip(), FLAGS.ps_hosts.split(\",\"))\n worker_spec = map(lambda str: str.strip(), FLAGS.worker_hosts.split(\",\"))\n cluster = tf.train.ClusterSpec({ \"ps\": ps_spec, \"worker\": worker_spec})\n\n # server\n server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)\n\n # ps\n if FLAGS.job_name == \"ps\":\n server.join()\n else:\n print(\"Loading data from\", FLAGS.train_data)\n samples, labels = util.load(FLAGS.train_data)\n print(\"The shape of train samples:\", samples.shape)\n print(\"The shape of train labels:\", labels.shape)\n dnn = config_model(input_samples_shape=(FLAGS.batch_size, samples.shape[1], samples.shape[2], samples.shape[3]), input_labels_shape=(FLAGS.batch_size, labels.shape[1]))\n dnn.distributed_train(samples, labels, cluster, server, FLAGS.task_index, batch_size=FLAGS.batch_size, iteration_steps=FLAGS.iteration_steps, display_delay=FLAGS.display_delay, sum_dir=FLAGS.log_dir)\n\nif __name__ == '__main__':\n tf.app.run()","sub_path":"svhn/distributed_train.py","file_name":"distributed_train.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"207131392","text":"from styx_msgs.msg import TrafficLight\nimport numpy as np\nimport cv2\nimport tensorflow as tf\nimport rospy\nimport traceback\nimport json\nimport time\nimport os\nimport re\n\ndirname = os.path.dirname(__file__)\n\nroot_path = re.findall('^/home/.*Capstone/', dirname)[0]\n\nGRAPH_FILE = os.path.join(root_path, 'data/graphs/frozen_inference_graph.pb')\n\n\nBOXES_SCORE_MIN = 0.5 # Minimum passing score for detection\n\n'''\ndef log(arg):\n #writes log information to debug file\n with open('logfile.txt','a') as f:\n f.write(arg+'\\n')\n'''\n\n\ndef load_graph(graph_file):\n # Loads a frozen inference graph\n graph = tf.Graph()\n with graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(graph_file, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n return graph\n\n# ----------FUNCTIONS FOR CLASSIFICATION----------\n\n\ndef draw_boxes(img, bboxes, color=(0, 0, 255), thick=3):\n # Draws bounding boxes\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, (bbox[1], bbox[0]), (bbox[3], bbox[2]), color, thick)\n # Return the image copy with boxes drawn\n return imcopy\n\n\ndef TLDetection(image, sess):\n\n image_np = np.expand_dims(np.asarray(image, dtype = np.uint8), 0)\n boxes, scores, classes = sess.run([detection_boxes, detection_scores, detection_classes], feed_dict = {image_tensor: image_np})\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes)\n\n return boxes, scores, classes\n\n\ndef TLBoxes(prob, boxes, scores, classes):\n # filter boxes under minimum probability 'prob'\n # COCO class index for TrafficLight is '10'\n n = len(boxes)\n idxs = []\n # target = {1, 2, 3}\n for i in range(n):\n if scores[i] >= prob:\n # if scores[i] >= prob and classes[i] in target:\n idxs.append(i)\n\n filtered_boxes = boxes[idxs, ...]\n filtered_scores = scores[idxs, ...]\n filtered_classes = classes[idxs, ...]\n # print(filtered_classes)\n # print()\n return filtered_boxes, filtered_scores, filtered_classes\n\n\ndef TLResizeBoxes(boxes, image_height, image_width):\n # Resize boxes from original values (0:1) to image size\n box_coords = np.zeros_like(boxes)\n box_coords[:, 0] = boxes[:, 0] * image_height\n box_coords[:, 1] = boxes[:, 1] * image_width\n box_coords[:, 2] = boxes[:, 2] * image_height\n box_coords[:, 3] = boxes[:, 3] * image_width\n\n return box_coords\n\n\ndef TLTrim(image, box_coordinates):\n # return trimmed images containing all traffic light signals ahead: from\n # zero (no traffic lights) to whatever\n images = []\n for box in box_coordinates:\n x = int(np.round(box[1]))\n y = int(np.round(box[0]))\n w = int(np.round(box[3] - box[1]))\n h = int(np.round(box[2] - box[0]))\n trimmed_image = image[y:y + h, x:x + w]\n\n # return trimmed_image\n # cv2.imwrite('/home/gabymoynahan/CarND-Capstone/data/processed_images/trimmed_{}.png'.format(time.time()),trimmed_image)\n images.append(trimmed_image)\n\n return images\n\n\ndef TLImage_Pro(image):\n # Image processing using openCV\n image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n # cv2.imwrite('/home/gabymoynahan/CarND-Capstone/data/processed_images/HSV_{}.png'.format(time.time()),image_hsv)\n lower_red = np.array([0, 50, 50])\n upper_red = np.array([10, 255, 255])\n red1 = cv2.inRange(image_hsv, lower_red, upper_red)\n\n lower_red = 
np.array([170, 50, 50])\n upper_red = np.array([180, 255, 255])\n red2 = cv2.inRange(image_hsv, lower_red, upper_red)\n converted_img = cv2.addWeighted(red1, 1.0, red2, 1.0, 0.0)\n # cv2.imwrite('/home/gabymoynahan/CarND-Capstone/data/processed_images/converted_{}.png'.format(time.time()),converted_img)\n blur_img = cv2.GaussianBlur(converted_img, (15, 15), 0)\n\n circles = cv2.HoughCircles(blur_img, cv2.HOUGH_GRADIENT, 0.5, 41, param1=70,\n param2=30, minRadius=5, maxRadius=120)\n # cv2.imwrite('/home/gabymoynahan/CarND-Capstone/data/processed_images/circles_{}.png'.format(time.time()),circles)\n return circles\n\n\n'''\nwith open('logfile.txt','wb') as f:\n #creates logfile from scratch\n f.write('new log file \\n')\n'''\n\n\ndetection_graph = load_graph(GRAPH_FILE)\n\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n\nsess = tf.Session(graph=detection_graph)\n\n\nclass TLClassifier(object):\n def __init__(self):\n print(\"Classifier: initialized\")\n\n def get_classification(self, image, work_mode):\n\n if work_mode == \"simulator\":\n img_circles = TLImage_Pro(image)\n if img_circles is not None:\n return TrafficLight.RED\n\n else:\n return TrafficLight.UNKNOWN\n\n # SSD with Image Processing\n elif work_mode == \"site\":\n\n # Blur the image so it seems more realistic and the classificator performs better\n gbeq_image = cv2.GaussianBlur(image, (5, 5), 0)\n boxes, scores, classes = TLDetection(gbeq_image, sess)\n # log('CAMERA IMAGE PROCESSED, {} boxes detected'.format(len(boxes)))\n boxes, scores, classes = TLBoxes(BOXES_SCORE_MIN, boxes, scores, classes)\n\n image_height = image.shape[0]\n image_width = image.shape[1]\n box_coordinates = TLResizeBoxes(boxes, image_height, image_width)\n # trimmed_lights = TLTrim(image, box_coordinates)\n\n if len(boxes) != 0:\n print(\"found boxes with prob > 0.5: \", boxes)\n most_common = np.argmax(np.bincount(classes))\n\n if most_common == 1:\n print(\"Red!\")\n return TrafficLight.RED\n\n elif most_common == 2:\n print(\"Yellow\")\n return TrafficLight.YELLOW\n elif most_common == 3:\n print(\"Green\")\n return TrafficLight.GREEN\n else:\n print(\"Unknown\")\n return TrafficLight.UNKNOWN\n else:\n return TrafficLight.UNKNOWN\n\n else:\n print(\"wrong working mode, the model only works with simulator or site\")\n return None\n","sub_path":"ros/src/tl_detector/light_classification/tl_classifier.py","file_name":"tl_classifier.py","file_ext":"py","file_size_in_byte":6723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"453013422","text":"from pathlib import Path\nimport pandas as pd\nimport us\nimport download\nimport geopandas as gpd\nimport zipfile\n\n# STEP 0\n# Download data manually from American Fact Finder\n# Go to Advanced Search\n# Geography: Block 100, Topic: Total Population, Table : P12 - SEX BY AGE\n# Save each batch into format: census_data/SF2010_zips/SF2010_{state_abbr}_{batch_num}\n\nCENSUS_DATA_PATH = Path('census_data')\nif not CENSUS_DATA_PATH.exists(): CENSUS_DATA_PATH.mkdir()\n\n\ndef get_us_county_shapefiles():\n filepath = CENSUS_DATA_PATH / 'tl_2018_us_county' / 'tl_2018_us_county.shp'\n if not filepath.exists():\n # download from internet\n shape_link = 'https://www2.census.gov/geo/tiger/TIGER2018/COUNTY/'\\\n +'tl_2018_us_county.zip'\n zipped = download.download_file(shape_link, \"US Counties\")\n with zipfile.ZipFile(zipped) as z:\n z.extractall(filepath.parent)\n gdf = gpd.read_file('census_data/tl_2018_us_county/tl_2018_us_county.shp')\n return gdf\n\n\ndef are_all_counties_included(sfs, gdf):\n sfs['COUNTYFP'] = sfs['GEO.id2'].apply(lambda x: x[:5])\n state_counties = gdf.loc[gdf.STATEFP == state_fip, 'GEOID']\n result = True\n if not state_counties.isin(sfs.COUNTYFP).all():\n print(\n f'{state_abbr} dont have population count for all of its counties!'\n )\n notin = state_counties[~state_counties.isin(sfs.COUNTYFP)]\n notinstr = ', '.join(notin)\n print(f'Counties missing in {state_abbr}: {notinstr}')\n result = False\n sfs.drop('COUNTYFP', axis=1, inplace=True)\n return result\n\n\n# STEP 1\n# Run this loop\nSHAPEFILE_OUTPATH = str(CENSUS_DATA_PATH / 'census_blocks_shapefiles')\n# Shape file county level\ngdf = get_us_county_shapefiles()\n# Summary files manually selected and downloaded from American Fact Finder\nSF2010_DOWNLOADED_PATH = CENSUS_DATA_PATH / 'SF2010_zips'\nSF2010_DOWNLOADED_FILENAME = 'DEC_10_SF1_P12_with_ann.csv'\nSF2010_OUTPATH = CENSUS_DATA_PATH / 'SF2010'\n# One iteration for each state\nstate_abbrs = ['NH', 'NJ', 'CT', 'RI', 'ME', 'VT']\nfor state_abbr in state_abbrs:\n\n state_fip = us.states.lookup(state_abbr).fips\n\n # Download shapefile for this state, block level:\n shape_link = 'https://www2.census.gov/geo/tiger/TIGER2018/TABBLOCK/'\\\n + f'tl_2018_{state_fip}_tabblock10.zip'\n print(shape_link)\n zipped = download.download_file(shape_link, state_abbr)\n with zipfile.ZipFile(zipped) as z:\n z.extractall(SHAPEFILE_OUTPATH)\n\n # 2 diff name format depends on which state so just get both\n sfpaths = [SF2010_DOWNLOADED_PATH.glob(f'{state_abbr}_download_?')]\n sfpaths += [SF2010_DOWNLOADED_PATH.glob(f'SF2010_{state_abbr}_?')]\n\n dflist = []\n for i, sfp in enumerate(sfpaths):\n if i > 0:\n dflist.append(\n pd.read_csv(sfp / SF2010_DOWNLOADED_FILENAME,\n dtype=str,\n skiprows=[1]))\n else:\n dflist.append(\n pd.read_csv(sfp / SF2010_DOWNLOADED_FILENAME, dtype=str))\n sfs = pd.concat(dflist, ignore_index=True).drop_duplicates(\n ) # incase a county is included twice\n\n # check to make sure all counties are included in analysis\n checkresult = are_all_counties_included(sfs, gdf)\n if checkresult:\n sfs.to_csv(SF2010_OUTPATH /\n f'{state_abbr}_blocks_from_all_counties.csv',\n index=False)\n","sub_path":"curate_census_data.py","file_name":"curate_census_data.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"403548292","text":"import glob\nimport pandas as pd\nfrom dask import delayed\n\n\ndef counts_by_origin():\n frames = []\n # For each file\n for f in sorted(glob.glob('data/*.csv')):\n # Load the dataframe\n df = delayed(pd.read_csv)(f,\n parse_dates={'Date': [0, 1, 2]},\n infer_datetime_format=True)\n\n # Store in list of frames\n frames.append(df)\n\n # concatenate all the frames together\n df = delayed(pd.concat)(frames)\n\n # Resample by month\n by_month = (df.resample('MS', on='Date')\n .Origin.value_counts()\n .unstack())\n\n # Resample by year\n by_year = (df.resample('AS', on='Date')\n .Origin.value_counts()\n .unstack())\n\n return by_month, by_year\n","sub_path":"profile_and_debug_dask/rev2.py","file_name":"rev2.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"310689491","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport pygame\nfrom pygame.locals import *\nimport random\nimport os\n\nclass PuyoGame(object):\n MAX_COLOMN = 13\n MAX_RAW = 6\n MAX_COLORS = 5\n\n def __init__(self):\n self.GameField = [[0 for i in xrange(self.MAX_RAW)] for j in xrange(self.MAX_COLOMN)]\n self.mainPuyo = {\"x\":self.MAX_RAW/2-1 ,\"y\":0 ,\"color\":random.randint(1 ,self.MAX_COLORS)}\n self.subPuyo = {\"x\":self.MAX_RAW/2 ,\"y\":0 ,\"color\":random.randint(1 ,self.MAX_COLORS)}\n self.onPuyo = 1 #onPuyo parameter mods meens; 0:top ,1:right ,2:bellow ,3:left\n self.chaining = False\n\n def move(self ,dx):\n \"\"\"\n move main and sub Puyos to dx\n if it isn't moveable area,return false\n \"\"\"\n if self.mainPuyo == None:\n return False\n if self.is_moveable(self.mainPuyo[\"x\"]+dx ,self.mainPuyo[\"y\"]) and \\\n self.is_moveable(self.subPuyo[\"x\"]+dx ,self.subPuyo[\"y\"]):\n self.mainPuyo[\"x\"] += dx\n self.subPuyo[\"x\"] += dx\n return True\n else:\n return False\n\n\n def turn(self ,left=False):\n \"\"\"\n turn Puyo couple;change subPuyo possision\n if left arg is True ,turnig left\n default setting is turnig right\n \"\"\"\n if self.mainPuyo == None:\n return False\n if self.onPuyo%4 == 0:\n if left:\n if self.is_moveable(self.mainPuyo[\"x\"]-1 ,self.mainPuyo[\"y\"]):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]-1\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.onPuyo -= 1\n return True\n elif self.is_moveable(self.mainPuyo[\"x\"]+1 ,self.mainPuyo[\"y\"]):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.mainPuyo[\"x\"] +=1\n self.onPuyo -= 1\n return True\n else:\n return False\n else:\n if self.is_moveable(self.mainPuyo[\"x\"]+1 ,self.mainPuyo[\"y\"]):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]+1\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.onPuyo += 1\n return True\n elif self.is_moveable(self.mainPuyo[\"x\"]-1 ,self.mainPuyo[\"y\"]):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.mainPuyo[\"x\"] -=1\n self.onPuyo += 1\n return True\n else:\n return False\n elif self.onPuyo%4 == 1:\n if left:\n if self.is_moveable(self.mainPuyo[\"x\"] ,self.mainPuyo[\"y\"]-1):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]-1\n self.onPuyo -= 1\n return True\n elif self.is_moveable(self.mainPuyo[\"x\"] ,self.mainPuyo[\"y\"]+1):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.mainPuyo[\"y\"] -=1\n self.onPuyo -= 1\n return True\n else:\n return False\n else:\n if self.is_moveable(self.mainPuyo[\"x\"] ,self.mainPuyo[\"y\"]+1):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]+1\n self.onPuyo += 1\n return True\n elif self.is_moveable(self.mainPuyo[\"x\"] ,self.mainPuyo[\"y\"]-1):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.mainPuyo[\"y\"] -=1\n self.onPuyo += 1\n return True\n else:\n return False\n elif self.onPuyo%4 == 2:\n if left:\n if self.is_moveable(self.mainPuyo[\"x\"]+1 ,self.mainPuyo[\"y\"]):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]+1\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.onPuyo -= 1\n return True\n elif self.is_moveable(self.mainPuyo[\"x\"]-1 ,self.mainPuyo[\"y\"]):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.mainPuyo[\"x\"] -=1\n self.onPuyo -= 1\n return True\n else:\n return False\n 
else:\n if self.is_moveable(self.mainPuyo[\"x\"]-1 ,self.mainPuyo[\"y\"]):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]-1\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.onPuyo += 1\n return True\n elif self.is_moveable(self.mainPuyo[\"x\"]+1 ,self.mainPuyo[\"y\"]):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.mainPuyo[\"x\"] +=1\n self.onPuyo += 1\n return True\n else:\n return False\n else:\n if left:\n if self.is_moveable(self.mainPuyo[\"x\"] ,self.mainPuyo[\"y\"]+1):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]+1\n self.onPuyo -= 1\n return True\n elif self.is_moveable(self.mainPuyo[\"x\"] ,self.mainPuyo[\"y\"]-1):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.mainPuyo[\"y\"] -=1\n self.onPuyo -= 1\n return True\n else:\n return False\n else:\n if self.is_moveable(self.mainPuyo[\"x\"] ,self.mainPuyo[\"y\"]-1):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]-1\n self.onPuyo += 1\n return True\n elif self.is_moveable(self.mainPuyo[\"x\"] ,self.mainPuyo[\"y\"]+1):\n self.subPuyo[\"x\"] = self.mainPuyo[\"x\"]\n self.subPuyo[\"y\"] = self.mainPuyo[\"y\"]\n self.mainPuyo[\"y\"] +=1\n self.onPuyo += 1\n return True\n else:\n return False\n\n def check_chain(self):\n \"\"\"\n if 4 Puyos are combined,remove their from GameField\n and return True else False\n \"\"\"\n def inner_cc(x ,y ,checked=set()):\n if len(checked) == 0:\n return inner_cc(x ,y ,set([(x ,y)]))\n else:\n if x == 0:\n raws = [x ,x+1]\n elif x == self.MAX_RAW-1:\n raws = [x-1 ,x]\n else:\n raws = [x-1 ,x ,x+1]\n if y == 0:\n colomns = [y ,y+1]\n elif y == self.MAX_COLOMN-1:\n colomns = [y-1 ,y]\n else:\n colomns = [y-1 ,y ,y+1]\n chain_poses = set([(i ,j) for i in raws for j in colomns if self.GameField[y][x] == self.GameField[j][i] and abs(i-x+j-y)==1])\n if chain_poses.issubset(checked):\n return checked\n checked.update(chain_poses)\n [inner_cc(psx ,psy ,checked) for psx ,psy in chain_poses]\n return checked\n chained = False\n for y in xrange(len(self.GameField)):\n for x in xrange(len(self.GameField[y])):\n if self.GameField[y][x] == 0:\n continue\n combs = inner_cc(x ,y)\n if len(combs) >= 4:\n for rx ,ry in combs:\n self.GameField[ry][rx] = 0\n chained = True\n return chained\n\n def drop(self):\n \"\"\"\n if you call this method,drop Puyos that is possible to drop on field\n if no Puyos are dropped,this method return False\n \"\"\"\n dropped = False\n for y in xrange(self.MAX_COLOMN):\n for x in xrange(self.MAX_RAW):\n if self.GameField[y][x] == 0:\n continue\n if self.is_moveable(x ,y+1):\n self.GameField[y+1][x] = self.GameField[y][x]\n self.GameField[y][x] = 0\n dropped = True\n if dropped:\n self.drop()\n return dropped\n\n def next(self):\n \"\"\"\n this method will progress game\n \"\"\"\n if not self.chaining:\n if self.mainPuyo == None:\n self.mainPuyo = {\"x\":self.MAX_RAW/2-1 ,\"y\":0 ,\"color\":random.randint(1 ,self.MAX_COLORS)}\n self.subPuyo = {\"x\":self.MAX_RAW/2 ,\"y\":0 ,\"color\":random.randint(1 ,self.MAX_COLORS)}\n self.onPuyo = 1\n return 0\n elif self.is_moveable(self.mainPuyo[\"x\"] ,self.mainPuyo[\"y\"]+1) and \\\n self.is_moveable(self.subPuyo[\"x\"] ,self.subPuyo[\"y\"]+1):\n self.mainPuyo[\"y\"] += 1\n self.subPuyo[\"y\"] += 1\n return -1\n else:\n self.GameField[self.mainPuyo[\"y\"]][self.mainPuyo[\"x\"]] = self.mainPuyo[\"color\"]\n self.GameField[self.subPuyo[\"y\"]][self.subPuyo[\"x\"]] = 
self.subPuyo[\"color\"]\n self.mainPuyo = self.subPuyo = None\n if self.drop():\n self.chaining = True\n return 0\n if self.check_chain():\n self.chaining = True\n else:\n self.chaining = False\n if self.GameField[0][self.MAX_RAW/2-1] != 0 or self.GameField[0][self.MAX_RAW/2] != 0:\n return -2\n else:\n if self.drop():\n return\n if self.check_chain():\n self.chaining = True\n else:\n self.chaining = False\n if self.GameField[0][self.MAX_RAW/2-1] != 0 or self.GameField[0][self.MAX_RAW/2] != 0:\n return -2\n return 0\n\n\n def is_moveable(self ,x ,y):\n \"\"\"\n return arg's possision is moveable or not\n \"\"\"\n if (0 <= x < self.MAX_RAW) and (0 <= y < self.MAX_COLOMN) and (self.GameField[y][x] == 0):\n return True\n else:\n return False\n\ndef LoadImage(name):\n \"\"\"\n load image from file name and change sxale\n \"\"\"\n path = os.path.join(\"img\" ,name)\n image = pygame.image.load(path).convert_alpha()\n rect = image.get_rect()\n image = pygame.transform.scale(image ,(32 ,32))\n return image\n\ndef main():\n pygame.init()\n screen = pygame.display.set_mode((6*32 ,13*32))\n pygame.display.set_caption(\"Puyo2\")\n clock = pygame.time.Clock()\n spend_time = 0\n puyo2 = PuyoGame()\n colors = {1:LoadImage(\"b.gif\") ,2:LoadImage(\"g.gif\") ,3:LoadImage(\"r.gif\")\n ,4:LoadImage(\"y.gif\") ,5:LoadImage(\"p.gif\") ,6:LoadImage(\"e.gif\")}\n while 1:\n clock.tick(60)\n spend_time += 1\n if spend_time%60 == 0 or (spend_time%30 == 0 and puyo2.chaining):\n puyo2.next()\n screen.fill((0 ,0 ,0))\n for y in xrange(len(puyo2.GameField)):\n for x in xrange(len(puyo2.GameField[y])):\n if puyo2.GameField[y][x] != 0:\n screen.blit(colors[puyo2.GameField[y][x]] ,(x*32 ,y*32))\n if puyo2.mainPuyo != None:\n screen.blit(colors[puyo2.mainPuyo[\"color\"]] ,(puyo2.mainPuyo[\"x\"]*32 ,puyo2.mainPuyo[\"y\"]*32))\n screen.blit(colors[puyo2.subPuyo[\"color\"]] ,(puyo2.subPuyo[\"x\"]*32 ,puyo2.subPuyo[\"y\"]*32))\n pygame.display.flip()\n pressed_keys = pygame.key.get_pressed()\n if pressed_keys[K_DOWN] and spend_time%6 == 0 and not puyo2.chaining:\n puyo2.next()\n for event in pygame.event.get():\n if event.type == QUIT:\n return\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n return\n if event.key == K_LEFT:\n puyo2.move(-1)\n if event.key == K_RIGHT:\n puyo2.move(1)\n if event.key == K_UP:\n puyo2.turn()\n if event.key == K_z:\n puyo2.turn(left = True)\n if event.key == K_x:\n puyo2.turn()\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"419714778","text":"#!/usr/bin/python\n# -*- coding: UTF-8\nimport sys\n\nsys.path.append(\"..\")\nfrom OneNetApp.OneNetApi import *\nimport json\nfrom PublicLib.Protocol.ly_Json import subStrToJson\nimport time\nimport logging\nfrom PublicLib import public as pub\n\n\ndef onenet_paresdata(res):\n # bytes to str\n data = bytes.decode(res)\n\n # 将json对象转换成python对象\n data_python = json.loads(data)\n if data_python[\"errno\"] == 0 and data_python[\"error\"] == \"succ\":\n if 'data' in data_python:\n return True, data_python[\"data\"]\n else:\n return True, None\n else:\n return False, None\n\n\ndef onenet_contjson(content):\n jsondata = bytes.decode(content)\n\n # 将json对象转换成python对象\n data = json.loads(jsondata)\n\n count = data[\"data\"][\"count\"]\n recvtimelist = []\n valuelist = []\n for i in range(count):\n dic = data[\"data\"][\"datastreams\"][0][\"datapoints\"][i]\n\n recvtime = dic[\"at\"]\n value = dic[\"value\"]\n value = subStrToJson(value)\n recvtimelist += [recvtime]\n valuelist += [value]\n return count, recvtimelist, valuelist\n\n\ndef onenet_makeframe(con, deviceinfo, val):\n # nbiot_url = {\"imei\": device_imei, \"obj_id\": obj_id, \"obj_inst_id\": obj_inst_id, \"mode\": mode} # params\n # '3308_0_5750'\n # nbiot_url = {\"imei\": deviceinfo[\"rg_id\"], \"obj_id\": deviceinfo[\"datastreams\"][0][\"id\"][:4],\n # \"obj_inst_id\": deviceinfo[\"datastreams\"][0][\"id\"][5:6], \"mode\": 2} # params\n nbiot_url = {\"imei\": deviceinfo[\"rg_id\"], \"obj_id\": '3308',\n \"obj_inst_id\": '0', \"mode\": 2} # params\n nbiot_data = {\"data\": [{\"res_id\": '5750', \"val\": val}]} # data\n\n res4 = con.nbiot_write(nbiot_data, nbiot_url)\n return (res4.content)\n\n\ndef onenet_senddata(con, deviceinfo, val):\n if deviceinfo[\"online\"]:\n # 发送数据\n # 其中datastream_id等于obj_id, obj_inst_id, res_id,如obj_id:3200,obj_inst_id:0,res_id:5501,那么这个datastream_id就为3200_0_5501。 ['3308_0_5750']\n # val = \"{'Len':'312','Cmd':'Read','SN':'1','DataTime':'180706121314','CRC':'FFFF','DataValue':{'0201FF00':''}}\" # object\n res = onenet_makeframe(con, deviceinfo, val)\n ret, data = onenet_paresdata(res)\n return ret\n return None\n\n\ndef getcurtime():\n # 时间戳\n now = time.time()\n int(now)\n\n # 时间\n tl = time.localtime(now)\n\n # 格式化时间\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", tl)\n\n\ndef getpreday(n):\n # 时间戳\n now = time.time()\n int(now)\n\n pre = now - n*24*3600\n\n # 时间\n tl = time.localtime(pre)\n\n # 格式化时间\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", tl)\n\n\n# 查询最近N条数据\ndef onenet_recvdata(con, deviceinfo, parm):\n if deviceinfo[\"online\"]:\n # res3 = con.datapoint_multi_get(device_id = deviceinfo[\"id\"], limit = 1, datastream_ids = deviceinfo[\"datastreams\"][0][\"id\"])\n\n if \"start_time\" in parm and \"end_time\" in parm and \"limit\" in parm:\n res3 = con.datapoint_multi_get(device_id=deviceinfo[\"id\"],\n start_time=parm[\"start_time\"],\n end_time=parm[\"end_time\"],\n limit=parm[\"limit\"],\n datastream_ids='3308_0_5750')\n elif \"start_time\" in parm and \"end_time\":\n res3 = con.datapoint_multi_get(device_id=deviceinfo[\"id\"],\n start_time=parm[\"start_time\"],\n end_time=parm[\"end_time\"],\n datastream_ids='3308_0_5750')\n elif \"limit\" in parm:\n res3 = con.datapoint_multi_get(device_id=deviceinfo[\"id\"],\n limit=parm[\"limit\"],\n datastream_ids='3308_0_5750')\n else:\n res3 = con.datapoint_multi_get(device_id=deviceinfo[\"id\"],\n limit=1,\n datastream_ids='3308_0_5750')\n count, recvtime, jsonstr = onenet_contjson(res3.content)\n return count, jsonstr\n 
else:\n print('设备不在线')\n\n\ndef connectonenet(rlist, devlist):\n for i in range(len(devlist)):\n rlist += [con.device_info(device_id=devlist[i])]\n return rlist\n\n\ndef getdevinfo(res3, device_id):\n # 获取设备信息\n ret, deviceinfo = onenet_paresdata(res3.content)\n # print('当前测试设备信息', device_id, deviceinfo['auth_info'], 'online:',deviceinfo[\"online\"])\n return ret, deviceinfo\n\n\ndef getcurinfo(con, rlist, devlist, prelist, parm):\n # 获取设备信息\n for i in range(len(devlist)):\n nret, deviceinfo = getdevinfo(rlist[i], devlist[i])\n\n if nret is True and deviceinfo[\"online\"]:\n # val = \"{'Len':'312','Cmd':'Set','SN':'2','DataTime':'200428121314','CRC':'FFFF','DataValue':{'04A20105':'01'}}\" # object\n val = \"{'Len':'312','Cmd':'Set','SN':'2','DataTime':'180706121314','CRC':'FFFF','DataValue':{'04A10101':'02#FF#8002#0005#180901120100#05#00900200#05060101#04A20201#04A50302#04A50303','04A10102':'02#01'}}\" # object\n ret = onenet_senddata(con, deviceinfo, val)\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()), 'send: ', val)\n time.sleep(5) # 等待间隔\n\n # val = \"{'Len':'312','Cmd':'Read','SN':'1','DataTime':'200428121314','CRC':'FFFF','DataValue':{'04A00101':'','04A00102':'','04A20201':'','04A50302':'','04A50303':''}}\" # object\n val = \"{'Len':'0104','Cmd':'Set','SN':'179','DataTime':'200527144935','CRC':'FFFF','DataValue':{'04A20106':'0060'}}\" # object\n ret = onenet_senddata(con, deviceinfo, val)\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()), 'send: ', val)\n time.sleep(1) # 等待间隔\n\n\n if nret is True and deviceinfo[\"online\"]:\n n, data = onenet_recvdata(con, deviceinfo, parm)\n if n > 0:\n s = deviceinfo['title'] + ', ' + deviceinfo['rg_id']\n print(s)\n logger.info(s)\n for d in data:\n print(d)\n logger.info(d)\n else:\n # print(deviceinfo['title'], '不在线! 最近在线时间:',deviceinfo[\"act_time\"])\n try:\n s = deviceinfo['title'] + ', ' + deviceinfo['rg_id'] + ', 不在线! 
最近在线时间:' + deviceinfo[\"last_ct\"]\n if prelist[i] != s:\n prelist[i] = s\n print(s)\n logger.info(s)\n except:\n pass\n\n\n\nif __name__ == '__main__':\n pub.loggingConfig('logging.conf')\n logger = logging.getLogger('ToolMain')\n\n # 定义设备信息\n deviceinfo = {}\n\n # 定义设备云端信息\n # device_contents = \"4Hkjc25uOQ6qDd4AsfMyvMOJLSg=\"\n # device_id = 522658053\n # device_contents = \"mBnDJfsR8paDmq3g7mh=iWi9lb4=\" # NB电表\n # device_id = 525383929\n device_contents = \"sP5Mezphc5YUN9Q=mdISOM6UKVM=\" # NB生产\n con = OneNetApi(device_contents) # 文件目录\n\n # 设备ID\n # device_id_1 = 593477971 # IMEI: 868334034252753 TLY2821_移动_200424\n # device_id_2 = 593476181 # IMEI: 868334034332290 TLY2823_联通_200424\n # device_id_3 = 593474168 # IMEI: 868334033126362 TLY2823_电信_200424\n # device_id_4 = 586340334 # IMEI: 868334034332431 TLY2821_联通_200424\n\n config = pub.loadDefaultSettings(\"devIDcfg.json\")\n devlist = config['deviceID']\n\n # namelist = ['TLY2821_移动_200424', 'TLY2823_联通_200424', 'TLY2823_电信_200424', 'TLY2821_联通_200424']\n # devlist = [device_id_1, device_id_2, device_id_3, device_id_4]\n # devnum = len(devlist)\n predata = [''] * 4\n\n # start_time = getpreday(3)\n # end_time = getcurtime()\n # parm = {\"start_time\":start_time, \"end_time\":end_time, \"limit\":64}\n\n parm = {\"limit\": 64}\n\n # 连接设备\n rlist = []\n connectonenet(rlist, devlist)\n\n # 获取最近信息\n getcurinfo(con, rlist, devlist, predata, parm)\n time.sleep(3)\n\n","sub_path":"OneNetGetLog.py","file_name":"OneNetGetLog.py","file_ext":"py","file_size_in_byte":8228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"102900344","text":"\"\"\"Encapsulates datasets in a generic Dataset class, and in some more specific classes that inherit\nfrom it\n\n\"\"\"\n\nimport os\nimport json\nimport math\nfrom multiprocessing import Pool\n\nimport cv2\nfrom PIL import Image\n\nimport numpy as np\n\nfrom deeposlandia import utils\n\nclass Dataset:\n \"\"\"Generic class that describes the behavior of a Dataset object: it is initialized at least\n with an image size, its label are added always through the same manner, it can be serialized (save) and\n deserialized (load) from/to a `.json` file\n\n Attributes\n ----------\n image_size : int\n Size of considered images (height=width), raw images will be resized during the\n preprocessing\n\n \"\"\"\n def __init__(self, image_size):\n self.image_size = image_size\n self.label_info = []\n self.image_info = []\n\n @property\n def label_ids(self):\n \"\"\"Return the list of labels ids taken into account in the dataset\n\n They can be grouped.\n\n Returns\n -------\n list\n List of label ids\n \"\"\"\n return [label_id for label_id, attr in enumerate(self.label_info)\n if attr['is_evaluate']]\n\n @property\n def labels(self):\n \"\"\"Return the description of label that will be evaluated during the process\n \"\"\"\n return [label for label in self.label_info if label[\"is_evaluate\"]]\n\n def get_nb_labels(self, see_all=False):\n \"\"\"Return the number of labels\n\n Parameters\n ----------\n see_all : boolean\n If True, consider all labels, otherwise consider only labels for which `is_evaluate` is\n True\n \"\"\"\n if see_all:\n return len(self.label_info)\n else:\n return len(self.label_ids)\n\n def get_nb_images(self):\n \"\"\" `image_info` getter, return the size of `image_info`, i.e. the\n number of images in the dataset\n \"\"\"\n return len(self.image_info)\n\n def get_label_popularity(self):\n \"\"\"Return the label popularity in the current dataset, *i.e.* the proportion of images that\n contain corresponding object\n \"\"\"\n labels = [img[\"labels\"] for img in self.image_info]\n if self.get_nb_images() == 0:\n utils.logger.error(\"No images in the dataset.\")\n return None\n else:\n return np.round(np.divide(sum(np.array([list(l.values()) for l in labels])),\n self.get_nb_images()), 3)\n\n\n def add_label(self, label_id, label_name, color, is_evaluate,\n category=None, aggregated_label_ids=None,\n contained_labels=None):\n \"\"\" Add a new label to the dataset with label id `label_id`\n\n Parameters\n ----------\n label_id : integer\n Id of the new label\n label_name : str\n String designing the new label name\n color : list\n List of three integers (between 0 and 255) that characterizes the\n label (useful for semantic segmentation result printing)\n is_evaluate : bool\n category : str\n String designing the category of the dataset label\n aggregate_label_ids : list (optional)\n List of label ids aggregated by the current label_id\n contained_labels : list\n List of raw labels aggregated by the current label\n \"\"\"\n if label_id in self.label_info:\n utils.logger.error(\"Label {} already stored into the label set.\".format(label_id))\n return None\n category = label_name if category is None else category\n contains = label_name if contained_labels is None else contained_labels\n self.label_info.append({\"name\": label_name,\n \"id\": label_id,\n \"category\": category,\n \"is_evaluate\": is_evaluate,\n \"aggregate\": aggregated_label_ids,\n \"contains\": contained_labels,\n \"color\": color})\n\n def save(self, filename):\n \"\"\"Save dataset in 
a json file indicated by `filename`\n\n Parameters\n ----------\n filename : str\n String designing the relative path where the dataset must be saved\n \"\"\"\n with open(filename, 'w') as fp:\n json.dump({\"image_size\": self.image_size,\n \"labels\": self.label_info,\n \"images\": self.image_info}, fp)\n utils.logger.info(\"The dataset has been saved into {}\".format(filename))\n\n def load(self, filename, nb_images=None):\n \"\"\"Load a dataset from a json file indicated by `filename` ; use dict comprehension instead\n of direct assignments in order to convert dict keys to integers\n\n Parameters\n ----------\n filename : str\n String designing the relative path from where the dataset must be\n loaded\n nb_images : integer\n Number of images that must be loaded (if None, the whole dataset is loaded)\n \"\"\"\n with open(filename) as fp:\n ds = json.load(fp)\n self.image_size = ds[\"image_size\"]\n self.label_info = ds[\"labels\"]\n if nb_images is None:\n self.image_info = ds[\"images\"]\n else:\n self.image_info = ds[\"images\"][:nb_images]\n utils.logger.info(\"The dataset has been loaded from {}\".format(filename))\n\nclass MapillaryDataset(Dataset):\n \"\"\"Dataset structure that gathers all information related to the Mapillary images\n\n Attributes\n ----------\n image_size : int\n Size of considered images (height=width), raw images will be resized during the\n preprocessing\n glossary_filename : str\n Name of the Mapillary input glossary, that contains every information about Mapillary\n labels\n\n \"\"\"\n\n def __init__(self, image_size, glossary_filename):\n \"\"\" Class constructor ; instanciates a MapillaryDataset as a standard Dataset which is\n completed by a glossary file that describes the dataset labels\n \"\"\"\n super().__init__(image_size)\n self.build_glossary(glossary_filename)\n\n def build_glossary(self, config_filename):\n \"\"\"Read the Mapillary glossary stored as a json file at the data\n repository root\n\n Parameters\n ----------\n config_filename : str\n String designing the relative path of the dataset glossary\n (based on Mapillary dataset)\n \"\"\"\n with open(config_filename) as config_file:\n glossary = json.load(config_file)\n if \"labels\" not in glossary:\n utils.logger.error(\"There is no 'label' key in the provided glossary.\")\n return None\n for lab_id, label in enumerate(glossary[\"labels\"]):\n name_items = label[\"name\"].split('--')\n category = '-'.join(name_items)\n self.add_label(lab_id, name_items, label[\"color\"],\n label['evaluate'], category, label[\"contains_id\"],\n label['contains'])\n\n def group_image_label(self, image):\n \"\"\"Group the labels\n\n If the label ids 4, 5 and 6 belong to the same group, they will be turned\n into the label id 4.\n\n Parameters\n ----------\n image : PIL.Image\n\n Returns\n -------\n PIL.Image\n \"\"\"\n # turn all label ids into the lowest digits/label id according to its \"group\"\n # (manually built)\n a = np.array(image)\n for root_id, label in enumerate(self.label_info):\n for label_id in label['aggregate']:\n mask = a == label_id\n a[mask] = root_id\n return Image.fromarray(a, mode=image.mode)\n\n def _preprocess(self, image_filename, output_dir, aggregate, labelling=True):\n \"\"\"Resize/crop then save the training & label images\n\n Parameters\n ----------\n datadir : str\n image_filaname : str\n aggregate : boolean\n labelling : boolean\n\n Returns\n -------\n dict\n Key/values with the filenames and label ids\n \"\"\"\n # open original images\n img_in = Image.open(image_filename)\n\n # 
resize images (self.image_size*larger_size or larger_size*self.image_size)\n img_in = utils.resize_image(img_in, self.image_size)\n\n # crop images to get self.image_size*self.image_size dimensions\n crop_pix = np.random.randint(0, 1 + max(img_in.size) - self.image_size)\n final_img_in = utils.mono_crop_image(img_in, crop_pix)\n\n # save final image\n new_in_filename = os.path.join(output_dir, 'images',\n os.path.basename(image_filename))\n final_img_in.save(new_in_filename)\n\n # label_filename vs label image\n if labelling:\n label_filename = image_filename.replace(\"images/\", \"labels/\")\n label_filename = label_filename.replace(\".jpg\", \".png\")\n img_out = Image.open(label_filename)\n img_out = utils.resize_image(img_out, self.image_size)\n final_img_out = utils.mono_crop_image(img_out, crop_pix)\n # group some labels\n if aggregate:\n final_img_out = self.group_image_label(final_img_out)\n\n labels = utils.mapillary_label_building(final_img_out,\n self.label_ids)\n new_out_filename = os.path.join(output_dir, 'labels',\n os.path.basename(label_filename))\n final_img_out = utils.build_image_from_config(final_img_out,\n self.label_info)\n final_img_out.save(new_out_filename)\n else:\n new_out_filename = None\n labels = {i: 0 for i in range(self.get_nb_labels())}\n\n return {\"raw_filename\": image_filename,\n \"image_filename\": new_in_filename,\n \"label_filename\": new_out_filename,\n \"labels\": labels}\n\n def populate(self, output_dir, input_dir, nb_images=None, aggregate=False, labelling=True):\n \"\"\" Populate the dataset with images contained into `datadir` directory\n\n Parameters\n ----------\n output_dir : str\n Path of the directory where the preprocessed image must be saved\n input_dir : str\n Path of the directory that contains input images\n nb_images : integer\n Number of images to be considered in the dataset; if None, consider the whole\n repository\n aggregate : bool\n Aggregate some labels into more generic ones, e.g. cars and bus into the vehicle label\n labelling: boolean\n If True labels are recovered from dataset, otherwise dummy label are generated\n \"\"\"\n image_list = os.listdir(os.path.join(input_dir, \"images\"))[:nb_images]\n image_list_longname = [os.path.join(input_dir, \"images\", l) for l in image_list]\n with Pool() as p:\n self.image_info = p.starmap(self._preprocess, [(x, output_dir, aggregate, labelling)\n for x in image_list_longname])\n\nclass ShapeDataset(Dataset):\n \"\"\"Dataset structure that gathers all information related to a randomly-generated shape Dataset\n\n In such a dataset, a set of images is generated with either a square, or a\n circle or a triangle, or two of them, or all of them. A random background\n color is applied, and shape color itself is also randomly generated. 
Each\n of these labels is characterized by a fixed color for comparison between\n ground truth and predictions: squares, circles and triangles will be\n respectively set as blue, red and green, whilst background will be set as\n light grey.\n\n Attributes\n ----------\n image_size : int\n Size of considered images (height=width), raw images will be resized during the\n preprocessing\n nb_labels : int\n Number of label types in the dataset (three shape types plus the background)\n\n \"\"\"\n\n SQUARE = 0\n SQUARE_COLOR = (50, 50, 200) # Blue\n CIRCLE = 1\n CIRCLE_COLOR = (200, 50, 50) # Red\n TRIANGLE = 2\n TRIANGLE_COLOR = (50, 200, 50) # Green\n BACKGROUND = 3\n BACKGROUND_COLOR = (200, 200, 200) # Light grey\n\n def __init__(self, image_size):\n \"\"\" Class constructor\n \"\"\"\n super().__init__(image_size)\n self.build_glossary()\n\n def build_glossary(self):\n \"\"\"Build the shape glossary, i.e. register the four label types\n (square, circle, triangle and background) with their fixed colors\n \"\"\"\n self.add_label(self.SQUARE, \"square\", self.SQUARE_COLOR, True)\n self.add_label(self.CIRCLE, \"circle\", self.CIRCLE_COLOR, True)\n self.add_label(self.TRIANGLE, \"triangle\", self.TRIANGLE_COLOR, True)\n self.add_label(self.BACKGROUND, \"background\", self.BACKGROUND_COLOR, True)\n\n def generate_labels(self, nb_images):\n \"\"\" Generate random shape labels in order to prepare shape image\n generation; use numpy to generate random indices for each label, these\n indices will be the positive examples; return a list of label dicts\n\n Parameters\n ----------\n nb_images : integer\n Number of images to label in the dataset\n \"\"\"\n raw_labels = [np.random.choice(np.arange(nb_images),\n int(nb_images/2),\n replace=False)\n for i in range(self.get_nb_labels())]\n labels = np.zeros([nb_images, self.get_nb_labels()], dtype=int)\n for i in range(self.get_nb_labels()):\n labels[raw_labels[i], i] = 1\n return [dict([(i, int(j)) for i, j in enumerate(l)]) for l in labels]\n\n def populate(self, output_dir=None, input_dir=None, nb_images=10000, aggregate=False, labelling=True, buf=8):\n \"\"\" Populate the dataset with randomly generated shape images\n\n Parameters\n ----------\n output_dir : str\n Path of the directory where the generated images must be saved\n input_dir : str\n Unused here; kept for signature compatibility with the parent class\n nb_images: integer\n Number of images that must be added in the dataset\n aggregate: bool\n Aggregate some labels into more generic ones, e.g. 
cars and buses into a vehicle label\n labelling: boolean\n Dummy parameter: in this dataset, labels are always generated, as images are drawn with them\n buf: integer\n Minimal number of pixels between shape base point and image borders\n \"\"\"\n shape_gen = self.generate_labels(nb_images)\n for i, image_label in enumerate(shape_gen):\n bg_color = np.random.randint(0, 255, 3).tolist()\n shape_specs = []\n # iterate over the 0-1 label values; only positive labels get a shape spec\n for is_on in image_label.values():\n if is_on:\n shape_color = np.random.randint(0, 255, 3).tolist()\n x, y = np.random.randint(buf, self.image_size - buf - 1, 2).tolist()\n shape_size = np.random.randint(buf, self.image_size // 4)\n shape_specs.append([shape_color, x, y, shape_size])\n else:\n shape_specs.append([None, None, None, None])\n self.add_image(i, bg_color, shape_specs, image_label)\n if output_dir is not None:\n self.draw_image(i, output_dir)\n\n def add_image(self, image_id, background, specifications, labels):\n \"\"\" Add a new image to the dataset with image id `image_id`; an image\n in the dataset is represented by an id, a list of shape specifications,\n a background color and a list of 0-1 labels (1 if the i-th label is on\n the image, 0 otherwise)\n\n Parameters\n ----------\n image_id : integer\n Id of the new image\n background : list\n List of three integers between 0 and 255 that define the image\n background color\n specifications : list\n Image specifications, as a list of shapes (color, coordinates and\n size)\n labels : list\n List of 0-1 values, the i-th value being 1 if the i-th label is on\n the new image, 0 otherwise; the label list length corresponds to the\n number of labels in the dataset\n \"\"\"\n # image ids are assigned sequentially, so an id below the current length is a duplicate\n if image_id < len(self.image_info):\n utils.logger.error(\"Image {} already stored into the label set.\".format(image_id))\n return None\n self.image_info.append({\"background\": background,\n \"shape_specs\": specifications,\n \"labels\": labels})\n\n def draw_image(self, image_id, datapath):\n \"\"\"Draw an image from the specifications of its shapes and save it on\n the file system to `datapath`\n\n Save labels as mono-channel images on the file system by using the label ids\n\n Parameters\n ----------\n image_id : integer\n Image id\n datapath : str\n String that characterizes the repository in which images will be stored\n \"\"\"\n image_info = self.image_info[image_id]\n\n image = np.ones([self.image_size, self.image_size, 3], dtype=np.uint8)\n image = image * np.array(image_info[\"background\"], dtype=np.uint8)\n label = np.full([self.image_size, self.image_size, 3], self.BACKGROUND_COLOR, dtype=np.uint8)\n\n # Each spec holds the color, the center (x, y) and the size s\n if image_info[\"labels\"][self.SQUARE]:\n color, x, y, s = image_info[\"shape_specs\"][self.SQUARE]\n color = tuple(map(int, color))\n image = cv2.rectangle(image, (x - s, y - s), (x + s, y + s), color, -1)\n label = cv2.rectangle(label, (x - s, y - s), (x + s, y + s), self.SQUARE_COLOR, -1)\n if image_info[\"labels\"][self.CIRCLE]:\n color, x, y, s = image_info[\"shape_specs\"][self.CIRCLE]\n color = tuple(map(int, color))\n image = cv2.circle(image, (x, y), s, color, -1)\n label = cv2.circle(label, (x, y), s, self.CIRCLE_COLOR, -1)\n if image_info[\"labels\"][self.TRIANGLE]:\n color, x, y, s = image_info[\"shape_specs\"][self.TRIANGLE]\n color = tuple(map(int, color))\n x, y, s = map(int, (x, y, s))\n points = np.array([[(x, y - s),\n (x - s / math.sin(math.radians(60)), y + s),\n (x + s / math.sin(math.radians(60)), y + s),]],\n dtype=np.int32)\n image = cv2.fillPoly(image, points, color)\n label = 
cv2.fillPoly(label, points, self.TRIANGLE_COLOR)\n image_filename = os.path.join(datapath, \"images\", \"shape_{:05}.png\".format(image_id))\n self.image_info[image_id][\"image_filename\"] = image_filename\n Image.fromarray(image).save(image_filename)\n label_filename = os.path.join(datapath, \"labels\", \"shape_{:05}.png\".format(image_id))\n self.image_info[image_id][\"label_filename\"] = label_filename\n Image.fromarray(label).save(label_filename)\n","sub_path":"deeposlandia/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":19460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
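A minimal standalone sketch of the masking idea behind group_image_label above, on a toy label map with a hypothetical aggregation table (real Mapillary label ids differ):

import numpy as np

# toy 3x3 label map; assume ids 5 and 6 aggregate into the root id 4
label_map = np.array([[4, 5, 6],
                      [1, 4, 5],
                      [6, 6, 1]])
aggregate = {4: [5, 6]}  # hypothetical root id -> absorbed ids

for root_id, members in aggregate.items():
    for member in members:
        label_map[label_map == member] = root_id  # boolean-mask assignment

print(label_map)  # every 5 and 6 has been rewritten to 4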
+{"seq_id":"563470262","text":"import re\n\nfrom grab import Grab\n\nfrom grab_site.models import Vacancy\nfrom grab_worker.celery import app\n\n\ndef parse_vacancy_salary(vacancy_salary):\n replace_string = vacancy_salary.replace(u'\\xa0', '').replace(' ', '')\n salaries = re.findall('(\\d+)', replace_string)\n salary_from, salary_to = None, None\n if vacancy_salary.find('до') != -1:\n salary_to = salaries.pop()\n if vacancy_salary.find('от') != -1:\n salary_from = salaries.pop()\n\n return salary_from, salary_to\n\n\n@app.task\ndef grab_vacancies():\n g = Grab()\n g.go('https://spb.hh.ru/employer/889')\n\n xpath = '//div[contains(@class, \"resume-search-item__name\")]/a'\n vacancy_links = g.xpath_list(xpath)\n for item in vacancy_links:\n g_info = Grab()\n link = item.get('href')\n g_info.go(link)\n title = g_info.xpath('//h1[contains(@class, \"header\")]').text\n salary_from, salary_to = parse_vacancy_salary(g_info.xpath('//p[contains(@class, \"vacancy-salary\")]').text)\n vacancy_description = g_info.xpath('//div[contains(@itemprop, \"description\")]/p').text\n\n Vacancy.objects.update_or_create(vacancy_id=re.findall('\\d+', link)[0], name=title,\n description=vacancy_description, link=link,\n salary_from=salary_from, salary_to=salary_to)\n","sub_path":"grab_site/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"24916811","text":"\"\"\"\nmount - Command\n===============\n\nThis module provides parsing for the ``mount`` command. The ``Mount`` class\nimplements parsing for the ``mount`` command output which looks like::\n\n sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime,seclabel)\n proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)\n /dev/mapper/HostVG-Config on /etc/shadow type ext4 (rw,noatime,seclabel,stripe=256,data=ordered)\n dev/sr0 on /run/media/root/VMware Tools type iso9660 (ro,nosuid,nodev,relatime,uid=0,gid=0,iocharset=utf8,mode=0400,dmode=0500,uhelper=udisks2) [VMware Tools]\n\nThe information is stored as a list of ``AttributeDict`` objects. Each\n``AttributeDict`` object contains attributes for the following information that\nare listed in the same order as in the command output::\n\n - filesystem: (str) Name of filesystem\n - mount_point: (str) Name of mount point for filesystem\n - mount_type: (str) Name of filesystem type\n - mount_options: (AttributeDict) Mount options as ``AttributeDict`` object\n - mount_label: (str) Only present if optional label is present\n - mount_clause: (str) Full string from command output\n\nThe `` mount_options`` contains the mount options as attributes accessible\nvia the attribute name as it appears in the command output. For instance, the\noptions ``(rw,dmode=0500)`` may be accessed as ''mnt_row_info.rw`` with the value\n``True`` and ``mnt_row_info.dmode`` with the value \"0500\". The ``in`` operator\nmay be used to determine if an option is present.\n\nMountEntry lines are also available in a ``mounts`` property, keyed on the\nmount point.\n\nExamples:\n >>> mnt_info = shared[Mount]\n >>> mnt_info\n \n >>> len(mnt_info)\n 4\n >>> mnt_info[3].__dict__\n {'filesystem': 'dev/sr0',\n 'mount_clause': 'dev/sr0 on /run/media/root/VMware Tools type iso9660 (ro,nosuid,nodev,relatime,uid=0,gid=0,iocharset=utf8,mode=0400,dmode=0500,uhelper=udisks2) [VMware Tools]',\n 'mount_label': 'VMware Tools',\n 'mount_options': {'dmode': '0500', 'relatime': True, 'uid': '0',\n 'iocharset': 'utf8', 'gid': '0', 'mode': '0400', 'ro': True,\n 'nosuid': True, 'uhelper': 'udisks2', 'nodev': True}\n 'mount_point': '/run/media/root/VMware Tools',\n 'mount_type': 'iso9660'}\n >>> mnt_info[3].filesystem\n 'dev/sr0'\n >>> mnt_info[3].mount_type\n 'iso9660'\n >>> mnt_info[3].mount_options\n {'dmode': '0500', 'gid': '0', 'iocharset': 'utf8', 'mode': '0400', 'nodev': True,\n 'nosuid': True, 'relatime': True, 'ro': True, 'uhelper': 'udisks2', 'uid': '0'}\n >>> mnt_info[3].mount_options.dmode\n >>> 'dmode' in mnt_info[3].mount_options\n True\n >>> mnt_info.mounts['/run/media/root/VMware Tools'].filesystem\n 'dev/sr0'\n\"\"\"\n\nfrom ..parsers import optlist_to_dict\nfrom .. import Parser, parser, get_active_lines, AttributeDict\n\nimport re\n\n\n@parser('mount')\nclass Mount(Parser):\n \"\"\"Class of information for all output from ``mount`` command.\n\n Attributes:\n rows (list of AttributeDict): List of ``AttributeDict`` objects for\n each row of the command output.\n\n Raises:\n ParseException: Raised when any problem parsing the command output.\n \"\"\"\n\n def __len__(self):\n return len(self.rows)\n\n def __iter__(self):\n for row in self.rows:\n yield row\n\n def __getitem__(self, idx):\n if isinstance(idx, int):\n return self.rows[idx]\n elif isinstance(idx, str):\n return self.mounts[idx]\n\n # /dev/mapper/fedora-home on /home type ext4 (rw,relatime,seclabel,data=ordered) [HOME]\n mount_line_re = r'^(?P\\S+) on (?P.+?) 
type ' + \\\n r'(?P\\S+) \\((?P[^)]+)\\)' + \\\n r'(?: \\[(?P.*)\\])?$'\n mount_line_rex = re.compile(mount_line_re)\n\n def parse_content(self, content):\n self.rows = []\n self.mounts = {}\n for line in get_active_lines(content):\n mount = {}\n mount['mount_clause'] = line\n match = self.mount_line_rex.search(line)\n if match:\n mount['filesystem'] = match.group('filesystem')\n mount['mount_point'] = match.group('mount_point')\n mount['mount_type'] = match.group('mount_type')\n mount_options = match.group('mount_options')\n mount['mount_options'] = AttributeDict(optlist_to_dict(mount_options))\n if match.group('mount_label'):\n mount['mount_label'] = match.group('mount_label')\n else:\n mount['parse_error'] = 'Unable to parse line'\n\n entry = AttributeDict(mount)\n self.rows.append(entry)\n if match:\n self.mounts[mount['mount_point']] = entry\n","sub_path":"insights/parsers/mount.py","file_name":"mount.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
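A quick standalone check of the named-group regex restored above, using the sample line from the module docstring:

import re

mount_line_re = (r'^(?P<filesystem>\S+) on (?P<mount_point>.+?) type '
                 r'(?P<mount_type>\S+) \((?P<mount_options>[^)]+)\)'
                 r'(?: \[(?P<mount_label>.*)\])?$')
line = 'dev/sr0 on /run/media/root/VMware Tools type iso9660 (ro,nosuid) [VMware Tools]'

match = re.compile(mount_line_re).search(line)
print(match.group('filesystem'))   # dev/sr0
print(match.group('mount_point'))  # /run/media/root/VMware Tools (non-greedy stops before ' type ')
print(match.group('mount_label'))  # VMware Tools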
+{"seq_id":"142659717","text":"\"\"\"\n@Authors Leo.cui\n7/5/2018\nFormat train data\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\ndef get_score_format(test_path, answer_sheet_path):\n #read_data\n df = pd.read_csv(test_path)\n\n answer_sheet = df['id']\n\n #save answer_sheet csv\n answer_sheet.to_csv(answer_sheet_path, index = None, header = True)\n\n return\n\n\ndef xbg_format(data_path, save_path, sort_data = True, fillzero = True):\n\n #read_data\n df = pd.read_csv(data_path)\n\n #get ride off -1 label\n df = df[(df.label==0)|(df.label==1)]\n\n #sorting by data\n if sort_data == True:\n df.sort_values('date', inplace = True)\n\n #delete data column\n df = df.drop(['date','id'], axis=1)\n\n if fillzero == True:\n #fill na\n df = df.fillna(-999)\n\n #save csv\n df.to_csv(save_path, index = None, header = False)\n\n return\n\n\ndef csv2npy(csv_path, npy_path):\n\n _csv = np.loadtxt(csv_path, delimiter=',')\n\n #_csv = np.genfromtxt(csv_path, delimiter=\",\", filling_values = -999)\n\n np.save(npy_path, _csv)\n\n\ndef main():\n\n data_path = \"/home/leo/ant/model/data/train.csv\"\n save_path = \"/home/leo/ant/model/data/train_dw.csv\"\n\n #csv_path = \"/home/leo/ant/model/data/test_a_xgb_1.csv\"\n #npy_path = \"/home/leo/ant/model/data/test_a_xgb_1.npy\"\n\n #test_path = \"/home/leo/ant/model/data/test_a.csv\"\n #answer_sheet_path = \"/home/leo/ant/score/answer_sheet.csv\"\n\n xbg_format(data_path, save_path, sort_data = True, fillzero = False)\n #csv2npy(csv_path, npy_path)\n #get_score_format(test_path, answer_sheet_path)\n\nif __name__ == '__main__':\n main()\n","sub_path":"code/lib/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"49449572","text":"import random\nranddna=''\n#for i in range(100000000):\n# randdna=randdna+'ACGT'[random.randint(0,3)]\n\nranddna= ''.join(['ACGT'[random.randint(0,3)] for i in range(10000000)])\n#print(randdna)\nranddna=list(randdna)\nfor i in range(0,len(randdna),10):\n randdna[i:i+3]=list('XYZ')\nranddna=''.join(randdna)\n#print(randdna)\nprint(randdna.count('X'))\n","sub_path":"python_examples/randdna.py","file_name":"randdna.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"82281645","text":"from pprint import pprint\r\n\r\n\r\ndef composition_dict(list):\r\n if len(list) == 3:\r\n composition = {'ingridient_name': list[0], 'quantity': list[1], 'measure': list[2]}\r\n elif len(list) == 4:\r\n composition = {'ingridient_name': list[0] + ' ' + list[1], 'quantity': list[2], 'measure': list[3]}\r\n return composition\r\n\r\n\r\ndef main():\r\n with open('recipes.txt', encoding='utf8') as f:\r\n cook_book = {}\r\n for line in f:\r\n name = line.strip()\r\n amount = int(f.readline().strip())\r\n for i in range(amount):\r\n add_line = f.readline().replace('|', '').split()\r\n cook_book.setdefault(name, [])\r\n cook_book[name].append(composition_dict(add_line))\r\n f.readline()\r\n\r\n pprint(cook_book)\r\n\r\n def get_shop_list_by_dishes(dishes, person_count):\r\n shop_list = {}\r\n for items in dishes:\r\n if items in cook_book:\r\n for ingredients_dict in cook_book[items]:\r\n # print(k)\r\n for items in ingredients_dict.values():\r\n\r\n if items not in shop_list:\r\n shop_list.setdefault(ingredients_dict['ingridient_name'],\r\n {'measure': ingredients_dict['measure'],\r\n 'quantity': int(ingredients_dict['quantity']) * person_count})\r\n else:\r\n shop_list[items]['quantity'] += shop_list[items]['quantity']\r\n\r\n return shop_list\r\n\r\n pprint(get_shop_list_by_dishes(['Запеченный картофель', 'Омлет'], 2))\r\n\r\n\r\nmain()\r\n","sub_path":"home_work_files.py","file_name":"home_work_files.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"593409909","text":"\n\nimport pickle\nimport wx\nimport wx.grid\nimport wx.html\nimport wx.aui as aui\nfrom wx.py.shell import Shell\nfrom xit_matrixunit import *\nfrom xit_Global import *\nfrom xit_Global_myop import *\n\nfrom xit_UI_tripleTextPanel import xit_MyTripleElementScrolledWindow\nfrom xit_UI_mygrid import *\nfrom xit_UI_downradioPanel import *\nfrom xit_UI_menu import MyMenuControl\nfrom six import BytesIO\n\n \n#----------------------------------------------------------------------\ndef GetMondrianData():\n return \\\nb'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00 \\x00\\x00\\x00 \\x08\\x06\\x00\\\n\\x00\\x00szz\\xf4\\x00\\x00\\x00\\x04sBIT\\x08\\x08\\x08\\x08|\\x08d\\x88\\x00\\x00\\x00qID\\\nATX\\x85\\xed\\xd6;\\n\\x800\\x10E\\xd1{\\xc5\\x8d\\xb9r\\x97\\x16\\x0b\\xad$\\x8a\\x82:\\x16\\\no\\xda\\x84pB2\\x1f\\x81Fa\\x8c\\x9c\\x08\\x04Z{\\xcf\\xa72\\xbcv\\xfa\\xc5\\x08 \\x80r\\x80\\\n\\xfc\\xa2\\x0e\\x1c\\xe4\\xba\\xfaX\\x1d\\xd0\\xde]S\\x07\\x02\\xd8>\\xe1wa-`\\x9fQ\\xe9\\\n\\x86\\x01\\x04\\x10\\x00\\\\(Dk\\x1b-\\x04\\xdc\\x1d\\x07\\x14\\x98;\\x0bS\\x7f\\x7f\\xf9\\x13\\\n\\x04\\x10@\\xf9X\\xbe\\x00\\xc9 \\x14K\\xc1<={\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82'\n\n\ndef GetMondrianBitmap():\n return wx.Bitmap(GetMondrianImage())\n\n\ndef GetMondrianImage():\n stream = BytesIO(GetMondrianData())\n return wx.Image(stream)\n\n\ndef GetMondrianIcon():\n icon = wx.Icon()\n icon.CopyFromBitmap(GetMondrianBitmap())\n return icon\n\n\n\n\n#-----xit_MyCenterTextNotebook---------中心部分的NoteBook,包含有矩阵前页,矩阵第一页、第二页等------------------------------------------------------------------------------------- \nclass xit_MyCenterTextNotebook(wx.Notebook):\n def __init__(self, parent):\n wx.Notebook.__init__(self, parent, -1, style=wx.BK_DEFAULT )\n self._pageList=[]\n textctrlPage=wx.TextCtrl(self,-1,style=wx.TE_MULTILINE|wx.HSCROLL)\n textctrlPage.SetFont(xit_G.G_fontandcolour._firstfont)\n textctrlPage.SetForegroundColour(xit_G.G_fontandcolour._firstcolour)\n self._pageList.append(textctrlPage)\n self.AddPage(textctrlPage, \"矩阵前页\")\n self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged)\n self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGING, self.OnPageChanging)\n\n \n\n #用崭新的myMatrixUnit替换所有页面,这适用于矩阵页的完全更换\n def setALLPagesFromMatrixUnit(self,myMatrixUnit):\n cur=myMatrixUnit.cur\n #除首页外,其它页全部删除\n n=self.GetPageCount()\n for i in range(1,n):\n j=n-i\n self.DeletePage(j)\n self._pageList=[self._pageList[0]]\n\n self._pageList[0].SetValue(str(myMatrixUnit._mat))#首页赋值\n\n n=len(myMatrixUnit.matList)\n \n for i in range(n):\n \n myTextPage=xit_MyTripleElementScrolledWindow(self,myMatrixUnit[i])\n self._pageList.append(myTextPage)\n name=(\"第%d个矩阵\" %(i+1))\n self.AddPage(myTextPage, name)\n \n if cur==\"ZERO\":\n self.SetSelection(0)\n else:\n self.SetSelection(cur+1) \n \n def setSelectionPageFromMatrixUnit(self,myMatrixUnit):\n new=self.GetSelection()\n if new==0:\n self._pageList[0].SetValue(str(myMatrixUnit._mat))\n else:\n self._pageList[new].setMyText(myMatrixUnit[new-1])\n def setSelectionPageFromMatrix(self,myMatrix):\n new=self.GetSelection()\n if new==0:\n self._pageList[0].SetValue(str(myMatrix))\n else:\n self._pageList[new].setMyText(myMatrix)\n\n def AddPageFromMatrix(self,myMatrix):#增加时间:2020年1月20日,增加一个页面,避免刷屏\n n=self.GetPageCount()\n myTextPage=xit_MyTripleElementScrolledWindow(self,myMatrix)\n myTextPage.setMyText(myMatrix)\n self._pageList.append(myTextPage)\n name=(\"第%d个矩阵\" %(n))\n self.AddPage(myTextPage, name)\n self.SetSelection(n)\n \n def OnPageChanged(self, 
event):\n if self:\n old = event.GetOldSelection()\n new = event.GetSelection()\n sel = self.GetSelection()\n if new==0:\n xit_G.G_myOP.setUnitAndGridFromCUR(\"ZERO\")\n else:\n xit_G.G_myOP.setUnitAndGridFromCUR(new-1)\n event.Skip()\n def OnPageChanging(self, event):\n if self:\n old = event.GetOldSelection()\n new = event.GetSelection()\n sel = self.GetSelection()\n \n event.Skip()\n# \n#-----xit_AUIFrame----------------主面板------------------------------------------------------- \nclass xit_AUIFrame(wx.Frame):\n\n def __init__(self, parent, id=-1, title=\"线性代数小专家\",style=wx.DEFAULT_FRAME_STYLE |\n wx.SUNKEN_BORDER |wx.MAXIMIZE|\n wx.CLIP_CHILDREN):\n\n wx.Frame.__init__(self,parent,id=id,title=title,pos=wx.DefaultPosition,\n size=wx.Size(900,650),style=style)\n\n # tell FrameManager to manage this frame\n self._mgr = aui.AuiManager()\n self._mgr.SetManagedWindow(self)\n\n self._perspectives = []\n self.n = 0\n self.x = 0\n self._matTree=[]\n self.font = wx.Font(wx.FontInfo(10).FaceName(\"新宋体\"))\n xit_G.G_fontandcolour=G_fontcolour()\n\n #生成菜单\n self._menudict={}\n mb = wx.MenuBar()\n menuctrl=MyMenuControl(self,\"mainframe\")\n #文件菜单 \n file_menu = wx.Menu()\n tmp=file_menu.Append(wx.ID_ANY,\"新建(&N)\")\n self._menudict[tmp.GetId()]=tmp\n self.Bind(wx.EVT_MENU, menuctrl.Control, id=tmp.GetId())\n tmp=file_menu.Append(wx.ID_ANY,\"打开...\")\n self._menudict[tmp.GetId()]=tmp\n self.Bind(wx.EVT_MENU, menuctrl.Control, id=tmp.GetId())\n tmp=file_menu.Append(wx.ID_ANY,\"合并打开...\")\n self._menudict[tmp.GetId()]=tmp\n self.Bind(wx.EVT_MENU, menuctrl.Control, id=tmp.GetId())\n tmp=file_menu.Append(wx.ID_ANY,\"保存\")\n self._menudict[tmp.GetId()]=tmp\n self.Bind(wx.EVT_MENU, menuctrl.Control, id=tmp.GetId())\n tmp=file_menu.Append(wx.ID_ANY,\"另存为...\")\n self._menudict[tmp.GetId()]=tmp\n self.Bind(wx.EVT_MENU, menuctrl.Control, id=tmp.GetId())\n tmp=file_menu.Append(wx.ID_ANY,\"退出\")\n self._menudict[tmp.GetId()]=tmp\n self.Bind(wx.EVT_MENU, menuctrl.Control, id=tmp.GetId())\n #视图菜单--视图菜单的方法在这里更方便\n view_menu = wx.Menu()\n tmp=view_menu.Append(wx.ID_ANY,\"显示左侧矩阵树\")\n self._menudict[tmp.GetId()]=tmp\n self.Bind(wx.EVT_MENU, self.ViewControl, id=tmp.GetId())\n tmp=view_menu.Append(wx.ID_ANY,\"显示矩阵网格\")\n self._menudict[tmp.GetId()]=tmp\n self.Bind(wx.EVT_MENU, self.ViewControl, id=tmp.GetId())\n tmp=view_menu.Append(wx.ID_ANY,\"显示下侧面板\")\n self._menudict[tmp.GetId()]=tmp\n self.Bind(wx.EVT_MENU, self.ViewControl, id=tmp.GetId())\n tmp=view_menu.Append(wx.ID_ANY,\"显示控制台\")\n self._menudict[tmp.GetId()]=tmp\n self.Bind(wx.EVT_MENU, self.ViewControl, id=tmp.GetId())\n tmp=view_menu.Append(wx.ID_ANY,\"恢复默认设置\")\n self._menudict[tmp.GetId()]=tmp\n self.Bind(wx.EVT_MENU, self.ViewControl, id=tmp.GetId())\n\n #self.Bind(wx.EVT_CONTEXT_MENU, MyMenuControl(self).OnContextMenu)\n\n mb.Append(file_menu, \"文件(&F)\")\n mb.Append(view_menu, \"视图(&V)\")\n \n self.SetMenuBar(mb)\n #生成子面板\n \n self._mymattree=self.CreateTreeCtrl()\n self._mygrid=xit_MyGrid(self)\n self._mycentertextnotebook=xit_MyCenterTextNotebook(self)\n self._myshell=self.CreateShell()\n self._mynotebook=xit_MyNotebookPanel(self)\n\n \n xit_G.G_myOP=xit_MyOP(myCenterTextNotebook=self._mycentertextnotebook,cur=\"ZERO\",grid=self._mygrid,tree=self._mymattree,treedict=self.treedict,menudict=self._menudict,mainwindow=self)\n \n \n self.SetIcon(GetMondrianIcon())\n self.SetMinSize(wx.Size(800, 600))\n\n self._mymattree.SetFont(self.font)\n self._myshell.SetFont(self.font)\n self._mynotebook.SetFont(self.font)\n # add a bunch of panes\n 
self._mgr.AddPane(self._mymattree, aui.AuiPaneInfo().Name(\"练习题集合\").Caption(\"练习题集合\").BestSize(wx.Size(250,600)).Left())\n self._mgr.AddPane(self._myshell, aui.AuiPaneInfo().Name(\"控制台\").Caption(\"控制台\").BestSize(wx.Size(600,800)).Right())\n self._mgr.AddPane(self._mynotebook, aui.AuiPaneInfo().Name(\"选项卡\").Caption(\"选项卡\").BestSize(wx.Size(650,350)).Bottom())\n self._mgr.AddPane(self._mygrid, aui.AuiPaneInfo().Name(\"矩阵网格\").Caption(\"矩阵网格\").BestSize(wx.Size(250,200)).Left().Position(1))\n # create some center panes\n \n self._mgr.AddPane(self._mycentertextnotebook, aui.AuiPaneInfo().Name(\"结果\").CenterPane())\n\n # make some default perspectives\n\n self._mgr.GetPane(\"练习题集合\").Show()\n self._mgr.GetPane(\"选项卡\").Show()\n self._mgr.GetPane(\"控制台\").Show()\n self._mgr.GetPane(\"结果\").Show()\n self._mgr.GetPane(\"矩阵网格\").Show()\n #self._mgr.GetPane(\"测试框\").Hide()\n self.perspective_default = self._mgr.SavePerspective()\n\n # \"commit\" all changes made to FrameManager\n self._mgr.Update()\n\n \n\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\n self.Bind(wx.EVT_SIZE, self.OnSize)\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n\n # Show How To Use The Closing Panes Event\n self.Bind(aui.EVT_AUI_PANE_CLOSE, self.OnPaneClose)\n \n \n def ViewControl(self,event):\n menuname=self._menudict[event.GetId()].GetName()\n if menuname==\"显示左侧矩阵树\":\n '''self._mgr.AddPane(self.CreateTreeCtrl(), aui.AuiPaneInfo().\n Caption(\"Tree Control\").\n Float().FloatingPosition(self.GetStartPosition()).\n FloatingSize(wx.Size(150, 300)).CloseButton(True).MaximizeButton(True))\n ''' \n self._mgr.GetPane(\"练习题集合\").Float().FloatingSize(wx.Size(250,500)).CloseButton(True).MaximizeButton(True).Show()\n self._mgr.Update()\n elif menuname==\"显示矩阵网格\":\n self._mgr.GetPane(\"矩阵网格\").Float().FloatingSize(wx.Size(400,300)).CloseButton(True).MaximizeButton(True).Show()\n self._mgr.Update()\n elif menuname==\"显示下侧面板\":\n self._mgr.GetPane(\"选项卡\").Float().FloatingSize(wx.Size(1000,300)).CloseButton(True).MaximizeButton(True).Show()\n self._mgr.Update()\n elif menuname==\"显示控制台\":\n self._mgr.GetPane(\"控制台\").Float().FloatingSize(wx.Size(400,1000)).CloseButton(True).MaximizeButton(True).Show()\n self._mgr.Update()\n elif menuname==\"恢复默认设置\":\n self._mgr.LoadPerspective(self.perspective_default)\n def DoUpdate(self):\n\n self._mgr.Update()\n\n\n def OnEraseBackground(self, event):\n\n event.Skip()\n\n\n def OnSize(self, event):\n\n event.Skip()\n\n\n def CreateTreeCtrl(self):\n\n tree = wx.TreeCtrl(self, -1, wx.Point(0, 0), wx.Size(250, 100),\n wx.TR_DEFAULT_STYLE | wx.NO_BORDER|wx.TR_HIDE_ROOT)\n\n root = tree.AddRoot(\"线性代数习题集\")\n items = []\n self.treedict={}\n\n imglist = wx.ImageList(16, 16, True, 2)\n imglist.Add(wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, wx.Size(16,16)))\n imglist.Add(wx.ArtProvider.GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, wx.Size(16,16)))\n tree.AssignImageList(imglist)\n for (_id,_father,_label) in xit_G.getTreeList():\n if _father==\"root\":\n self.treedict[_id]=tree.AppendItem(root,_label, 0)\n else:\n self.treedict[_id]=tree.AppendItem(self.treedict[_father], _label,0)\n \n \n self.Bind(wx.EVT_TREE_ITEM_EXPANDED, self.OnItemExpanded_Tree,tree)\n self.Bind(wx.EVT_TREE_ITEM_COLLAPSED,self.OnItemCollapsed_Tree,tree)\n self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged_Tree,tree)\n self.Bind(wx.EVT_TREE_ITEM_ACTIVATED,self.OnActivated_Tree,tree)\n self.Bind(wx.EVT_TREE_ITEM_EXPANDING,self.OnItemExpanding_Tree,tree)\n init_printing(use_unicode=True)\n 
Lst=FileHelper.getUnitListFromSRC()\n\n for _matUnit in Lst:\n \n self.treedict[_matUnit.ID]=tree.AppendItem(self.treedict[_matUnit.matType], _matUnit.Riddle,1,data=\"\")\n tree.SetItemData(self.treedict[_matUnit.ID],_matUnit)\n \n #添加时间:2020年2月1日,初始化我的矩阵\n try:\n f=open(\"mytreedefault.dat\",\"rb\")\n while True:\n _unit=pickle.load(f)\n if _unit==None:\n break\n tmp=tree.AppendItem(self.treedict[\"MYTREE\"], _unit.Riddle,1,data=\"\")\n tree.SetItemData(tmp,_unit)\n except:\n pass\n finally:\n pass\n \n\n return tree\n\n def OnItemExpanded_Tree(self,evt):\n pass\n def OnItemCollapsed_Tree(self,evt):\n pass\n def OnSelChanged_Tree(self,evt):\n \n item=evt.GetItem()\n \n xit_G.G_myOP._myMatrixUnit=self._mymattree.GetItemData(item)\n if xit_G.G_myOP._myMatrixUnit!=None:\n xit_G.G_myOP._myMatrixUnit.cur=0\n xit_G.G_myOP.setFromxitMatrixUnit(xit_G.G_myOP._myMatrixUnit,0)\n self._mgr.Update() \n \n self._myshell.prompt()\n \n \n \n #self._mgr.Update()\n \n def OnActivated_Tree(self,evt):\n pass\n def OnItemExpanding_Tree(self,evt):\n pass\n\n def CreateShell(self):\n ctrl=Shell(parent=self)\n ctrl.redirectStdout(redirect=True)\n ctrl.redirectStdin(redirect=True)\n ctrl.redirectStderr(redirect=True)\n init_printing(use_unicode=False)\n \n return ctrl\n\n \n \n\n def OnPaneClose(self, event):\n\n caption = event.GetPane().caption\n\n if caption in [\"Tree Pane\", \"Dock Manager Settings\", \"Fixed Pane\"]:\n msg = \"Are You Sure You Want To Close This Pane?\"\n dlg = wx.MessageDialog(self, msg, \"AUI Question\",\n wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)\n\n if dlg.ShowModal() in [wx.ID_NO, wx.ID_CANCEL]:\n event.Veto()\n dlg.Destroy()\n\n\n def OnClose(self, event):\n self._mgr.UnInit()\n del self._mgr\n self.Destroy()\n\n\n def OnExit(self, event):\n self.Close()\n\n def OnAbout(self, event):\n\n msg = \"wx.aui Demo\\n\" + \\\n \"An advanced window management library for wxWidgets\\n\" + \\\n \"(c) Copyright 2005-2006, Kirix Corporation\"\n dlg = wx.MessageDialog(self, msg, \"About wx.aui Demo\",\n wx.OK | wx.ICON_INFORMATION)\n dlg.ShowModal()\n dlg.Destroy()\n\n\n#------------------------------------------------------------------------------------------------ \nOP=None\nU=None\nM=None\ndef Update():\n print(\"你牛!你操作吧!\\n\")\n print(\"A--当前运行的矩阵(中心页面没有定位矩阵则为None)\")\n global OP,U,M\n OP=xit_G.G_myOP\n U=xit_G.G_myOP._myMatrixUnit\n if xit_G.G_myOP._myMatrixUnit!=None and type(xit_G.G_myOP._myMatrixUnit.cur)==type(0):\n M=xit_G.G_myOP._myMatrixUnit[xit_G.G_myOP._myMatrixUnit.cur]\nif __name__ == '__main__':\n app=wx.App()\n frame = xit_AUIFrame(parent=None)\n\n frame.Show()\n app.MainLoop()\n","sub_path":"xit_ds_wxpython/xit_main.py","file_name":"xit_main.py","file_ext":"py","file_size_in_byte":15841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"47608017","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright © 2017 Matthew Stone \n# Distributed under terms of the MIT license.\n\n\"\"\"\nClassification of reciprocal translocations.\n\"\"\"\n\n\ndef classify_insertion(plus, minus, mh_buffer=50):\n plus_A = plus.pos\n minus_A = minus.pos\n plus_B = plus.stop\n minus_B = minus.stop\n\n # Buffer comparisons\n def _greater(p1, p2):\n return p1 > p2 - mh_buffer\n\n if _greater(minus_A, plus_A) and _greater(minus_B, plus_B):\n return 'INS_B2A'\n elif _greater(plus_A, minus_A) and _greater(plus_B, minus_B):\n return 'INS_A2B'\n else:\n return 'INS_UNCLASSIFIED'\n\n\ndef classify_simple_translocation(plus, minus, mh_buffer=10):\n \"\"\"\n Resolve a pair of interchromosomal breakends.\n\n Parameters\n ----------\n FF : pysam.VariantRecord\n FF inversion breakpoint.\n RR : pysam.VariantRecord\n RR inversion breakpoint.\n cnvs : list of pysam.VariantRecord\n List of overlapping CNVs.\n\n Returns\n -------\n svtype : str\n Complex SV class.\n \"\"\"\n\n # plus refers to breakend whose strand begins with '+'\n if plus.chrom != minus.chrom or plus.info['CHR2'] != minus.info['CHR2']:\n return 'TLOC_MISMATCH_CHROM'\n\n # Reference chromosomes are labeled A and B\n # Breakpoints/Derivative chromosomes are labeled plus and minus, based on\n # ref chromosome A's strandedness on each breakpoint\n # plus_A = the breakend of ref chrom A on derivative chrom where A is\n # forward-stranded\n\n # get positions\n plus_A = plus.pos\n minus_A = minus.pos\n plus_B = plus.stop\n minus_B = minus.stop\n\n plus_strands = plus.info['STRANDS']\n\n # Buffer comparisons\n def _greater(p1, p2):\n return p1 > p2 - mh_buffer\n\n if plus_strands == '+-':\n if _greater(minus_A, plus_A) and _greater(plus_B, minus_B):\n return 'CTX_PP/QQ'\n if _greater(minus_A, plus_A) and _greater(minus_B, plus_B):\n return 'CTX_INS_B2A'\n if _greater(plus_A, minus_A) and _greater(plus_B, minus_B):\n return 'CTX_INS_A2B'\n else:\n if _greater(minus_A, plus_A) and _greater(minus_B, plus_B):\n return 'CTX_PQ/QP'\n if _greater(minus_A, plus_A) and _greater(plus_B, minus_B):\n return 'CTX_INV_INS_B2A'\n if _greater(plus_A, minus_A) and _greater(minus_B, plus_B):\n return 'CTX_INV_INS_A2B'\n\n return 'CTX_UNR'\n","sub_path":"svtk/cxsv/cpx_tloc.py","file_name":"cpx_tloc.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"620191472","text":"\"\"\"\nScript for plotting evaluated fluxes. \nIt uses mean fluxes saved in the numpy array Fluxesxx.npy (model)\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport sys\nimport os\n\nsys.path.append('D:\\Evaporation\\Source_Codes\\TwoSourceEnergy' +\n 'BalanceModel\\pyTSEBmod')\nfrom Statistics import NSE, Bias, MAE, RMSD, R2\n\n#%% Get data\n\nhome = 'D:\\Auswertung_Feldarbeiten_Pt_Nobressart\\TIR_Processing'\nflightDates = np.load(os.path.join(home, 'InputsProcessing', \n 'FlightDates.npy')).item()\n \n## Input here fluxes that should be plotted\nPT_Flux = np.load(os.path.join(home, 'Fluxes', 'rs03', \n 'FluxesPT_LAI08_20_FG09_NDI06_lowEl_selected.npy')).item()\nOS_Flux = np.load(os.path.join(home, 'Fluxes', 'rs03', \n 'FluxesOS_NDI06_lowEl_Kustas_selected_cut0.npy')).item()\n##FluxesPT_LAI08_20_FG09_NDI06_lowEl_selected \n\nflights = []\nfor date in flightDates:\n for hours in flightDates[date]:\n flights.append(flightDates[date][hours])\nflights.sort()\ndates = np.array(flights, dtype = np.datetime64)\n \ndef extractFluxes(modeFluxes_dict, flux_source, *args):\n keys = modeFluxes_dict.keys()\n keys.sort()\n fluxes = []\n if args:\n [fluxes.append(modeFluxes_dict[key][flux_source][args[0]]) for key in keys]\n else:\n [fluxes.append(modeFluxes_dict[key][flux_source]) for key in keys]\n return fluxes\n \nH_mod_PT = np.array(extractFluxes(PT_Flux, 'Modelled_Flux_masked', 0)).astype('float64')\nH_mod_OS = np.array(extractFluxes(OS_Flux, 'Modelled_Flux_masked', 0)).astype('float64')\nH_EC = np.array(extractFluxes(PT_Flux, 'EC_Flux', 0))\nH_Scinti = np.array(extractFluxes(PT_Flux, 'Scinti_H_Flux'))\n\nRn_mod_PT = np.array(extractFluxes(PT_Flux, 'Modelled_Flux_masked', 3)).astype('float64')\nRn_mod_OS = np.array(extractFluxes(OS_Flux, 'Modelled_Flux_masked', 3)).astype('float64')\nRn_EC = np.array(extractFluxes(PT_Flux, 'EC_Flux', 3))\nG_mod_PT = np.array(extractFluxes(PT_Flux, 'Modelled_Flux_masked', 2)).astype('float64')\nG_mod_OS = np.array(extractFluxes(OS_Flux, 'Modelled_Flux_masked', 2)).astype('float64')\nG_EC = np.array(extractFluxes(PT_Flux, 'EC_Flux', 2))\n\nLE_mod_PT = np.array(extractFluxes(PT_Flux, 'Modelled_Flux_masked', 1)).astype('float64')\nLE_mod_OS = np.array(extractFluxes(OS_Flux, 'Modelled_Flux_masked', 1)).astype('float64')\nLE_EC = np.array(extractFluxes(PT_Flux, 'EC_Flux', 1))\nLE_Scinti = Rn_EC - G_EC - H_Scinti\n\n# corrected according to Bowen ratio and residual method\nebcGap = Rn_EC - G_EC - LE_EC - H_EC\nEBR = (LE_EC + H_EC)/(Rn_EC - G_EC)\nEF = LE_EC/(LE_EC + H_EC)\n\n# Threshold according to Ingwersen et al. 
2015\nthres = 0.5\nthres_EBR = ((EBR > thres) & (EBR < (2 - thres)))\n\nLE_EC_corr_EB = LE_EC + EF*ebcGap\nLE_EC_corr_EB[~thres_EBR] = LE_EC[~thres_EBR]\nH_EC_corr_EB = H_EC + (1 - EF)*ebcGap\nH_EC_corr_EB[~thres_EBR] = H_EC[~thres_EBR]\nLE_EC_corr_res = LE_EC + ebcGap\nH_EC_corr_res = H_EC + ebcGap\n\nFluxes = pd.DataFrame({'Rn' : Rn_EC, 'Rn_PT' : Rn_mod_PT, 'Rn_OS' : Rn_mod_OS, \n 'G' : G_EC, 'G_PT' : G_mod_PT, 'G_OS' : G_mod_OS, 'H' : H_EC, \n 'LE' : LE_EC, 'H_BR' : H_EC_corr_EB,\n 'LE_BR' : LE_EC_corr_EB, 'H_res' : H_EC_corr_res,\n 'LE_res' : LE_EC_corr_res, 'H_Sc' : H_Scinti,\n 'LE_Sc' : LE_Scinti, 'H_PT' : H_mod_PT, \n 'LE_PT' : LE_mod_PT, 'H_OS' : H_mod_OS,\n 'LE_OS' : LE_mod_OS, 'Flights' : flights}, index = pd.DatetimeIndex(dates))\n\n\n#%% Difference statistics\n\nDiff_stats = {'Mean_OBS' : {}, 'NSE_PT' : {}, 'NSE_OS' : {},\n 'Bias_PT' : {}, 'Bias_PT' : {},\n 'MAE_PT' : {}, 'RMSD_PT' : {}}\n\nDiff_stats['Mean_OBS'] = {\n 'Rn' : np.round(Rn_EC.mean(), decimals = 2),\n 'G' : np.round(G_EC.mean(), decimals = 2),\n 'H' : np.round(H_EC.mean(), decimals = 2),\n 'LE' : np.round(LE_EC.mean(), decimals = 2),\n 'H_BR' : np.round(H_EC_corr_EB.mean(), decimals = 2),\n 'LE_BR' : np.round(LE_EC_corr_EB.mean(), decimals = 2),\n 'H_res' : np.round(H_EC_corr_res.mean(), decimals = 2), \n 'LE_res': np.round(LE_EC_corr_res.mean(), decimals = 2),\n 'H_Sc' : np.round(H_Scinti.mean(), decimals = 2),\n 'LE_Sc' : np.round(LE_Scinti.mean(), decimals = 2)} \n\nDiff_stats['NSE_PT'] = {\n 'Rn' : NSE(Rn_EC, Rn_mod_PT),\n 'G' : NSE(G_EC, G_mod_PT),\n 'H' : NSE(H_EC, H_mod_PT),\n 'LE' : NSE(LE_EC, LE_mod_PT),\n 'H_BR' : NSE(H_EC_corr_EB, H_mod_PT),\n 'LE_BR' : NSE(LE_EC_corr_EB, LE_mod_PT),\n 'H_res' : NSE(H_EC_corr_res, H_mod_PT),\n 'LE_res': NSE(LE_EC_corr_res, LE_mod_PT),\n 'H_Sc' : NSE(H_Scinti, H_mod_PT),\n 'LE_Sc' : NSE(LE_Scinti, LE_mod_PT)} \n\nDiff_stats['NSE_OS'] = {\n 'Rn' : NSE(Rn_EC, Rn_mod_OS),\n 'G' : NSE(G_EC, G_mod_OS),\n 'H' : NSE(H_EC, H_mod_OS),\n 'LE' : NSE(LE_EC, LE_mod_OS),\n 'H_BR' : NSE(H_EC_corr_EB, H_mod_OS),\n 'LE_BR' : NSE(LE_EC_corr_EB, LE_mod_OS),\n 'H_res' : NSE(H_EC_corr_res, H_mod_OS),\n 'LE_res': NSE(LE_EC_corr_res, LE_mod_OS),\n 'H_Sc' : NSE(H_Scinti, H_mod_OS),\n 'LE_Sc' : NSE(LE_Scinti, LE_mod_OS)} \n\nDiff_stats['Bias_PT'] = {\n 'Rn' : Bias(Rn_EC, Rn_mod_PT),\n 'G' : Bias(G_EC, G_mod_PT),\n 'H' : Bias(H_EC, H_mod_PT),\n 'LE' : Bias(LE_EC, LE_mod_PT),\n 'H_BR' : Bias(H_EC_corr_EB, H_mod_PT),\n 'LE_BR' : Bias(LE_EC_corr_EB, LE_mod_PT),\n 'H_res' : Bias(H_EC_corr_res, H_mod_PT),\n 'LE_res': Bias(LE_EC_corr_res, LE_mod_PT),\n 'H_Sc' : Bias(H_Scinti, H_mod_PT),\n 'LE_Sc' : Bias(LE_Scinti, LE_mod_PT)} \n\nDiff_stats['Bias_OS'] = {\n 'Rn' : Bias(Rn_EC, Rn_mod_OS),\n 'G' : Bias(G_EC, G_mod_OS),\n 'H' : Bias(H_EC, H_mod_OS),\n 'LE' : Bias(LE_EC, LE_mod_OS),\n 'H_BR' : Bias(H_EC_corr_EB, H_mod_OS),\n 'LE_BR' : Bias(LE_EC_corr_EB, LE_mod_OS),\n 'H_res' : Bias(H_EC_corr_res, H_mod_OS),\n 'LE_res': Bias(LE_EC_corr_res, LE_mod_OS),\n 'H_Sc' : Bias(H_Scinti, H_mod_OS),\n 'LE_Sc' : Bias(LE_Scinti, LE_mod_OS)} \n \nDiff_stats['MAE_PT'] = {\n 'Rn' : MAE(Rn_EC, Rn_mod_PT),\n 'G' : MAE(G_EC, G_mod_PT),\n 'H' : MAE(H_EC, H_mod_PT),\n 'LE' : MAE(LE_EC, LE_mod_PT),\n 'H_BR' : MAE(H_EC_corr_EB, H_mod_PT),\n 'LE_BR' : MAE(LE_EC_corr_EB, LE_mod_PT),\n 'H_res' : MAE(H_EC_corr_res, H_mod_PT),\n 'LE_res': MAE(LE_EC_corr_res, LE_mod_PT),\n 'H_Sc' : MAE(H_Scinti, H_mod_PT),\n 'LE_Sc' : MAE(LE_Scinti, LE_mod_PT)} \n\nDiff_stats['MAE_OS'] = {\n 'Rn' : MAE(Rn_EC, Rn_mod_OS),\n 'G' : MAE(G_EC, G_mod_OS),\n 
'H' : MAE(H_EC, H_mod_OS),\n 'LE' : MAE(LE_EC, LE_mod_OS),\n 'H_BR' : MAE(H_EC_corr_EB, H_mod_OS),\n 'LE_BR' : MAE(LE_EC_corr_EB, LE_mod_OS),\n 'H_res' : MAE(H_EC_corr_res, H_mod_OS),\n 'LE_res': MAE(LE_EC_corr_res, LE_mod_OS),\n 'H_Sc' : MAE(H_Scinti, H_mod_OS),\n 'LE_Sc' : MAE(LE_Scinti, LE_mod_OS)} \n \nDiff_stats['RMSD_PT'] = {\n 'Rn' : RMSD(Rn_EC, Rn_mod_PT),\n 'G' : RMSD(G_EC, G_mod_PT),\n 'H' : RMSD(H_EC, H_mod_PT),\n 'LE' : RMSD(LE_EC, LE_mod_PT),\n 'H_BR' : RMSD(H_EC_corr_EB, H_mod_PT),\n 'LE_BR' : RMSD(LE_EC_corr_EB, LE_mod_PT),\n 'H_res' : RMSD(H_EC_corr_res, H_mod_PT),\n 'LE_res': RMSD(LE_EC_corr_res, LE_mod_PT),\n 'H_Sc' : RMSD(H_Scinti, H_mod_PT),\n 'LE_Sc' : RMSD(LE_Scinti, LE_mod_PT)} \n\nDiff_stats['RMSD_OS'] = {\n 'Rn' : RMSD(Rn_EC, Rn_mod_OS),\n 'G' : RMSD(G_EC, G_mod_OS),\n 'H' : RMSD(H_EC, H_mod_OS),\n 'LE' : RMSD(LE_EC, LE_mod_OS),\n 'H_BR' : RMSD(H_EC_corr_EB, H_mod_OS),\n 'LE_BR' : RMSD(LE_EC_corr_EB, LE_mod_OS),\n 'H_res' : RMSD(H_EC_corr_res, H_mod_OS),\n 'LE_res': RMSD(LE_EC_corr_res, LE_mod_OS),\n 'H_Sc' : RMSD(H_Scinti, H_mod_OS),\n 'LE_Sc' : RMSD(LE_Scinti, LE_mod_OS)} \n \nDiff_stats['R2_PT'] = {\n 'Rn' : R2(Rn_EC, Rn_mod_PT),\n 'G' : R2(G_EC, G_mod_PT),\n 'H' : R2(H_EC, H_mod_PT),\n 'LE' : R2(LE_EC, LE_mod_PT),\n 'H_BR' : R2(H_EC_corr_EB, H_mod_PT),\n 'LE_BR' : R2(LE_EC_corr_EB, LE_mod_PT),\n 'H_res' : R2(H_EC_corr_res, H_mod_PT),\n 'LE_res': R2(LE_EC_corr_res, LE_mod_PT),\n 'H_Sc' : R2(H_Scinti, H_mod_PT),\n 'LE_Sc' : R2(LE_Scinti, LE_mod_PT)} \n\nDiff_stats['R2_OS'] = {\n 'Rn' : R2(Rn_EC, Rn_mod_OS),\n 'G' : R2(G_EC, G_mod_OS),\n 'H' : R2(H_EC, H_mod_OS),\n 'LE' : R2(LE_EC, LE_mod_OS),\n 'H_BR' : R2(H_EC_corr_EB, H_mod_OS),\n 'LE_BR' : R2(LE_EC_corr_EB, LE_mod_OS),\n 'H_res' : R2(H_EC_corr_res, H_mod_OS),\n 'LE_res': R2(LE_EC_corr_res, LE_mod_OS),\n 'H_Sc' : R2(H_Scinti, H_mod_OS),\n 'LE_Sc' : R2(LE_Scinti, LE_mod_OS)} \n\ndiff_Stats = pd.DataFrame(Diff_stats, columns = ['Mean_OBS', 'Bias_PT', \n 'Bias_OS', 'MAE_PT', 'MAE_OS', 'RMSD_PT', 'RMSD_OS', 'NSE_PT', \n 'NSE_OS'])\n \ndel [Diff_stats, NSE, MAE, Bias]\n\n#%% Plotting against raw EC fluxes\n\ncolors = ['#3498db', '#2ecc71', '#f7cf33', '#fc9d1d','#fd484d', '#9b59b6', '#51677b']\npal = sns.color_palette(colors)\n\ncolors_rgb = [(52, 152, 219), (46, 204, 113), (247, 207, 51), (252, 157, 29),\n (253, 72, 77),(155, 89, 182), (81, 103, 123)]\n# http://www.husl-colors.org/\npal_red_light = sns.light_palette((11.4, 97.4, 58.1), input=\"husl\")\npal_red_dark = sns.dark_palette((11.4, 97.4, 58.1), input=\"husl\")\npal_blue_light = sns.light_palette((242.2, 90.1, 60.2), input=\"husl\")\npal_blue_dark = sns.dark_palette((242.2, 90.1, 60.2), input=\"husl\")\npal_orange_light = sns.light_palette((41.2, 96.8, 72.8), input=\"husl\")\npal_orange_dark = sns.dark_palette((41.2, 96.8, 72.8), input=\"husl\")\npal_green_light = sns.light_palette((137.9, 93.2, 72.9), input=\"husl\")\npal_green_dark = sns.dark_palette((137.9, 93.2, 72.9), input=\"husl\")\n\nsns.set(context = \"paper\", style = 'white', palette = pal,\n rc = {'axes.labelsize': 18.0, 'figure.figsize': [14, 14], \n 'legend.fontsize': 16.0, 'xtick.labelsize': 18.0,\n 'ytick.labelsize': 18.0, 'xtick.major.size': 4.0,\n 'ytick.major.size': 4.0})\n\n# Options: EC_raw,\n\nobs_options = {'EC_raw' : \n {'title' : 'Modeled fluxes vs EC flux without EB closure',\n 'H' : H_EC, 'LE' : LE_EC,\n 'outFile' : 'Flux_EC_Comparison.png'},\n 'EC_corr_EB': \n {'title' : 'Modeled fluxes vs EC fluxes with Bowen Ratio closure',\n 'H' : H_EC_corr_EB, 'LE' : LE_EC_corr_EB,\n 'outFile' 
: 'Flux_EC_corr_BR_Comparison'},\n 'EC_corr_LE_res': \n {'title' : 'Modeled fluxes vs EC flux with LE residual closure',\n 'H' : H_EC, 'LE' : LE_EC_corr_res,\n 'outFile' : 'Flux_EC_corr_LE_residual_Comparison.png'},\n 'EC_corr_H_res': \n {'title' : 'Modeled fluxes vs EC flux with H residual closure',\n 'H' : H_EC_corr_res, 'LE' : LE_EC,\n 'outFile' : 'Flux_EC_corr_H_residual_Comparison.png'},\n 'Scinti': \n {'title' : 'Modeled fluxes vs scintillometer fluxes',\n 'H' : H_Scinti, 'LE' : LE_Scinti,\n 'outFile' : 'Flux_Scinti_Comparison.png'},\n }\n\n## Select observed fluxes to which modelled fluxes are compared \nobs_chosen = 'EC_corr_EB'\n##\n\ntitle = obs_options[obs_chosen]['title']\nH_obs = obs_options[obs_chosen]['H']\nLE_obs = obs_options[obs_chosen]['LE']\noutFile = obs_options[obs_chosen]['outFile']\n\nfig = plt.figure()\n\nax = fig.add_subplot(111, aspect = 1)\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.spines['left'].set_visible(False)\nax.set_ylabel(r'Modeled fluxes (W m$^{-2}$)', labelpad=10, fontsize=20, zorder = 2)\nax.set_xlabel(r'Observed fluxes (W m$^{-2}$)', labelpad=10, fontsize=20, zorder = 2)\nax.get_yaxis().set_label_coords(-0.08,0.5)\nax.get_xaxis().set_label_coords(0.5, -0.07)\nfigure_title = title\nplt.text(0.5, 1.08, figure_title,\n horizontalalignment='center',\n fontsize=20,\n transform = ax.transAxes)\n\n\nax.set_axis_bgcolor('white')\nplt.tick_params(\n which='both', # both major and minor ticks are affected\n labelleft='off',\n labelright = 'off',\n labelbottom='off',\n bottom = 'off',\n top = 'off',\n right = 'off',\n left = 'off') # labels along the bottom edge are off\n\n\nax1 = fig.add_subplot(221, aspect = 1)\n\nH_PT_plot = plt.plot(H_obs, H_mod_PT, 'o', color = pal[4], \n label = 'TSEB', lw = 1, ms = 10)\n\n\n#h = sns.lmplot('H_BR', 'H_PT', data = Fluxes, hue = 'Day', fit_reg = False, \n# size = 10, scatter_kws={\"s\": 150, 'alpha' : 1, \n# 'linewidth' : 0.5, 'edgecolor' : None, \n# 'zorder' : 2}, legend = False,\n# markers =[\"o\", \"v\", '^', '*','p']\n# )\n\nH_OS_plot = plt.plot(H_obs, H_mod_OS, 'v', color = pal_red_dark[-3], \n label = 'OSEB', lw = 1, ms = 10)\n\nH_mod = np.concatenate((H_mod_PT, H_mod_OS)) \nlim_max = max(np.ceil(H_mod[~np.isnan(H_mod)].max()/50)*50, \n np.ceil(H_obs[~np.isnan(H_obs)].max()/50)*50)\nlim_min = min(np.floor(H_mod[~np.isnan(H_mod)].min()/50)*50, \n np.floor(H_obs[~np.isnan(H_obs)].min()/50)*50)\nax1.set_xlim([lim_min, lim_max])\nax1.set_ylim([lim_min, lim_max])\nax1.xaxis.set_ticks(np.arange(lim_min,ax1.get_ylim()[1]+1,100))\nax1.yaxis.set_ticks(np.arange(lim_min,ax1.get_ylim()[1]+1,100))\nax1.plot([lim_min, lim_max], [lim_min, lim_max], '--', \n color = (0.75, 0.75, 0.75) , lw = 1, zorder = 1)\nax1.text(0.04, 0.96,'R$^2$$_{TSEB}$: %.2f\\nR$^2$$_{OSEB}$: %.2f' % (diff_Stats.NSE_PT.H_BR, \n diff_Stats.NSE_OS.H_BR), horizontalalignment='left', fontsize = 16,\n verticalalignment='top', transform=ax1.transAxes)\nlegend = ax1.legend(loc = 'lower right') # , frameon = True\nplt.title('($a$) Sensible heat flux', fontsize = 18, y = 1.02)\n\nplt.tick_params(\n which='both', # both major and minor ticks are affected\n top = 'off',\n right = 'off') # labels along the bottom edge are off\n \nax2 = fig.add_subplot(222, aspect = 1)\nLE_PT_plot = plt.plot(LE_obs, LE_mod_PT, 'o', color = pal[0], \n label = 'TSEB', lw = 1, ms = 10)\n\nLE_OS_plot = plt.plot(LE_obs, LE_mod_OS, 'v', color = pal_blue_dark[-3], \n label = 'OSEB', lw = 1, ms = 10)\n \nLE_mod = 
np.concatenate((LE_mod_PT, LE_mod_OS)) \nlim_max = max(np.ceil(LE_mod[~np.isnan(LE_mod)].max()/50)*50, \n np.ceil(LE_obs[~np.isnan(LE_obs)].max()/50)*50)\nlim_min = min(np.floor(LE_mod[~np.isnan(LE_mod)].min()/50)*50, \n np.floor(LE_obs[~np.isnan(LE_obs)].min()/50)*50)\nax2.set_xlim([lim_min, lim_max])\nax2.set_ylim([lim_min, lim_max])\nax2.xaxis.set_ticks(np.arange(lim_min,ax2.get_ylim()[1]+1,150))\nax2.xaxis.set_ticks([0, 100, 200, 300, 400])\nax2.yaxis.set_ticks(np.arange(lim_min,ax2.get_ylim()[1]+1,150))\nax2.yaxis.set_ticks([0, 100, 200, 300, 400])\n\nax2.plot([lim_min, lim_max], [lim_min, lim_max], '--', \n color = (0.75, 0.75, 0.75) , lw = 1, zorder = 1)\nax2.text(0.04, 0.96,'R$^2$$_{TSEB}$: %.2f\\nR$^2$$_{OSEB}$: %.2f' % (diff_Stats.NSE_PT.LE_BR, \n diff_Stats.NSE_OS.LE_BR), horizontalalignment='left', fontsize = 16,\n verticalalignment='top', transform=ax2.transAxes)\nlegend = ax2.legend(loc = 'lower right') # , frameon = True\nplt.title('($b$) Latent heat flux', fontsize = 18, y = 1.02)\n\nplt.tick_params(\n which='both', # both major and minor ticks are affected\n top = 'off',\n right = 'off') # labels along the bottom edge are off\n \nax3 = fig.add_subplot(223, aspect = 1)\nG_PT_plot = plt.plot(G_EC, G_mod_PT, 'o', color = pal[3], \n label = 'TSEB', lw = 1, ms = 10)\n\nG_OS_plot = plt.plot(G_EC, G_mod_OS, 'v', color = pal_orange_dark[-3], \n label = 'OSEB', lw = 1, ms = 10)\n \nG_mod = np.concatenate((G_mod_PT, G_mod_OS)) \nlim_max = max(np.ceil(G_mod[~np.isnan(G_mod)].max()/50)*50, \n np.ceil(G_EC[~np.isnan(G_EC)].max()/50)*50)\nlim_min = min(np.floor(G_mod[~np.isnan(G_mod)].min()/50)*50, \n np.floor(G_EC[~np.isnan(G_EC)].min()/50)*50)\nax3.set_xlim([lim_min, lim_max])\nax3.set_ylim([lim_min, lim_max])\nax3.xaxis.set_ticks(np.arange(lim_min,ax3.get_ylim()[1]+1,50))\nax3.yaxis.set_ticks(np.arange(lim_min,ax3.get_ylim()[1]+1,50)) \nax3.plot([lim_min, lim_max], [lim_min, lim_max], '--', \n color = (0.75, 0.75, 0.75) , lw = 1, zorder = 1)\nax3.text(0.04, 0.96,'R$^2$$_{TSEB}$: %.2f\\nR$^2$$_{OSEB}$: %.2f' % (diff_Stats.NSE_PT.G, \n diff_Stats.NSE_OS.G), horizontalalignment='left', fontsize = 16,\n verticalalignment='top', transform=ax3.transAxes)\n\nlegend = ax3.legend(loc = 'lower right') # , frameon = True\nplt.title('($c$) Soil heat flux', fontsize = 18, y = 1.02)\n\nplt.tick_params(\n which='both', # both major and minor ticks are affected\n top = 'off',\n right = 'off') # labels along the bottom edge are off\n\nax4 = fig.add_subplot(224, aspect = 1) \nRn_PT_plot = plt.plot(Rn_EC, Rn_mod_PT, 'o', color = pal[1], \n label = 'TSEB', lw = 1, ms = 10)\n\nRn_OS_plot = plt.plot(Rn_EC, Rn_mod_OS, 'v', color = pal_green_dark[-3], \n label = 'OSEB', lw = 1, ms = 10) \n \nRn_mod = np.concatenate((Rn_mod_PT, Rn_mod_OS)) \nlim_max = max(np.ceil(Rn_mod[~np.isnan(Rn_mod)].max()/50)*50, \n np.ceil(Rn_EC[~np.isnan(Rn_EC)].max()/50)*50)\nlim_min = min(np.floor(Rn_mod[~np.isnan(Rn_mod)].min()/50)*50, \n np.floor(Rn_EC[~np.isnan(Rn_EC)].min()/50)*50)\nax4.set_xlim([lim_min, lim_max])\nax4.set_ylim([lim_min, lim_max]) \nax4.xaxis.set_ticks(np.arange(lim_min,ax4.get_ylim()[1]+1,150))\nax4.yaxis.set_ticks(np.arange(lim_min,ax4.get_ylim()[1]+1,150)) \nax4.xaxis.set_ticks(np.arange(0, 601, 150))\nax4.yaxis.set_ticks(np.arange(0, 601, 150))\n \nax4.plot([lim_min, lim_max], [lim_min, lim_max], '--', \n color = (0.75, 0.75, 0.75) , lw = 1, zorder = 1)\nax4.text(0.04, 0.96,'R$^2$$_{TSEB}$: %.2f\\nR$^2$$_{OSEB}$: %.2f' % (diff_Stats.NSE_PT.Rn, \n diff_Stats.NSE_OS.Rn), horizontalalignment='left', fontsize 
= 16,\n verticalalignment='top', transform=ax4.transAxes)\nlegend = ax4.legend(loc = 'lower right') # , frameon = True\nplt.title('($d$) Net radiation', fontsize = 18, y = 1.02) \n\nplt.tick_params(\n which='both', # both major and minor ticks are affected\n top = 'off',\n right = 'off') # labels along the bottom edge are off\n\nplt.subplots_adjust(left = None, right=None, top=None, wspace=0.35, hspace=0.20) \n \nfig.savefig('D:\\Auswertung_Feldarbeiten_Pt_Nobressart\\TIR_Pro' + \\\n 'cessing\\FurtherAnalysis\\%s.eps' % outFile, format = 'eps', dpi = 1000)\n\nfig.savefig('D:\\Auswertung_Feldarbeiten_Pt_Nobressart\\TIR_Pro' + \\\n 'cessing\\FurtherAnalysis\\%s.png' % outFile)\n\nfig.savefig('D:\\Auswertung_Feldarbeiten_Pt_Nobressart\\TIR_Pro' + \\\n 'cessing\\FurtherAnalysis\\%s.tif' % outFile, dpi = 600)\n\n#%% \nFlux_deviations = pd.DataFrame(Fluxes.LE_OS - Fluxes.LE_BR, columns = ['Dev_LE_OS'])\nFlux_deviations['Dev_LE_PT'] = Fluxes.LE_PT - Fluxes.LE_BR\nFlux_deviations['Diff_LE_OS_PT'] = Flux_deviations.Dev_LE_PT - Flux_deviations.Dev_LE_OS\nFlux_deviations['LE_Flag'] = 'PT'\nFlux_deviations.loc[(Flux_deviations[\"Dev_LE_OS\"].abs() <= Flux_deviations[\"Dev_LE_PT\"].abs()), \"LE_Flag\"] = 'OS'\n\nFlux_deviations['Dev_H_OS'] = Fluxes.H_OS - Fluxes.H_BR \nFlux_deviations['Dev_H_PT'] = Fluxes.H_PT - Fluxes.H_BR \nFlux_deviations['Diff_H_OS_PT'] = Flux_deviations.Dev_H_PT - Flux_deviations.Dev_H_OS\nFlux_deviations['H_Flag'] = 'PT'\nFlux_deviations.loc[(Flux_deviations[\"Dev_H_OS\"].abs() <= Flux_deviations[\"Dev_H_PT\"].abs()), \"H_Flag\"] = 'OS'\n\nFlux_deviations['Dev_G_OS'] = Fluxes.G_OS - Fluxes.G \nFlux_deviations['Dev_G_PT'] = Fluxes.G_PT - Fluxes.G \nFlux_deviations['Diff_G_OS_PT'] = Flux_deviations.Dev_G_PT - Flux_deviations.Dev_G_OS\nFlux_deviations['G_Flag'] = 'PT'\nFlux_deviations.loc[(Flux_deviations[\"Dev_G_OS\"].abs() <= Flux_deviations[\"Dev_G_PT\"].abs()), \"G_Flag\"] = 'OS'\n\ndel [Rn_EC, Rn_mod_PT, Rn_mod_OS, G_EC, G_mod_PT, G_mod_OS, H_EC, LE_EC, \n H_EC_corr_EB, LE_EC_corr_EB, H_EC_corr_res,\n LE_EC_corr_res, H_Scinti, LE_Scinti, H_mod_PT, \n LE_mod_PT, H_mod_OS, LE_mod_OS, flights,\n H_mod, H_obs, LE_mod, LE_obs, G_mod, Rn_mod]\n \ndel [H_OS_plot, H_PT_plot, LE_OS_plot, LE_PT_plot, G_OS_plot, G_PT_plot,\n Rn_OS_plot, Rn_PT_plot, ax, ax1, ax2, ax3, ax4]\n \n ","sub_path":"plot_Fluxes_IJRS.py","file_name":"plot_Fluxes_IJRS.py","file_ext":"py","file_size_in_byte":20661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
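In compact form, the Bowen-ratio correction used above redistributes the closure gap by the evaporative fraction; a numeric check with made-up half-hourly fluxes:

Rn, G, H, LE = 500.0, 50.0, 150.0, 250.0  # made-up flux values, W m^-2
gap = Rn - G - LE - H                     # closure gap: 50
EF = LE / (LE + H)                        # evaporative fraction: 0.625
LE_BR, H_BR = LE + EF * gap, H + (1 - EF) * gap
print(LE_BR + H_BR == Rn - G)             # corrected turbulent fluxes close the balance: True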
+{"seq_id":"45467744","text":"\"\"\"\nORM models for MySQL DB.\n\n\"\"\"\nfrom datetime import datetime\n\nfrom peewee import (CharField,\n DateTimeField,\n ForeignKeyField,\n Model,\n MySQLDatabase,\n PrimaryKeyField)\n\nfrom etl_tool import settings as config\n\nMYSQL_HOST = config.DATABASE['MYSQL']['HOST']\nMYSQL_PORT = int(config.DATABASE['MYSQL']['PORT'])\nMYSQL_DATABASE = config.DATABASE['MYSQL']['DATABASE']\nMYSQL_USER = config.DATABASE['MYSQL']['USER']\nMYSQL_PASSWORD = config.DATABASE['MYSQL']['PASSWORD']\n\n\ndb_handle = MySQLDatabase(database=MYSQL_DATABASE,\n host=MYSQL_HOST,\n port=MYSQL_PORT,\n user=MYSQL_USER,\n password=MYSQL_PASSWORD)\n\n\nclass BaseModel(Model):\n class Meta:\n database = db_handle\n\n\nclass MakerDimension(BaseModel):\n uk = PrimaryKeyField(null=False)\n name = CharField(max_length=32)\n created_at = DateTimeField(default=datetime.now())\n updated_at = DateTimeField(default=datetime.now())\n\n class Meta:\n db_table = 'maker_dim'\n order_by = ('created_at',)\n\n\nclass ModelDimension(BaseModel):\n uk = PrimaryKeyField(null=False)\n maker_uk = ForeignKeyField(MakerDimension,\n related_name='fk_maker_model',\n to_field='uk',\n on_update='cascade',\n on_delete='cascade',\n db_column='maker_uk',\n null=False)\n name = CharField(max_length=32)\n created_at = DateTimeField(default=datetime.now())\n updated_at = DateTimeField(default=datetime.now())\n\n class Meta:\n db_table = 'model_dim'\n order_by = ('created_at',)\n","sub_path":"project/etl_tool/models/mysql_models.py","file_name":"mysql_models.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"47788609","text":"def full_install(package):\n import configparser\n import os\n import sys\n import installer\n\n config = configparser.ConfigParser()\n config.read('config.ini')\n platform = config['OS']['platform']\n cache_boolean =(\"True\" == config['Cache']['keep_cache'])\n cache_location = config['Cache']['cache_location']\n remote_url = config['Remote']['location']\n remote_branch = config['Remote']['location_branch']\n file_extension = config['Remote']['file_extension']\n\n full_file = package + file_extension\n file_url = fix_path(\n remote_url + 'packages-' + platform + '/'\n + remote_branch + '/scripts/' + full_file, platform)\n get_file(file_url, cache_location, full_file)\n return run_script(cache_location, full_file, cache_boolean, platform)\n\ndef get_file(file_url, cache_location, local_name):\n from urllib import request\n import shutil\n import os\n os.chdir(cache_location)\n with request.urlopen(file_url) as response, open(local_name, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n\ndef run_script(directory, file, cache, platform):\n import subprocess\n import os\n try:\n directory = fix_path(os.path.dirname(__file__) + '/' + directory, platform)\n os.chdir(directory)\n with open(file, 'r') as file_script:\n bashCommand = ''\n for line in file_script.readlines():\n if line[0] != '#':\n bashCommand += line\n bashCommand = bashCommand.replace('\\n', '; ')\n output =subprocess.call(\n bashCommand, stderr=subprocess.STDOUT, shell=True)\n if cache != True:\n os.remove(file)\n return output\n except (OSError, IOError, KeyError):\n return 'Issue Installing'\n if cache != True:\n os.remove(file)\n\ndef fix_path(path, platform):\n if platform == \"windows\":\n path.replace(\"/\", \"\\\\\")\n else:\n path.replace(\"\\\\\", \"/\")\n return path\n","sub_path":"installer.py","file_name":"installer.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"259683674","text":"import json\nfrom DataStructures import Queue\nfrom sms import send\n\n# there queue has to be declared globally (outside any other function)\n# that way all methods have access to it\nqueue = Queue(mode=\"FIFO\")\n#queue = Queue(mode=\"LIFO\")\n \ndef print_queue():\n # you must print on the console the entire queue list\n print(\"Printing the entire list...\")\n lista = queue.get_queue()\n y=1\n for x in lista:\n print(str(y) + \"-\" + x)\n y=y+1\n\ndef add():\n print(\"Ingresa el nombre del guevon que va a entrar en lista: \")\n usuario=input()\n cola=queue.enqueue(usuario)\n mensaje=\"Has sido agregado a la lista, tienes \" , str(len(cola) - 1), \" por delante\"\n send(mensaje)\n print(\"Has sido agregado a la lista, tienes \" , str(len(cola) - 1), \" por delante\")\n \n\ndef dequeue():\n cliente=queue.dequeue()\n print(\"has eliminados a el usuario\",cliente)\n mensaje='Le toca comer a :'+cliente\n send(mensaje)\n \ndef save():\n queue_file=queue.get_queue()\n file_to_save = open(\"queue.json\",\"w+\")\n file_to_save.write(json.dumps(queue_file))\n file_to_save.close()\n\n\ndef load():\n # pass\n file=open(\"queue.json\",\"r\")\n contenido=file.read()\n resultado = json.loads(contenido)\n queue._queue=resultado\n\n\n file.close()\n\n\n \nprint(\"\\nHello, this is the Command Line Interface for a Queue Managment application.\")\nstop = False\nwhile stop == False:\n \n print('''\nWhat would you like to do (type a number and press Enter)?\n- Type 1: For adding someone to the Queue.\n- Type 2: For removing someone from the Queue.\n- Type 3: For printing the current Queue state.\n- Type 4: To export the queue to the queue.json file.\n- Type 5: To import the queue from the queue.json file.\n- Type 6: To quit\n ''')\n\n option = int(input(\"Enter a number:\"))\n # add your options here using conditionals (if)\n\n if option == 3:\n print_queue()\n elif option== 1:\n add()\n elif option== 2:\n dequeue()\n elif option == 4:\n print(\"Guardando lista de espera\")\n save()\n elif option == 5:\n print(\"abriendo lista\")\n load()\n elif option == 6:\n print(\"Bye bye!\")\n stop = True\n else:\n print(\"Not implemented yet or invalid option \"+str(option))\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"220870875","text":"# Problem type: greedy\n\n# Problem\n# N students took part in KOI 2007.\n# On the orientation day before the contest, every student had to submit the rank\n# (out of the N participants) that they expected to achieve.\n\n# Kim Jin-young, the staff member in charge, accidentally deleted every student's submission.\n# Since ranks 1 through N must be assigned without ties, the staff member decided to assign\n# ranks arbitrarily, based only on the expected ranks the students handed in.\n\n# If a student expected rank A but is assigned rank B, that student's dissatisfaction is |A-B|.\n# The goal is to assign the ranks so that the total dissatisfaction of all N students is minimized.\n\n# Given everyone's expected rank, write a program that computes this minimum total dissatisfaction.\n\n# Input\n# The first line contains a natural number N (1 ≤ N ≤ 500,000). Each of the next N lines\n# contains one expected rank, a natural number not greater than 500,000.\n\n# Output\n# Print the minimum possible total dissatisfaction on the first line.\n\n# Sample input 1\n# 5\n# 1\n# 5\n# 3\n# 1\n# 2\n# Sample output 1\n# 3\n\n# Key idea\n## Minimize the difference between expected and actual ranks.\n## Sort the expected ranks in ascending order and compare each with its actual rank.\n\nn = int(input('Number of people: '))\narray = []\n\nfor aa in range(n):\n    array.append(int(input(f'Expected rank of person {aa+1}: ')))\n\n# sort in ascending order\narray.sort()\nprint(array)\n\n# compute the total dissatisfaction\nresult = 0\nfor i in range(1, len(array)+1):\n    result += abs(i-array[i-1])\n\nprint(result)\n\n# Greedy algorithms are often used together with sorting.\n","sub_path":"Algorithm/Algorithm_exercise/num2012.py","file_name":"num2012.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"469951198","text":"import hashlib\nimport json\nimport datetime\nimport calendar\n\nfrom django.db.models import Max\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import render, HttpResponse\nfrom django.shortcuts import redirect\nfrom django.http import StreamingHttpResponse, JsonResponse\nfrom django.urls import reverse\nfrom rent.models import rentOrder\nfrom . import forms\nfrom house.models import house\nfrom login.models import User\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import FileResponse\n\n\ndef search(request):\n if request.method == \"GET\":\n houses = request.session.get('houses',None)\n house_list = house.objects.all()\n flag = 1\n if houses:\n for i in houses:\n print(i)\n print(flag)\n if i != 0:\n if flag == 1:\n if i == 1:\n pass\n elif i == 2:\n house_list = house_list.filter(area='ChaoYang')\n elif i == 3:\n house_list = house_list.filter(area='HaiDian')\n elif i == 4:\n house_list = house_list.filter(area='ChangPing')\n elif flag == 2:\n if i == 1:\n house_list = house_list.filter(rental__lte=1000)\n elif i == 2:\n house_list = house_list.filter(rental__gt=1000, rental__lte=1500)\n elif i == 3:\n house_list = house_list.filter(rental__gt=1500, rental__lte=2000)\n elif i == 4:\n house_list = house_list.filter(rental__gt=2000, rental__lte=3000)\n elif i == 5:\n house_list = house_list.filter(rental__gt=3000)\n elif flag == 3:\n if i == 1:\n house_list = house_list.filter(type='single')\n elif i == 2:\n house_list = house_list.filter(type='double')\n elif i == 3:\n house_list = house_list.filter(type='triple')\n elif i == 4:\n house_list = house_list.filter(type='quad')\n elif flag == 4:\n if i == 1:\n house_list = house_list.filter(elevator=True)\n elif i == 2:\n house_list = house_list.filter(elevator=False)\n elif i == 0:\n flag += 1\n if flag == 6:\n keyword = request.session.get('keyword', None)\n print(keyword)\n if keyword:\n house_list = house.objects.filter(housename__contains=keyword)\n\n\n\n if request.method == \"POST\":\n house_list = house.objects.all()\n houses = []\n if 'area' in request.POST:\n areas = request.POST.getlist('area',[])\n print('areas')\n for i in range(0,len(areas)):\n print (areas[i])\n if areas[i] == '1':\n houses.append(1)\n elif areas[i] == '2':\n houses.append(2)\n house_list = house_list.filter(area='ChaoYang')\n elif areas[i] == '3':\n houses.append(3)\n house_list = house_list.filter(area='HaiDian')\n elif areas[i] == '4':\n houses.append(4)\n house_list = house_list.filter(area='ChangPing')\n houses.append(0)\n if 'rental' in request.POST:\n rentals = request.POST.getlist('rental',[])\n print('rentals')\n for i in range(0,len(rentals)):\n print(rentals[i])\n if rentals[i] == '1':\n houses.append(1)\n house_list = house_list.filter(rental__lte=1000)\n elif rentals[i] == '2':\n houses.append(2)\n house_list = house_list.filter(rental__gt=1000,rental__lte=1500)\n elif rentals[i] == '3':\n houses.append(3)\n house_list = house_list.filter(rental__gt=1500, rental__lte=2000)\n elif rentals[i] == '4':\n houses.append(4)\n house_list = house_list.filter(rental__gt=2000,rental__lte=3000)\n elif rentals[i] == '5':\n houses.append(5)\n house_list = house_list.filter(rental__gt=3000)\n houses.append(0)\n if 'type' in request.POST:\n types = request.POST.getlist('type',[])\n print('types')\n for i in range(0,len(types)):\n print(types[i])\n if types[i] == '1':\n houses.append(1)\n house_list = 
house_list.filter(type='single')\n            elif types[i] == '2':\n                houses.append(2)\n                house_list = house_list.filter(type='double')\n            elif types[i] == '3':\n                houses.append(3)\n                house_list = house_list.filter(type='triple')\n            elif types[i] == '4':\n                houses.append(4)\n                house_list = house_list.filter(type='quad')\n        houses.append(0)\n        if 'elevator' in request.POST:\n            elevators = request.POST.getlist('elevator', [])\n            print('elevators')\n            for i in range(0, len(elevators)):\n                print(elevators[i])\n                if elevators[i] == '1':\n                    houses.append(1)\n                    house_list = house_list.filter(elevator=True)\n                elif elevators[i] == '2':\n                    houses.append(2)\n                    house_list = house_list.filter(elevator=False)\n            houses.append(0)\n        if 'keyword' in request.POST:\n            keyword = request.POST.get('keyword')\n            print('keyword')\n            house_list = house.objects.filter(housename__contains=keyword)\n            request.session['keyword'] = keyword\n            houses.append(0)\n        request.session['houses'] = houses\n\n    house_list = house_list.order_by('id')\n    paginator = Paginator(house_list,2)\n    try:\n        page_num = request.GET.get('page',1)\n        page = paginator.page(page_num)\n    except PageNotAnInteger as e:\n        # page number is not an integer: return the first page\n        page = paginator.page('1')\n        page_num = 1\n    except EmptyPage as e:\n        # raised when the requested page number falls outside the valid page range\n        print('EmptyPage:{}'.format(e))\n        if int(page_num) > paginator.num_pages:\n            # above the range: return the last page\n            page = paginator.page(paginator.num_pages)\n        else:\n            # below the range: return the first page\n            page = paginator.page(1)\n\n    # keep the number of displayed page links at ten or fewer, even for large result sets\n    page_num = int(page_num)\n    if page_num < 6:\n        if paginator.num_pages <= 10:\n            pageRange = range(1, paginator.num_pages + 1)\n        else:\n            pageRange = range(1, 11)\n    elif (page_num >= 6) and (page_num <= paginator.num_pages - 5):\n        pageRange = range(page_num - 5, page_num + 5)\n    else:\n        pageRange = range(paginator.num_pages - 9, paginator.num_pages + 1)\n\n\n    return render(request,'rent/search.html',locals())\n\n@csrf_exempt\ndef pay(request,rentorder_id):\n    order = rentOrder.objects.get(id=rentorder_id)\n    return render(request, 'rent/pay.html', locals())\n#TODO: for long-term rentals, pay should prompt the tenant to download the contract; the review workflow is still open\n\n\ndef download_template(request):\n    file = open('static/files/hetong.docx', 'rb')\n    response = FileResponse(file)\n    response['Content-Type'] = 'application/octet-stream'\n    response['Content-Disposition'] = 'attachment;filename=\"hetong.docx\"'\n    return response\n","sub_path":"rent/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"416574398","text":"import os\nimport time\n\nvideoext = [\".3gp\", \".asf\", \".avi\", \".divx\", \".fiv\", \".swf\", \".mp4\", \".mpeg\", \".mpg\",\n            \".ogm\", \".wmv\", \".mov\", \".mkv\", \".nbr\", \".rm\", \".vob\", \".sfd\", \".webm\", \".xvid\"]\n\n\n\nlista_percorsi = []\ndef filechecker(path):\n    file_trovati = 0\n    if os.path.isdir(path) == True:\n        for cartella, sottocartella, files in os.walk(path):\n            for ext in videoext:\n                for file in files:\n                    if file.endswith(ext):\n                        pos = os.path.join(cartella, file)\n                        print(f\"Found {ext} file\")\n                        file_trovati += 1\n                        lista_percorsi.append(f'{pos}')\n                        f = open(\"cronologia.txt\", \"a\")\n                        f.write(f\"\\nFound {ext} file in: {pos}\")\n                        f.close()\n    else:\n        print(\"Path not found! Check it and try again!\")\n    print(f\"Files found: {file_trovati}\")\n    print(\"To see all the paths, open the 'cronologia.txt' file\")\n    # the original wrapped input() in a bare try/except, which could leave\n    # 'scelta' undefined on the next line; a plain input() avoids that\n    scelta = input(\"Do you want to remove them all? y/n: \")\n    if scelta.lower() == 'y':\n        for percorso in lista_percorsi:\n            try:\n                print(f\"Removing {percorso}\")\n                os.remove(percorso)\n                print(\"Done!\")\n            except PermissionError:\n                print(\"Permission denied.\")","sub_path":"cleanvideos.py","file_name":"cleanvideos.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"503877554","text":"from ruamel import yaml\r\nfrom subprocess import Popen, PIPE\r\nimport os\r\n\r\n\r\nCURRENT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\r\n\r\nclass JadeAnsible(object):\r\n\r\n\tdef __init__(self):\r\n\t\tself.base_path = CURRENT + '/tools/ansible/'\r\n\t\tself.host_path = self.base_path + 'host/'\r\n\t\tself.yaml_path = self.base_path + 'yml/'\r\n\t\tself.beat_path = self.yaml_path + 'filebeat/'\r\n\t\tself.pkgs_path = self.base_path + 'pkgs/'\r\n\t\tself.group_name = 'filebeats'\r\n\t\tself.home_dir = '/its/'\r\n\t\tself.script_file = 'getAppname.sh'\r\n\t\tself.package = 'filebeat-6.3.0-linux-x86_64'\r\n\t\tself.filebeat_inputs = []\r\n\t\tself.filebeat_overwrite = []\r\n\t\tself.filebeat_template = { \r\n\t\t\t\t'filebeat.prospectors': [],\r\n\t\t\t\t'output.kafka': \r\n\t\t\t\t\t{\r\n\t\t\t\t\t\t'hosts': 1, \r\n\t\t\t\t\t\t'topic': \"its_log\", \r\n\t\t\t\t\t\t'partition.round_robin': {'reachable_only': False}, \r\n\t\t\t\t\t\t'required_acks': 1, \r\n\t\t\t\t\t\t'compression': 'gzip', \r\n\t\t\t\t\t\t'max_message_bytes': 1000000\r\n\t\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\tself.filebeat_playbook = [\r\n\t\t\t{\r\n\t\t\t\t'name': 'setup filebeat', \r\n\t\t\t\t'hosts': 'filebeats', \r\n\t\t\t\t'remote_user': 'root', \r\n\t\t\t\t'tasks': []\r\n\t\t\t}\r\n\t\t]\r\n\r\n\tdef create_filebeat_kafka(self, kafkas):\r\n\t\tself.filebeat_template['output.kafka']['hosts'] = kafkas\r\n\t\t\r\n\tdef create_filebeat_input(self, path, topic, addr='127.0.0.1'):\r\n\r\n\t\tdata = {\r\n\t\t\t'input_type': 'log', \r\n\t\t\t'paths': \"{{%s}}\" % path,\r\n\t\t\t'multiline': { 'pattern': '^\\\\[', 'negate': True, 'match': 'after' }, \r\n\t\t\t'fields': { 'topic': '{{' + topic + '}}', 'ip_address': addr },\t \r\n\t\t\t'fields_under_root': True\r\n\t\t}\r\n\t\t\r\n\t\tself.filebeat_inputs.append(data)\r\n\r\n\r\n\tdef create_filebeat_script_upload(self, script_file):\r\n\t\tdata = {\r\n\t\t\t'name': 'upload script', \r\n\t\t\t'copy': 'src=' + self.pkgs_path + script_file + ' dest=' + self.home_dir + 'filebeat' + ' mode=0755',\r\n\t\t\t'ignore_errors': 'yes'\r\n\t\t}\r\n\r\n\t\tself.filebeat_playbook[0]['tasks'].append(data)\r\n\r\n\tdef create_filebeat_script_tasks(self, script_file):\r\n\r\n\t\tdata = {\r\n\t\t\t'name': 'get appname', \r\n\t\t\t'script': self.pkgs_path + script_file, \r\n\t\t\t'register': 'appname',\r\n\t\t\t'ignore_errors': 'yes'\r\n\t\t}\r\n\r\n\t\tself.filebeat_playbook[0]['tasks'].append(data)\r\n\r\n\tdef create_filebeat_home_tasks(self):\r\n\r\n\t\tdata = {\r\n\t\t\t'name': 'create ' + self.home_dir, \r\n\t\t\t'file': 'path=' + self.home_dir + ' state=directory mode=755'\r\n\t\t}\r\n\t\tself.filebeat_playbook[0]['tasks'].append(data)\r\n\r\n\tdef create_filebeat_unacrchive_tasks(self):\r\n\r\n\t\tdata = {\r\n\t\t\t'name': 'unarchive filebeat', \r\n\t\t\t'unarchive': 'src=' + self.pkgs_path + self.package + '.tar.gz dest=' + self.home_dir + ' creates=' + self.home_dir +'filebeat', \r\n\t\t\t'notify': 'change name'\r\n\t\t}\r\n\t\tself.filebeat_playbook[0]['tasks'].append(data)\r\n\r\n\tdef create_filebeat_meta_tasks(self):\r\n\t\tdata = {\r\n\t\t\t'meta': 'flush_handlers'\r\n\t\t}\r\n\t\tself.filebeat_playbook[0]['tasks'].append(data)\r\n\r\n\tdef create_filebeat_config_dir(self):\r\n\t\tdata = {\r\n\t\t\t'name': 'change dir name', \r\n\t\t\t'shell': 'cd ' + self.home_dir + ';mv ' + self.package + ' filebeat'\r\n\t\t}\r\n\t\tself.filebeat_playbook[0]['tasks'].append(data)\r\n\r\n\tdef create_filebeat_backup_tasks(self):\r\n\r\n\t\tdata = 
{\r\n\t\t\t'name': 'backup configfile', \r\n\t\t\t'shell': 'cp ' + self.home_dir + 'filebeat/filebeat.yml ' + self.home_dir +'filebeat/filebeat.yml_bak$(date +%Y%m%d-%H%M%S)',\r\n\t\t\t'ignore_errors': 'yes'\r\n\t\t}\r\n\t\tself.filebeat_playbook[0]['tasks'].append(data)\r\n\r\n\tdef generate_overwrite_tasks(self, hostname, addr):\r\n\t\ttemp_name = '%s_filebeat.yml' % hostname\r\n\t\tdata = {\r\n\t\t\t'name': 'overwrite configfile', \r\n\t\t\t'template': 'src=' + self.beat_path + temp_name + ' dest=' + self.home_dir + 'filebeat/filebeat.yml',\r\n\t\t\t'delegate_to': addr\r\n\t\t}\r\n\t\tself.filebeat_overwrite.append(data)\r\n\r\n\tdef create_filebeat_overwrite_tasks(self):\r\n\t\tfor data in self.filebeat_overwrite:\r\n\t\t\tself.filebeat_playbook[0]['tasks'].append(data)\r\n\r\n\tdef create_filebeat_autostart_tasks(self):\r\n\t\ttemp_name = 'filebeat'\r\n\t\tdatas = [\r\n\t\t\t{\r\n\t\t\t\t'name': 'create autostart', \r\n\t\t\t\t'template': 'src=' + self.beat_path + temp_name + ' dest=/etc/init.d/logcenter mode=775'\r\n\t\t\t},\r\n\t\t\t{\r\n\t\t\t\t'name': 'config autostart', \r\n\t\t\t\t'shell': '/bin/systemctl daemon-reload;/bin/systemctl enable logcenter.service',\r\n\t\t\t\t'ignore_errors': 'yes'\r\n\t\t\t},\r\n\t\t\t{\r\n\t\t\t\t'name': 'start filebeat', \r\n\t\t\t\t'shell': '/etc/init.d/logcenter start'\r\n\t\t\t}\r\n\t\t]\r\n\t\tfor data in datas:\r\n\t\t\tself.filebeat_playbook[0]['tasks'].append(data)\r\n\r\n\tdef create_filebeat_config(self, hostname):\r\n\t\tfor data in self.filebeat_inputs:\r\n\t\t\tself.filebeat_template['filebeat.prospectors'].append(data)\r\n\t\tfilename = (self.beat_path + '%s_filebeat.yml') % hostname\r\n\t\t# the with-statement closes the file, so no explicit close() is needed\r\n\t\twith open(filename, \"w\") as fp:\r\n\t\t\tyaml.dump(self.filebeat_template, fp, Dumper=yaml.RoundTripDumper)\r\n\r\n\tdef create_hosts(self, hosts_var, hosts='filebeathosts'):\r\n\t\tfilename = self.host_path + hosts\r\n\t\twith open(filename, \"w\") as fp:\r\n\t\t\tfp.writelines(\"[%s]\\n\" % self.group_name)\r\n\t\t\tfor config in hosts_var:\r\n\t\t\t\tfp.writelines(config + \"\\n\")\r\n\r\n\tdef create_filebeat_playbook(self):\r\n\t\tself.create_filebeat_home_tasks()\r\n\t\tself.create_filebeat_unacrchive_tasks()\r\n\t\tself.create_filebeat_config_dir()\r\n\t\tself.create_filebeat_script_upload(self.script_file)\r\n\t\tself.create_filebeat_script_tasks(self.script_file)\r\n\t\tself.create_filebeat_meta_tasks()\r\n\t\tself.create_filebeat_backup_tasks()\r\n\t\tself.create_filebeat_overwrite_tasks()\r\n\t\tself.create_filebeat_autostart_tasks()\r\n\r\n\t\tfilename = self.yaml_path + 'filebeat_playbook.yml'\r\n\t\twith open(filename, 'w') as fp:\r\n\t\t\tyaml.dump(self.filebeat_playbook, fp, Dumper=yaml.RoundTripDumper)\r\n\r\n\tdef run_playbook(self):\r\n\t\thosts = self.host_path + 'filebeathosts'\r\n\t\tplaybook = self.yaml_path + 'filebeat_playbook.yml'\r\n\t\tcommand = \"ansible-playbook %s -i %s\" % (playbook, hosts)\r\n\t\tproc = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)\r\n\t\tres, err = proc.communicate()\r\n\t\treturn (res, err)\r\n\r\n\tdef purge_task_file(self, hosts, playbook):\r\n\t\tpass\r\n\r\n\tdef parse_playbook(self, template):\r\n\t\ttry:\r\n\t\t\tdata = yaml.safe_load(template)\r\n\t\t\treturn data\r\n\t\texcept yaml.YAMLError:\r\n\t\t\t# a bare except here also swallowed KeyboardInterrupt and the like\r\n\t\t\treturn False","sub_path":"comlib/ansible.py","file_name":"ansible.py","file_ext":"py","file_size_in_byte":6026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
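+{"seq_id":"usage-sketch-jadeansible","text":"# Hypothetical usage sketch for the JadeAnsible helper above; not part of the\n# original project. All host addresses, log paths, and kafka endpoints are\n# made-up illustrations; only the method names and signatures come from the\n# class itself.\nfrom comlib.ansible import JadeAnsible\n\nja = JadeAnsible()\n# point the filebeat output at a (hypothetical) kafka cluster\nja.create_filebeat_kafka(['10.0.0.11:9092', '10.0.0.12:9092'])\n# register one log prospector; 'app_log_path'/'app_topic' are template variable names\nja.create_filebeat_input('app_log_path', 'app_topic', addr='10.0.0.21')\n# render the per-host config, the inventory, and the playbook itself\nja.create_filebeat_config('web01')\nja.generate_overwrite_tasks('web01', '10.0.0.21')\nja.create_hosts(['10.0.0.21 ansible_ssh_user=root'])\nja.create_filebeat_playbook()\n# finally shell out to ansible-playbook and collect stdout/stderr\nstdout, stderr = ja.run_playbook()\nprint(stdout.decode(errors='replace'))\n","sub_path":"examples/jade_ansible_usage.py","file_name":"jade_ansible_usage.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"usage-sketch","pt":"19"}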
+{"seq_id":"424252402","text":"# exixe modules: https://github.com/dekuNukem/exixe\n# python library docs: https://github.com/dekuNukem/exixe/tree/master/python_library\n# Demo 5: Loop digits on two tubes with crossfade animation\nimport exixe\nimport spidev\nimport time\n\nspi = spidev.SpiDev()\nspi.open(0, 0)\nspi.max_speed_hz = 7800000\ncs_pin_1 = 16\ncs_pin_2 = 32\n\nmy_tube_1 = exixe.Exixe(cs_pin_1, spi)\nmy_tube_2 = exixe.Exixe(cs_pin_2, spi)\n\ncount = 0\n# my_tube.crossfade_run is a non-blocking call, which lets the main loop handle other tasks\n# Ideally there is a 33ms delay per frame, but this needs to take into account all other tasks within the loop\nwhile True:\n    my_tube_1.set_led(127, 64, 0)  # Orange\n    my_tube_2.set_led(127, 0, 127)  # Purple\n\n    # Initialize the crossfade with the next digit and the desired number of frames.\n    my_tube_1.crossfade_init(count, 30)\n    my_tube_2.crossfade_init(10 - count, 30)\n    while my_tube_1.animation_in_progress and my_tube_2.animation_in_progress:\n        my_tube_1.crossfade_run()\n        my_tube_2.crossfade_run()\n        # 30 frames at a 33ms delay take about 1 second\n        time.sleep(0.033)\n\n    count = (count + 1) % 10\n","sub_path":"python_examples/5_multiple_tubes_crossfade/5_multiple_tubes_crossfade.py","file_name":"5_multiple_tubes_crossfade.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"486605239","text":"# creating tuples\na = (1,2,3)\nb = 1,2,3\nc = (1,)  # c is a tuple\nc = (1)   # c is an int\n\n# tuple(iterable)\na = tuple((1,2,3))\nb = tuple([1,2,3])\nc = tuple(range(3))\n# deletion\ndel c\n\n# tuple elements cannot be modified; list elements can\na = (20,10,5)\n#a[0] = 1\n#TypeError: 'tuple' object does not support item assignment\n\n# access tuple items via indexing and slicing\nprint (a[0:3:2])\n\n# sorting a tuple: sorted() builds a new object and leaves the original unchanged\nsorted(a)\n\n'''\nTuples are processed faster than lists;\nthey are immutable sequences;\ntuples can be used as dictionary keys, lists cannot.\n'''","sub_path":"Pycoding/1_tuple().py","file_name":"1_tuple().py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"538537709","text":"from telegram import Bot\nimport json\n\n\nclass TelegramApi(object):\n def __init__(self):\n self.__token_file = {}\n\n with open(\"../etc/tokens.json\") as file:\n for line in file.readlines():\n self.__token_file = json.loads(line.strip())\n\n self.bot = Bot(token=self.__token_file[\"telegram\"])\n\n\nclass TelegramMessaging(TelegramApi):\n def __init__(self):\n super().__init__()\n self.__answered_dates = []\n\n def send_message(self, chat_id, message_text):\n self.bot.sendMessage(chat_id=chat_id, text=message_text)\n\n def get_message(self):\n updates = self.bot.getUpdates()\n result = []\n\n for update in updates:\n if update.message.date not in self.__answered_dates:\n self.__answered_dates.append(update.message.date)\n result.append((update.message.chat_id, update.message.text))\n\n return result\n","sub_path":"src/libs/api/telegramapi.py","file_name":"telegramapi.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
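+{"seq_id":"usage-sketch-telegramapi","text":"# Hypothetical usage sketch for the TelegramMessaging class above; not part of\n# the original project. It assumes ../etc/tokens.json exists and contains\n# {\"telegram\": \"<bot token>\"}; the import path below mirrors the record's\n# sub_path and may differ depending on how src/ is laid out.\nimport time\nfrom libs.api.telegramapi import TelegramMessaging\n\ntm = TelegramMessaging()\nwhile True:\n    # echo every not-yet-answered message back to its sender\n    for chat_id, text in tm.get_message():\n        tm.send_message(chat_id, 'you said: ' + str(text))\n    time.sleep(2)  # simple polling interval\n","sub_path":"examples/telegram_echo.py","file_name":"telegram_echo.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"usage-sketch","pt":"19"}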
+{"seq_id":"145288686","text":"\"\"\"Write report about a GNSS velocity analysis run\n\nDescription:\n------------\n\n\n\n\"\"\"\n# Standard library imports\nfrom enum import Enum\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom typing import List, Tuple, Union\n\n# External library imports\nimport numpy as np\nimport pandas as pd\n\n# Midgard imports\nfrom midgard.collections import enums \nfrom midgard.dev import plugins\nfrom midgard.gnss import gnss\nfrom midgard.plot.matplotlib_extension import plot_scatter_subplots, plot\nfrom midgard.math import rotation\nfrom midgard.writers._writers import get_existing_fields_by_attrs, get_field_by_attrs\n\n# Where imports\nfrom where.data import dataset3 as dataset\nfrom where.lib import config\nfrom where.lib import log\nfrom where.writers._report import Report\n\n\nFIGURE_DPI = 200\nFIGURE_FORMAT = \"png\"\n\nPlotField = namedtuple(\"PlotField\", [\"name\", \"attrs\", \"unit\", \"ylabel\", \"caption\"])\nPlotField.__new__.__defaults__ = (None,) * len(PlotField._fields)\nPlotField.__doc__ = \"\"\"A convenience class for defining a output field for plotting\n\n Args:\n name (str): Unique name\n attrs (Tuple[str]): Dataset field attributes\n unit (str): Unit of field\n ylabel (str): Y-axis label description\n caption (str): Caption of plot\n \"\"\"\n\nFIELDS = (\n PlotField(\n \"gnss_range_rate\", (\"delay\", \"gnss_range_rate\"), \"m/s\", \"Range rate\", \"Correction of range between satellite and receiver\"\n ),\n PlotField(\n \"gnss_satellite_clock_rate\",\n (\"delay\", \"gnss_satellite_clock_rate\"),\n \"m/s\",\n \"Satellite clock rate\",\n \"Correction of satellite clock rate\",\n ),\n PlotField(\n \"gnss_earth_rotation_drift\",\n (\"delay\", \"gnss_earth_rotation_drift\"),\n \"m/s\",\n \"Earth rotation drift\",\n \"Correction of Earth rotation drift\",\n ),\n PlotField(\n \"gnss_relativistic_clock_rate\",\n (\"delay\", \"gnss_relativistic_clock_rate\"),\n \"m/s\",\n \"Relativistic clock rate\",\n \"Correction of relativistic clock rate effect due to orbit eccentricity\",\n ),\n # PlotField(\n # \"estimate_gnss_rcv_clock_rate\",\n # (\"estimate_gnss_rcv_clock_rate\",),\n # \"m\",\n # \"Receiver clock rate estimate\",\n # \"Estimate of receiver clock rate\",\n # ),\n)\n\n\n@plugins.register\ndef gnss_vel_report(dset: \"Dataset\") -> None:\n \"\"\"Write report about a GNSS velocity analysis run\n\n Args:\n dset: A dataset containing the data.\n \"\"\"\n file_vars = {**dset.vars, **dset.analysis}\n # TODO: Better solution?\n if \"station\" not in file_vars: # necessary if called for example by ./where/tools/concatenate.py\n file_vars[\"station\"] = \"\"\n file_vars[\"STATION\"] = \"\"\n\n # Generate figure directory to save figures generated for GNSS report\n figure_dir = config.files.path(\"output_gnss_vel_report_figure\", file_vars=file_vars)\n figure_dir.mkdir(parents=True, exist_ok=True)\n\n # Generate plots\n _plot_velocity(dset, figure_dir)\n _plot_residual(dset, figure_dir)\n _plot_number_of_satellites(dset, figure_dir)\n _plot_satellite_overview(dset, figure_dir)\n _plot_skyplot(dset, figure_dir)\n _plot_satellite_elevation(dset, figure_dir)\n _plot_model(dset, figure_dir)\n\n if \"pdop\" in dset.fields:\n _plot_dop(dset, figure_dir)\n\n # Generate GNSS velocity report\n path = config.files.path(\"output_gnss_vel_report\", file_vars=file_vars)\n with config.files.open_path(path, create_dirs=True, mode=\"wt\") as fid:\n rpt = Report(fid, rundate=dset.analysis[\"rundate\"], path=path, description=\"GNSS 
analysis\")\n        rpt.title_page()\n        rpt.write_config()\n        _add_to_report(dset, rpt, figure_dir)\n        rpt.markdown_to_pdf()\n\n\ndef _add_to_report(dset: \"Dataset\", rpt: \"Report\", figure_dir: \"pathlib.PosixPath\") -> None:\n    \"\"\"Add figures and tables to report\n\n    Args:\n        dset: A dataset containing the data.\n        rpt: Report object.\n        figure_dir: Figure directory.\n    \"\"\"\n\n    #\n    # Position\n    #\n    rpt.add_text(\"\\n# GNSS site velocity analysis\\n\\n\")\n\n    # Plot site velocity\n    rpt.add_figure(\n        f\"{figure_dir}/plot_timeseries_enu.{FIGURE_FORMAT}\",\n        caption=\"Site velocity in topocentric coordinates (East, North, Up).\",\n        clearpage=True,\n    )\n\n    # Plot horizontal error\n    rpt.add_figure(\n        f\"{figure_dir}/plot_horizontal_velocity.{FIGURE_FORMAT}\",\n        caption=\"Horizontal velocity\",\n        clearpage=True,\n    )\n\n    # Plot 3D timeseries\n    rpt.add_figure(\n        f\"{figure_dir}/plot_timeseries_pdop_hv_3d.{FIGURE_FORMAT}\",\n        caption=\"Horizontal, vertical and 3D velocity of site position\",\n        clearpage=True,\n    )\n\n    #\n    # Residual\n    #\n    rpt.add_text(\"\\n# GNSS residual\\n\\n\")\n\n    # Add outlier table\n    # MURKS: does not work at the moment. complement_with is not implemented in Dataset v3.\n    # MURKS rpt.write_dataframe_to_markdown(_table_outlier_overview(dset))\n\n    # Plot residuals\n    rpt.add_figure(\n        f\"{figure_dir}/plot_residual.{FIGURE_FORMAT}\",\n        # MURKScaption=\"Post-fit residuals, whereby the red dots represents the rejected outliers. The histogram represent only number of residuals from kept observations.\",\n        caption=\"Post-fit residuals.\",\n        clearpage=True,\n    )\n\n    #\n    # Dilution of precision (DOP)\n    #\n    if \"pdop\" in dset.fields:\n        rpt.add_text(\"\\n# Dilution of precision\\n\\n\")\n\n        # Plot DOP\n        rpt.add_figure(f\"{figure_dir}/plot_dop.{FIGURE_FORMAT}\", caption=\"Dilution of precision.\", clearpage=True)\n\n    #\n    # Satellite plots\n    #\n    rpt.add_text(\"\\n# Satellite plots\\n\\n\")\n\n    rpt.add_figure(\n        f\"{figure_dir}/plot_number_of_satellites.{FIGURE_FORMAT}\",\n        caption=\"Number of satellites for each observation epoch\",\n        clearpage=False,\n    )\n\n    figure_path = figure_dir / f\"plot_satellite_overview.{FIGURE_FORMAT}\"\n    if figure_path.exists(): # Note: Does not exist for concatenated Datasets.\n        rpt.add_figure(\n            figure_path,\n            caption=\"Overview of satellite observations. Red coloured: Observation rejected in orbit stage (e.g. 
unhealthy satellites, exceeding validity length, no orbit data available); Orange coloured: Observation rejected in edit stage; Green coloured: Kept observations after edit stage.\",\n clearpage=False,\n )\n\n rpt.add_figure(f\"{figure_dir}/plot_skyplot.{FIGURE_FORMAT}\", caption=\"Skyplot\", clearpage=False)\n\n rpt.add_figure(\n f\"{figure_dir}/plot_satellite_elevation.{FIGURE_FORMAT}\", caption=\"Satellite elevation\", clearpage=True\n )\n\n #\n # Model parameter plots\n #\n rpt.add_text(\"\\n# Plots of model parameters\\n\\n\")\n\n for f in get_existing_fields_by_attrs(dset, FIELDS):\n rpt.add_figure(f\"{figure_dir}/plot_{f.name}.{FIGURE_FORMAT}\", caption=f.caption, clearpage=False)\n\n\n#\n# PLOT FUNCTIONS\n#\ndef _plot_velocity(dset: \"Dataset\", figure_dir: \"pathlib.PosixPath\") -> None:\n \"\"\"Plot site velocity plots\n\n Args:\n dset: A dataset containing the data.\n figure_dir: Figure directory\n \"\"\"\n\n \n lat, lon, height = dset.site_pos.pos.llh.T\n vel_enu = np.squeeze(rotation.trs2enu(lat, lon) @ dset.site_vel[:,:,None]) \n\n plot_scatter_subplots(\n x_array=dset.time.gps.datetime,\n y_arrays=[vel_enu[:, 0], vel_enu[:, 1], vel_enu[:, 2]],\n xlabel=\"Time [GPS]\",\n ylabels=[\"East\", \"North\", \"Up\"],\n colors=[\"steelblue\", \"darkorange\", \"limegreen\"],\n y_units=[\"m/s\", \"m/s\", \"m/s\"],\n figure_path=figure_dir / f\"plot_timeseries_enu.{FIGURE_FORMAT}\",\n opt_args={\n \"figsize\": (6, 6.8),\n \"plot_to\": \"file\",\n \"sharey\": True,\n \"title\": \"Site velocity\",\n \"statistic\": [\"rms\", \"mean\", \"std\", \"min\", \"max\", \"percentile\"],\n },\n )\n\n vel_h = np.sqrt(vel_enu[:,0] ** 2 + vel_enu[:,1] ** 2) \n vel_v = np.absolute(vel_enu[:,2])\n #vel_3d = np.sqrt(vel_enu[:,0] ** 2 + vel_enu[:,1] ** 2 + vel_enu[:,2] ** 2)\n vel_3d = np.sqrt(dset.site_vel[:,0] ** 2 + dset.site_vel[:,1] ** 2 + dset.site_vel[:,2] ** 2)\n\n plot_scatter_subplots(\n x_array=dset.time.gps.datetime,\n y_arrays=[dset.pdop, vel_h, vel_v, vel_3d],\n xlabel=\"Time [GPS]\",\n ylabels=[\"PDOP\", \"HV\", \"VV\", \"3D\"],\n colors=[\"steelblue\", \"darkorange\", \"limegreen\", \"red\"],\n y_units=[None, \"m/s\", \"m/s\", \"m/s\"],\n figure_path=figure_dir / f\"plot_timeseries_pdop_hv_3d.{FIGURE_FORMAT}\",\n opt_args={\n \"figsize\": (7, 7),\n \"plot_to\": \"file\",\n \"sharey\": False,\n # \"title\": \"2D (horizontal) and 3D velocity\",\n \"statistic\": [\"rms\", \"mean\", \"std\", \"min\", \"max\", \"percentile\"],\n },\n )\n\n plot_scatter_subplots(\n x_array=vel_enu[:, 0],\n y_arrays=[vel_enu[:, 1]],\n xlabel=\"East [m/s]\",\n ylabels=[\"North\"],\n y_units=[\"m/s\"],\n figure_path=figure_dir / f\"plot_horizontal_velocity.{FIGURE_FORMAT}\",\n opt_args={\n \"grid\": True,\n \"figsize\": (6, 6),\n \"histogram\": \"x, y\",\n \"histogram_binwidth\": 0.002,\n \"plot_to\": \"file\",\n \"title\": \"Horizontal velocity\",\n \"xlim\": [-0.1, 0.1],\n \"ylim\": [-0.1, 0.1],\n },\n )\n\n\ndef _plot_residual(dset: \"Dataset\", figure_dir: \"pathlib.PosixPath\") -> None:\n \"\"\"Plot residual plot\n\n Args:\n dset: A dataset containing the data.\n figure_dir: Figure directory\n \"\"\"\n figure_path = figure_dir / f\"plot_residual.{FIGURE_FORMAT}\"\n dset_outlier = _get_outliers_dataset(dset)\n\n if dset_outlier == enums.ExitStatus.error:\n # NOTE: This is the case for concatencated Datasets, where \"calculate\" stage data are not available.\n log.warn(f\"No data for calculate stage available. 
No outliers are plotted in {figure_path}.\")\n x_arrays = [dset.time.gps.datetime]\n y_arrays = [dset.residual]\n colors = [\"dodgerblue\"]\n else:\n if dset_outlier.num_obs:\n x_arrays = [dset_outlier.time.gps.datetime, dset.time.gps.datetime]\n y_arrays = [dset_outlier.residual, dset.residual]\n colors = [\"red\", \"dodgerblue\"]\n else:\n log.debug(\"No outliers detected.\")\n x_arrays = [dset.time.gps.datetime]\n y_arrays = [dset.residual]\n colors = [\"dodgerblue\"]\n\n plot(\n x_arrays=x_arrays,\n y_arrays=y_arrays,\n xlabel=\"Time [GPS]\",\n ylabel=\"Post-fit residual\",\n y_unit=\"m/s\",\n colors=colors,\n figure_path=figure_path,\n opt_args={\n \"figsize\": (7, 4),\n \"histogram\": \"y\",\n \"histogram_size\": 0.8,\n \"histogram_binwidth\": 0.002,\n \"plot_to\": \"file\",\n \"statistic\": [\"rms\", \"mean\", \"std\", \"min\", \"max\", \"percentile\"],\n },\n )\n\n\ndef _plot_dop(dset: \"Dataset\", figure_dir: \"pathlib.PosixPath\") -> None:\n \"\"\"Plot DOP\n\n Args:\n dset: A dataset containing the data.\n figure_dir: Figure directory\n \"\"\"\n plot(\n x_arrays=[\n dset.time.gps.datetime,\n dset.time.gps.datetime,\n dset.time.gps.datetime,\n dset.time.gps.datetime,\n dset.time.gps.datetime,\n ],\n y_arrays=[dset.gdop, dset.pdop, dset.vdop, dset.hdop, dset.tdop],\n xlabel=\"Time [GPS]\",\n ylabel=\"Dilution of precision\",\n y_unit=\"\",\n labels=[\"GDOP\", \"PDOP\", \"VDOP\", \"HDOP\", \"TDOP\"],\n figure_path=figure_dir / f\"plot_dop.{FIGURE_FORMAT}\",\n opt_args={\"figsize\": (7, 4), \"legend\": True, \"plot_to\": \"file\"},\n )\n\n\ndef _plot_number_of_satellites(dset: \"Dataset\", figure_dir: \"pathlib.PosixPath\") -> None:\n \"\"\"Plot number of satellites\n\n Args:\n dset: A dataset containing the data.\n figure_dir: Figure directory\n \"\"\"\n\n if \"num_satellite_used\" not in dset.fields:\n dset.add_float(\n \"num_satellite_used\",\n val=gnss.get_number_of_satellites(dset.system, dset.satellite, dset.time.gps.datetime),\n write_level=\"detail\",\n )\n\n plot(\n x_arrays=[dset.time.gps.datetime, dset.time.gps.datetime],\n y_arrays=[dset.num_satellite_available, dset.num_satellite_used],\n xlabel=\"Time [GPS]\",\n ylabel=\"Number of satellites\",\n y_unit=\"\",\n labels=[\"Available\", \"Used\"],\n figure_path=figure_dir / f\"plot_number_of_satellites.{FIGURE_FORMAT}\",\n opt_args={\"figsize\": (7, 4), \"legend\": True, \"marker\": \",\", \"plot_to\": \"file\", \"plot_type\": \"plot\"},\n )\n\n\ndef _plot_skyplot(dset: \"Dataset\", figure_dir: \"pathlib.PosixPath\") -> None:\n \"\"\"Plot skyplot\n\n Args:\n dset: A dataset containing the data.\n figure_dir: Figure directory\n \"\"\"\n\n # Convert azimuth to range 0-360 degree\n azimuth = dset.site_pos.azimuth\n idx = azimuth < 0\n azimuth[idx] = 2 * np.pi + azimuth[idx]\n\n # Convert zenith distance from radian to degree\n zenith_distance = np.rad2deg(dset.site_pos.zenith_distance)\n\n # Generate x- and y-axis data per satellite\n x_arrays = []\n y_arrays = []\n labels = []\n for sat in dset.unique(\"satellite\"):\n idx = dset.filter(satellite=sat)\n x_arrays.append(azimuth[idx])\n y_arrays.append(zenith_distance[idx])\n labels.append(sat)\n\n # Plot with polar projection\n # TODO: y-axis labels are overwritten after second array plot. Why? 
What to do?\n    plot(\n        x_arrays=x_arrays,\n        y_arrays=y_arrays,\n        xlabel=\"\",\n        ylabel=\"\",\n        y_unit=\"\",\n        labels=labels,\n        figure_path=figure_dir / f\"plot_skyplot.{FIGURE_FORMAT}\",\n        opt_args={\n            \"colormap\": \"tab20\",\n            \"figsize\": (7, 7.5),\n            \"legend\": True,\n            \"legend_ncol\": 6,\n            \"legend_location\": \"bottom\",\n            \"plot_to\": \"file\",\n            \"plot_type\": \"scatter\",\n            \"projection\": \"polar\",\n            \"title\": \"Skyplot\\n Azimuth [deg] / Elevation [deg]\",\n            \"xlim\": [0, 2 * np.pi],\n            \"ylim\": [0, 90],\n            \"yticks\": (range(0, 90, 30)), # sets 3 concentric circles\n            \"yticklabels\": (map(str, range(90, 0, -30))), # reverse labels from zenith distance to elevation\n        },\n    )\n\n\ndef _plot_satellite_elevation(dset: \"Dataset\", figure_dir: \"pathlib.PosixPath\") -> None:\n    \"\"\"Plot satellite elevation\n\n    Args:\n        dset: A dataset containing the data.\n        figure_dir: Figure directory\n    \"\"\"\n\n    # Convert elevation from radian to degree\n    elevation = np.rad2deg(dset.site_pos.elevation)\n\n    # Limit x-axis range to rundate\n    day_start, day_end = _get_day_limits(dset)\n\n    # Generate x- and y-axis data per satellite\n    x_arrays = []\n    y_arrays = []\n    labels = []\n\n    for sat in dset.unique(\"satellite\"):\n        idx = dset.filter(satellite=sat)\n        x_arrays.append(dset.time.gps.datetime[idx])\n        y_arrays.append(elevation[idx])\n        labels.append(sat)\n\n    # Plot with scatter plot\n    plot(\n        x_arrays=x_arrays,\n        y_arrays=y_arrays,\n        xlabel=\"Time [GPS]\",\n        ylabel=\"Elevation [deg]\",\n        y_unit=\"\",\n        labels=labels,\n        figure_path=figure_dir / f\"plot_satellite_elevation.{FIGURE_FORMAT}\",\n        opt_args={\n            \"colormap\": \"tab20\",\n            \"figsize\": (7, 8),\n            \"legend\": True,\n            \"legend_ncol\": 6,\n            \"legend_location\": \"bottom\",\n            \"plot_to\": \"file\",\n            \"plot_type\": \"scatter\",\n            \"title\": \"Satellite elevation\",\n            \"xlim\": [day_start, day_end],\n        },\n    )\n\n\ndef _plot_satellite_overview(dset: \"Dataset\", figure_dir: \"pathlib.PosixPath\") -> Union[None, Enum]:\n    \"\"\"Plot satellite observation overview\n\n    Args:\n        dset: A dataset containing the data.\n        figure_dir: Figure directory\n    \n    Returns:\n        Error exit status if necessary datasets could not be read\n    \"\"\"\n    figure_path = figure_dir / f\"plot_satellite_overview.{FIGURE_FORMAT}\"\n\n    # Limit x-axis range to rundate\n    day_start, day_end = _get_day_limits(dset)\n\n    # Get time and satellite data from read and orbit stage\n    file_vars = {**dset.vars, **dset.analysis}\n    file_vars[\"stage\"] = \"read\"\n    file_path = config.files.path(\"dataset\", file_vars=file_vars)\n    if file_path.exists(): \n        time_read, satellite_read = _sort_by_satellite(\n            _get_dataset(dset, stage=\"read\", systems=dset.meta[\"obstypes\"].keys())\n        )\n        time_orbit, satellite_orbit = _sort_by_satellite(\n            _get_dataset(dset, stage=\"orbit\", systems=dset.meta[\"obstypes\"].keys())\n        )\n        time_edit, satellite_edit = _sort_by_satellite(\n            _get_dataset(dset, stage=\"edit\", systems=dset.meta[\"obstypes\"].keys())\n        )\n    \n    else:\n        # NOTE: This is the case for concatenated Datasets, where \"read\" and \"edit\" stage data are not available.\n        log.warn(f\"Read dataset does not exist: {file_path}. 
Plot {figure_path} cannot be plotted.\")\n        return enums.ExitStatus.error\n\n    # Generate plot\n    plot(\n        x_arrays=[time_read, time_orbit, time_edit],\n        y_arrays=[satellite_read, satellite_orbit, satellite_edit],\n        xlabel=\"Time [GPS]\",\n        ylabel=\"Satellite\",\n        y_unit=\"\",\n        # labels = [\"Rejected in orbit stage\", \"Rejected in edit stage\", \"Kept observations\"],\n        colors=[\"red\", \"orange\", \"green\"],\n        figure_path=figure_path,\n        opt_args={\n            \"colormap\": \"tab20\",\n            \"figsize\": (7, 6),\n            \"marker\": \"|\",\n            \"plot_to\": \"file\",\n            \"plot_type\": \"scatter\",\n            \"title\": \"Overview over satellites\",\n            \"xlim\": [day_start, day_end],\n        },\n    )\n\n\ndef _plot_model(dset: \"Dataset\", figure_dir: \"pathlib.PosixPath\") -> None:\n    \"\"\"Plot model parameters\n\n    Args:\n        dset: A dataset containing the data.\n        figure_dir: Figure directory\n    \"\"\"\n\n    # Limit x-axis range to rundate\n    day_start, day_end = _get_day_limits(dset)\n\n    for f in get_existing_fields_by_attrs(dset, FIELDS):\n\n        # Generate x- and y-axis data per satellite\n        x_arrays = []\n        y_arrays = []\n        labels = []\n\n        for sat in dset.unique(\"satellite\"):\n            idx = dset.filter(satellite=sat)\n            x_arrays.append(dset.time.gps.datetime[idx])\n            y_arrays.append(get_field_by_attrs(dset, f.attrs, f.unit)[idx])\n            labels.append(sat)\n\n        # Plot with scatter plot\n        plot(\n            x_arrays=x_arrays,\n            y_arrays=y_arrays,\n            xlabel=\"Time [GPS]\",\n            ylabel=f.ylabel,\n            y_unit=f.unit,\n            labels=labels,\n            figure_path=figure_dir / f\"plot_{f.name}.{FIGURE_FORMAT}\",\n            opt_args={\n                \"colormap\": \"tab20\",\n                \"figsize\": (7, 6),\n                \"legend\": True,\n                \"legend_ncol\": 6,\n                \"legend_location\": \"bottom\",\n                \"plot_to\": \"file\",\n                \"plot_type\": \"scatter\",\n                \"statistic\": [\"rms\", \"mean\", \"std\", \"min\", \"max\", \"percentile\"],\n                \"xlim\": [day_start, day_end],\n            },\n        )\n\n\n#\n# TABLE GENERATION FUNCTIONS\n#\ndef _table_outlier_overview(dset: \"Dataset\"):\n    \"\"\"Generate Dataframe table with an overview of the number of outliers per satellite\n\n    Args:\n        dset: A dataset containing the data.\n\n    Returns:\n        Dataframe with satellites as indices and following columns:\n\n        | Name        | Description                                                                                    |\n        |-------------|------------------------------------------------------------------------------------------------|\n        | outlier     | Number of outliers for each satellite                                                          |\n\n\n    Example:\n\n        |    |outlier | \n        |----|--------|\n        | G01|      0 |\n        | G02|     11 |\n        | G03|      3 |\n        | .. |    ... |\n        | SUM|     42 |\n\n    \"\"\"\n    columns = [\"outlier\"]\n    df = pd.DataFrame(columns=columns)\n\n    dset_outlier = _get_outliers_dataset(dset)\n    if dset_outlier == enums.ExitStatus.error:\n        # NOTE: This is the case for concatenated Datasets, where \"calculate\" stage data are not available.\n        log.warn(f\"No data for calculate stage available. Outliers cannot be detected.\")\n        return df\n\n    if not dset_outlier.num_obs:\n        # the original tested 'if dset_outlier.num_obs', which returned early\n        # whenever outliers *were* present and left the table always empty\n        log.debug(\"No outlier detected.\")\n        return df\n\n    for satellite in sorted(dset.unique(\"satellite\")):\n        idx = dset_outlier.filter(satellite=satellite)\n        row = [len(dset_outlier.satellite[idx])]\n        df = df.append(pd.DataFrame([row], columns=columns, index=[satellite]))\n\n    df = df.append(pd.DataFrame([[len(dset_outlier.satellite)]], columns=columns, index=[\"**SUM**\"]))\n\n    return df\n\n\n#\n# AUXILIARY FUNCTIONS\n#\ndef _get_day_limits(dset: \"Dataset\") -> Tuple[datetime, datetime]:\n    \"\"\"Get start and end time for given run date\n\n    Args:\n        dset: A dataset containing the data.\n\n    Returns:\n        Start and end date. 
\n    \"\"\"\n    day_start = min(dset.time.datetime)\n    day_end = max(dset.time.datetime)\n\n    return day_start, day_end\n\n\ndef _get_outliers_dataset(dset: \"Dataset\") -> Union[\"Dataset\", Enum]:\n    \"\"\"Get dataset with outliers\n\n    Args:\n        dset: A dataset containing the data.\n\n    Returns:\n        Dataset with outliers or error exit status if no data for \"calculate\" stage are available\n    \"\"\"\n\n    # Get Dataset where no outliers are rejected\n    file_vars = {**dset.vars, **dset.analysis}\n    file_vars[\"stage\"] = \"calculate\"\n\n    try:\n        dset_complete = dataset.Dataset.read(**file_vars)\n    except OSError:\n        log.warn(f\"Could not read dataset {config.files.path('dataset', file_vars=file_vars)}.\")\n        return enums.ExitStatus.error\n\n    # Get relative complement, which corresponds to \"outlier\" dataset\n    # dset_outliers = dset_complete.complement_with(dset, complement_by=[\"time\", \"satellite\"])\n    dset_outliers = dataset.Dataset(num_obs=0)  # MURKS: complement_with does not exist so far in Dataset v3.\n\n    return dset_outliers\n\n\ndef _get_dataset(dset: \"Dataset\", stage: str, systems: Union[List[str], None] = None) -> Union[\"Dataset\", Enum]:\n    \"\"\"Get dataset for given stage\n\n    Args:\n        dset: A dataset containing the data.\n        stage: Analysis stage (e.g. \"read\", \"orbit\", \"edit\")\n        systems: List with GNSS identifiers (e.g. E, G, ...)\n\n    Returns:\n        Dataset for given stage or error exit status if dataset could not be read\n    \"\"\"\n\n    # Get Dataset\n    # TODO: \"label\" should have a default value.\n    file_vars = {**dset.vars, **dset.analysis}\n    file_vars[\"stage\"] = stage\n    try:\n        dset_out = dataset.Dataset.read(**file_vars)\n    except OSError:\n        log.warn(f\"Could not read dataset {config.files.path('dataset', file_vars=file_vars)}.\")\n        return enums.ExitStatus.error\n\n    # Reject not defined GNSS observations\n    if systems:\n        systems = [systems] if isinstance(systems, str) else systems\n        keep_idx = np.zeros(dset_out.num_obs, dtype=bool)\n        for sys in systems:\n            idx = dset_out.filter(system=sys)\n            keep_idx[idx] = True\n        dset_out.subset(keep_idx)\n\n    return dset_out\n\n\ndef _sort_by_satellite(dset: \"Dataset\") -> Tuple[List[datetime], List[str]]:\n    \"\"\"Sort time and satellite fields of dataset by satellite order\n\n    Args: \n        dset: A dataset containing the data.\n\n    Returns:\n        Tuple with ordered time array and satellite array\n    \"\"\"\n    time = []\n    satellite = []\n    for sat in sorted(dset.unique(\"satellite\"), reverse=True):\n        idx = dset.filter(satellite=sat)\n        time.extend(dset.time.gps.datetime[idx])\n        satellite.extend(dset.satellite[idx])\n\n    return time, satellite\n","sub_path":"where/writers/gnss_vel_report.py","file_name":"gnss_vel_report.py","file_ext":"py","file_size_in_byte":24087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"358451349","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\nimport time\nimport codecs\n\nimport torch\nimport torchtext.data as data\n# import numpy as np\n# import matplotlib\n# matplotlib.use('Agg')\n# import matplotlib.pyplot as plt\n# from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score\n#\n#\n# def train(train_iter, val_iter, model, args, sentence_field, label_field):\n# start_time = time.time()\n# optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n# loss_func = torch.nn.CrossEntropyLoss()\n#\n# steps = 0\n# best_acc = 0\n# last_step = 0\n# model.train()\n#\n# plot_steps = list()\n# plot_losses = list()\n# plot_accs = list()\n# annotate_best_step = 0\n# annotate_best_acc = 0\n#\n# for epoch in range(1, args.epochs + 1):\n# for batch in train_iter:\n# sentence1 = batch.sentence1\n# sentence2 = batch.sentence2\n# target = batch.label\n# sentence1 = sentence1.t_()\n# sentence2 = sentence2.t_()\n# # target = target.t_()\n#\n# model.batch_size = batch.batch_size\n#\n# if args.cuda:\n# sentence1 = sentence1.cuda()\n# sentence2 = sentence2.cuda()\n#\n# # print(sentence1.data.size())\n# # print(sentence2.data.size())\n# # print(sentence1_lengths.data)\n# # orig_text = sentence_field.reverse(sentence1.data)\n# # print(type(orig_text), len(orig_text))\n# # print(orig_text[0])\n# # print(target.data)\n#\n# optimizer.zero_grad()\n# logit = model(sentence1, sentence2)\n# loss = loss_func(logit, target)\n# # print(loss)\n# loss.backward()\n# optimizer.step()\n#\n# steps += 1\n# if steps % args.log_interval == 0:\n# plot_losses.append(loss.data[0].item())\n# plot_steps.append(steps)\n#\n# target = target.data\n# logit = torch.max(logit, 1)[1].data\n# accuracy = accuracy_score(target, logit)\n# # recall = recall_score(target, logit)\n# # precision = precision_score(target, logit)\n# f1 = f1_score(target, logit)\n# plot_accs.append(accuracy)\n# sys.stdout.write(\n# '\\rBatch[{}-{}] - loss: {:.6f} acc: {} f1: {} time: {}s'.format(\n# epoch, steps, loss.data[0].item(), accuracy, f1, time.time()-start_time))\n# if steps % args.test_interval == 0:\n# dev_acc = eval(val_iter, model, args)\n# if dev_acc > best_acc:\n# best_acc = dev_acc\n# last_step = steps\n# annotate_best_acc = best_acc\n# annotate_best_step = last_step\n# if args.save_best:\n# save(model, args.save_dir, 'best', steps)\n# else:\n# if steps - last_step >= args.early_stop:\n# print('early stop by {} steps.'.format(args.early_stop))\n# elif steps % args.save_interval == 0:\n# save(model, args.save_dir, 'snapshot', steps)\n#\n# plt.figure()\n# x = np.array(plot_steps)\n# y1 = np.array(plot_losses)\n# y2 = np.array(plot_accs)\n#\n# plt.subplot(211)\n# plt.plot(x, y1, 'r-')\n# plt.xlabel('step')\n# plt.ylabel('loss')\n#\n# plt.subplot(212)\n# plt.plot(x, y2, 'b-')\n# plt.annotate('best_acc {}'.format(annotate_best_acc), xy=(annotate_best_step, annotate_best_acc),\n# xytext=(annotate_best_step - 100, annotate_best_acc - 100),\n# arrowprops=dict(facecolor='black', shrink=0.05), )\n# plt.xlabel('step')\n# plt.ylabel('accuracy')\n#\n# plt.savefig('./img/bcdssm_acc_epoch{}.jpg'.format(epoch))\n\n\ndef eval(data_iter, model, args):\n start_time = time.time()\n model.eval()\n loss_func = torch.nn.CrossEntropyLoss()\n logits = []\n targets = []\n\n corrects, avg_loss = 0, 0\n for batch in data_iter:\n sentence1 = batch.sentence1\n sentence2 = batch.sentence2\n 
target = batch.label\n sentence1 = sentence1.t_()\n sentence2 = sentence2.t_()\n\n model.batch_size = batch.batch_size\n\n if args.cuda:\n sentence1 = sentence1.cuda()\n sentence2 = sentence2.cuda()\n\n logit = model(sentence1, sentence2)\n\n loss = loss_func(logit, target)\n\n avg_loss += loss.data[0]\n\n logit = torch.max(logit, 1)[1].data.tolist()\n target = target.data.tolist()\n\n logits.extend(logit)\n targets.extend(target)\n\n f1 = f1_score(targets, logits)\n accuracy = accuracy_score(targets, logits)\n\n size = len(data_iter.dataset)\n avg_loss /= size\n print('\\nEvaluation - loss: {:.6f} acc: {} f1: {} time: {}s\\n'.format(\n avg_loss, accuracy, f1, time.time()-start_time))\n return f1\n\n\ndef predict(inpath, outpath, model, sentence_field, id_field, cuda_flag):\n predict_data = data.TabularDataset(inpath, format='tsv',\n fields=[(\"id\", id_field),\n ('sentence1', sentence_field),\n ('sentence2', sentence_field)])\n batch_size = 64\n # print('DATA_SIZE={}'.format(len(predict_data)))\n # print('BATCH_SIZE={}'.format(batch_size))\n\n predict_iter = data.Iterator(predict_data, batch_size=batch_size, device=-1, repeat=False, shuffle=False)\n steps = 0\n for batch in predict_iter:\n sentence1 = batch.sentence1\n sentence2 = batch.sentence2\n # orig_sent1 = sentence_field.reverse(sentence1)\n # orig_sent2 = sentence_field.reverse(sentence2)\n sentence1 = sentence1.t_()\n sentence2 = sentence2.t_()\n if cuda_flag:\n sentence1 = sentence1.cuda()\n sentence2 = sentence2.cuda()\n model.batch_size = batch.batch_size\n\n logit = model(sentence1, sentence2)\n\n steps += 1\n # sys.stdout.write('\\rBatch[{}]'.format(steps))\n\n idx = batch.id.tolist()\n predict_label = torch.max(logit, 1)[1].data.tolist()\n results = [str(i)+'\\t'+str(l)+'\\n' for i, l in zip(idx, predict_label)]\n # print(results)\n with codecs.open(outpath, 'a', 'utf-8') as fw:\n fw.writelines(results)\n\n\ndef save(model, save_dir, save_prefix, steps):\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n save_prefix = os.path.join(save_dir, save_prefix)\n save_path = '{}_steps_{}.pt'.format(save_prefix, steps)\n torch.save(model.state_dict(), save_path)\n","sub_path":"bcdssm/train_bcdssm.py","file_name":"train_bcdssm.py","file_ext":"py","file_size_in_byte":6948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"465693330","text":"import math\nfrom functools import lru_cache\n\nfrom util.hexboard import HexBoard\n\nclass HexEvalMethod:\n\n    # the original spelled this '__init' (single trailing underscore pair\n    # missing), so it never ran as a constructor\n    def __init__(self, eval_method):\n        self.eval_method = eval_method\n\n    @lru_cache(maxsize=8)\n    def distance_to(self, color_b, opposite_color):\n        \"\"\"Returns the distance between the two provided colors, this is the vertex cost for Dijkstra\"\"\"\n        if color_b == opposite_color:\n            return math.inf\n        elif color_b == HexBoard.EMPTY:\n            return 1\n        else:\n            return 0\n\n    def evaluate_board(self, board, color):\n        winner = board.get_winner()\n        if winner is not None: return HexBoard.get_reward(color, winner) * 1000\n        \n        player_sp = self.find_shortest_path_to_border(board, color)\n        opponent_sp = self.find_shortest_path_to_border(board, HexBoard.get_opposite_color(color))\n\n        if player_sp == math.inf: player_sp = 0\n        if opponent_sp == math.inf: opponent_sp = 0\n\n        return -(player_sp - opponent_sp)\n\n    def get_score(self, board, from_coord, target_coords, color, opposite_color):\n        raise NotImplementedError\n\n    def find_shortest_path_to_border(self, board, color):\n        \"\"\"Returns the length of the shortest possible path to the border for the specified color\"\"\"\n        source_coords = board.source_coords[color]\n        target_coords = board.target_coords[color]\n        opposite_color = HexBoard.get_opposite_color(color)\n        \n        min_score = board.size**2\n\n        for from_coord in source_coords:\n            if board.get_color(from_coord) == opposite_color:\n                continue\n\n            score = self.get_score(board, from_coord, target_coords, color, opposite_color)\n\n            if score < min_score:\n                min_score = score\n\n        return min_score\n","sub_path":"a4/evaluate/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
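+{"seq_id":"usage-sketch-hexeval","text":"# Hypothetical subclass sketch showing the get_score() contract of the\n# HexEvalMethod base class above; not part of the original project. It relaxes\n# vertex costs from distance_to() over the board. 'board.get_neighbors(coord)'\n# is an assumed HexBoard API -- the real class may expose neighbors differently.\nfrom collections import deque\n\nclass RelaxingEval(HexEvalMethod):\n    def get_score(self, board, from_coord, target_coords, color, opposite_color):\n        # cost of standing on the starting hex\n        start_cost = self.distance_to(board.get_color(from_coord), opposite_color)\n        best = {from_coord: start_cost}\n        todo = deque([from_coord])\n        while todo:\n            coord = todo.popleft()\n            for nb in board.get_neighbors(coord):  # assumed API\n                cost = best[coord] + self.distance_to(board.get_color(nb), opposite_color)\n                if cost < best.get(nb, float('inf')):\n                    best[nb] = cost\n                    todo.append(nb)  # re-relax neighbours of improved nodes\n        return min((best.get(t, float('inf')) for t in target_coords), default=float('inf'))\n","sub_path":"examples/hex_eval_sketch.py","file_name":"hex_eval_sketch.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"usage-sketch","pt":"19"}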
+{"seq_id":"361016542","text":"import os\n\nfrom dvc.command.common.base import CmdBase\nfrom dvc.logger import Logger\nfrom dvc.state_file import StateFile\nfrom dvc.path.data_item import DataItem\n\n\nclass CmdAdd(CmdBase):\n def __init__(self, settings):\n super(CmdAdd, self).__init__(settings)\n\n def collect_file(self, fname):\n return [self.settings.path_factory.data_item(fname)]\n\n def collect_dir(self, dname):\n targets = []\n for root, dirs, files in os.walk(dname):\n for fname in files:\n targets += self.collect_file(os.path.join(root, fname))\n return targets\n\n def collect_targets(self, inputs):\n targets = []\n for i in inputs:\n if not os.path.isdir(i):\n targets += self.collect_file(i)\n else:\n targets += self.collect_dir(i)\n return targets\n\n def add_files(self, targets):\n for data_item in targets:\n data_item.move_data_to_cache()\n\n def create_state_files(self, targets):\n \"\"\"\n Create state files for all targets.\n \"\"\"\n for data_item in targets:\n Logger.debug('Creating state file for {}'.format(data_item.data.relative))\n\n fname = os.path.basename(data_item.data.relative + StateFile.STATE_FILE_SUFFIX)\n out = StateFile.parse_deps_state(self.settings, [data_item.data.relative],\n currdir=os.path.curdir)\n state_file = StateFile(fname=fname,\n cmd=None,\n out=out,\n out_git=[],\n deps=[],\n locked=True)\n state_file.save()\n Logger.debug('State file \"{}\" was created'.format(data_item.state.relative))\n\n def run(self):\n targets = self.collect_targets(self.parsed_args.input)\n self.add_files(targets)\n self.create_state_files(targets)\n msg = 'DVC add: {}'.format(str(self.parsed_args.input))\n self.commit_if_needed(msg)\n","sub_path":"dvc/command/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"218151889","text":"from globus_sdk._testing.models import RegisteredResponse, ResponseSet\n\nmetadata = {\n \"id\": \"daa09846-eb92-11e9-b89c-9cb6d0d9fd63\",\n \"display_name\": \"example gateway 1\",\n}\n\nRESPONSES = ResponseSet(\n metadata=metadata,\n default=RegisteredResponse(\n service=\"gcs\",\n method=\"PATCH\",\n path=f\"/storage_gateways/{metadata['id']}\",\n json={\n \"DATA_TYPE\": \"result#1.0.0\",\n \"http_response_code\": 200,\n \"detail\": \"success\",\n \"message\": \"Operation successful\",\n \"code\": \"success\",\n \"data\": [\n {\n \"DATA_TYPE\": \"storage_gateway#1.0.0\",\n \"id\": metadata[\"id\"],\n \"display_name\": metadata[\"display_name\"],\n \"connector_id\": \"145812c8-decc-41f1-83cf-bb2a85a2a70b\",\n \"require_high_assurance\": False,\n \"high_assurance\": False,\n \"authentication_assurance_timeout\": 15840,\n \"authentication_timeout_mins\": 15840,\n \"allowed_domains\": [\"example.edu\"],\n \"mapping\": \"username_without_domain\",\n \"restrict_paths\": {\n \"DATA_TYPE\": \"path_restrictions#1.0.0\",\n \"read\": [\"/\"],\n },\n \"policies\": {\n \"DATA_TYPE\": \"posix_storage_gateway#1.0.0\",\n \"groups_allow\": [\"globus\"],\n \"groups_deny\": [\"nonglobus\"],\n },\n \"users_allow\": [\"user1\"],\n \"users_deny\": [\"user2\"],\n }\n ],\n },\n ),\n)\n","sub_path":"src/globus_sdk/_testing/data/globus_connect_server/update_storage_gateway.py","file_name":"update_storage_gateway.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"41672648","text":"import numpy as np\n\n\n\ndef linear_triangulation(p1, p2, m1, m2):\n\t\"\"\"\n\tLinear triangulation to find the 3D point X where p1=m1*X\n\tand p2=m2*X. Solve AX = 0.\n\n\tArgs:\n\t\tp1: 2D points from image 1\n\t\tp2: 2D points from image 2\n\t\tm1: Camera matrix for image 1\n\t\tm2: Camera matrix for image 2\n\tReturns:\n\t\t4 x n array of homogeneous 3D points\n\t\"\"\"\n\n\tnum_points = p1.shape[1]  # number of points\n\tout = np.ones((4, num_points))  # array storing the triangulated points\n\n\tfor i in range(num_points):\n\t\tA = np.asarray([\n\t\t\t(p1[0,i]*m1[2,:] - m1[0,:]),\n\t\t\t(p1[1,i]*m1[2,:] - m1[1,:]),\n\t\t\t(p2[0,i]*m2[2,:] - m2[0,:]),\n\t\t\t(p2[1,i]*m2[2,:] - m2[1,:])\n\t\t])\n\n\t\t_, _, V = np.linalg.svd(A)\n\t\tX = V[-1, :4]\n\t\tout[:,i] = X/X[3]\n\n\treturn out\n\n\ndef skew(x):\n\t\"\"\"\n\tCreate a skew symmetric matrix A from a 3D vector x\n\n\tArgs:\n\t\tx: 3D vector\n\tReturns:\n\t\t3 x 3 skew symmetric matrix from x\n\t\"\"\"\n\n\treturn np.array([\n\t\t[0, -x[2], x[1]],\n\t\t[x[2], 0, -x[0]],\n\t\t[-x[1], x[0], 0]\n\t])\n\n\n\ndef computeEightPointMatrix(p1, p2):\n\t\"\"\"\n\tBuild the n x 9 measurement matrix of the eight point algorithm\n\tfrom corresponding points in the two images (n scene points).\n\n\tArgs:\n\t\tp1: homogeneous points from image 1 (3 x n)\n\t\tp2: homogeneous points from image 2 (3 x n)\n\n\tReturns:\n\t\tn x 9 matrix\n\t\"\"\"\n\n\tp1x, p1y = p1[:2]\n\t# the original unpacked p2 into (p1y, p2y), clobbering p1y and leaving p2x undefined\n\tp2x, p2y = p2[:2]\n\n\treturn np.array([\n\t\tp1x * p2x, p1x * p2y, p1x,\n\t\tp1y * p2x, p1y * p2y, p1y,\n\t\tp2x, p2y, np.ones(len(p1x))\n\t]).T\n\n\n\ndef computeFundamentalMatrix(p1, p2):\n\t\"\"\"\n\tCompute the fundamental matrix with the (unnormalized) eight point\n\talgorithm by delegating to compute_image_to_image_matrix below.\n\n\tArgs:\n\t\tp1, p2: corresponding homogeneous points (3 x n)\n\n\tReturns:\n\t\t3 x 3 fundamental matrix\n\t\"\"\"\n\n\tn = p1.shape[1]\n\tif p2.shape[1] != n:\n\t\traise ValueError(\"Number of points don't match.\")\n\n\treturn compute_image_to_image_matrix(p1, p2)\n\ndef computeEssentialMatrix(x1, x2):\n\traise NotImplementedError\n\n\n\n\ndef computeCameraMatrix(p2d, p3d):\n\t\"\"\"\n\tComputes the camera matrix using Direct Linear Transformation.\n\tThis method formulates a homogeneous linear system of equations and\n\tsolves this by finding an approximate null space of the system matrix\n\n\tArgs:\n\t\tp2d: scene points in image coordinates (3 x n)\n\t\tp3d: scene points in 3D coordinates (4 x n)\n\n\tReturns:\n\t\t3 x 4 camera matrix\n\t\"\"\"\n\n\tn = p2d.shape[1]\n\tif p3d.shape[1] != n:\n\t\traise ValueError(\"Number of points don't match.\")\n\n\n\t# create matrix for DLT solution\n\tM = np.zeros((3*n,12+n))\n\tfor i in range(n):\n\t\tM[3*i,0:4] = p3d[:,i]\n\t\tM[3*i+1,4:8] = p3d[:,i]\n\t\tM[3*i+2,8:12] = p3d[:,i]\n\t\tM[3*i:3*i+3,i+12] = -p2d[:,i]\n\t\t\n\tU,S,V = np.linalg.svd(M)\n\t\n\treturn V[-1,:12].reshape((3,4))\n\n\n\ndef compute_epipole(F):\n\t\"\"\" Computes the (right) epipole from a \n\t\tfundamental matrix F. \n\t\t(Use with F.T for left epipole.) \"\"\"\n\t\n\t# return null space of F (Fx=0)\n\tU,S,V = np.linalg.svd(F)\n\te = V[-1]\n\treturn e/e[2]\n\n\n\ndef triangulate_point(x1,x2,P1,P2):\n\t\"\"\" Point pair triangulation from \n\t\tleast squares solution. \"\"\"\n\t\t\n\tM = np.zeros((6,6))\n\tM[:3,:4] = P1\n\tM[3:,:4] = P2\n\tM[:3,4] = -x1\n\tM[3:,5] = -x2\n\n\tU,S,V = np.linalg.svd(M)\n\tX = V[-1,:4]\n\n\treturn X / X[3]\n\n\ndef triangulate(x1,x2,P1,P2):\n\t\"\"\" Two-view triangulation of points in \n\t\tx1,x2 (3*n homog. coordinates). 
\"\"\"\n\t\tn = x1.shape[1]\n\tif x2.shape[1] != n:\n\t\traise ValueError(\"Number of points don't match.\")\n\n\n\tX = [ triangulate_point(x1[:,i],x2[:,i],P1,P2) for i in range(n)]\n\treturn np.array(X).T\n\n\n\ndef compute_normalized_image_to_image_matrix(p1, p2, compute_essential=False):\n\t\"\"\" Computes the fundamental or essential matrix from corresponding points\n\t\tusing the normalized 8 point algorithm.\n\t:input p1, p2: corresponding points with shape 3 x n\n\t:returns: fundamental or essential matrix with shape 3 x 3\n\t\"\"\"\n\tn = p1.shape[1]\n\tif p2.shape[1] != n:\n\t\traise ValueError('Number of points do not match.')\n\n\t# preprocess image coordinates\n\tp1n, T1 = scale_and_translate_points(p1)\n\tp2n, T2 = scale_and_translate_points(p2)\n\n\t# compute F or E with the coordinates\n\tF = compute_image_to_image_matrix(p1n, p2n, compute_essential)\n\n\t# reverse preprocessing of coordinates\n\t# We know that P1' E P2 = 0\n\tF = np.dot(T1.T, np.dot(F, T2))\n\n\treturn F / F[2, 2]\n\n\ndef compute_image_to_image_matrix(x1, x2, compute_essential=False):\n\t\"\"\" Compute the fundamental or essential matrix from corresponding points\n\t\t(x1, x2 3*n arrays) using the 8 point algorithm.\n\t\tEach row in the A matrix below is constructed as\n\t\t[x'*x, x'*y, x', y'*x, y'*y, y', x, y, 1]\n\t\"\"\"\n\tA = correspondence_matrix(x1, x2)\n\t# compute linear least square solution\n\tU, S, V = np.linalg.svd(A)\n\tF = V[-1].reshape(3, 3)\n\n\t# constrain F. Make rank 2 by zeroing out last singular value\n\tU, S, V = np.linalg.svd(F)\n\tS[-1] = 0\n\tif compute_essential:\n\t\tS = [1, 1, 0] # Force rank 2 and equal eigenvalues\n\tF = np.dot(U, np.dot(np.diag(S), V))\n\n\treturn F\n\n\ndef compute_fundamental_normalized(p1, p2):\n\treturn compute_normalized_image_to_image_matrix(p1, p2)\n\n\ndef compute_essential_normalized(p1, p2):\n\treturn compute_normalized_image_to_image_matrix(p1, p2, compute_essential=True)\n\n\ndef scale_and_translate_points(points):\n\t\"\"\" Scale and translate image points so that centroid of the points\n\t\tare at the origin and avg distance to the origin is equal to sqrt(2).\n\t:param points: array of homogenous point (3 x n)\n\t:returns: array of same input shape and its normalization matrix\n\t\"\"\"\n\tx = points[0]\n\ty = points[1]\n\tcenter = points.mean(axis=1) # mean of each row\n\tcx = x - center[0] # center the points\n\tcy = y - center[1]\n\tdist = np.sqrt(np.power(cx, 2) + np.power(cy, 2))\n\tscale = np.sqrt(2) / dist.mean()\n\tnorm3d = np.array([\n\t\t[scale, 0, -scale * center[0]],\n\t\t[0, scale, -scale * center[1]],\n\t\t[0, 0, 1]\n\t])\n\n\treturn np.dot(norm3d, points), norm3d\n\n\n\ndef correspondence_matrix(p1, p2):\n\tp1x, p1y = p1[:2]\n\tp2x, p2y = p2[:2]\n\n\treturn np.array([\n\t\tp1x * p2x, p1x * p2y, p1x,\n\t\tp1y * p2x, p1y * p2y, p1y,\n\t\tp2x, p2y, np.ones(len(p1x))\n\t]).T\n\n\n\n\ndef compute_P_from_essential(E):\n\t\"\"\" Compute the second camera matrix (assuming P1 = [I 0])\n\t\tfrom an essential matrix. 
E = [t]R\n\t:returns: list of 4 possible camera matrices.\n\t\"\"\"\n\tU, S, V = np.linalg.svd(E)\n\n\t# Ensure the rotation matrices are right-handed, i.e. have positive determinant\n\tif np.linalg.det(np.dot(U, V)) < 0:\n\t\tV = -V\n\n\t# create 4 possible camera matrices (Hartley p 258)\n\tW = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n\tP2s = [np.vstack((np.dot(U, np.dot(W, V)).T, U[:, 2])).T,\n\t\t np.vstack((np.dot(U, np.dot(W, V)).T, -U[:, 2])).T,\n\t\t np.vstack((np.dot(U, np.dot(W.T, V)).T, U[:, 2])).T,\n\t\t np.vstack((np.dot(U, np.dot(W.T, V)).T, -U[:, 2])).T]\n\n\treturn P2s\n\n\n\ndef reconstruct_one_point(pt1, pt2, m1, m2):\n\t\"\"\"\n\t\tpt1 and m1 * X are parallel and cross product = 0\n\t\tpt1 x m1 * X = pt2 x m2 * X = 0\n\t\"\"\"\n\tA = np.vstack([\n\t\tnp.dot(skew(pt1), m1),\n\t\tnp.dot(skew(pt2), m2)\n\t])\n\tU, S, V = np.linalg.svd(A)\n\tP = np.ravel(V[-1, :4])\n\n\treturn P / P[3]","sub_path":"reconstruct/sfm.py","file_name":"sfm.py","file_ext":"py","file_size_in_byte":6465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"513466817","text":"import logging\n\n# logger = logging.basicConfig(\n#     filename='debug.log', level=logging.DEBUG, format='%(levelname)s :: %(asctime)s %(message)s')\n# logging.warning('This is a message')\n# # logger.warning('This is a test')\n\n# logger = logging.getLogger('Something')\n# formatter = logging.Formatter(\n#     '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n# handler = logging.FileHandler('something.log')\n# handler.setLevel(logging.DEBUG)\n# handler.setFormatter(formatter)\n# logger.addHandler(handler)\n# logger.warning('This is a test')\n\n\nclass Logs:\n    \"\"\"\n    Base logger for all applications in scrappers\n    \"\"\"\n    logger = None\n\n    def __init__(self, name='Default', logfile_name='scrappers.log'):\n        logger = logging.getLogger(name)\n        # the logger itself must allow DEBUG records through; otherwise its\n        # default WARNING level filters them before the handler sees them\n        logger.setLevel(logging.DEBUG)\n        formatter = logging.Formatter('%(levelname)s :: %(asctime)s - %(name)s - %(message)s')\n        handler = logging.FileHandler(logfile_name)\n        handler.setLevel(logging.DEBUG)\n        handler.setFormatter(formatter)\n        logger.addHandler(handler)\n        self.logger = logger\n\n    def __call__(self, name, logfile_name='scrappers.log'):\n        self.__init__(name, logfile_name=logfile_name)\n        return self.logger\n\ndefault = Logs()\n","sub_path":"apps/config/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"570773224","text":"from tkinter import *\nfrom tkinter import filedialog\nimport random\nroot = Tk()\n\n\ndef sumBin(dec):\n if(dec[-1]==\"d\"):\n dec = int(dec[:-1])\n else:\n dec = int(dec)\n b = ''\n if(dec==0):\n b = '0'\n while(dec!=0):\n if(dec%2==1):\n b='1'+b\n dec=(dec-1)/2\n else:\n b='0'+b\n dec=dec/2\n return b\n\ndef hexaBin(hexa):\n numero =''\n diccionario = {\n '0':'0000',\n '1':'0001',\n '2':'0010',\n '3':'0011',\n '4':'0100',\n '5':'0101',\n '6':'0110',\n '7':'0111',\n '8':'1000',\n '9':'1001',\n 'A':'1010',\n 'B':'1011',\n 'C':'1100',\n 'D':'1101',\n 'E':'1110',\n 'F':'1111',\n }\n\n for i in hexa:\n numero+=diccionario[i]\n\n return numero\n\ndef Rellena(N,hasta):\n N=N.strip()\n longitud = hasta-len(N)\n Numero = '0'*longitud\n Numero+=N\n return Numero\n\n\nlista = {\n 'MOV A,B' : '1111110',\n 'MOV B,A' : '0000001',\n 'MOV A,Lit' : '0000010',\n 'MOV B,Lit' : '0000011',\n 'MOV A,(Dir)' : '0000100',\n 'MOV B,(Dir)' : '0000101',\n 'MOV (Dir),A' : '0000110',\n 'MOV (Dir),B' : '0000111',\n\n 'ADD A,B' : '0001011',\n 'ADD B,A' : '0001100',\n 'ADD A,Lit' : '0001101',\n 'ADD B,Lit' : '1000000',\n 'ADD A,(Dir)' : '0001110',\n 'ADD B,(Dir)' : '1000001',\n 'ADD (Dir)' : '0010000',\n\n 'SUB A,B' : '0010001',\n 'SUB B,A' : '0010010',\n 'SUB A,Lit' : '1000010',\n 'SUB B,Lit' : '1000011',\n 'SUB A,(Dir)' : '0010011',\n 'SUB B,(Dir)' : '1000100',\n 'SUB (Dir)' : '0010101',\n\n 'AND A,B' : '0010110',\n 'AND B,A' : '0010111',\n 'AND A,Lit' : '0011000',\n 'AND B,Lit' : '1000101',\n 'AND A,(Dir)' : '0011001',\n 'AND B,(Dir)' : '1000101',\n 'AND(Dir)' : '0011011',\n\n 'OR A,B' : '0011100',\n 'OR B,A' : '0011101',\n 'OR A,Lit' : '0011110',\n 'OR B,Lit' : '1000111',\n 'OR A,(Dir)' : '0011111',\n 'OR B,(Dir)' : '1001000',\n 'OR (Dir)' : '0100001',\n\n 'NOT A' : '0100010',\n 'NOT B,A' : '0100011',\n 'NOT (Dir),A' : '1010100',\n\n 'XOR A,B' : '0101000',\n 'XOR B,A' : '0101001',\n 'XOR A,Lit' : '0101010',\n 'XOR B,Lit' : '1001001',\n 'XOR A,(Dir)' : '0101011',\n 'XOR B,(Dir)' : '1001010',\n 'XOR (Dir)' : '0101101',\n\n 'SHL A' : '0101110',\n 'SHL B,A' : '0101111',\n 'SHL (Dir),A' : '0110011',\n\n 'SHR A' : '0110100',\n 'SHR B,A' : '0110101',\n 'SHR (Dir),A' : '0111001',\n\n 'INC A' : '11111110', ##sin uso\n 'INC B' : '0111010',\n 'INC (Dir)' : '1001011',\n\n 'DEC A' : '11111111', ##sin uso\n\n 'CMP A,B' : '0111011',\n 'CMP A,Lit' : '0111100',\n 'CMP A,(Dir)' : '1001100',\n\n 'JMP Ins' : '0111101',\n 'JEQ Ins' : '1001101',\n 'JNE Ins' : '1001110',\n 'JGT Ins' : '1001111',\n 'JGE Ins' : '1010001',\n 'JLT Ins' : '1010000',\n 'JLE Ins' : '1010010',\n 'JCR Ins' : '1010011',\n 'NOP' : '0000000'\n}\n#print(lista)\ninstrucciones = {}\n\n\nlabel = {}\n#labelVar = []\n'''for i in range(len(lista)):\n instrucciones[lista[i]]=Rellena(sumBin(i))\n ##print(lista[i]+\"=\"+str(i))\n#print(instrucciones)'''\n\n\norden_labels =[]\nl = []\n\n'''def leer(Archivo,label):\n ar = open(Archivo,'r')\n linea = ar.readline()\n\n while linea:\n\n nombre = linea.split(':')\n Nombre = nombre[0].strip()\n orden_labels.append(Nombre)\n\n label[Nombre]=[]\n while linea:\n linea = ar.readline()\n\n w = linea.split('//')\n\n if( ':' not in w[0]):\n label[Nombre].append(w[0].strip())\n else:\n break\n ar.close()'''\n\n\nvariables = {} #Diccionario de variables a direccion en ram\ndef variablesDataRAM(var):\n var = var.strip()\n [nombre,valor] = var.split(\" \")\n variables[nombre] = len(variables.keys())\n return variables\n\ndef variablesIns(var):\n var = var.strip()\n [nombre,valor] = var.split(\" \")\n 
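# register the variable's RAM address first (variablesDataRAM hands out\n    # slots sequentially), then normalize the literal value to binary below\n    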
variablesDataRAM(var)\n if(valor[-1]!=\"b\"):\n if(valor[-1]!=\"h\"):\n valor=sumBin(str(valor))\n else:\n valor = hexaBin(valor[:-1])\n else:\n valor = valor[:-1]\n return [\"MOV A,\"+str(valor)+\"b\",\"MOV (\"+str(variables[nombre])+\"),A\"] #transforma data a instruciones\n\ndef contador(Linea):\n conta = 0\n for i in Linea:\n if i == ' ':\n conta+=1\n else:\n break\n return conta\n\n\n'''def Leer(Archivo, label):\n Ar = open(Archivo,'r',encoding=\"latin-1\")\n Lineas = Ar.readlines()\n cont = 0\n cccc = -1\n primeras_lineas = []\n while cont < len(Lineas):\n nombre = Lineas[cont].strip()\n nombre = nombre.split(':')\n Nombre = nombre[0].strip()\n orden_labels.append(Nombre)\n label[Nombre]=[]\n #labelVar.append(Nombre+\" 0\") #Agregamos cada label como una var de valor 0\n\n while cont < len(Lineas):\n cont+=1\n if cont>=len(Lineas):\n break\n else:\n Linea_anterior = Lineas[cont-1].split(':')\n Linea_anterior = Linea_anterior[0].split('//')\n Linea_anterior = Linea_anterior[0].strip()\n w = Lineas[cont].split('//')\n\n if Linea_anterior in orden_labels:\n primeras_lineas.append(contador(w[0]))\n cccc+=1\n if w[0]!= '\\n':\n if(':' not in w[0]):\n cc = contador(w[0])\n\n if cc == primeras_lineas[cccc]:\n #label[Nombre].append(w[0].strip())\n l=w[0].strip()\n ins = l.split(\" \")[0]+\" \"+\"\".join(l.split(\" \")[1:])\n if(ins==\"DEC A\"):\n ins = \"SUB A,1\"\n if(ins==\"INC A\"):\n ins = \"ADD A,1\"\n label[Nombre].append(ins)\n else:\n label[\"CODE\"].append(w[0].strip())\n else:\n break\n Ar.close()'''\n\ndef orden_instrucciones(diccionario,valor,cantidad):\n if len(l)< cantidad:\n if valor != '':\n l.append(valor)\n x = valor.split()\n if len(x)>=2:\n if x != [] and x[1] in orden_labels:\n for z in diccionario[x[1]]:\n if z != '' :\n orden_instrucciones(diccionario,z,cantidad)\n\ndef lista_instrucciones(diccionario,cantidad):\n for x in diccionario[\"CODE\"]:\n orden_instrucciones(diccionario,x,cantidad)\n\ndef suma_instrucciones(diccionario, orden_labels):\n contador = 0\n for i in orden_labels:\n if i != \"DATA\":\n for x in diccionario[i]:\n if x != '':\n contador+= 1\n return contador\n\ninstrucciones2 = {} #Dict con data con instrucciones\ndef dataFinal(diccionario):\n for nombre2 in diccionario:\n if(nombre2 != \"DATA\"):\n instrucciones2[nombre2] = diccionario[nombre2]\n else:\n instrucciones2[\"DATA\"] = []\n\n for nombre in diccionario[\"DATA\"]:\n for ins in variablesIns(nombre):\n instrucciones2[\"DATA\"].append(ins)\n\n if(len(instrucciones2[\"DATA\"])>0):\n instrucciones2[\"DATA\"].append(\"MOV A,0\") #Limpia registro A despues de las variables\n\n '''for labelNombre in labelVar:\n for labelIns in variablesIns(labelNombre):\n instrucciones2[\"DATA\"].append(labelIns)'''\n\ndef esint(num):\n try:\n int(num)\n return True\n except:\n return False\n\ndef instr_binario(inst):\n #inst = inst.split(\" \")[1]\n lista_inst = inst.split(',')\n if lista_inst[0][0] == '(': #tipo mov (a),A\n #print('es direccion')\n if not esint(lista_inst[0][1]): #tipo mov (c),A\n if(not lista_inst[0][-2] == 'h'):\n return('variable izq')\n else:\n return('hexa izq')\n else:\n if lista_inst[0][-2] == 'b': #tipo mov (10b),A\n return('bin izq')\n elif lista_inst[0][-2] == 'h': #tipo mov (10h),A\n return('hexa izq')\n else:\n return('decimal izq') #tipo mov (10),A\n elif lista_inst[1][0] == '(': #Analogo al anterior\n if not esint(lista_inst[1][1]):\n if(not lista_inst[1][-2] == 'h'):\n return('variable der')\n else:\n return('hexa der')\n\n else:\n if lista_inst[1][-2] == 'b':\n return('bin der')\n 
elif lista_inst[1][-2] == 'h':\n return('hexa der')\n else:\n return('decimal der')\n\n elif lista_inst[1][-1] =='h':\n return('instruccion')\n else:\n return(\"instruccion\")\n\ndef insToType(valor):\n if not valor[0] == '(': #tipo XOR (var)\n return 0 #return('instruccion')\n elif not esint(valor[1]): #valor var\n if(not valor[-2] == 'h'):\n return 1 #return('variable')\n else:\n return 3 #return('hexa')\n else:\n if valor[-2] == 'b': #valor 10b\n return 2 #return('bin')\n elif valor[-2] == 'h': #valor 10h\n return 3 #return('hexa')\n else:\n return 4 #return('decimal') #valor 10\n\n###cambie esto\ndef tipoVar(valor):\n if(valor[-1]==\"b\"):\n return 1 #es binario\n elif(valor[-1]==\"h\"):\n return 2 #es hexa\n elif(not esint(valor[0])):\n return 0 #es variable\n else:\n return 3 #es dec\n\ndef transformarBin(valor):\n if(tipoVar(valor)==1): #lit binario\n return valor[:-1] #lit en binario sin \"b\"\n elif(tipoVar(valor)==2): #lit hexa\n return hexaBin(valor[:-1]) #lit en bin transformado desde hexa\n elif(tipoVar(valor)==3): #lit en dec\n return sumBin(str(valor)) #lit en bin transformado desde\n\ndef ins_generica(ins):\n valor=\" \" #por si aparece 'NOP'\n izqcoma=\" \"\n dercoma=\" \"\n nombre,valor=ins.split(\" \")\n nombre = nombre.strip()\n nombre = nombre.replace(\"\\t\",\"\")\n valor = valor.strip()\n retorna = \"\"\n if(nombre!=\"\"):\n if(valor.count(\",\")>0): #del tipo MOV x,y\n izqcoma,dercoma=valor.split(\",\") #lado izq y lado der de la coma\n if(instr_binario(valor)==\"instruccion\"): #tipo MOV A,Lit || MOV A,B\n if(tipoVar(izqcoma)!=0): #tipo MOV Lit,A\n retorna = str(nombre)+\" Lit,\"+str(dercoma)\n izqcoma=transformarBin(izqcoma)\n elif(tipoVar(dercoma)!=0): #tipo MOV A,Lit\n retorna = str(nombre)+\" \"+str(izqcoma)+\",Lit\"\n dercoma=transformarBin(dercoma)\n else:\n retorna = str(nombre)+\" \"+str(izqcoma)+\",\"+str(dercoma)\n elif(instr_binario(valor)==\"variable izq\"): #tipo MOV (c),A\n retorna = str(nombre)+\" (Dir),\"+str(dercoma)\n izqcoma = \"(\"+str(transformarBin(str(variables[izqcoma[1:-1]])))+\")\" #transforma var a lugar en ram (binario)\n elif(instr_binario(valor)==\"variable der\"): #tipo MOV A,(c)\n retorna = str(nombre)+\" \"+str(izqcoma)+\",(Dir)\"\n dercoma = \"(\"+str(transformarBin(str(variables[dercoma[1:-1]])))+\")\" #transforma var a lugar en ram (binario)\n elif(instr_binario(valor)==\"bin izq\" or instr_binario(valor)==\"hexa izq\" or instr_binario(valor)==\"decimal izq\"): #tipo MOV (6),A\n retorna = str(nombre)+\" (Dir),\"+str(dercoma)\n izqcoma = \"(\"+str(transformarBin(izqcoma[1:-1]))+\")\"\n elif(instr_binario(valor)==\"bin der\" or instr_binario(valor)==\"hexa der\" or instr_binario(valor)==\"decimal der\"): #tipo MOV A,(6)\n retorna = str(nombre)+\" \"+str(izqcoma)+\",(Dir)\"\n dercoma = \"(\"+str(transformarBin(dercoma[1:-1]))+\")\"\n '''print(valor)\n print(instr_binario(valor))\n print(izqcoma)\n print(dercoma)'''\n else: #del tipo JMP abc || DEC A\n dercoma = \" \"\n '''if(valor.count(\"(\")>0): #del tipo ICL (Dir)\n retorna = str(nombre)+\" (Dir)\"\n if not esint(valor[1]): #tipo\n izqcoma = valor\n else:\n izqcoma = \"(\"+transformarBin(valor[1:-1])+\")\"\n else:'''\n if(nombre==\"NOP\"):\n retorna = str(nombre)\n izqcoma = \" \"\n elif(nombre[0]!=\"J\"): #Todos los que no comienzan con J\n #retorna = str(nombre)+\" \"+str(valor)\n if(insToType(valor)==0): #instruccion: tipo INC A\n retorna = str(nombre)+\" \"+str(valor)\n izqcoma = str(valor)\n elif(insToType(valor)==1): #variable: tipo INC (var)\n retorna = str(nombre)+\" 
(Dir)\"\n izqcoma = \"(\"+str(transformarBin(str(variables[valor[1:-1]])))+\")\"\n dercoma = str(valor)\n else:\n #retorna = str(nombre)+\" Lit\"\n retorna = str(nombre)+\" (Dir)\" #Deberia ser Lit, pero no existe, y en ejemplo 6 se cae\n izqcoma = \"(\"+str(transformarBin(str(valor[1:-1])))+\")\"\n dercoma = str(valor)\n else: #JMP etc\n retorna = str(nombre)+\" Ins\"\n izqcoma = \"(\"+str(transformarBin(str(countIns[valor])))+\")\"\n dercoma = valor\n\n return [retorna,izqcoma,dercoma]\n else:\n return False\n '''print(izqcoma)\n print(dercoma)\n print(retorna)\n print()\n #print(nombre)\n #print(valor)'''\n\nlistaInsBin = []\nlistaInsTxt = []\ncountIns = {}\ndef dataFinalBin(dict):\n opcode = 0\n literal = 0\n count = 0 #num de linea q estamos\n ###cambie esto!!! #no era lo mismo? pero bueno lo dejo asi\n for nombre in orden_labels:\n countIns[nombre] = count\n funcion = dict[nombre]\n for ins in funcion:\n count+=1\n\n\n for nombre in orden_labels:\n #print(nombre)\n #countIns[nombre] = count\n #print(countIns)\n funcion = dict[nombre] #data,code,end,etc\n for ins in funcion: #cada instruccion de cada funcion\n if(ins_generica(ins)):\n print(ins_generica(ins))\n insgen,izq,der=ins_generica(ins)\n if(izq[0]==\"(\"): #direccion\n literal=Rellena(str(izq[1:-1]),16)\n elif(der[0]==\"(\"): #direccion\n literal=Rellena(str(der[1:-1]),16)\n elif(esint(izq)):\n literal=Rellena(str(izq),16)\n elif(esint(der)):\n literal=Rellena(str(der),16)\n else:\n literal=Rellena(\"\",16)\n opcode=Rellena(str(lista[insgen]),17)\n #print(lista)\n #print(str(lista[insgen]))\n #print(opcode)\n #print(var)\n print(str(opcode)+str(literal))\n listaInsBin.append(str(opcode)+str(literal))\n listaInsTxt.append(ins_generica(ins))\n #count += 1\n print(listaInsTxt)\n\ndef rellenaLista(lista,N):\n for i in range(len(lista), N):\n lista.append(Rellena(\"\",33))\n\ndef outputTXT(file):\n f = open(file,'w+')\n\n ##Info\n f.write(\"-- Variable | Direccion (dec) | Direccion (bin)\\n\")\n for variable in variables:\n f.write(\"-- \"+str(variable)+\" | \"+str(variables[variable])+\" | \"+str(sumBin(str(variables[variable])))+\"\\n\")\n f.write(\"---\\n\")\n for linea in countIns:\n f.write(\"-- \"+str(linea)+\" | \"+str(countIns[linea])+\" | \"+str(sumBin(str(countIns[linea])))+\"\\n\")\n f.write(\"\\n\")\n f.write(\"library IEEE;\\n\")\n f.write(\"use IEEE.STD_LOGIC_1164.ALL;\\n\")\n f.write(\"use IEEE.STD_LOGIC_UNSIGNED.ALL;\\n\")\n f.write(\"USE IEEE.NUMERIC_STD.ALL;\\n\")\n f.write(\"\\n\")\n f.write(\"entity ROM is\\n\")\n f.write(\"\\tPort (\\n\")\n f.write(\"\\t\\taddress : in std_logic_vector(11 downto 0);\\n\")\n f.write(\"\\t\\tdataout : out std_logic_vector(32 downto 0)\\n\")\n f.write(\"\\t\\t);\\n\")\n f.write(\"end ROM;\\n\")\n f.write(\"\\n\")\n f.write(\"architecture Behavioral of ROM is\\n\")\n f.write(\"\\n\")\n f.write(\"type memory_array is array (0 to ((2 ** 12) - 1) ) of std_logic_vector (32 downto 0);\\n\")\n f.write(\"\\n\")\n f.write(\"signal memory : memory_array:= (\\n\")\n for i in range(len(listaInsBin)):\n if((i+1) != len(listaInsBin)):\n f.write(str('\\t\"'+listaInsBin[i]+'\",'))\n else: #sin coma al final\n f.write(str('\\t\"'+listaInsBin[i]+'\"'))\n\n try:\n f.write(str(\" -- \"+listaInsTxt[i][0]+\" | \"+listaInsTxt[i][1]+\" | \"+listaInsTxt[i][2]+\"\\n\"))\n except:\n f.write(\"\\n\")\n f.write(\");\\n\")\n f.write(\"begin\\n\")\n f.write(\"\\n\")\n f.write(\"\\tdataout <= memory(to_integer(unsigned(address)));\\n\")\n f.write(\"\\n\")\n f.write(\"end Behavioral;\")\n\n f.close()\n\ndef 
Leer_Archivo(Archivo,label):\n archivo = open(Archivo,'r',encoding='latin-1')\n Linea= archivo.readline()\n Lineas = [] ## las Lineas que nos sirver\n Espacios_Label=[] ## espacios o tabulaciones de los Labels\n contador_label =-1 ## cuantos labels hay\n\n while Linea:\n temp = Linea.split('//')\n Linea = temp[0] # si la linea tiene comentarios no los pesacamos\n if Linea.strip():\n Lineas.append(Linea) ## agregamos las lineas que no son // y que no son espacion es blanco\n Linea = archivo.readline()\n\n for i in range(len(Lineas)):\n Linea_actual = Lineas[i]\n if Linea_actual.strip():\n\n if ':' in Linea_actual:\n Auxiliar = Linea_actual.split(':')\n Nombre_Label = Auxiliar[0].strip()#Nombre label\n label[Nombre_Label] = []\n orden_labels.append(Nombre_Label)\n contador_label+=1\n\n try:\n if':' in Lineas[i+1]:\n Espacios_Label.append(0)#si el label no tiene ninguna instruccion le ponemos 0 nomas\n except:\n continue\n else:\n Linea_anterior = Lineas[i-1]# linea anterior a la liena en la que vamos\n Espacios_linea_actual = contador(Linea_actual) #espacios o tabulaciones de la linea actual\n Linea_actual = Linea_actual.strip()\n Linea_actual = Linea_actual.split(\" \")[0]+\" \"+\"\".join(Linea_actual.split(\" \")[1:])\n Linea_actual = Linea_actual.replace('\\r',' ') #Porsiacaso\n Linea_actual = Linea_actual.replace(' ','\\t')\n Linea_actual = Linea_actual.replace('\\t',' ',1)\n Linea_actual = Linea_actual.replace('\\t','') # la dejamos con un solo espacio MOV A,B\n\n if ':' in Linea_anterior:\n Espacios_Label.append(Espacios_linea_actual)#Primera linea de cada label, agregamos sus espacios en blaco al comienzo\n if Espacios_linea_actual >= Espacios_Label[contador_label]: # si la linea en la que vamos tiene igual o mas espacios al comienzo que la linea anterior es porque\n #pertenence al mismo label, sino es de CODE\n LabelName = orden_labels[contador_label]\n if(Linea_actual==\"DEC A\"):\n Linea_actual = \"SUB A,1\"\n if(Linea_actual==\"INC A\"):\n Linea_actual = \"ADD A,1\"\n label[LabelName].append(Linea_actual)\n else:\n r = random.randint(0,100000)\n extra = str(r)\n label[extra]=[]\n label[extra].append(Linea_actual)\n orden_labels.append(str(r))\n print(label)\n archivo.close()\n\nroot.fileopenname = filedialog.askopenfilename(initialdir = \"./\",title = \"Escoge input\")\nLeer_Archivo(root.fileopenname,label)\n\ndataFinal(label) #Calcula los comandos en texto\n\ndataFinalBin(instrucciones2) #transforma instrucciones a bin y agrega en lista listaInsBin\n\nrellenaLista(listaInsBin,4096) #rellena la lista listaInsBin con 4096 elementos\n\nroot.filesavename = filedialog.asksaveasfilename(initialdir = \"./\", title = \"Escoge output\")\noutputTXT(root.filesavename)\n#IMPORTATE: LISTA CON 4096 ES LA LISTA listaInsBin\n\n\n\n\n#print(listaInsBin)\n#print(variables)\n\n\nlista_instrucciones(label,suma_instrucciones(label,orden_labels))\n\n#print(l)\n","sub_path":"Entrega 2/python/4096.py","file_name":"4096.py","file_ext":"py","file_size_in_byte":20476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"135145820","text":"import tensorflow as tf\n\n\nclass DeepCoNN(object):\n def __init__(\n self, user_length, item_length, num_classes, user_vocab_size, item_vocab_size, fm_k, n_latent, user_num,\n item_num,\n embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0, l2_reg_V=0.0):\n self.input_u = tf.placeholder(tf.int32, [None, user_length], name=\"input_u\")\n self.input_i = tf.placeholder(tf.int32, [None, item_length], name=\"input_i\")\n self.input_y = tf.placeholder(tf.float32, [None, 1], name=\"input_y\")\n self.input_uid = tf.placeholder(tf.int32, [None, 1], name=\"input_uid\")\n self.input_iid = tf.placeholder(tf.int32, [None, 1], name=\"input_iid\")\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n batch_size = tf.shape(self.input_u)[0]\n print(\"user_length: \", user_length)\n print(\"item_length: \", item_length)\n print(\"batch_size \", batch_size)\n\n l2_loss = tf.constant(0.0)\n\n with tf.name_scope(\"user_embedding\"):\n self.W1 = tf.Variable(\n tf.random_uniform([user_vocab_size, embedding_size], -1.0, 1.0),\n name=\"W\")\n self.embedded_users = tf.nn.embedding_lookup(self.W1, self.input_u)\n # N x user_len x emb_size (N x in_w x in_c)\n\n with tf.name_scope(\"item_embedding\"):\n self.W2 = tf.Variable(\n tf.random_uniform([item_vocab_size, embedding_size], -1.0, 1.0),\n name=\"W\")\n self.embedded_items = tf.nn.embedding_lookup(self.W2, self.input_i)\n\n x = self.embedded_users\n print(\"embedded_users: \", x)\n for i in range(len(filter_sizes)):\n k = filter_sizes[i]\n n_channels = num_filters[i]\n with tf.name_scope(\"user_conv-maxpool-%s\" % k):\n conv = tf.layers.conv1d(inputs=x, filters=n_channels, kernel_size=k, strides=1,\n padding='same', activation=tf.nn.relu)\n max_pool = tf.layers.max_pooling1d(inputs=conv, pool_size=4, strides=4, padding='same')\n x = max_pool\n print(\"conv: \", conv)\n print(\"max_pool: \", max_pool)\n dim = x.get_shape()[1] * x.get_shape()[2]\n self.h_pool_flat_u = tf.reshape(x, [-1, dim])\n\n x = self.embedded_items\n print(\"embedded_items: \", x)\n for i in range(len(filter_sizes)):\n k = filter_sizes[i]\n n_channels = num_filters[i]\n with tf.name_scope(\"item_conv-maxpool-%s\" % k):\n conv = tf.layers.conv1d(inputs=x, filters=n_channels, kernel_size=k, strides=1,\n padding='same', activation=tf.nn.relu)\n max_pool = tf.layers.max_pooling1d(inputs=conv, pool_size=4, strides=4, padding='same')\n x = max_pool\n print(\"conv: \", conv)\n print(\"max_pool: \", max_pool)\n\n dim = x.get_shape()[1] * x.get_shape()[2]\n self.h_pool_flat_i = tf.reshape(x, [-1, dim])\n\n with tf.name_scope(\"dropout\"):\n self.h_drop_u = tf.nn.dropout(self.h_pool_flat_u, 1.0)\n self.h_drop_i = tf.nn.dropout(self.h_pool_flat_i, 1.0)\n print(\"self.h_drop_u: \", self.h_drop_u)\n print(\"self.h_drop_i: \", self.h_drop_i)\n\n with tf.name_scope(\"get_fea\"):\n self.u_fea = tf.layers.dense(self.h_drop_u, n_latent)\n self.i_fea = tf.layers.dense(self.h_drop_i, n_latent)\n\n with tf.name_scope('fm'):\n self.z = tf.nn.relu(tf.concat([self.u_fea, self.i_fea], axis=1))\n\n # self.z=tf.nn.dropout(self.z,self.dropout_keep_prob)\n\n WF1 = tf.Variable(\n tf.random_uniform([n_latent * 2, 1], -0.1, 0.1), name='fm1')\n Wf2 = tf.Variable(\n tf.random_uniform([n_latent * 2, fm_k], -0.1, 0.1), name='fm2')\n one = tf.matmul(self.z, WF1)\n\n inte1 = tf.matmul(self.z, Wf2)\n inte2 = tf.matmul(tf.square(self.z), tf.square(Wf2))\n\n inter = (tf.square(inte1) - inte2) * 0.5\n\n inter = tf.nn.dropout(inter, self.dropout_keep_prob)\n\n inter 
= tf.reduce_sum(inter, 1, keepdims=True)\n print(inter)\n b = tf.Variable(tf.constant(0.1), name='bias')\n\n self.predictions = one + inter + b\n\n print(self.predictions)\n with tf.name_scope(\"loss\"):\n # losses = tf.reduce_mean(tf.square(tf.subtract(self.predictions, self.input_y)))\n losses = tf.nn.l2_loss(tf.subtract(self.predictions, self.input_y))\n\n self.loss = losses + l2_reg_lambda * l2_loss\n\n with tf.name_scope(\"accuracy\"):\n self.mae = tf.reduce_mean(tf.abs(tf.subtract(self.predictions, self.input_y)))\n self.accuracy = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.predictions, self.input_y))))\n","sub_path":"model/myDeepCoNN.py","file_name":"myDeepCoNN.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"429761661","text":"import random\n\nif __name__ == '__main__':\n V = 100\n C_atk = 100\n C_def = 100\n N = 500\n M_atk = 5\n M_def = 5\n\n P_def = 0\n P_atk = 0\n\n S_atk = 1.0\n\n k = 100\n mean_attacker_payoff = 0\n mean_defender_payoff = 0\n for a in range(0, k):\n current_atk_v = -1\n current_def_v = -1\n\n atk_vectors = [i for i in range(100)]\n M_atk_vectors = []\n M_def_vectors = []\n\n successful_atks = 0\n for i in range(0, N):\n # Attacker chooses attack vector\n if i < M_atk:\n current_atk_v = atk_vectors[int(random.random()*len(atk_vectors))]\n M_atk_vectors.append(current_atk_v)\n else:\n prob = random.random()\n if prob <= S_atk:\n chosen_index = int(random.random()*len(M_atk_vectors))\n current_atk_v = M_atk_vectors[chosen_index]\n else:\n remaining_atk_vectors = atk_vectors[:]\n for atk_vec in M_atk_vectors:\n if atk_vec in remaining_atk_vectors:\n remaining_atk_vectors.remove(atk_vec)\n\n current_atk_v = remaining_atk_vectors[int(random.random()*len(remaining_atk_vectors))]\n\n M_atk_vectors.pop(0)\n M_atk_vectors.append(current_atk_v)\n\n # Defender chooses defense vector\n if i < M_def:\n current_def_v = int(random.random()*len(atk_vectors))\n M_def_vectors.append(current_atk_v)\n else:\n chosen_index = int(random.random()*len(M_def_vectors))\n current_def_v = M_def_vectors[chosen_index]\n\n M_def_vectors.pop(0)\n M_def_vectors.append(current_atk_v)\n\n # See if attack was successful or not\n if current_atk_v == current_def_v: # successful defense\n P_atk += -C_atk\n P_def += -C_def\n else: # successful attack\n successful_atks += 1\n P_atk += 10000 - C_atk\n P_def += -10000 - C_def\n\n mean_attacker_payoff += P_atk\n mean_defender_payoff += P_def\n\n #print(\"Successful Attacks: \" + str(successful_atks))\n #print(\"Successful Defenses: \" + str(N - successful_atks))\n #print(\"Attacker Payoff: \" + str(P_atk))\n #print(\"CactusCard Payoff: \" + str(P_def))\n\n mean_attacker_payoff = float(mean_attacker_payoff) / float(k)\n mean_defender_payoff = float(mean_defender_payoff) / float(k)\n\n print(\"Attacker Payoff: \" + str(mean_attacker_payoff))\n print(\"CactusCard Payoff: \" + str(mean_defender_payoff))","sub_path":"project/part3/Question4Simulation.py","file_name":"Question4Simulation.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"163510503","text":"__author__ = 'drizzutojr'\n\n# this code is from rosetta code\n\ndef quickSort(arr):\n less = []\n pivotList = []\n more = []\n if len(arr) <= 1:\n return arr\n else:\n initial_date = arr[0][\"due date\"]\n initial_date_format = str(initial_date[6:10]) + str(initial_date[3:5]) + str(initial_date[0:2])\n pivot = int(initial_date_format)\n for assignment in arr:\n compare_date = str(assignment[\"due date\"][6:10]) + \\\n str(assignment[\"due date\"][3:5]) + \\\n str(assignment[\"due date\"][0:2])\n if int(compare_date) < pivot:\n less.append(assignment)\n elif int(compare_date) > pivot:\n more.append(assignment)\n else:\n pivotList.append(assignment)\n less = quickSort(less)\n more = quickSort(more)\n return less + pivotList + more","sub_path":"Middleware/date_quicksort.py","file_name":"date_quicksort.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"84144040","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nfrom PIL import Image\nimport numpy as np\nimport math\n\nw, h = 5000, 6000\n\n# uint8 is required here: pixel values reach 250, which would overflow np.int8\ndata = np.zeros((h, w, 3), dtype=np.uint8)\n\ncolCnt = 0\nfor i in range(w):\n    value = 125+125.0*math.sin(0.00005*colCnt*i)#*colCnt*1.0)\n\n    for j in range(h):\n        data[j,i] = [value, value, value]\n    colCnt = colCnt+2\nimg = Image.fromarray(data, 'RGB')\n\nimg.save('my.png')\n\nimg.show()\n","sub_path":"createHDNCMask_SineVerlauf.py","file_name":"createHDNCMask_SineVerlauf.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"242646993","text":"from ROOT import TGraph, TFile, TGraphAsymmErrors\nfrom array import array\nimport os\nimport glob,math\nimport matplotlib.pyplot as plt\n\nfile1='/Users/dekumar/cernbox/monoH_lhefiles/2HDM_sintheta_scan_tan_1_mh3_600_mh4_100/Events/scan_run_[03-12].txt'\nfile2='/Users/dekumar/cernbox/monoH_lhefiles/monoH_tanbetascan_Ma_100_MA_600_sintheta_7/Events/scan_run_[01-11].txt'\nf=open(file1,'r')\n\nsin=[]\ncross=[]\n\nfor line in f:\n if 'run_' in line:\n # print (line.split()[1],\" \",line.split()[2])\n sp=line.split()[1]\n CS=line.split()[2]\n # print (CS)\n sin.append(float(sp))\n cross.append(float(CS))\n\nf.close()\n\nprint (sin)\nprint (cross)\nplt.plot(sin,cross,'-o',label='$M_{A}=600, M_{a}=100,M_{\\chi}=10, $'+r'$tan{\\beta}$=1 ',color='red')\n\n# plt.rc('axes', labelsize=20)\nplt.xlabel(r'$Sin{\\theta}$')\nplt.ylabel(\"Cross section(pb)\")\n# plt.xticks([.1, .2, .3, .35, .4, .5,.6,.7,.8,.9])\nplt.legend()#ncol=3,title=r\"tan$\\beta$\")\nplt.title(r\"monoH+DM 2HDM+a\")\nplt.savefig('sintheta_scan.pdf')\nplt.savefig('sintheta_scan.png')\nplt.close('all')\n\n\nf=open(file2,'r')\n\ntan=[]\ncross2=[]\n\nfor line in f:\n if 'run_' in line:\n # print (line.split()[1],\" \",line.split()[2])\n sp=line.split()[1]\n CS=line.split()[2]\n # print (CS)\n tan.append(float(sp))\n cross2.append(float(CS))\n\nf.close()\n\nprint (tan)\nprint (cross2)\nplt.plot(tan,cross2,'-o',label='$M_{A}=600, M_{a}=100,M_{\\chi}=10, $'+r'$Sin{\\theta}$=0.7 ',color='red')\n\nplt.rc('axes', labelsize=20)\nplt.xlabel(r'$Tan{\\beta}$')\nplt.ylabel(\"Cross section(pb)\")\n# plt.xticks([.1, .2, .3, .35, .4, .5,.6,.7,.8,.9])\nplt.legend()#ncol=3,title=r\"tan$\\beta$\")\nplt.title(r\"monoH+DM 2HDM+a\")\nplt.savefig('tan_scan.pdf')\nplt.savefig('tan_scan.png')\nplt.close('all')\n","sub_path":"bbDM_2HDM_gridpack_cards/XsectionPlotter.py","file_name":"XsectionPlotter.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"623355378","text":"import redis\nimport pylab as plt\n\nplt.rcParams['font.sans-serif']=['SimHei'] # render Chinese labels correctly\nplt.rcParams['axes.unicode_minus']=False # render minus signs correctly\n\n\n# connection pool\npool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)\nr = redis.Redis(connection_pool=pool)\nsex_dict = eval(r.hget('anadata', 'sex'))\n\n'''\nsex_dict['男']\nsex_dict['女']\nsex_dict['未知']\n'''\nfig1 = plt.figure('男女比例')\nrects1 =plt.bar(left = (0.2),height = (sex_dict['男']),color=('g'),label=(('男')),width = 0.2,align=\"center\",yerr=0.000001)\nrects2 =plt.bar(left = (0.6),height = (sex_dict['女']),color=('b'),label=(('女')),width = 0.2,align=\"center\",yerr=0.000001)\nrects3 =plt.bar(left = (1),height = (sex_dict['未知']),color=('r'),label=(('未知')),width = 0.2,align=\"center\",yerr=0.000001)\n\nplt.legend()\nplt.xticks((0.2,0.6,1),('男','女','未知'))\nplt.title('男女比例') # title\nplt.ylim(0,40000)\nplt.xlabel('性别')\nplt.ylabel('人数')\n\ndef autolabel(rects):\n    for rect in rects:\n        height = rect.get_height()\n        plt.text(rect.get_x()+rect.get_width()/2., 1.03*height, '%s' % float(height))\nautolabel(rects1)\nautolabel(rects2)\nautolabel(rects3)\nplt.show()\n\n","sub_path":"userfulPythonScript/spider/coding_spider/sex_figure.py","file_name":"sex_figure.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"374159431","text":"import html\nimport requests\nimport os\nimport xml.etree.ElementTree as ET\nimport sys\nimport re\nfrom enum import Enum\nimport xml.dom.minidom\n\n\nclass StringResourcePartType(Enum):\n TEXT = 1\n SPECIALCHAR = 2\n ESCAPESEQUENCE = 3\n WHITESPACE = 4\n\n\nclass StringResourcePart:\n type = None\n text = None\n\n\nclass StringResource:\n translatable = False\n name = \"\"\n parts = None\n\n\nclass StringResourceFile:\n strings = None\n\n\ndef concatstringresourceparts(parts):\n ret = \"\"\n for part in parts:\n ret += part.text\n return ret\n\n\ndef extractstring(text, name, translatable):\n string = StringResource()\n string.name = name\n string.translatable = translatable\n string.parts = []\n current = \"\"\n charIndex = 0\n while charIndex < len(text):\n if text[charIndex] == \"%\":\n if len(current) > 0:\n part = StringResourcePart()\n part.type = StringResourcePartType.TEXT\n part.text = current\n string.parts.append(part)\n\n current = text[charIndex]\n charIndex += 1\n while charIndex < len(text):\n if text[charIndex] == \" \":\n charIndex -= 1\n break\n current += text[charIndex]\n charIndex += 1\n part = StringResourcePart()\n part.type = StringResourcePartType.SPECIALCHAR\n part.text = current\n string.parts.append(part)\n current = \"\"\n elif text[charIndex] == \" \":\n if len(current) > 0:\n part = StringResourcePart()\n part.type = StringResourcePartType.TEXT\n part.text = current\n string.parts.append(part)\n\n current = text[charIndex]\n charIndex += 1\n while charIndex < len(text):\n if text[charIndex] != \" \":\n charIndex -= 1\n break\n current += text[charIndex]\n charIndex += 1\n part = StringResourcePart()\n part.type = StringResourcePartType.WHITESPACE\n part.text = current\n string.parts.append(part)\n current = \"\"\n elif text[charIndex] == \"\\\\\":\n if len(current) > 0:\n part = StringResourcePart()\n part.type = StringResourcePartType.TEXT\n part.text = current\n string.parts.append(part)\n current = text[charIndex]\n charIndex += 1\n current += text[charIndex]\n part = StringResourcePart()\n part.type = StringResourcePartType.ESCAPESEQUENCE\n part.text = current\n string.parts.append(part)\n current = \"\"\n elif text[charIndex] == \"\\t\":\n print(\"TAB\")\n elif text[charIndex] == \"\\n\":\n print(\"NEWLINE\")\n else:\n current += text[charIndex]\n charIndex += 1\n if len(current) > 0:\n part = StringResourcePart()\n part.type = StringResourcePartType.TEXT\n part.text = current\n string.parts.append(part)\n return string\n\n\ndef readresourcefile(file):\n print(\"ReadResource: \" + file)\n ret = StringResourceFile()\n ret.strings = []\n\n tree = ET.parse(file)\n root = tree.getroot()\n\n for i in range(len(root)):\n text = root[i].get('translatable')\n isTranslatable = False\n if text is None:\n isTranslatable = True\n elif text == \"true\":\n isTranslatable = True\n if root[i].tag == 'string-array':\n for j in range(len(root[i])):\n isItemTranslatable = root[i][j].get('translatable')\n if isItemTranslatable is None:\n isItemTranslatable = True\n ret.strings.append(extractstring(root[i][j].text, root[i][j].get('name'), isItemTranslatable))\n else:\n ret.strings.append(extractstring(root[i].text, root[i].get('name'), isTranslatable))\n print(\"Done Reading\")\n return ret\n\n\ndef writeresourcefile(resource, outfile):\n print(\"Write Resource: \" + outfile)\n resources = ET.Element(\"resources\")\n for string in resource.strings:\n stringelement = ET.SubElement(resources, 'string')\n stringelement.set('name', string.name)\n 
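# an explicit translatable=\"false\" attribute survives the round trip, so\n        # translateresource() keeps skipping these entries on later passes\n        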
if not string.translatable:\n            stringelement.set('translatable', \"false\")\n        stringelement.text = concatstringresourceparts(string.parts)\n    xmlstring = ET.tostring(resources, encoding=\"utf8\", method=\"xml\")\n    minixml = xml.dom.minidom.parseString(xmlstring.decode(\"utf-8\"))\n    prettystring = minixml.toprettyxml()\n\n    file = open(outfile, \"w\", encoding=\"utf8\")\n    file.write(prettystring)\n    file.close()\n    return\n\n\ndef translatestring(string, from_language=\"auto\", to_language=\"auto\"):\n    ret = StringResource()\n    ret.translatable = True\n    ret.name = string.name\n    ret.parts = []\n    for part in string.parts:\n        if part.type == StringResourcePartType.TEXT:\n            respart = StringResourcePart()\n            respart.type = StringResourcePartType.TEXT\n            respart.text = translate(part.text, to_language, from_language)\n            ret.parts.append(respart)\n        else:\n            ret.parts.append(part)\n    return ret\n\n\ndef translateresource(resource, from_language=\"auto\", to_language=\"auto\"):\n    print(\"Translate Resource\")\n    ret = StringResourceFile()\n    ret.strings = []\n    for string in resource.strings:\n        if string.translatable:\n            ret.strings.append(translatestring(string, from_language, to_language))\n        else:\n            ret.strings.append(string)\n    print(\"Done Translating\")\n    return ret\n\n\n# This subroutine extracts the string including html tags\n# and may replace \"root[i].text\". \n# It cannot digest arbitrary encodings, so use it only if necessary.\ndef findall_content(xml_string, tag):\n    pattern = r\"<(?:\\w+:)?%(tag)s(?:[^>]*)>(.*)</(?:\\w+:)?%(tag)s>\" % {\"tag\": tag}\n    return re.findall(pattern, xml_string, re.DOTALL)\n\n\n# This subroutine calls Google translate and extracts the translation from\n# the html request\ndef translate(to_translate, to_language=\"auto\", language=\"auto\"):\n    r = requests.get(\n        \"https://translate.google.com/m?hl=%s&sl=%s&q=%s\" % (to_language, language, to_translate.replace(\" \", \"+\")))\n\n    # set markers that enclose the charset identifier\n    beforecharset = 'charset='\n    aftercharset = '\" http-equiv'\n    # extract charset\n    parsed1 = r.text[r.text.find(beforecharset) + len(beforecharset):]\n    parsed2 = parsed1[:parsed1.find(aftercharset)]\n    # Display a warning on encoding mismatch\n    if parsed2 != r.encoding:\n        print('\\x1b[1;31;40m' + 'Warning: Potential Charset conflict')\n        print(\" Encoding as extracted by SELF : \" + parsed2)\n        print(\" Encoding as detected by REQUESTS : \" + r.encoding + '\\x1b[0m')\n\n    # Work around an AGE OLD Python bug in case of windows-874 encoding\n    # https://bugs.python.org/issue854511\n    if r.encoding == 'windows-874' and os.name == 'posix':\n        print(\n            '\\x1b[1;31;40m' + \"Alert: Working around age old Python bug (https://bugs.python.org/issue854511)\\nOn \"\n            \"Linux, charset windows-874 must be labeled as charset cp874\" + '\\x1b[0m')\n        r.encoding = 'cp874'\n\n    # convert html tags\n    text = html.unescape(r.text)\n    # set markers that enclose the wanted translation\n    before_trans = 'class=\"t0\">'\n    after_trans = '
', html)\n self.response_html = BeautifulSoup(html, \"lxml\")\n\n def get_elements(self, query, sep='\\n'):\n try:\n rs_text = ''\n results = self.response_html.select(query)\n for result in results:\n rs_text = rs_text + sep + str(result)\n return rs_text.strip()\n except Exception as ex:\n logger.exception(str(ex))\n return None\n\n def select_text(self, query, sep=' '):\n \"\"\"\n Select text from query\n :param query:\n :param sep:\n :return:\n \"\"\"\n try:\n rs_text = ''\n results = self.response_html.select(query)\n for result in results:\n rs_text = rs_text + sep + result.getText()\n return rs_text.strip()\n except Exception:\n return None\n\n def test_select(self, query):\n return self.response_html.select(query)\n\n def select_first(self, query):\n \"\"\"\n Get first element from query\n :param query:\n :return:\n \"\"\"\n try:\n return self.response_html.select(query)[0].getText()\n except Exception:\n return None\n\n def get_links(self, allow_pattern='.*'):\n \"\"\"\n Get all links from current page\n All links not matched regex will be removed\n :param allow_pattern:\n :return:\n \"\"\"\n self.all_links = self.response_html.find_all(\"a\", href=True)\n selected_links = set()\n for link in self.all_links:\n # get absolute url\n abs_url = urllib.parse.urljoin(self.page, link.get('href'))\n\n if not (not (self.domain in abs_url) or not re.match(allow_pattern, abs_url) or re.match(\n WebDriverWrapper.ignore_extension_regex, abs_url)):\n selected_links.add(UrlFormatter(abs_url).normalize())\n\n return list(selected_links)\n\n def scrape(self, rule: dict):\n \"\"\"\n Scrape data from given html by using selector (input dictionary)\n Return a dictionary of selected elements\n :param rule: rule as dictionary. ex: {title: h1, content: div}\n :return: dictionary of scraped data {title: this is title of page, content: this is a content page}\n \"\"\"\n results = dict()\n for key, value in rule.items():\n try:\n text = self.select_text(value)\n if text == '':\n results[key] = None\n else:\n results[key] = self.select_text(value)\n except Exception:\n results[key] = None\n\n return results\n\n def scrape_elements(self, rule: dict):\n \"\"\"\n Scrape data from given html by using selector (input dictionary)\n Return a dictionary of selected elements\n :param rule: rule as dictionary. 
ex: {title: h1, content: div}\n :return: dictionary of scraped data {title: this is title of page, content: this is a content page}\n \"\"\"\n results = dict()\n for key, query in rule.items():\n try:\n if key.startswith('pre'):\n if query is None or query == 'null':\n results[key] = None\n else:\n results[key] = query\n else:\n if query is None or query == 'null':\n results[key] = None\n else:\n attr_matcher = re.match(\".*(attr:(.+))$\", query)\n\n # element = None\n if attr_matcher:\n query = query.replace(attr_matcher.group(1), '').strip()\n attr_key = attr_matcher.group(2)\n elements = self.response_html.select(query)\n selected_elements = []\n for element in elements:\n selected_elements.append(element.get(attr_key))\n else:\n elements = self.response_html.select(query)\n selected_elements = []\n for element in elements:\n selected_elements.append(element.getText().strip())\n\n if key == 'title' and len(selected_elements) == 0:\n selected_elements.append(self.response_html.title.string)\n\n # check selected element is null or not\n # if null, assign null to dictionary, then\n if len(selected_elements) == 0:\n results[key] = None\n else:\n results[key] = selected_elements\n except:\n results[key] = None\n\n return results\n\n def execute_script(self, script):\n if self.selenium:\n self.driver.execute_script(script)\n self.response_html = BeautifulSoup(re.sub(r' ', '\\n', self.driver.page_source), \"lxml\")\n\n def get_html(self, url):\n try:\n self.driver.get(url)\n page_source = self.driver.page_source\n page_source = re.sub(r' ', '\\n', page_source)\n page_source = re.sub(r'<\\s*/p>', '\\n', page_source)\n\n self.html = BeautifulSoup(page_source, \"lxml\")\n time.sleep(0.5)\n except TimeoutException as toe:\n print(toe)\n self.driver.refresh()\n print(url)\n except Exception as ex:\n print(ex)\n","sub_path":"application/crawler/scrapping.py","file_name":"scrapping.py","file_ext":"py","file_size_in_byte":12989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"176316123","text":"import datetime\nimport sqlalchemy\nimport pytest\n\nfrom acondbs.db.sa import sa\nfrom acondbs.models import Map\n\n# __________________________________________________________________||\ndef test_type(app):\n    '''confirm the type of the date field\n    '''\n\n    with app.app_context():\n        map = Map.query.filter_by(name='lat20200120').first()\n\n        # The type of the field \"date_posted\" of Map is \"datetime.date\"\n        assert isinstance(map.date_posted, datetime.date)\n\n# __________________________________________________________________||\ndef test_add(app):\n    '''A simple test of adding an object with a date field\n    '''\n\n    # date_posted needs to be initialized with a datetime.date\n    date_posted = datetime.date(2019, 2, 23)\n    map1 = Map(name=\"map1\", date_posted=date_posted)\n\n    with app.app_context():\n        sa.session.add(map1)\n        sa.session.commit()\n\n    with app.app_context():\n        map1 = Map.query.filter_by(name='map1').first()\n        assert datetime.date(2019, 2, 23) == map1.date_posted\n\n# __________________________________________________________________||\ndef test_add_raise(app):\n    '''A simple test of adding an object with a wrong type\n    '''\n\n    # It is still possible to instantiate a date field with a wrong\n    # type, e.g., str\n    map1 = Map(name=\"map1\", date_posted=\"2019-02-13\")\n\n    with app.app_context():\n\n        # It is also possible to add\n        sa.session.add(map1)\n\n        # However, it is not possible to commit\n        with pytest.raises(sqlalchemy.exc.StatementError):\n            sa.session.commit()\n\n# __________________________________________________________________||\n","sub_path":"tests/models/test_example_date.py","file_name":"test_example_date.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"129274319","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nimport argparse\nfrom pathlib import Path\nfrom typing import List, Optional\n\nfrom romt import error\nimport romt.download\n\n\ndef verify_commands(commands: List[str], valid_commands: List[str]) -> None:\n for command in commands:\n if command not in valid_commands:\n raise error.UsageError(\"invalid COMMAND {}\".format(repr(command)))\n\n\ndef add_downloader_arguments(parser: argparse.ArgumentParser) -> None:\n parser.add_argument(\n \"--assume-ok\",\n action=\"store_true\",\n default=False,\n help=\"assume already-downloaded files are OK (skip hash check)\",\n )\n\n\nclass BaseMain:\n def __init__(self, args: argparse.Namespace) -> None:\n self.args = args\n self._downloader = None # type: Optional[romt.download.Downloader]\n\n @property\n def downloader(self) -> romt.download.Downloader:\n if self._downloader is None:\n self._downloader = romt.download.Downloader()\n return self._downloader\n\n def get_archive_path(self) -> Path:\n if not self.args.archive:\n raise error.UsageError(\"missing archive name\")\n return Path(self.args.archive)\n","sub_path":"src/romt/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"382057579","text":"class Solution:\n    def rotate(self, matrix):\n        n = len(matrix)\n        matrix.reverse() # reversing the rows already puts the diagonal elements where the rotation needs them\n        # after that, it suffices to swap the off-diagonal elements in pairs across the main diagonal\n        for i in range(n):\n            for j in range(i + 1, n):\n                matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n\ntest = [\n    [ 5, 1, 9,11],\n    [ 2, 4, 8,10],\n    [13, 3, 6, 7],\n    [15,14,12,16]\n]\ntest.reverse()\nprint(test)\nSolution().rotate(test)\nprint(test)","sub_path":"048_rotateimage.py","file_name":"048_rotateimage.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"317124051","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# **************************************************************************\n# Copyright © 2016 jianglin\n# File Name: api.py\n# Author: jianglin\n# Email: mail@honmaple.com\n# Created: 2016-11-11 16:05:44 (CST)\n# Last Update: Sunday 2018-09-30 17:50:04 (CST)\n# By:\n# Description:\n# **************************************************************************\nfrom flask import request\nfrom flask import Blueprint\nfrom flask.views import MethodView\nfrom apscheduler.jobstores.base import ConflictingIdError, JobLookupError\nfrom sche import sche\nfrom .utils import HTTP, Serializer\nimport json\n\n\nclass ScheView(MethodView):\n def get(self):\n ins = sche.status()\n return HTTP.OK(data=ins)\n\n def post(self):\n \"\"\"start scheduler.\"\"\"\n if not sche.running:\n sche.start(paused=True)\n ins = sche.status()\n return HTTP.OK(data=ins)\n\n def delete(self):\n \"\"\"shutdown scheduler.\"\"\"\n if sche.running:\n sche.resume()\n ins = sche.status()\n return HTTP.OK(data=ins)\n\n\nclass ScheJobView(MethodView):\n def get(self):\n request_data = request.args.to_dict()\n trigger = request_data.get('trigger')\n jobs = sche.get_jobs()\n ins = Serializer(jobs, trigger=trigger).data\n return HTTP.OK(data=ins)\n\n def post(self):\n '''\n :param trigger:date or interval or crontab\n :param job:if job is None,the default func is http_request\n '''\n request_data = request.get_json()\n trigger = request_data.get('trigger')\n kwargs = request_data.get('kwargs')\n if trigger == 'interval' and kwargs:\n request_data['kwargs'] = json.loads(kwargs)\n try:\n job = sche.add_job(**request_data)\n ins = Serializer(job).data\n return HTTP.OK(data=ins)\n except ConflictingIdError:\n msg = 'Job ID %s is exists' % request_data.get('id')\n return HTTP.BAD_REQUEST(message=msg)\n except Exception as e:\n msg = str(e)\n return HTTP.SERVER_ERROR(message=msg)\n\n def put(self):\n request_data = request.get_json()\n job_ids = request_data.pop('jobs', [])\n success_ids = []\n for pk in job_ids:\n try:\n sche.remove_job(pk)\n msg = 'Job ID %s delete success' % pk\n success_ids.append(pk)\n except JobLookupError:\n msg = 'Job ID %s not found' % pk\n return HTTP.BAD_REQUEST(message=msg)\n except Exception as e:\n msg = str(e)\n return HTTP.SERVER_ERROR(message=msg)\n msg = '{} delete success!'.format(','.join(success_ids))\n return HTTP.OK(data=success_ids, message=msg)\n\n\nclass ScheJobItemView(MethodView):\n def get(self, pk):\n job = sche.get_job(pk)\n if not job:\n msg = 'Job ID %s not found' % pk\n return HTTP.BAD_REQUEST(message=msg)\n ins = Serializer(job).data\n return HTTP.OK(data=ins)\n\n def put(self, pk):\n request_data = request.get_json()\n try:\n sche.modify_job(pk, **request_data)\n job = sche.get_job(pk)\n ins = Serializer(job).data\n return HTTP.OK(data=ins)\n except JobLookupError:\n msg = 'Job ID %s not found' % pk\n return HTTP.BAD_REQUEST(message=msg)\n except Exception as e:\n msg = str(e)\n return HTTP.SERVER_ERROR(message=msg)\n\n def delete(self, pk):\n try:\n sche.remove_job(pk)\n msg = 'Job ID %s delete success' % pk\n return HTTP.OK(message=msg)\n except JobLookupError:\n msg = 'Job ID %s not found' % pk\n return HTTP.BAD_REQUEST(message=msg)\n except Exception as e:\n msg = str(e)\n return HTTP.SERVER_ERROR(message=msg)\n\n\nclass ScheJobPauseView(MethodView):\n def post(self, pk):\n \"\"\"Pauses a job.\"\"\"\n try:\n sche.pause_job(pk)\n job = sche.get_job(pk)\n ins = Serializer(job).data\n return 
HTTP.OK(data=ins)\n        except JobLookupError:\n            msg = 'Job ID %s not found' % pk\n            return HTTP.BAD_REQUEST(message=msg)\n        except Exception as e:\n            msg = str(e)\n            return HTTP.SERVER_ERROR(message=msg)\n\n\nclass ScheJobResumeView(MethodView):\n    def post(self, pk):\n        \"\"\"Resumes a job.\"\"\"\n        try:\n            sche.resume_job(pk)\n            job = sche.get_job(pk)\n            ins = Serializer(job).data\n            return HTTP.OK(data=ins)\n        except JobLookupError:\n            msg = 'Job ID %s not found' % pk\n            return HTTP.BAD_REQUEST(message=msg)\n        except Exception as e:\n            msg = str(e)\n            return HTTP.SERVER_ERROR(message=msg)\n\n\nclass ScheJobExecuteView(MethodView):\n    def post(self, pk):\n        \"\"\"Executes a job.\"\"\"\n        try:\n            sche.run_job(pk)\n            job = sche.get_job(pk)\n            ins = Serializer(job).data\n            return HTTP.OK(data=ins)\n        except JobLookupError:\n            msg = 'Job ID %s not found' % pk\n            return HTTP.BAD_REQUEST(message=msg)\n        except Exception as e:\n            msg = str(e)\n            return HTTP.SERVER_ERROR(message=msg)\n\n\ndef init_app(app, url_prefix='/api/scheduler'):\n    site = Blueprint('sche', __name__, url_prefix=url_prefix)\n\n    sche_endpoint = [\n        (\"/status\", ScheView.as_view('status')),\n        (\"\", ScheJobView.as_view('job')),\n        (\"/<pk>\", ScheJobItemView.as_view('job_item')),\n        ('/<pk>/pause', ScheJobPauseView.as_view('job_pause')),\n        ('/<pk>/resume', ScheJobResumeView.as_view('job_resume')),\n        ('/<pk>/execute', ScheJobExecuteView.as_view('job_execute')),\n    ]\n\n    for url, endpoint in sche_endpoint:\n        site.add_url_rule(\n            url,\n            view_func=endpoint,\n        )\n    app.register_blueprint(site)\n","sub_path":"sche/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"557484825","text":"def rzucKostka():\r\n import random\r\n return random.randint(1, 6)\r\n\r\n \r\nprint(\"Wyrzucone oczka: \", end='')\r\n\r\nsuma = 0\r\nfor x in range(3):\r\n liczba = (rzucKostka())\r\n print(liczba, end=' ')\r\n\r\n suma = suma + liczba\r\n\r\nprint()\r\nprint(\"Suma oczek: \", suma)","sub_path":"04-Subroutines/04.17.py","file_name":"04.17.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"250871216","text":"#!/usr/bin/env python\nimport yaml\nimport os\nimport sys\nimport subprocess\n\n\ndef print_msg(msg):\n cmd = 'echo {0}:'.format(msg)\n subprocess.call(cmd.split(' '))\n\n\ndef clone_repo(pinned_url, is_pinned, workspace_dir, path=None):\n url, version = pinned_url.split('@') if '@' in pinned_url else [pinned_url, None]\n if is_pinned and not version:\n raise Exception(\"You must explicitly pin a version (e.g. append `@v1.0`) to {0}\".format(url))\n repo_parent_dir = os.path.join(workspace_dir, path) if path else workspace_dir\n repo_name = url.split('/')[4].replace(\".git\", \"\")\n repo_dir = os.path.join(repo_parent_dir, repo_name)\n if not os.path.exists(repo_parent_dir):\n os.makedirs(repo_parent_dir)\n \n print_msg(\"\\nCloning or Updating {0}\".format(repo_name))\n if not os.path.exists(repo_dir):\n cmd = 'git clone {0}'.format(url)\n subprocess.call(cmd.split(' '), cwd=repo_parent_dir)\n else:\n cmd = 'git remote set-url origin {0}'.format(url)\n subprocess.call(cmd.split(' '), cwd=repo_dir)\n cmd = 'git checkout master'\n subprocess.call(cmd.split(' '), cwd=repo_dir)\n cmd = 'git reset --hard'\n subprocess.call(cmd.split(' '), cwd=repo_dir)\n cmd = 'git pull --rebase'\n subprocess.call(cmd.split(' '), cwd=repo_dir)\n\n if is_pinned:\n if not version:\n raise Exception('url must be pinned to a particular commit, e.g. https://github.com/project/repo.git@hashOrTag')\n cmd = 'git checkout {0}'.format(version)\n subprocess.call(cmd.split(' '), cwd=repo_dir)\n\n return repo_name\n\n\ndef read_repo_data(repo_name, workspace_dir):\n \"\"\" Return repositories.yml data from an already-cloned repository \"\"\"\n repo_path = os.path.join(workspace_dir, repo_name)\n repo_data_path = os.path.join(repo_path, 'ansible/group_vars/all/repositories.yml')\n try:\n with open(repo_data_path, 'r') as f:\n repo_data = yaml.load(f)\n except IOError:\n repo_data_path = os.path.join(repo_path, 'repositories.yml')\n try:\n with open(repo_data_path, 'r') as f:\n repo_data = yaml.load(f)\n except IOError: \n return {} # Ok to ignore, repo may not have a repositories.yml file\n\n return repo_data\n\n\ndef build_repo_data(repo_name, is_pinned, workspace_dir):\n \"\"\" Return merged repositories.yml data from both public and private repos \"\"\"\n repo_data = read_repo_data(repo_name, workspace_dir)\n proj_url = repo_data.get('project_repository')\n if proj_url:\n proj_name = clone_repo(proj_url, is_pinned, workspace_dir)\n proj_data = read_repo_data(proj_name, workspace_dir)\n proj_data.update(repo_data)\n repo_data = proj_data\n if \"private_repositories\" in repo_data or \"public_repositores\" in repo_data:\n repo_data['repositories'] = repo_data.get(\"private_repositories\", []) + repo_data.get(\"public_repositores\", [])\n else:\n proj_name = repo_name\n\n return repo_data, proj_name\n\n\ndef load_repositories(repo_name, is_pinned, workspace_dir):\n repo_data, proj_name = build_repo_data(repo_name, is_pinned, workspace_dir)\n \n for pinned_url in repo_data['repositories']:\n clone_repo(pinned_url, is_pinned, workspace_dir, proj_name+'-repos')\n\n try:\n unitybox_repository = repo_data['unitybox_repository']\n except KeyError: \n raise Exception(\"repositories.yml must contain an entry for unitybox_repository: e.g. 
`unitybox_repository: https://github.com/cfpb/Unitybox.git@xxx`\")\n\n clone_repo(unitybox_repository, True, workspace_dir)\n\nif __name__ == \"__main__\":\n repo_name = sys.argv[1]\n is_pinned = len(sys.argv) <= 2 or sys.argv[2].upper() not in [\"FALSE\", \"0\"]\n workspace_dir = os.environ['WORKSPACE']\n load_repositories(repo_name, is_pinned, workspace_dir)\n","sub_path":"lib/loadRepositories.py","file_name":"loadRepositories.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"557613163","text":"import requests, os\nfrom holder import *\nfrom time_help import *\n\n#flask import session is client side session.\nfrom flask import Flask, session, render_template, request, jsonify\n\n#flask_session is server side.\nfrom flask_session import Session\nfrom flask_socketio import SocketIO, emit\n\n# Configure application\napp = Flask(__name__)\n\n# Check for environment variables\nif not os.getenv(\"SECRET_KEY\"):\n    raise RuntimeError(\"SECRET_KEY is not set in os\")\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\n# app.config[\"TEMPLATES_AUTO_RELOAD\"] = True\nSession(app)\n\n# Set up SECRET_KEY\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\nsocketio = SocketIO(app)\n\n# App lists\nchannel_list = ['General']\nuser_list = []\nchannel_dict = {}\n\n# Testing\nmessage_1 = Message(user='Philip', content_time='07-21-18', content='This is my first test message -phil')\nmessage_2 = Message(user='Oliver', content_time='07-22-18', content='And this is my first test message -oliver')\nmessage_3 = Message(user='Natasha', content_time='07-22-18', content='Hi Guys. -natasha')\ngeneral = Channel(name='General')\ngeneral.add(message_1)\ngeneral.add(message_2)\ngeneral.add(message_3)\nchannel_dict['General'] = general\n\n\n@app.route(\"/\")\ndef index():\n    return render_template(\"index.html\")\n\n@app.route(\"/check_in\", methods=[\"POST\"])\ndef check_in():\n    \"\"\"Check in user\"\"\"\n    data = ''\n\n    # See if this is from storage or new.\n    if not request.form.get(\"from_storage\"):\n        data = request.form.get(\"user_name\").title()\n\n        # Check if username already exists.\n        if data in user_list:\n            return jsonify({\"success\": False, \"name\": data})\n        else:\n            user_list.append(data)\n    else:\n        data = request.form.get(\"from_storage\").title()\n        if data not in user_list:\n            user_list.append(data)\n\n    gate_key = gate()\n    print(user_list)\n    return jsonify({\"success\": True, \"name\": data, \"key\": gate_key, \"channel\": channel_list})\n\n\n@app.route(\"/channels\", methods=[\"GET\", \"POST\"])\ndef channels():\n    \"\"\"Channel Management\"\"\"\n    if request.method == \"POST\":\n\n        # Ensure message came through.\n        if not request.form.get(\"channel_name\"):\n            return jsonify({\"success\": False})\n\n        # Get new channel, make sure channel name does not already exist.\n        channel_name = request.form.get(\"channel_name\").title()\n\n        if channel_name in channel_list:\n            return jsonify({\"success\": False, \"channel_name\": channel_name})\n\n        channel_list.append(channel_name)\n        print(channel_list)\n\n        # Make new channel object insert into channel dictionary.\n        channel_dict[channel_name] = Channel(name=channel_name)\n\n        return jsonify({\"success\": True, \"channel_name\": channel_name, \"channel\": channel_list})\n\n    else:\n        return jsonify({\"success\": True, \"channel\": channel_list})\n\n\n@app.route(\"/messages/<page_name>\", methods=[\"GET\", \"POST\"])\ndef messages(page_name):\n    \"\"\"Message Management\"\"\"\n    if request.method == \"POST\":\n        print(\"POSTED to messages route\")\n\n\n    else:\n        # Get channel object using page_name.\n        channel_object = channel_dict[page_name]\n        contents = channel_object.get_message_data()\n\n        # check if class is currently empty.\n        if len(contents) < 1:\n            return jsonify({\"success\": False})\n\n        return jsonify(contents)\n\n@socketio.on(\"submit post\")\ndef post(data):\n    new_content, cur_user, cur_channel = data[\"post\"], data[\"user\"], data[\"channel\"]\n\n\n    # Get time.\n    
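# current_time() and eastern_time() come from the star-imported time_help\n    # module; assumed here to return a timestamp and its US/Eastern rendering\n    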
cur_time = current_time()\n eastern = eastern_time(cur_time)\n\n # Create new message object.\n new_message = Message(user=cur_user, content_time=eastern, content=new_content)\n cur_channel_obj = channel_dict[cur_channel]\n\n # Push new message object to channel memory.\n cur_channel_obj.add(new_message)\n\n message_package = {\"channel\": cur_channel, \"user\": cur_user, \"content_time\": eastern, \"content\": new_content}\n emit(\"incoming messages\", message_package, broadcast=True)\n\n","sub_path":".~c9_invoke_6DkmBD.py","file_name":".~c9_invoke_6DkmBD.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"263754180","text":"# 每次更新 显卡,self.model_path\nfrom word2vec import Word2Vec_subword_extend,train_epoch\nfrom data_process import build_data_subword_extend\nimport tensorflow as tf\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'\n\nclass Config():\n def __init__(self):\n #\n self.original_file ='../data/text8'\n self.vocab_file = None\n self.split_ratio = [0.8, 0.1, 0.1]\n self.if_train = True\n self.if_test = False\n self.if_extend = True\n self.model_path = '../data/lm_model_subword_extend_using_ulm/'\n\n self.batch_size = 1024\n self.vocabulary_size = 50000\n self.embedding_size = 256\n self.skip_window = 2\n self.max_epochs = 30\n self.num_skip = None\n self.num_sampled = 64\n\n\ndef main():\n\n config = Config()\n model = Word2Vec_subword_extend(config)\n\n gpu_options =tf.GPUOptions(per_process_gpu_memory_fraction=0.5, allow_growth=True)\n tf_config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True,)\n\n with tf.Session(config=tf_config) as sess:\n if config.if_train:\n outfile = os.path.join(os.path.dirname(config.original_file), 'training_data_' + str(config.vocabulary_size) + '_winsize' + str(\n config.skip_window) + '_subword_extend_using_ulm')\n subword_dict_path = '../data/sp_subword.txt'\n build_data_subword_extend(config.original_file, outfile, config.vocabulary_size, config.skip_window, subword_dict_path=subword_dict_path)\n init=tf.global_variables_initializer()\n sess.run(init)\n word2id_path = os.path.join(os.path.dirname(config.original_file),\n 'word2id_dict_'+os.path.basename(outfile))\n if not os.path.exists(word2id_path):\n print(\"Not found the word2id_dict\")\n return\n\n train_epoch(model, sess, outfile, config.model_path, word2id_path)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","sub_path":"lm/train_subword_extend_using_ulm.py","file_name":"train_subword_extend_using_ulm.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"373997356","text":"\r\narray = [1,4,5,8,9,12,18]\r\nprint(array)\r\nlow = int(input(\"Enter low: \"))\r\nhigh = int(input(\"Enter high: \"))\r\n\r\n'''BINARY SEARCH ALGORITHM ADAPTED TO SEARCH IN\r\n INTERVAL, NOT FOR A SPECIFIC VALUE'''\r\n\r\ndef binary_search(array,low, high):\r\n start = 0\r\n end = len(array)-1\r\n middle = 0\r\n while start <= end:\r\n middle = (start+end)/2\r\n middle = int(middle)\r\n if array[middle] in range (low, high+1):\r\n return True\r\n elif array[middle] > high:\r\n end = middle - 1\r\n elif array[middle] < low:\r\n start = middle + 1\r\n return False\r\n \r\nprint(binary_search(array,low,high))\r\n\r\n# Big O = log n\r\n","sub_path":"task9.py","file_name":"task9.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"284555593","text":"#-*- coding: UTF-8 -*-\n\nimport re\n\nfrom flask import render_template, request, redirect, url_for, json\n\nfrom xichuangzhu import app\n\nfrom xichuangzhu.models.work_model import Work\nfrom xichuangzhu.models.author_model import Author\nfrom xichuangzhu.models.dynasty_model import Dynasty\nfrom xichuangzhu.models.review_model import Review\nfrom xichuangzhu.models.topic_model import Topic\nfrom xichuangzhu.models.quote_model import Quote\n\nfrom xichuangzhu.utils import time_diff, content_clean\n\n# page home\n#--------------------------------------------------\n\n# view (public)\n@app.route('/')\ndef index():\n\tworks = Work.get_works_by_random(4)\n\tfor work in works:\n\t\twork['Content'] = content_clean(work['Content'])\n\n\treviews = Review.get_reviews_by_random(4)\n\tfor r in reviews:\n\t\tr['Time'] = time_diff(r['Time'])\n\t\n\tauthors = Author.get_authors_by_random(5)\n\tfor a in authors:\n\t\tquote = Quote.get_quote_by_random(a['AuthorID'])\n\t\ta['Quote'] = quote['Quote'] if quote else \"\"\n\t\n\tdynasties = Dynasty.get_dynasties()\n\ttopics = Topic.get_topics(8)\n\treturn render_template('index.html', works=works, reviews=reviews, authors=authors, dynasties=dynasties, topics=topics)\n\n# json - gene 4 works of different type (public)\n@app.route('/4works', methods=['POST'])\ndef four_works():\n\tworks = Work.get_works_by_random(4)\n\tfor work in works:\n\t\twork['Content'] = content_clean(work['Content'])\n\treturn render_template('four_works.widget', works=works)\n\n# page about\n#--------------------------------------------------\n\n# view (public)\n@app.route('/about')\ndef about():\n\treturn render_template('about.html')","sub_path":"xichuangzhu/controllers/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"627594087","text":"'''\nService support for RHEL-based systems. This interface uses the service and\nchkconfig commands, and for upstart support uses helper functions from the\nupstart module, as well as the ``start``, ``stop``, and ``status`` commands.\n'''\n\n# Import python libs\nimport glob\nimport logging\nimport os\n\n# Import salt libs\nimport salt.utils\n\nlog = logging.getLogger(__name__)\n\n# Import upstart module if needed\nHAS_UPSTART = False\nif salt.utils.which('initctl'):\n try:\n # Don't re-invent the wheel, import the helper functions from the\n # upstart module.\n from salt.modules.upstart \\\n import _upstart_enable, _upstart_disable, _upstart_is_enabled\n except Exception as exc:\n log.error('Unable to import helper functions from '\n 'salt.modules.upstart: {0}'.format(exc))\n else:\n HAS_UPSTART = True\n\n\n\ndef __virtual__():\n '''\n Only work on systems which default to systemd\n '''\n # Enable on these platforms only.\n enable = set((\n 'RedHat',\n 'CentOS',\n 'Scientific',\n 'CloudLinux',\n 'Amazon',\n 'Fedora',\n 'ALT',\n 'OEL',\n 'SUSE Enterprise Server'\n ))\n if __grains__['os'] in enable:\n if __grains__['os'] == 'Fedora':\n if __grains__.get('osrelease', 0) > 15:\n return False\n return 'service'\n return False\n\n\ndef _runlevel():\n '''\n Return the current runlevel\n '''\n out = __salt__['cmd.run']('/sbin/runlevel')\n # unknown will be returned while inside a kickstart environment, since\n # this is usually a server deployment it should be safe to assume runlevel\n # 3. If not all service related states will throw an out of range\n # exception here which will cause other functions to fail.\n if 'unknown' in out:\n return '3'\n else:\n return out.split()[1]\n\n\ndef _add_custom_initscript(name):\n '''\n If the passed service name is not in the output from get_all(), runs a\n 'chkconfig --add' so that it is available.\n '''\n initscript_path = os.path.join('/etc/init.d', name)\n if name not in get_all() and os.access(initscript_path, os.X_OK):\n cmd = '/sbin/chkconfig --add {0}'.format(name)\n if __salt__['cmd.retcode'](cmd):\n log.error('Unable to add initscript \"{0}\"'.format(name))\n else:\n log.info('Added initscript \"{0}\"'.format(name))\n # Disable initscript by default. If a user wants it enabled, he/she\n # can configure that in a state. Since we're adding the service\n # automagically, we shouldn't also enable it, as the user may not\n # be aware that the service was added to chkconfig and thus would\n # not be expecting it to start on boot (which is the default).\n cmd = '/sbin/chkconfig {0} off'.format(name)\n __salt__['cmd.run'](cmd)\n\n\ndef _sysv_is_enabled(chkconfig_line, rlevel):\n '''\n Given a list of columns from a line of 'chkconfig --list' output, and the\n runlevel, return True if enabled. 
Otherwise, return False.\n '''\n if len(chkconfig_line) > 3 and '{0}:on'.format(rlevel) in chkconfig_line:\n return True\n elif len(chkconfig_line) < 3 and chkconfig_line[1] \\\n and chkconfig_line[1] == 'on':\n return True\n return False\n\n\ndef _service_is_upstart(name):\n '''\n Return true if the service is an upstart service, otherwise return False.\n '''\n return name in get_all(limit='upstart')\n\n\ndef _services():\n '''\n Return a dict of services and their types (sysv or upstart), as well\n as whether or not the service is enabled.\n '''\n if 'service.all' in __context__:\n return __context__['service.all']\n\n # First, parse sysvinit services from chkconfig\n rlevel = _runlevel()\n ret = {}\n for line in __salt__['cmd.run']('/sbin/chkconfig --list').splitlines():\n cols = line.split()\n try:\n name = cols[0]\n except IndexError:\n continue\n if name in ret:\n continue\n ret.setdefault(name, {})['type'] = 'sysvinit'\n ret[name]['enabled'] = _sysv_is_enabled(cols, rlevel)\n if HAS_UPSTART:\n for line in glob.glob('/etc/init/*.conf'):\n name = os.path.basename(line)[:-5]\n if name in ret:\n continue\n ret.setdefault(name, {})['type'] = 'upstart'\n ret[name]['enabled'] = _upstart_is_enabled(name)\n __context__['service.all'] = ret\n return ret\n\n\ndef get_enabled(limit=''):\n '''\n Return the enabled services. Use the ``limit`` param to restrict results\n to services of that type.\n\n CLI Examples::\n\n salt '*' service.get_enabled\n salt '*' service.get_enabled limit=upstart\n salt '*' service.get_enabled limit=sysvinit\n '''\n limit = limit.lower()\n if limit in ('upstart', 'sysvinit'):\n return sorted([x for x, y in _services().iteritems()\n if y['enabled'] and y['type'] == limit])\n else:\n return sorted([x for x, y in _services().iteritems()\n if y['enabled']])\n\n\ndef get_disabled(limit=''):\n '''\n Return the disabled services. Use the ``limit`` param to restrict results\n to services of that type.\n\n CLI Example::\n\n salt '*' service.get_disabled\n salt '*' service.get_disabled limit=upstart\n salt '*' service.get_disabled limit=sysvinit\n '''\n limit = limit.lower()\n if limit in ('upstart', 'sysvinit'):\n return sorted([x for x, y in _services().iteritems()\n if not y['enabled'] and y['type'] == limit])\n else:\n return sorted([x for x, y in _services().iteritems()\n if not y['enabled']])\n\n\ndef get_all(limit=''):\n '''\n Return all installed services. 
Use the ``limit`` param to restrict results\n    to services of that type.\n\n    CLI Example::\n\n        salt '*' service.get_all\n        salt '*' service.get_all limit=upstart\n        salt '*' service.get_all limit=sysvinit\n    '''\n    limit = limit.lower()\n    if limit in ('upstart', 'sysvinit'):\n        return sorted([x for x, y in _services().iteritems()\n                       if y['type'] == limit])\n    else:\n        return sorted([x for x, y in _services().iteritems()])\n\n\ndef start(name):\n    '''\n    Start the specified service\n\n    CLI Example::\n\n        salt '*' service.start <service name>\n    '''\n    if _service_is_upstart(name):\n        cmd = 'start {0}'.format(name)\n    else:\n        _add_custom_initscript(name)\n        cmd = '/sbin/service {0} start'.format(name)\n    return not __salt__['cmd.retcode'](cmd)\n\n\ndef stop(name):\n    '''\n    Stop the specified service\n\n    CLI Example::\n\n        salt '*' service.stop <service name>\n    '''\n    if _service_is_upstart(name):\n        cmd = 'stop {0}'.format(name)\n    else:\n        _add_custom_initscript(name)\n        cmd = '/sbin/service {0} stop'.format(name)\n    return not __salt__['cmd.retcode'](cmd)\n\n\ndef restart(name, **kwargs):\n    '''\n    Restart the named service\n\n    CLI Example::\n\n        salt '*' service.restart <service name>\n    '''\n    if _service_is_upstart(name):\n        cmd = 'restart {0}'.format(name)\n    else:\n        _add_custom_initscript(name)\n        cmd = '/sbin/service {0} restart'.format(name)\n    return not __salt__['cmd.retcode'](cmd)\n\n\ndef status(name, sig=None):\n    '''\n    Return the status for a service: True if the service is running, else\n    False.\n\n    CLI Example::\n\n        salt '*' service.status <service name>\n    '''\n    if _service_is_upstart(name):\n        cmd = 'status {0}'.format(name)\n        return 'start/running' in __salt__['cmd.run'](cmd)\n    _add_custom_initscript(name)\n    if sig:\n        return bool(__salt__['status.pid'](sig))\n    cmd = '/sbin/service {0} status'.format(name)\n    return __salt__['cmd.retcode'](cmd) == 0\n\n\ndef enable(name, **kwargs):\n    '''\n    Enable the named service to start at boot\n\n    CLI Example::\n\n        salt '*' service.enable <service name>\n    '''\n    if _service_is_upstart(name):\n        return _upstart_enable(name)\n    _add_custom_initscript(name)\n    cmd = '/sbin/chkconfig {0} on'.format(name)\n    return not __salt__['cmd.retcode'](cmd)\n\n\ndef disable(name, **kwargs):\n    '''\n    Disable the named service from starting at boot\n\n    CLI Example::\n\n        salt '*' service.disable <service name>\n    '''\n    if _service_is_upstart(name):\n        return _upstart_disable(name)\n    _add_custom_initscript(name)\n    cmd = '/sbin/chkconfig {0} off'.format(name)\n    return not __salt__['cmd.retcode'](cmd)\n\n\ndef enabled(name):\n    '''\n    Check to see if the named service is enabled to start on boot\n\n    CLI Example::\n\n        salt '*' service.enabled <service name>\n    '''\n    if _service_is_upstart(name):\n        return _upstart_is_enabled(name)\n    _add_custom_initscript(name)\n    return name in get_enabled()\n\n\ndef disabled(name):\n    '''\n    Check to see if the named service is disabled to start on boot\n\n    CLI Example::\n\n        salt '*' service.disabled <service name>\n    '''\n    if _service_is_upstart(name):\n        return not _upstart_is_enabled(name)\n    _add_custom_initscript(name)\n    return name in get_disabled()\n","sub_path":"salt/modules/rh_service.py","file_name":"rh_service.py","file_ext":"py","file_size_in_byte":9278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"190940718","text":"import tensorflow as tf\nfrom tfidf_logistic import TfidfLogistic\n\n\nvocab_size = 20000\n\n\nif __name__ == '__main__':\n (X_train, y_train), (X_test, y_test) = tf.contrib.keras.datasets.imdb.load_data(num_words=vocab_size)\n\n model = TfidfLogistic(vocab_size)\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n \n final_acc = (y_pred == y_test).mean()\n print(\"final testing accuracy: %.4f\" % final_acc)\n","sub_path":"nlp-models/python/tfidf_imdb_test.py","file_name":"tfidf_imdb_test.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"298099384","text":"import sqlite3, config, datetime\nimport alpaca_trade_api as tradeapi\nimport pandas as pd \n\nfrom datetime import date\nfrom alphaVantageAPI.alphavantage import AlphaVantage ##pip install alphaVantage-api##\nimport notifications\n\nconnection = sqlite3.connect(config.DB_FILE)\nconnection.row_factory = sqlite3.Row\n\ncursor = connection.cursor()\n\n#access the id from strategy table to reference \ncursor.execute(\"\"\"\n SELECT id FROM strategy WHERE name == 'opening_range_breakout'\n\"\"\")\n#store the id from the accessed db row object \nstrategy_id = cursor.fetchone()['id']\n\n ##scan barsets of stock_prices table for strategy criteria\n\n##join TABLE stock symbols, names with the TABLE stock_strategy\n##where the stock_strategy id contains stock.id \ncursor.execute(\"\"\"\n SELECT symbol, name FROM stock\n JOIN stock_strategy on stock_strategy.stock_id = stock.id \n WHERE stock_strategy.strategy_id = ?\n \"\"\",(strategy_id,))\n\n#store db objects\nstocks = cursor.fetchall()\n#get row cell w/ table['column'] index\nsymbols = [stock['symbol'] for stock in stocks]\n\napi = tradeapi.REST(config.API_KEY, config.SECRET_KEY, base_url=config.BASE_URL)\n\ncurrent_date = datetime.datetime.utcnow().date()\n###alpaca orders\n###debug algo\n#orders = api.list_orders(status='all', limit=300, after=f'{current_date}T1:30:00Z')\norders = api.list_orders()\nexisting_order_symbols = [order.symbol for order in orders]\n\n# start_minute_bar = f'{current_date} 09:30-04:00' \n# end_minute_bar = f'{current_date} 09:45-04:00'\nNY = 'America/New_York'\nstart=pd.Timestamp('2021-01-06 9:30', tz=NY).isoformat()\nend=pd.Timestamp('2021-01-06 9:45', tz=NY).isoformat()\n\n# current_date = '2021-01-05' #date.datetime.utcnow().date()\n# start_minute_bar = f'{current_date} 09:30-04:00' \n# end_minute_bar = f'{current_date} 09:45-04:00'\nstart_date=pd.Timestamp('2021-01-06 9:30', tz=NY).isoformat()\nend_date=pd.Timestamp('2021-01-06 16:00', tz=NY).isoformat()\nmessages = []\nfor symbol in symbols:\n # minute_bars = api.polygon.historic_agg_v2(symbol, 15, 'minute', _from='2021-01-05', to='2021-01-05')\n minute_bars = api.get_barset(symbol, '5Min', start=start_date, end=end_date).df\n\n opening_range_mask = (minute_bars.index >= start) & (minute_bars.index < end)\n opening_range_bars = minute_bars.loc[opening_range_mask]\n \n opening_range_low = opening_range_bars[symbol]['low'].min()\n opening_range_high = opening_range_bars[symbol]['high'].max()\n opening_range = opening_range_high - opening_range_low\n print(symbol, opening_range_low, opening_range_high)\n #find 1st min bar wtih close above opening range high\n after_opening_range_mask = minute_bars.index >= end\n after_opening_range_bars = minute_bars.loc[after_opening_range_mask]\n \n after_opening_range_breakout = after_opening_range_bars[after_opening_range_bars[symbol]['close'] > opening_range_high]\n \n if not after_opening_range_breakout.empty:\n '''alpaca trade check for existing orders (line 37)'''\n #if symbol not in existing_orders_symbols \n limit_price = after_opening_range_breakout.iloc[0][symbol]['close']\n print(\"limit: \", limit_price)\n # print(f'placing order for{symbol} at {limit_price}, closed above {opening_range_high} at {after_opening_range_breakout.iloc[0]}')\n messages.append(f'Subject: ORB strategy pew pew\\n\\nplacing order for{symbol} at {limit_price}, closed above {opening_range_high}\\n\\n{after_opening_range_breakout.iloc[0]}\\n\\n')\n 
#https://youtu.be/RZ_4OI_K6Aw?list=PLvzuUVysUFOuoRna8KhschkVVUo2E2g6G&t=1772\n ###alpaca api order\n api.submit_order(\n symbol=symbol,\n side='buy',\n type='limit',\n qty='100',\n time_in_force='day',\n order_class='bracket',\n limit_price= limit_price,\n take_profit=dict(\n limit_price = round(limit_price, 2) + round(opening_range, 2)+ .01,\n ),\n stop_loss=dict(\n stop_price=round(limit_price, 2) - round(opening_range, 2)-.01,\n )\n )\n print('prices:', round(limit_price, 2) + round(opening_range, 2))\n print('prices:', round(limit_price, 2) - round(opening_range, 2))\n ''' cron job\n */1 * * * * /Users/administrator/db_Trade_app/venv/bin/python/opening_range_breakout.py >> trade.log 2>&1\n '''\n print(messages)\n notifications.email(messages)","sub_path":"opening_range_breakout.py","file_name":"opening_range_breakout.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"361066650","text":"\"\"\"\n price, turnover, mktcap 데이터를 이용한 멀티팩터 계산 모듈\n\"\"\"\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport quantM as qnt\n\nclass CalculateMultifactors:\n \" multifactor calculator \"\n\n def __init__(self, price, turnover, mktcap):\n \" Initialization \"\n\n self.today = datetime.datetime.now()\n self.prev = self.today - datetime.timedelta(days=1)\n self.today = int(self.today.strftime('%Y%m%d'))\n self.prev = int(self.prev.strftime('%Y%m%d'))\n\n self.price_path = price\n self.turnover_path = turnover\n self.mktcap_path = mktcap\n\n self.price = None\n self.turnover = None\n self.mktcap = None\n\n self.codes = None\n\n self.get_data()\n\n self.multifactors = None\n self.forders = None\n self.mf_score = None\n\n self._set_factor_orders()\n\n def calculate_dataset(self, path=None):\n self.calculate_overall_multifactors()\n self.calculate_multifactor_score()\n\n self.save(path)\n\n def save(self, path=None):\n \" save data \"\n if path == None:\n mf_path = \"./dataset/multifactors_\" + self.price_path[13:]\n mf_score_path = \"./dataset/multifactor_score_\" + self.price_path[13:]\n\n self.multifactors.to_csv(mf_path)\n self.mf_score.to_csv(mf_score_path)\n\n def calculate_overall_multifactors(self, thres=40):\n \" calculate all stocks multifactors \"\n\n for i, code in enumerate(self.codes):\n if i == 0:\n multifactors = self.calculate_multifactors(code, thres=thres)\n else:\n mf = self.calculate_multifactors(code, thres=thres)\n multifactors = multifactors.append(mf)\n print(\"{}, {}\".format(i, code), end='\\r')\n print(\"\\n\")\n\n multifactors = pd.DataFrame(multifactors.values, columns=multifactors.columns)\n\n self.multifactors = multifactors\n\n def get_data(self):\n \"get data\"\n\n price_data = pd.read_csv(self.price_path, index_col=0, header=0)\n turnover_data = pd.read_csv(self.turnover_path, index_col=0, header=0)\n mktcap_data = pd.read_csv(self.mktcap_path, index_col=0, header=0)\n\n assert((price_data.index == turnover_data.index).sum() ==\\\n price_data.shape[0])\n assert((turnover_data.index == mktcap_data.index).sum() ==\\\n turnover_data.shape[0])\n\n self.price = price_data\n self.turnover = turnover_data\n self.mktcap = mktcap_data\n\n self.codes = self.price.columns.values.tolist()\n\n def calculate_multifactors(self, code, thres=40):\n \" calculate multifactors \"\n price = self.price[code]\n turnover = self.turnover[code]\n mktcap = self.mktcap[code]\n\n returns = np.log(price / price.shift(1)).iloc[1:]\n\n turnover = turnover.reindex(returns.index)\n mktcap = mktcap.reindex(returns.index)\n\n multifactors = pd.DataFrame(index=returns.index)\n multifactors['code'] = code\n multifactors['trade_date'] = returns.index.values\n\n # Price Momentum\n multifactors['pm_5'] = returns.rolling(5).sum()\n multifactors['pm_10'] = returns.rolling(10).sum()\n multifactors['pm_20'] = returns.rolling(20).sum()\n multifactors['pm_40'] = returns.rolling(40).sum()\n multifactors['pm_60'] = returns.rolling(60).sum()\n multifactors['pm_90'] = returns.rolling(90).sum()\n multifactors['pm_120'] = returns.rolling(120).sum()\n multifactors['pm_250'] = returns.rolling(250).sum()\n\n # Frog in the Pan\n multifactors['fip_5'] = qnt.logfip(returns, p=5)\n multifactors['fip_10'] = qnt.logfip(returns, p=10)\n multifactors['fip_20'] = qnt.logfip(returns, p=20)\n multifactors['fip_40'] = qnt.logfip(returns, p=40)\n multifactors['fip_60'] = qnt.logfip(returns, p=60)\n multifactors['fip_90'] = qnt.logfip(returns, p=90)\n multifactors['fip_120'] = 
qnt.logfip(returns, p=120)\n multifactors['fip_250'] = qnt.logfip(returns, p=250)\n\n # Volatility\n multifactors['vol_5'] = returns.rolling(5).std()\n multifactors['vol_10'] = returns.rolling(10).std()\n multifactors['vol_20'] = returns.rolling(20).std()\n multifactors['vol_40'] = returns.rolling(40).std()\n multifactors['vol_60'] = returns.rolling(60).std()\n multifactors['vol_90'] = returns.rolling(90).std()\n multifactors['vol_120'] = returns.rolling(120).std()\n multifactors['vol_250'] = returns.rolling(250).std()\n\n # Skew\n multifactors['skew_5'] = returns.rolling(5).skew()\n multifactors['skew_10'] = returns.rolling(10).skew()\n multifactors['skew_20'] = returns.rolling(20).skew()\n multifactors['skew_40'] = returns.rolling(40).skew()\n multifactors['skew_60'] = returns.rolling(60).skew()\n multifactors['skew_90'] = returns.rolling(90).skew()\n multifactors['skew_120'] = returns.rolling(120).skew()\n multifactors['skew_250'] = returns.rolling(250).skew()\n\n # Average Turnover\n turnover_5 = turnover.rolling(5).mean()\n turnover_10 = turnover.rolling(10).mean()\n turnover_20 = turnover.rolling(20).mean()\n turnover_40 = turnover.rolling(40).mean()\n turnover_60 = turnover.rolling(60).mean()\n turnover_90 = turnover.rolling(90).mean()\n turnover_120 = turnover.rolling(120).mean()\n turnover_250 = turnover.rolling(250).mean()\n\n turnover_5 = turnover_5.apply(lambda x: 1. if x == 0. else x)\n turnover_10 = turnover_10.apply(lambda x: 1. if x == 0. else x)\n turnover_20 = turnover_20.apply(lambda x: 1. if x == 0. else x)\n turnover_40 = turnover_40.apply(lambda x: 1. if x == 0. else x)\n turnover_60 = turnover_60.apply(lambda x: 1. if x == 0. else x)\n turnover_90 = turnover_90.apply(lambda x: 1. if x == 0. else x)\n turnover_120 = turnover_120.apply(lambda x: 1. if x == 0. else x)\n turnover_250 = turnover_250.apply(lambda x: 1. if x == 0. 
else x)\n\n        multifactors['turnover_5'] = np.log(turnover_5)\n        multifactors['turnover_10'] = np.log(turnover_10)\n        multifactors['turnover_20'] = np.log(turnover_20)\n        multifactors['turnover_40'] = np.log(turnover_40)\n        multifactors['turnover_60'] = np.log(turnover_60)\n        multifactors['turnover_90'] = np.log(turnover_90)\n        multifactors['turnover_120'] = np.log(turnover_120)\n        multifactors['turnover_250'] = np.log(turnover_250)\n\n        # Mktcap\n        multifactors['log_mktcap'] = np.log(mktcap)\n\n        multifactors = multifactors.iloc[249:]\n        null_test = multifactors.isnull().sum(1)\n        multifactors = multifactors[null_test != thres]\n\n        return multifactors\n\n    def calculate_multifactor_score(self):\n        \" calculate multifactor score \"\n        mf_score = pd.DataFrame(index=self.multifactors.index,\n                                columns=self.multifactors.columns)\n        mf_score[self.multifactors.columns[:2]] =\\\n            self.multifactors[['code', 'trade_date']]\n\n        cnt = 0\n        for i, date in enumerate(self.multifactors['trade_date'].unique()):\n            for j, col in enumerate(self.multifactors.columns[2:]):\n                cross_data =\\\n                    self.multifactors[self.multifactors['trade_date'] ==\\\n                                      date][col]\n                normalized = self.normalize_multifactors(cross_data,\n                                                         order=self.forders[col])\n                mf_score.loc[normalized.index, col] = normalized\n                cnt += 1\n                if cnt % 1000 == 0:\n                    print(i, j, end='\\\r')\n\n        # fill NaN values with 0 for every column except market cap\n        mf_score[mf_score.columns[:-1]] =\\\n            mf_score[mf_score.columns[:-1]].\\\n            applymap(lambda x: 0 if pd.isnull(x) else x)\n        mf_score = mf_score.dropna()\n        self.mf_score = mf_score\n\n    def normalize_multifactors(self, cross_data, order=1):\n        \"\"\"\n        minmax scaling\n\n        order:\n            1: Desc\n            0: Asc\n        \"\"\"\n        cmax = max(cross_data)\n        cmin = min(cross_data)\n\n        if order == 1:\n            normalized = (cross_data - cmin) / (cmax - cmin)\n        elif order == 0:\n            normalized = (cmax - cross_data) / (cmax - cmin)\n\n        return normalized\n\n    def _set_factor_orders(self):\n        \" set factor orders \"\n        self.forders = {}\n        self.forders['pm_5'] = 1\n        self.forders['pm_10'] = 1\n        self.forders['pm_20'] = 1\n        self.forders['pm_40'] = 1\n        self.forders['pm_60'] = 1\n        self.forders['pm_90'] = 1\n        self.forders['pm_120'] = 1\n        self.forders['pm_250'] = 1\n\n        self.forders['fip_5'] = 1\n        self.forders['fip_10'] = 1\n        self.forders['fip_20'] = 1\n        self.forders['fip_40'] = 1\n        self.forders['fip_60'] = 1\n        self.forders['fip_90'] = 1\n        self.forders['fip_120'] = 1\n        self.forders['fip_250'] = 1\n\n        self.forders['vol_5'] = 0\n        self.forders['vol_10'] = 0\n        self.forders['vol_20'] = 0\n        self.forders['vol_40'] = 0\n        self.forders['vol_60'] = 0\n        self.forders['vol_90'] = 0\n        self.forders['vol_120'] = 0\n        self.forders['vol_250'] = 0\n\n        self.forders['skew_5'] = 0\n        self.forders['skew_10'] = 0\n        self.forders['skew_20'] = 0\n        self.forders['skew_40'] = 0\n        self.forders['skew_60'] = 0\n        self.forders['skew_90'] = 0\n        self.forders['skew_120'] = 0\n        self.forders['skew_250'] = 0\n\n        self.forders['turnover_5'] = 1\n        self.forders['turnover_10'] = 1\n        self.forders['turnover_20'] = 1\n        self.forders['turnover_40'] = 1\n        self.forders['turnover_60'] = 1\n        self.forders['turnover_90'] = 1\n        self.forders['turnover_120'] = 1\n        self.forders['turnover_250'] = 1\n\n        self.forders['log_mktcap'] = 1\n\n    def calculate_sharpe_ratio(self, price, period=250, annualize=250):\n        \"\"\"\n        Compute the rolling Sharpe ratio\n        Args:\n            price: price series dataframe\n            period: rolling calculation window\n        \"\"\"\n        returns = price.pct_change().iloc[1:]\n\n        sharpe = returns.rolling(period).mean() * annualize / \\\n            (returns.rolling(period).std() * np.sqrt(annualize))\n\n        return sharpe\n","sub_path":"calculate_multifactors.py","file_name":"calculate_multifactors.py","file_ext":"py","file_size_in_byte":10508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"503997235","text":"\n\nfrom xai.brain.wordbase.verbs._hurtle import _HURTLE\n\n#calss header\nclass _HURTLING(_HURTLE, ):\n\tdef __init__(self,): \n\t\t_HURTLE.__init__(self)\n\t\tself.name = \"HURTLING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"hurtle\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_hurtling.py","file_name":"_hurtling.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"529031064","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2019 Virtual Cable S.L.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# * Neither the name of Virtual Cable S.L. nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n'''\n@author: Adolfo Gómez, dkmaster at dkmon dot com\n'''\nimport threading\nimport ipaddress\nimport time\nimport typing\nimport functools\n\nif typing.TYPE_CHECKING:\n from udsactor.types import InterfaceInfoType\n\n# Simple cache for n seconds (default = 30) decorator\ndef cache(seconds: int = 30) -> typing.Callable:\n '''\n Simple cache for n seconds (default = 30) decorator\n '''\n def decorator(func) -> typing.Callable:\n @functools.wraps(func)\n def wrapper(*args, **kwargs) -> typing.Any:\n if not hasattr(wrapper, 'cache'):\n wrapper.cache = {} # type: ignore\n cache = wrapper.cache # type: ignore\n\n # Compose a key for the cache\n key = '{}:{}'.format(args, kwargs)\n if key in cache:\n if time.time() - cache[key][0] < seconds:\n return cache[key][1]\n \n # Call the function\n result = func(*args, **kwargs)\n cache[key] = (time.time(), result)\n return result\n\n return wrapper\n\n return decorator\n\n\n# Simple sub-script exectution thread\nclass ScriptExecutorThread(threading.Thread):\n def __init__(self, script: str) -> None:\n super(ScriptExecutorThread, self).__init__()\n self.script = script\n\n def run(self) -> None:\n from udsactor.log import logger\n\n try:\n logger.debug('Executing script: {}'.format(self.script))\n exec(\n self.script, globals(), None\n ) # nosec: exec is fine, it's a \"trusted\" script\n except Exception as e:\n logger.error('Error executing script: {}'.format(e))\n logger.exception()\n\n\nclass Singleton(type):\n '''\n Metaclass for singleton pattern\n Usage:\n\n class MyClass(metaclass=Singleton):\n ...\n '''\n\n _instance: typing.Optional[typing.Any]\n\n # We use __init__ so we customise the created class from this metaclass\n def __init__(self, *args, **kwargs) -> None:\n self._instance = None\n super().__init__(*args, **kwargs)\n\n def __call__(self, *args, **kwargs) -> typing.Any:\n if self._instance is None:\n self._instance = super().__call__(*args, **kwargs)\n 
return self._instance\n\n\n# Convert \"X.X.X.X/X\" to ipaddress.IPv4Network\ndef strToNoIPV4Network(\n net: typing.Optional[str],\n) -> typing.Optional[ipaddress.IPv4Network]:\n if not net: # Empty or None\n return None\n try:\n return ipaddress.IPv4Interface(net).network\n except Exception:\n return None\n\n\ndef validNetworkCards(\n net: typing.Optional[str], cards: typing.Iterable['InterfaceInfoType']\n) -> typing.List['InterfaceInfoType']:\n try:\n subnet = strToNoIPV4Network(net)\n except Exception as e:\n subnet = None\n\n if subnet is None:\n return list(cards)\n\n def isValid(ip: str, subnet: ipaddress.IPv4Network) -> bool:\n if not ip:\n return False\n try:\n return ipaddress.IPv4Address(ip) in subnet\n except Exception:\n return False\n\n return [c for c in cards if isValid(c.ip, subnet)]\n","sub_path":"actor/src/udsactor/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"231277193","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nfrom time import gmtime, strftime\nimport json\nimport tweepy\nfrom search import *\nfrom tokens import *\n\n# setup twitter\nauth = tweepy.OAuthHandler(C_KEY, C_SECRET) \nauth.set_access_token(A_TOKEN, A_TOKEN_SECRET) \napi = tweepy.API(auth)\n# tweet = \"New update, and bot is now live!\"\n# api.update_status(tweet)\n\n\ndef tweetInvalid(username, tweetId):\n\ttweet = \"@\"+username+\" \"\n\ttweet += \"Invalid input!\\nPlease check pinned tweet for correct format.\\n\\n\"\n\ttweet += strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n\t# print(tweet)\n\tapi.update_status(tweet, tweetId) \n\ndef loadTwitter(twitterInput):\n\tusername = \"\"\n\ttweetId = 0\n\t# # open file, read in log\n\twith open('mentionLog.txt', 'r') as outfile:\n\t\tmentionLog = json.load(outfile)\n\n\t# tweet = \"Starting advanced monitor...\"\n\tmentions = api.mentions_timeline(count=1)\n\tnewTweet = 0\n\tfor mention in mentions:\n\t\tif str(mention.created_at) not in mentionLog:\n\t\t\tprint(\"New tweet:\\n\")\n\t\t\tprint(mention.text)\n\t\t\ttweetId = mention.id\n\t\t\tprint(\"\\n\")\n\t\t\t\n\t\t\tif \"hello\" in str(mention.text).lower():\n\t\t\t\ttweet = \"@%s Hello!\" % (mention.user.screen_name)\n\t\t\t\ttweet += \"\\n\"\n\t\t\t\ttweet += strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n\t\t\t\tapi.update_status(tweet, tweetId)\n\t\t\telif \"thanks\" in str(mention.text).lower():\n\t\t\t\ttweet = \"@%s You're Welcome!\" % (mention.user.screen_name)\n\t\t\t\ttweet += \"\\n\"\n\t\t\t\ttweet += strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n\t\t\t\tapi.update_status(tweet, tweetId)\n\t\t\telse:\n\t\t\t\tuserNameRegex = re.compile('@UmbcMonitor',flags=re.IGNORECASE)\n\t\t\t\tupdatedTweet = (re.sub(userNameRegex, '', str(mention.text))).replace(\" \",\"\")\n\t\t\t\tsearchTerms = updatedTweet.split(\",\")\n\t\t\t\t\n\t\t\t\twhile(len(searchTerms) > 4):\n\t\t\t\t\tsearchTerms.pop()\n\n\n\t\t\t\tfor term in searchTerms:\n\t\t\t\t\ttwitterInput.append(term)\n\n\t\t\tusername = str(mention.user.screen_name)\n\t\t\tmentionLog.append(str(mention.created_at))\n\t\t\tnewTweet = 1\n\tif newTweet:\n\t\twith open('mentionLog.txt', 'w') as outfile:\n\t\t\tjson.dump(mentionLog, outfile)\n\t\n\tif len(twitterInput) == 0:\n\t\tprint(\"No new tweets\")\n\t\texit(1)\n\telif len(twitterInput) < 4:\n\t\tprint(\"Invalid tweet!\")\n\t\ttweetInvalid(username, tweetId)\n\t\texit(1)\n\treturn username, tweetId\n\n\ndef validateList(usrInput, username, tweetId):\n\tif usrInput in subjects:\n\t\treturn usrInput.upper()\n\telif usrInput.upper() in convert.keys():\n\t\treturn convert[usrInput]\n\telif usrInput.upper() in convert.values():\n\t\treturn usrInput.upper()\n\telse:\n\t\ttry:\n\t\t\tint(usrInput)\n\t\t\treturn usrInput\n\t\texcept:\n\t\t\tprint(\"Wrong input\\n\")\n\t\t\ttweetInvalid(username, tweetId)\n\t\t\texit(1)\n\ndef getUrl(inputList, username, tweetId):\n\tprint(\"Inserting values into link...\")\n\tprint(inputList)\n\t\n\tsearchData[10][1] = (validateList(inputList[0], username, tweetId))\n\tsearchData[11][1] = (validateList(inputList[1], username, tweetId))\n\tsearchData[12][1] = (validateList(inputList[2], username, tweetId))\n\t# searchData[12][1] = (validateList(\"100\", username, tweetId))\n\tsearchData[13][1] = (validateList(inputList[3], username, tweetId))\n\n\tprint(\"Getting search terms from search.py...\\n\")\n\tstring = \"\"\n\tfor eachTerm in searchData:\n\t\tstring += eachTerm[0]\n\t\tstring += \"=\"\n\t\tstring += 
eachTerm[1]\n\t\tstring += \"&\"\n\tstring = string[:-1]\n\tprint(string+\"\\n\")\n\treturn string\n\n\ndef loadPage(classData,url,url2):\n\ts = requests.Session()\n\tr = s.get(url)\n\tr2 = s.get(url2, cookies=r.cookies)\n\tdata = r2.text\n\tsoup = BeautifulSoup(data,\"html.parser\")\n\n\tclassCount = 0\n\tfor className in soup.find_all(\"td\",{\"class\": \"PAGROUPBOXLABELLEVEL1 PSLEFTCORNER\"}):\n\t\tclassData.append([])\n\t\tclassData[classCount].append(str(className.text).lstrip().rstrip())\n\t\tclassCount += 1\n\n\tskuRegex = re.compile('\\d{4}(?=\\<\\/a)')\n\tprofRegex = re.compile('(?<=R\\$\\d\\\"\\>)\\w.*?\\s\\w.*?(?=\\<)|(?<=R\\$\\d\\d\\\"\\>)\\w.*?\\s\\w.*?(?=\\<)|Staff')\n\tdivId = \"win0divSSR_CLSRSLT_WRK_GROUPBOX2$\"\n\tskuCount = 0\n\tfor eachClass in range(0,classCount):\n\t\tclassDivID = divId+str(eachClass)\n\t\tfor sections in soup.find_all(\"div\",{\"id\": classDivID}):\n\t\t\tskuList = re.findall(skuRegex,str(sections))\n\t\t\tclassData[eachClass].append(skuList)\n\n\n\t\t\tprofList = re.findall(profRegex,str(sections))\n\t\t\tclassData[eachClass].append(profList)\n\n\t\t\tstatusList = []\n\t\t\tfor status in sections.find_all(\"img\", alt=[\"Open\",\"Wait List\"]):\n\t\t\t\tstatusList.append(status['alt'].replace(\" \",\"\"))\n\t\t\tclassData[eachClass].append(statusList)\n\n\ndef tweetInput(classData, username, tweetId):\n\n\tif len(classData) == 0:\n\t\ttweet = \"No classes open\\n@\"+username\n\t\ttweet += \"\\n\\n\"\n\t\ttweet += strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n\t\t# print(tweet)\n\t\tapi.update_status(tweet, tweetId)\n\t\texit(1)\n\n\n\t# Max of 2 open sections per class\n\t# need better algorithm, since search.py now updated to only include open classes!\n\tidClassData = []\n\tclassCount = 0\n\tfor classes in classData:\n\t\tmaxCount = 0\n\t\tsectionCount = 0\n\t\tidSectionData = []\n\t\tonlyOnce = 1\n\t\tfor status in classes[3]:\n\t\t\tif status == \"Open\":\n\t\t\t\tif onlyOnce:\n\t\t\t\t\tidSectionData.append(classCount)\n\t\t\t\t\tonlyOnce = 0\n\t\t\t\tidSectionData.append(sectionCount)\n\t\t\t\tmaxCount += 1\n\t\t\tsectionCount += 1\n\t\t\t# change this if you want more classes\n\t\t\tif maxCount == 2:\n\t\t\t\tbreak\n\t\tif len(idSectionData) != 0:\n\t\t\tidClassData.append(idSectionData)\n\t\tclassCount += 1\n\n\n\tprint(idClassData)\n\tfor eachClass in idClassData:\n\t\tfor eachSection in range(1,len(eachClass)):\n\t\t\ttweet = \"\"\n\t\t\ttweet += str(classData[eachClass[0]][0])\n\t\t\ttweet += \"\\n\"\n\t\t\ttweet += str(classData[eachClass[0]][1][eachClass[eachSection]])\n\t\t\ttweet += \"\\n\"\n\t\t\ttweet += str(classData[eachClass[0]][2][eachClass[eachSection]])\n\t\t\ttweet += \"\\n\"\n\t\t\ttweet += str(classData[eachClass[0]][3][eachClass[eachSection]])\n\t\t\ttweet += \"\\n\"\n\t\t\ttweet += \"@\"+username\n\t\t\ttweet += \"\\n\"\n\t\t\ttweet += strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n\t\t\tprint(tweet)\n\t\t\tapi.update_status(tweet, tweetId)\n\n\ndef addOpen(url2):\n\topenClasses = [\"SSR_CLSRCH_WRK_SSR_OPEN_ONLY$chk$3\",\"Y\",\"SSR_CLSRCH_WRK_SSR_OPEN_ONLY$3\",\"Y\"]\n\turl2 += \"&\"+openClasses[0]+\"=\"+openClasses[1]+\"&\"+openClasses[2]+\"=\"+openClasses[3]\n\treturn url2\n\ndef main():\n\tprint(\"Get latest mention...\\n\")\n\ttwitterInput = []\n\tusername, tweetId = loadTwitter(twitterInput)\n\n\t# Create URL\n\turl = 'https://csprd-web.psg.umbc.edu/psc/csprdg/EMPLOYEE/HRMS/c/COMMUNITY_ACCESS.CLASS_SEARCH.GBL'\n\turl2 = url+\"?%3f\"+getUrl(twitterInput, username, tweetId)\n\n\t# Only search open classes:\n\turl2 = 
addOpen(url2)\n\n\tprint(url2)\n\tclassData = []\n\tprint(\"Connecting to UMBC...\\n\")\n\tloadPage(classData,url,url2)\n\tprint(classData)\n\ttweetInput(classData, username, tweetId)\nmain()\n","sub_path":"v1.0/replyBot.py","file_name":"replyBot.py","file_ext":"py","file_size_in_byte":6485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"1973461","text":"# Jizhong Wang\n# ITP 115, Fall 2018\n# Assignment A3\n# jizhongw@usc.edu\n\n\nreplay = True\nwhile replay == True:\n # ask the user for inputs\n print(\"\\nInput an integer greater than or equal to 0 or -1 to quit: \")\n repeat = True\n # initialize a variable for storing largest number\n numLarge = -1;\n # keep reading in integers until -1\n while repeat == True:\n num = int(input(\"> \"))\n if num == -1:\n repeat = False\n else:\n # update largest number\n if num > numLarge:\n numLarge = num\n # check if any valid input entered\n if numLarge == -1:\n print(\"No valid numbers entered. \")\n # print largest number\n else:\n print(\"The largest number is\", numLarge)\n # ask user if they want to run the program again\n valid = False\n while valid == False:\n res = input(\"\\nWould you like to find the largest number again?(y/n): \")\n if res == \"y\" or res == \"Y\":\n valid = True\n elif res == \"n\" or res == \"N\":\n print(\"\\nGoodbye!\")\n valid = True\n replay = False\n else:\n print(\"Invalid choice. Please enter again. \")","sub_path":"Assignments/A3/ITP115_a3P2_Wang_Jizhong.py","file_name":"ITP115_a3P2_Wang_Jizhong.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"200022762","text":"def swap_elements(my_list, i, j):\n '''(list) --> NoneType\n>>>a = [1, 2, 3, 4, 5]\n>>>swap_elements(a, 0, 3) #func doesn't return anything\n>>>a\n[4, 2, 3, 1, 5]\n'''\n ai = my_list[i]\n aj = my_list[j]\n my_list[i] = aj\n my_list[j] = ai\n\n #for functions that modifies lists, don't need to return anything\n\n\n\n# - LectureExercises\ndef move_DOWN_by1(list_a):\n '''\n >>>[2,4,6,8,1,2,3]\n [4,6,8,1,2,3,2]\n '''\n '''\n # without a new list\n first_element = list_a[0]\n for each in range(0, len(list_a) - 1):\n list_a[each] = list_a[each + 1]\n # [2,4,6,8,1,2,3] --> [4,4,6,8,1,2,3]\n # [4,4,6,8,1,2,3] --> [4,6,6,8,1,2,3]\n # [4,6,6,8,1,2,3] --> [4,6,8,8,1,2,3]\n # [4,6,8,8,1,2,3] --> [4,6,8,1,1,2,3]\n # [4,6,8,1,1,2,3] --> [4,6,8,1,2,2,3]\n # [4,6,8,1,2,2,3] --> [4,6,8,1,2,3,3]\n\n list_a[len(list_a) - 1] = first_element\n # [4,6,8,1,2,3,3] --> [4,6,8,1,2,3, 2 ]\n '''\n \n # With a new list_for loop\n new_list = []\n for index in range( -len(list_a), 0):\n new_list.append(list_a[index + 1])\n\n # Modify global list 'a' or parameter 'list_a'\n for index in range(len(list_a)):\n list_a[index] = new_list[index]\n '''\n \n # With a new list_while loop\n new_list = []\n i = -len(list_a)\n while i < 0:\n new_list.append(list_a[i + 1])\n i += 1\n # Modify global list 'a' or parameter 'list_a'\n for index in range(len(list_a)):\n list_a[index] = new_list[index]\n '''\n \n '''\n #following code works here, but don't work for move_UP_by1(a).\n first_element = list_a[0]\n list_a.remove(list_a[0])\n list_a.append(first_element)\n '''\na = [2,4,6,8,1,2,3]\nmove_DOWN_by1(a)\nprint(a, '|o')\n\n#Write a void function that takes a list as input and reverse the order of its elements\ndef reverse_list(list_a): \n new_list = []\n for index in range(len(list_a), 0, -1):\n new_list.append(list_a[index - 1])\n # Modify 'list_a' / 'a':\n for index in range(len(a)):\n a[index] = new_list[index]\n\na = [4, 4, 3, 8, 3, 3, 3]\nreverse_list(a)\n#print(a)\n\n\n# Write a function that takes as input a list of integers and returns\n# a list with the same elements, but no duplicates.\ndef remove_duplicates(list_a):\n new_list = [ list_a[0] ] #第一个肯定不重复\n for index in range(len(list_a)):\n if list_a[index] not in new_list:\n new_list.append(list_a[index])\n return new_list\n\na = [4, 4, 3, 8, 3, 3, 3]\n#print(remove_duplicates(a))\n\n\n# Write a function that takes two list of integers as input and\n# returns a list corresponding to the union of the two lists.\n# No duplicates should be in the union.\ndef union_list(list_a, list_b):\n repeated_total = list_a + list_b\n return remove_duplicates(repeated_total)\n # We must also return the remove_duplicates in this\n # function, although remove_duplicates itself returns a list.\n\na = [1,2,3,4,5,6]\nb = [4,5,6,7,8,9]\n#print(union_list(a, b))\n\n\n\n# Write a function that takes two list of integers as input and\n# returns a list corresponding to the intersection of the two list\n# (i.e. the elements the lists have in common with no repetitions) \ndef intersection_list(list_a, list_b):\n intersection = []\n for each in list_a:\n if each in list_b:\n intersection.append(each)\n return remove_duplicates(intersection)\n\n#print(intersection_list(a, b))\n\n\n\n\n\n","sub_path":"Comp202/L13_Oject_immutable_mutable.py","file_name":"L13_Oject_immutable_mutable.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"211591778","text":"# -*- coding: utf-8 -*-\nfrom reconhece import reconhece\nfrom entidades.busca_entidades import busca_entidades, checa_anterior\nimport validacoes\n\ndef obter_valor(io, msg_analizada, cliente, modo, cm):\n\n\t#checa se o valor foi inicialmente forncido\n\tprincipal = u'amount_of_money'\n\tentidades = [principal, u'valor', u'valor_anterior']\n\tvalor = busca_entidades(principal = principal, entidades = entidades, nova = msg_analizada, antiga = msg_analizada)\n\n\t# busca o último valor transferido\n\tvalor = checa_anterior(valor=valor, valor_padrao=\"mesmo valor\", entidade=principal, cm=cm, contexto=[\"aplicacao\", \"transferencia\"])\n\n\t# Se o valor não foi inicialmente fornecido\n\twhile (valor == \"nao encontrado\"):\n\t\tio.imprime(\"Diga o valor\")\n\t\tmsg_analizada_valor = reconhece(io, cliente, modo)\n\t\tvalidacoes.cancelar_check(msg_analizada_valor)\n\t\t# Aqui diferente do número da conta, um número só é aceita como valor após ser impresso \"Diga o valor\"\n\t\tentidades.append(u'number')\n\t\tvalor = busca_entidades(principal = principal, entidades = entidades, nova = msg_analizada_valor, antiga = msg_analizada)\n\t\tvalor = checa_anterior(valor=valor, valor_padrao=\"mesmo valor\", entidade=principal, cm=cm, contexto=[\"aplicacao\", \"transferencia\"])\n\treturn valor","sub_path":"entidades/obter_valor.py","file_name":"obter_valor.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"122960596","text":"\n\nfrom xai.brain.wordbase.adjectives._haughty import _HAUGHTY\n\n#calss header\nclass _HAUGHTIER(_HAUGHTY, ):\n\tdef __init__(self,): \n\t\t_HAUGHTY.__init__(self)\n\t\tself.name = \"HAUGHTIER\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"haughty\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_haughtier.py","file_name":"_haughtier.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"159744525","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/david/source/lyrics/lyrics/database.py\n# Compiled at: 2013-01-30 15:40:13\nimport sqlite3, threading, settings, debug\n\ndef load(artist, song, album):\n if settings.use_database:\n return _LyricsDb.load(artist, song, album)\n raise LookupError('settings.use_database = False')\n\n\ndef save(artist, song, album, lyrics):\n if settings.use_database:\n debug.debug('save')\n return _LyricsDb.save(artist, song, album, lyrics)\n\n\ndef _get_db_cursor():\n connection = sqlite3.connect(settings.database_path)\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n return (\n connection, cursor)\n\n\n_db_lock = threading.Lock()\n\ndef _sqlite_threadsafe(func):\n\n def wrapper(*args, **kwargs):\n _db_lock.acquire()\n try:\n result = func(*args, **kwargs)\n except:\n _db_lock.release()\n raise\n\n _db_lock.release()\n return result\n\n return wrapper\n\n\nclass _LyricsDb(object):\n _create_table = '\\n CREATE TABLE IF NOT EXISTS lyrics(\\n artist text not null,\\n song text not null,\\n album text not null,\\n lyrics text null,\\n unique(artist, song, album)\\n )\\n '\n _select = 'SELECT lyrics FROM lyrics WHERE artist=? and song=? and album=?'\n _insert = 'INSERT INTO lyrics VALUES (?, ?, ?, ?)'\n\n def get_cursor(self):\n \"\"\"create new connections, sqlite cannot handle multi threading\"\"\"\n connection, cursor = _get_db_cursor()\n cursor.execute(self._create_table)\n return (connection, cursor)\n\n @_sqlite_threadsafe\n def save(self, *args):\n debug.debug('savex', args)\n connection, cursor = self.get_cursor()\n cursor.execute(self._insert, args)\n connection.commit()\n\n @_sqlite_threadsafe\n def load(self, *args):\n connection, cursor = self.get_cursor()\n cursor.execute(self._select, args)\n row = cursor.fetchone()\n if row is None:\n raise LookupError('Row not found')\n return row[0]\n\n\nclass ID3Cache(object):\n _create_table = '\\n CREATE TABLE IF NOT EXISTS id3_cache(\\n path text primary key,\\n artist text not null,\\n song text not null,\\n album text not null,\\n genre text not null,\\n year text not null,\\n track text not null\\n )\\n '\n _select = 'SELECT * FROM id3_cache WHERE path=?'\n _insert = 'INSERT INTO id3_cache VALUES (\\n :path, :artist, :song, :album, :genre, :year, :track)'\n\n def get_cursor(self):\n \"\"\"create new connections, sqlite cannot handle multi threading\"\"\"\n connection, cursor = _get_db_cursor()\n (connection, cursor.execute(self._create_table))\n return (connection, cursor)\n\n @_sqlite_threadsafe\n def save(self, dct):\n connection, cursor = self.get_cursor()\n debug.debug('save id3 db song', dct)\n cursor.execute(self._insert, dct)\n connection.commit()\n\n @_sqlite_threadsafe\n def load(self, path):\n connection, cursor = self.get_cursor()\n cursor.execute(self._select, (path,))\n row = cursor.fetchone()\n if row is None:\n return\n else:\n row = dict(zip(row.keys(), row))\n return row\n\n\n_LyricsDb = _LyricsDb()\nID3Cache = ID3Cache()","sub_path":"pycfiles/lyrics-0.0.3.tar/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"629078489","text":"import maya.cmds\n\ndef fix_rootOrient():\n \"\"\"Re-orient RootX ctrl so that X axis points up (previously Y up).\"\"\"\n ns = ''\n root_anim = '{0}RootX_M'.format(ns)\n offset = '{0}RootOffsetX_M'.format(ns)\n all_shapes= maya.cmds.listRelatives(root_anim, s=True)\n children = maya.cmds.listRelatives(root_anim, c=True)\n children = list(set(children).difference(set(all_shapes)))\n\n maya.cmds.parent(children, w=True)\n maya.cmds.setAttr('{0}.r'.format(offset), 90, 0, 90)\n\n ## Rotate Shape\n center = maya.cmds.xform(root_anim, q=True, ws=True, t=True)\n\n maya.cmds.select(cl=True)\n for shape in all_shapes:\n maya.cmds.select('{0}.cv[0:6]'.format(shape), tgl=True)\n\n maya.cmds.rotate(0,0,90, r=True, p=center, os=True, fo=True)\n\n maya.cmds.parent(children, root_anim)\n\n maya.cmds.select(root_anim, r=True)","sub_path":"Maya/ProjectRelated/Samsam/tdTools/SAM_Scripts_Pack/SAM_karlab/commands_lib/fix_lib/fix_rootOrient.py","file_name":"fix_rootOrient.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"165939337","text":"\n\nfrom xai.brain.wordbase.nouns._bargain import _BARGAIN\n\n#calss header\nclass _BARGAINED(_BARGAIN, ):\n\tdef __init__(self,): \n\t\t_BARGAIN.__init__(self)\n\t\tself.name = \"BARGAINED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"bargain\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_bargained.py","file_name":"_bargained.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"164751677","text":"# encoding=utf-8\nimport pdb\n\nimport torchvision.transforms.functional as F\nimport os\nfrom PIL import Image\nimport torch.utils.data.dataset as dataset\nfrom torchvision import transforms\n\nimport misc_utils as utils\nimport random\nimport numpy as np\nimport cv2\n\n\ndef paired_cut(img_1: Image.Image, img_2: Image.Image, crop_size):\n def get_params(img, output_size):\n w, h = img.size\n th, tw = output_size\n i = random.randint(0, h - th)\n j = random.randint(0, w - tw)\n return i, j, th, tw\n\n r = random.randint(-1, 6)\n if r >= 0:\n img_1 = img_1.transpose(r)\n img_2 = img_2.transpose(r)\n\n i, j, h, w = get_params(img_1, crop_size)\n img_1 = F.crop(img_1, i, j, h, w)\n img_2 = F.crop(img_2, i, j, h, w)\n\n return img_1, img_2\n\n\nclass ListTrainValDataset(dataset.Dataset):\n \"\"\"ImageDataset for training.\n\n Args:\n file_list(str): dataset list, input and label should be split by ','\n aug(bool): data argument (×8)\n norm(bool): normalization\n\n Example:\n train_dataset = ImageDataset('train.txt', aug=False)\n for i, data in enumerate(train_dataset):\n input, label = data['input']. data['label']\n\n \"\"\"\n\n def __init__(self, file_list, scale=None, crop=None, aug=True, norm=False, max_size=None):\n self.im_names = []\n self.labels = []\n with open(file_list, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip('\\n')\n img, label = line.split()\n img, label = img.strip(), label.strip()\n self.im_names.append(img)\n self.labels.append(label)\n\n self.trans_dict = {0: Image.FLIP_LEFT_RIGHT, 1: Image.FLIP_TOP_BOTTOM, 2: Image.ROTATE_90, 3: Image.ROTATE_180,\n 4: Image.ROTATE_270, 5: Image.TRANSPOSE, 6: Image.TRANSVERSE}\n\n if isinstance(scale, int):\n scale = (scale, scale)\n\n self.scale = scale\n if isinstance(crop, int):\n crop = (crop, crop)\n\n self.crop = crop\n self.aug = aug\n self.norm = norm\n self.max_size = max_size\n\n def __getitem__(self, index):\n \"\"\"Get indexs by index\n\n Args:\n index(int): index\n\n Returns:\n {'input': input,\n 'label': label,\n 'path': path\n }\n\n \"\"\"\n\n input = Image.open(self.im_names[index]).convert(\"RGB\")\n label = Image.open(self.labels[index]).convert(\"RGB\")\n\n if self.crop:\n input, label = paired_cut(input, label, self.crop)\n\n if self.scale:\n input = F.resize(input, self.scale)\n label = F.resize(label, self.scale)\n\n r = random.randint(0, 7)\n if self.aug and r != 7:\n input = input.transpose(self.trans_dict[r])\n label = label.transpose(self.trans_dict[r])\n\n if self.norm: # 分割可以norm 复原不能norm\n input = F.normalize(F.to_tensor(input), mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n else:\n input = F.to_tensor(input)\n label = F.to_tensor(label)\n return {'input': input, 'label': label, 'path': self.im_names[index]}\n\n def __len__(self):\n if self.max_size is not None:\n return min(self.max_size, len(self.im_names))\n\n return len(self.im_names)\n\n\nclass ListTestDataset(dataset.Dataset):\n \"\"\"ImageDataset for test.\n\n Args:\n file_list(str): dataset path'\n norm(bool): normalization\n\n Example:\n test_dataset = ImageDataset('test', crop=256)\n for i, data in enumerate(test_dataset):\n input, file_name = data\n\n \"\"\"\n def __init__(self, file_list, scale=None, norm=False, max_size=None):\n self.im_names = []\n with open(file_list, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip('\\n')\n img = line\n self.im_names.append(img)\n\n if isinstance(scale, int):\n scale = (scale, scale)\n\n self.scale = scale\n 
self.norm = norm\n self.max_size = max_size\n\n def __getitem__(self, index):\n\n input = Image.open(self.im_names[index]).convert(\"RGB\")\n\n if self.scale:\n input = F.resize(input, self.scale)\n\n if self.norm:\n input = F.normalize(F.to_tensor(input), mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n else:\n input = F.to_tensor(input)\n\n return {'input': input, 'path': self.im_names[index]}\n\n def __len__(self):\n if self.max_size is not None:\n return min(self.max_size, len(self.im_names))\n\n return len(self.im_names)\n\n\ndef preview_dataset(dataset, path='path'):\n for i, data in enumerate(dataset):\n if i == min(10, len(dataset)):\n break\n\n c = 'input'\n img = data[c]\n img = np.array(img)\n img = np.transpose(img, (1, 2, 0))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if path in data:\n cv2.putText(img, os.path.basename(data[path]), (1, 35), 0, 1, (255, 255, 255), 2)\n\n if 'label' in data:\n label = data['label']\n label = np.array(label)\n label = np.transpose(label, (1, 2, 0))\n label = cv2.cvtColor(label, cv2.COLOR_BGR2RGB)\n cv2.putText(label, 'gt', (1, 35), 0, 1, (255, 255, 255), 2)\n\n preview = np.hstack((img, label))\n\n else:\n preview = img\n\n cv2.imshow('preview', preview)\n\n cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n\n dataset = ListTrainValDataset('../datasets/train.txt', crop=256, aug=False)\n preview_dataset(dataset)\n\n # dataset = ListTestDataset('../datasets/test.txt', scale=256)\n # preview_dataset(dataset)\n\n\n\n\n","sub_path":"dataloader/image_list.py","file_name":"image_list.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"129389258","text":"# Definition for a binary tree node.\nfrom typing import List\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n self.result = list()\n self.cal_num(root)\n return self.result\n\n def cal_num(self, root, weight=0):\n if root is None:\n return\n if len(self.result) <= weight:\n self.result.append(list())\n self.result[weight].append(root.val)\n num_left = weight\n self.cal_num(root.left, num_left + 1)\n self.cal_num(root.right, num_left + 1)\n","sub_path":"102. Binary Tree Level Order Traversal/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"263017747","text":"from __future__ import print_function\nfrom ATM import atm\nfrom encrypt import rot13 \t\t\t\t\t\t\t#Imports atm function from ATM.py file\nfrom Data import data,join\nimport os\nimport sys\nimport re\n\n\n# Using input() in python 2 or 3\ntry:\n # set raw_input as input in python2\n input = raw_input\nexcept:\n pass\n\n#main funtion which calls further funtions,execution starts from here\ndef login_user():\n #data funtion is called to check or make changes in it\n d = data()\n\n user = input(\"Select One : \\n1. Login \\n2. Create New Account \\n3. Delete Existing Account \\n0. Exit \\n\")\n os.system('cls' if os.name == 'nt' else 'clear')\n\n if not str(user).isdigit():\n print (\"Invalid Selection!\")\n return login_user()\n\n #login function called for further execution\n if int(user) == 1:\n os.system('cls' if os.name == 'nt' else 'clear')\n login(d)\n\n #new_account function called for further execution\n elif int(user) == 2:\n os.system('cls' if os.name == 'nt' else 'clear')\n new_account()\n\n elif int(user) == 3:\n os.system('cls' if os.name == 'nt' else 'clear')\n del_account()\n\n elif int(user) == 0: #exits the main funtion\n print (\"Good Bye!\")\n\n else:\n print (\"Invalid Selection!\")\n return login_user()\t\t\t\t\t\t#in case any other number is entered except those listed above\n\t\t\t\t\t\t\t\t\t\t\t\t#recursion(main function called again)\n return\n\n\ndef login(d):\n os.system('cls' if os.name == 'nt' else 'clear')\n user_name = input(\"Login\\nEnter Full Name : \")\n entry = 0\n if (d == None):\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Please create an account first!\")\n return login_user()\n if user_name in d.keys():\n while int(entry) != 3:\n print(\"Entries left :\",(3-entry))\n pin = str(input(\"Enter 4-Digit Pin : \"))\n\n if pin == d[user_name][0]:\n Net_balance = d[user_name][1]\n Pin = d[user_name][0]\n os.system('cls' if os.name == 'nt' else 'clear')\n return atm(user_name,Net_balance,Pin)\n\n else:\n entry += 1\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Incorrect Pin!\")\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Login Unsuccessful\\n\")\n return login_user()\n\n else:\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Invalid User!\")\n return login_user()\n\n\ndef new_account():\n filename = join()\n user_name1 = input(\"New Account\\nEnter First Name : \")\n os.system('cls' if os.name == 'nt' else 'clear')\n user_name2 = input(\"Enter Last Name : \")\n\n if (user_name1.isalpha() == False) or (user_name2.isalpha() == False) or (user_name1 == user_name2):\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Invalid Name\")\n return new_account()\n\n\n os.system('cls' if os.name == 'nt' else 'clear')\n pin_count = 0\n while pin_count != 3:\n print(\"Entries left :\",(3-pin_count))\n pin = str(input (\"Enter 4-Digit Pin : \"))\n os.system('cls' if os.name == 'nt' else 'clear')\n\n if (len(pin) == 4) and (pin.isdigit() == True):\n os.system('cls' if os.name == 'nt' else 'clear')\n confirm_pin = str(input (\"Confirm Pin : \"))\n\n if pin == confirm_pin:\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Account Name :\",user_name1+' '+user_name2,\"\\nPin :\",pin)\n confirm = input(\"Please Confirm \\n1. Yes \\n2. 
No \\n\")\n\n if (confirm == '1') or (confirm.lower().startswith('y')):\n os.system('cls' if os.name == 'nt' else 'clear')\n with open(filename, \"a\") as wr:\n enc_name = user_name1+' '+user_name2\n #rot13() function is called for encoding\n enc = rot13(enc_name)\n new = \"\\n\"+enc+\":\"+pin+\",0.0\"\n wr.write(new)\n wr.close()\n print (\"Account Created Successfully! \\n\")\n return login_user()\n\n elif (confirm == '2') or (confirm.lower().startswith('n')):\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Account Not Created!\")\n return login_user()\n\n else:\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Account Not Created!\")\n return new_account()\n\n else:\n print (\"Your Pin Did Not Match!\")\n pin_count +=1\n\n else:\n pin_count = pin_count\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Invalid Pin\")\n\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Account Not Created!\")\n return login_user()\n\ndef del_account():\n d = data()\n filename = join()\n\n os.system('cls' if os.name == 'nt' else 'clear')\n acc_name = input(\"Delete Account\\nEnter Full Name : \")\n\n if acc_name in d.keys():\n acc_pin = str(input(\"Enter 4-Digit Pin : \"))\n\n if acc_pin == d[acc_name][0]:\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Delete Account :\",acc_name)\n confirm = input(\"Please Confirm \\n1. Yes \\n2. No \\n\")\n\n if (confirm == '1') or (confirm.lower().startswith('y')):\n os.system('cls' if os.name == 'nt' else 'clear')\n if d[acc_name] == d['abc xyz']:\n del d[acc_name]\n else:\n del d[acc_name],d['abc xyz']\n #over_writing of existing file\n with open(filename,\"w\") as rd:\n rd.write('nop klm:1234,0.0')\n rd.close()\n with open(filename,\"a\") as ow:\n for item in d.keys():\n items = rot13(item)\n over_write = '\\n'+items+':'+d[item][0]+','+str(d[item][1])\n ow.write(over_write)\n ow.close()\n print (\"Account Deleted Successfully! \\n\")\n return login_user()\n\n elif (confirm == '2') or (confirm.lower().startswith('n')):\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Account Not Deleted!\")\n return login_user()\n\n else:\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Account Not Deleted!\")\n return login_user()\n\n else:\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Incorrect Pin!\")\n return login_user()\n\n else:\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Account Does Not Exist!\")\n return login_user()\n\n\ntry:\n os.system('cls' if os.name == 'nt' else 'clear')\n login_user()\nexcept:\n Exception\n os.system('cls' if os.name == 'nt' else 'clear')\n print (\"Sorry for inconvenience.\")\n print (\"Some errors were encountered,\\nPlease be careful next time.\\nGood bye!\")\n","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":7306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"85360856","text":"import numpy as np\nfrom scipy.stats import expon, uniform, gamma\nimport math\n\n\n\"\"\"Functions for radial distribution for ball position around the players\"\"\"\ndef radial_dist(size, r_mean):\n \"\"\"Exponential and uniform distribution for r and theta\"\"\"\n r = expon.rvs(scale=r_mean, size=size)\n theta = uniform.rvs(0, 2*math.pi, size=size)\n return r, theta\n\ndef gamma_dist(t, tau_remove_balls, a = 3):\n proba = gamma.cdf(t-tau_remove_balls, a=a, loc=0, scale=1)\n return np.random.binomial(1,proba)\n\ndef expo_dist(t, tau_remove_balls):\n proba = expon.cdf(t-tau_remove_balls)\n return np.random.binomial(1,proba)\n\ndef sample_position(pos, r_mean):\n \"\"\"Sample of size 1 from position pos = (i,j)\"\"\"\n r, theta = radial_dist(1, r_mean)\n r = 1 + r[0]\n theta = theta[0]\n delta = (r * math.cos(theta), r * math.sin(theta))\n delta = np.round(delta)\n output_sample = (int(pos[0]+delta[0]) , int(pos[1]+delta[1]))\n return output_sample\n\ndef sample_positions(pos, size, r_mean):\n \"\"\"Sample of size >=1 from position pos = (i,j)\"\"\"\n r, theta = radial_dist(size, r_mean)\n new_r = 1 + r\n (i,j) = pos\n sample_deltas = [(r_i * math.cos(theta_i), r_i * math.sin(theta_i)) for (r_i, theta_i) in zip(new_r, theta)]\n sample_deltas = np.round(sample_deltas)\n output_sample = [(int(i+x) , int(j+y)) for (x,y) in sample_deltas]\n return output_sample\n\n\nCIRC_PROBAS_4P_1C = \\\n [[0.25,0.50,0.25,0.00],\n [0.00,0.25,0.50,0.25],\n [0.25,0.00,0.25,0.50],\n [0.50,0.25,0.00,0.25]\n ]\n\n#print(CIRC_PROBAS_4P_1C)\n\ni_p = 0\n#print(np.random.choice(4, p=CIRC_PROBAS_4P_1C[i_p]))\n\n\n","sub_path":"circular_collect/circular_collect/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"574921200","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport zmq\nimport tensorflow as tf\n\ndef zmq_setup(MSG_PORT):\n context = zmq.Context()\n print(\"Connecting to evaluator with port %s\" % (MSG_PORT))\n socket = context.socket(zmq.PAIR)\n socket.connect(\"tcp://localhost:%s\" % (MSG_PORT))\n return socket\n \n \ndef notify_new_ckpt(msg_socket, step): \n \"\"\"\n used as a client\n when a new checkpoint is created, notify the server(evaluator) to make an one time evaluation\n \"\"\"\n msg = '***********' + 'New checkpoint has been created for Step ' + str(step) + '***********' \n print(msg)\n msg_socket.send(msg) \n \ndef clean_dir(t_dir):\n if tf.gfile.Exists(t_dir):\n tf.gfile.DeleteRecursively(t_dir)\n tf.gfile.MakeDirs(t_dir) ","sub_path":"classification/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"268683346","text":"from django import forms\r\nfrom app.product.models.product import Product \r\n\r\n\r\nclass ProductUpdateForm(forms.ModelForm):\r\n\r\n image = forms.ImageField(label='商品画像', required=False)\r\n\r\n class Meta:\r\n model = Product\r\n exclude = ['create_at', 'update_at', 'create_user', 'update_user',]\r\n","sub_path":"app/product/forms/product_update_forms.py","file_name":"product_update_forms.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"147176139","text":"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('about.html', views.about, name='about'),\n path('contact.html', views.contact, name='contact'),\n path('offer.html', views.offer, name='offer'),\n path('appointment.html', views.appointment, name='appointment')\n\n]\n","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"312370466","text":"import random\nfrom collections import defaultdict\n\nfrom discord import Color, Embed, Guild, Member, Message, TextChannel\nfrom discord.ext.commands import Bot, Cog, Context, command, errors\n\nfrom assets.words import word_list\n\nfrom .utils import constants\n\n\nclass Games(Cog):\n def __init__(self, bot: Bot) -> None:\n self.bot = bot\n self.hangman_players = defaultdict(lambda: defaultdict(list))\n\n @command()\n async def roll(self, ctx: Context, min_limit: int = 1, max_limit: int = 10) -> None:\n \"\"\"Roll a random number.\"\"\"\n if max_limit - min_limit > 2:\n number = random.randint(min_limit, max_limit)\n embed = Embed(title=\"Random Roll\", color=Color.blurple(), description=f\"The random number is: {number}\")\n await ctx.send(embed=embed)\n else:\n embed = Embed(title=\"Random Roll\", color=Color.red(), description=\"Please specify numbers with difference of **at least 2**\")\n await ctx.send(embed=embed)\n\n @command(aliases=[\"8ball\"])\n async def ball8(self, ctx: Context, *, question: str) -> None:\n \"\"\"Play 8ball.\"\"\"\n reply_type = random.randint(1, 3)\n\n if reply_type == 1:\n answer = random.choice(constants.POSITIVE_REPLIES)\n elif reply_type == 2:\n answer = random.choice(constants.NEGATIVE_REPLIES)\n elif reply_type == 3:\n answer = random.choice(constants.ERROR_REPLIES)\n\n embed = Embed(title=\"Magic 8-ball\", color=Color.blurple())\n embed.add_field(name=\"Question\", value=question)\n embed.add_field(name=\"Answer\", value=answer)\n\n @command()\n async def hangman(self, ctx: Context) -> None:\n \"\"\"Play Hangman game.\"\"\"\n\n def display_hangman(tries: int) -> str:\n stages = [\n # final state: head, torso, both arms, and both legs\n r\"\"\"```\n--------\n| |\n| O\n| \\|/\n| |\n| / \\\n-\n ```\"\"\",\n # head, torso, both arms, and one leg\n r\"\"\"```\n--------\n| |\n| O\n| \\|/\n| |\n| /\n-\n ```\"\"\",\n # head, torso, and both arms\n r\"\"\"```\n--------\n| |\n| O\n| \\|/\n| |\n|\n-\n ```\"\"\",\n # head, torso, and one arm\n r\"\"\"```\n--------\n| |\n| O\n| \\|\n| |\n|\n-\n ```\"\"\",\n # head and torso\n r\"\"\"```\n--------\n| |\n| O\n| |\n| |\n|\n-\n ```\"\"\",\n # head\n r\"\"\"```\n--------\n| |\n| O\n|\n|\n|\n-\n ```\"\"\",\n # initial empty state\n r\"\"\"```\n--------\n| |\n|\n|\n|\n|\n-\n ```\"\"\",\n ]\n return stages[tries]\n\n def check(message: Message) -> bool:\n return message.author == ctx.author and message.channel == ctx.channel\n\n word = random.choice(word_list).upper()\n word_completion = \"#\" * len(word)\n guessed = False\n guessed_letters = []\n guessed_words = []\n tries = 6\n\n embed = Embed(title=\"Let's play Hangman!\", color=Color.dark_green())\n embed.add_field(name=\"**❯❯ Hang Status**\", value=display_hangman(tries), inline=False)\n embed.add_field(name=\"**❯❯ Word Completion Status**\", value=f\"**{word_completion}**\", inline=False)\n embed.add_field(name=\"**❯❯ Word Status**\", value=\"**Not Yet Guessed**\", inline=False)\n embed.set_footer(text=\">>hangexit to exit the game! | Powered By HotWired.\")\n message = await ctx.send(embed=embed)\n guess_embed = await ctx.send(embed=Embed(description=\"Please guess a letter or word: \", color=Color.gold()))\n if not self.is_playing_hangman(ctx.author, ctx.guild, ctx.channel):\n try:\n self.add_hangman_player(ctx.author, ctx.guild, ctx.channel)\n except errors.BadArgument:\n await ctx.send(f\"Active games by {ctx.author.mention} found. 
Use `>>hangexit` to exit!\")\n\n while not guessed and tries > 0 and self.is_playing_hangman(ctx.author, ctx.guild, ctx.channel):\n input = await self.bot.wait_for(\"message\", check=check)\n guess = input.content.upper()\n\n await input.delete()\n\n if len(guess) == 1 and guess.isalpha():\n if guess in guessed_letters:\n embed = Embed(title=\"Hangman Stats\", color=Color.dark_blue())\n embed.add_field(name=\"**❯❯ Hang Status**\", value=display_hangman(tries), inline=False)\n embed.add_field(name=\"**❯❯ Word Completion Status**\", value=f\"**{word_completion}**\", inline=False)\n embed.add_field(name=\"**❯❯ Word Status**\", value=f\"You already guessed the letter {guess}\", inline=False)\n embed.set_footer(text=\"Powered By HotWired.\")\n await message.edit(embed=embed)\n elif guess not in word:\n tries -= 1\n guessed_letters.append(guess)\n\n embed = Embed(title=\"Hangman Stats\", color=Color.dark_blue())\n embed.add_field(name=\"**❯❯ Hang Status**\", value=display_hangman(tries), inline=False)\n embed.add_field(name=\"**❯❯ Word Completion Status**\", value=f\"**{word_completion}**\", inline=False)\n embed.add_field(name=\"**❯❯ Word Status**\", value=f\"{guess} is not in the word.\", inline=False)\n embed.set_footer(text=\"Powered By HotWired.\")\n await message.edit(embed=embed)\n else:\n guessed_letters.append(guess)\n word_as_list = list(word_completion)\n indices = [i for i, letter in enumerate(word) if letter == guess]\n\n for index in indices:\n word_as_list[index] = guess\n word_completion = \"\".join(word_as_list)\n if \"#\" not in word_completion:\n guessed = True\n\n embed = Embed(title=\"Hangman Stats\", color=Color.dark_blue())\n embed.add_field(name=\"**❯❯ Hang Status**\", value=display_hangman(tries), inline=False)\n embed.add_field(name=\"**❯❯ Word Completion Status**\", value=f\"**{word_completion}**\", inline=False)\n embed.add_field(name=\"**❯❯ Word Status**\", value=f\"Good job, {guess} is in the word!\", inline=False)\n embed.set_footer(text=\"Powered By HotWired.\")\n await message.edit(embed=embed)\n\n elif len(guess) == len(word) and guess.isalpha():\n if guess in guessed_words:\n embed = Embed(title=\"Hangman Stats\", color=Color.dark_blue())\n embed.add_field(name=\"**❯❯ Hang Status**\", value=display_hangman(tries), inline=False)\n embed.add_field(name=\"**❯❯ Word Completion Status**\", value=f\"**{word_completion}**\", inline=False)\n embed.add_field(name=\"**❯❯ Word Status**\", value=f\"You already guessed the word {guess}\", inline=False)\n embed.set_footer(text=\"Powered By HotWired.\")\n await message.edit(embed=embed)\n elif guess != word:\n tries -= 1\n guessed_words.append(guess)\n\n embed = Embed(title=\"Hangman Stats\", color=Color.dark_blue())\n embed.add_field(name=\"**❯❯ Hang Status**\", value=display_hangman(tries), inline=False)\n embed.add_field(name=\"**❯❯ Word Completion Status**\", value=f\"**{word_completion}**\", inline=False)\n embed.add_field(name=\"**❯❯ Word Status**\", value=f\"{guess} is not in the word.\", inline=False)\n embed.set_footer(text=\"Powered By HotWired.\")\n await message.edit(embed=embed)\n else:\n guessed = True\n word_completion = word\n else:\n embed = Embed(title=\"Hangman Stats\", color=Color.dark_blue())\n embed.add_field(name=\"**❯❯ Hang Status**\", value=display_hangman(tries), inline=False)\n embed.add_field(name=\"**❯❯ Word Completion Status**\", value=f\"**{word_completion}**\", inline=False)\n embed.add_field(name=\"**❯❯ Word Status**\", value=\"Not a valid guess.\", inline=False)\n 
embed.set_footer(text=\"Powered By HotWired.\")\n await message.edit(embed=embed)\n\n await guess_embed.delete()\n if guessed:\n embed = Embed(title=\"Hangman Stats\", color=Color.dark_blue())\n embed.add_field(name=\"**❯❯ Hang Status**\", value=display_hangman(tries), inline=False)\n embed.add_field(name=\"**❯❯ Word Completion Status**\", value=f\"**{word_completion}**\", inline=False)\n embed.set_footer(text=\"Powered By HotWired.\")\n await message.edit(embed=embed)\n await ctx.send(embed=Embed(description=\"Congrats, you guessed the word! You win! :partying_face: \", color=Color.dark_green()))\n else:\n embed = Embed(title=\"Hangman Stats\", color=Color.dark_blue())\n embed.add_field(name=\"**❯❯ Hang Status**\", value=display_hangman(tries), inline=False)\n embed.add_field(name=\"**❯❯ Word Completion Status**\", value=f\"**{word_completion}**\", inline=False)\n embed.set_footer(text=\"Powered By HotWired.\")\n await message.edit(embed=embed)\n await ctx.send(\n embed=Embed(description=f\"Sorry, you ran out of tries. The word was {word}. Maybe next time! :frowning: \", color=Color.red())\n )\n\n @command()\n async def hangexit(self, ctx: Context) -> None:\n try:\n self.del_hangman_player(ctx.author, ctx.guild, ctx.channel)\n except errors.BadArgument:\n await ctx.send(f\"No active games by {ctx.author.mention} found!\")\n\n def is_playing_hangman(self, player: Member, guild: Guild, channel: TextChannel) -> bool:\n if player in self.hangman_players[guild][channel]:\n return True\n else:\n return False\n\n def add_hangman_player(self, player: Member, guild: Guild, channel: TextChannel) -> None:\n if not self.is_playing_hangman(player, guild, channel):\n self.hangman_players[guild][channel].append(player)\n else:\n raise errors.BadArgument(\"Player is already in game!\")\n\n def del_hangman_player(self, player: Member, guild: Guild, channel: TextChannel) -> None:\n if self.is_playing_hangman(player, guild, channel):\n self.hangman_players[guild][channel].remove(player)\n else:\n raise errors.BadArgument(\"Player is not in game!\")\n\n\ndef setup(bot: Bot) -> None:\n bot.add_cog(Games(bot))\n","sub_path":"cogs/games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":11008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"443798127","text":"import os\nimport csv\nimport torch\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom plot import *\nfrom os.path import join\nfrom pathlib import Path\nfrom sklearn.cluster import KMeans\n\nfrom torch.utils.data import DataLoader, Subset\nfrom customLoader import *\nfrom torchvision.transforms import transforms\nfrom sklearn.metrics import pairwise_distances_argmin_min\n\n\nfrom IPython import embed\n\n\ndef get_loader(trajectories, transform, conf, shuffle=False, limit=None):\n train, _ = get_train_val_split(trajectories, 1)\n train_dataset = CustomHabitatData(train, transform=transform, delay=False, **conf)\n\n if not limit == None:\n train_dataset = Subset(train_dataset, list(range(limit)))\n train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=shuffle, num_workers=0)\n return train_dataloader\n\ndef compute_kmeans(embeddings, num_clusters):\n return KMeans(n_clusters=num_clusters, random_state=0).fit(embeddings)\n\n\ndef compute_embeddings_curl(loader, encode):\n #embed()\n print(\"Computing embeddings\")\n return np.array([encode(data[:,0].cuda()).detach().cpu().numpy() for data in loader]).squeeze()\n\n\ndef compute_embeddings(loader, model):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n return np.array([model.compute_embedding(batch, device).detach().cpu().numpy() for batch in loader]).squeeze()\n\n\n\ndef get_images(loader):\n return torch.cat([data[:,0] for data in loader])\n\n\"\"\"\ndef load_trajectories(trajectories):\n print(\"Loading trajectories...\")\n\n all_trajectories = []\n files = sorted([x for x in os.listdir(f\"./results/{trajectories}/\") if 'coords' in x], key=lambda x: int(x.split('.')[1]))\n for file in files:\n with open(f\"./results/{trajectories}/{file}\") as csv_file:\n trajectory = []\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for i, row in enumerate(csv_reader):\n trajectory.append(row)\n all_trajectories.append(trajectory)\n return np.array(all_trajectories).reshape(-1, 3)\n\"\"\"\ndef load_trajectories(trajectories):\n print(\"Loading trajectories...\")\n\n all_trajectories = []\n files = sorted([x for x in os.listdir(f\"../results/\"+trajectories+\"_positions/\")], key=lambda x: int(x.split('.')[0].split('_')[2]))\n for file in files:\n all_trajectories.append(np.load(\"../results/\"+trajectories+\"_positions/\"+file))\n #IF EXPERTS:\n #all_trajectories = [y for x in all_trajectories for y in x]\n f = np.array(all_trajectories, dtype=object).reshape(-1,3)\n return f\n\n\n\ndef store_goal_positions(enc):\n loader = get_loader(\n enc.trajectories,\n enc.transform,\n enc.conf,\n shuffle=enc.shuffle,\n limit=enc.limit)\n trajectories = load_trajectories(enc.trajectories[0])\n embeddings = compute_embeddings(loader, enc.encode)\n values = pd.DataFrame(columns=['x', 'y', 'Code:'])\n\n\n \"\"\"\n for i, (e, p) in enumerate(zip(embeddings, trajectories)):\n x = float(p[2])\n y = float(p[0])\n e = torch.from_numpy(e).cuda()\n k = enc.compute_argmax(e.unsqueeze(dim=0))\n values = values.append({'x': x, 'y': y, 'Code:': int(k)}, ignore_index=True)\n #embed()\n values['Code:'] = values['Code:'].astype('int32')\n \n \n means = values.groupby('Code:').mean()\n means.to_csv('skill_mean_position.csv')\n\n \"\"\"\n\n goals_path = sorted(os.listdir(\"goal_states/white_DEF/\"))\n goals = []\n i = 0\n print(\"Computing closest embeddings\")\n for goal in goals_path:\n g = np.load(\"goal_states/white_DEF/\"+goal)\n \n closest = 
enc.compute_argmax(torch.from_numpy(g).squeeze().float().cuda(), embeddings)\n #closest, _ = pairwise_distances_argmin_min(g.reshape(1,-1), embeddings)\n print(closest)\n with open(f\"goal_states/white_positions\"+str(i)+\".npy\", 'wb') as f:\n np.save(f, trajectories[closest])\n i = i+1\n \n\ndef construct_map(enc, model):\n if not enc.limit == None:\n limit = [x*50 for x in range(enc.limit)]\n else: \n limit = None\n\n loader = get_loader(\n enc.trajectories,\n enc.transform,\n enc.conf,\n shuffle=enc.shuffle,\n limit=enc.limit)\n trajectories = load_trajectories(enc.trajectories[0])\n \n if model == \"vqvae\":\n embeddings = compute_embeddings(loader, enc.model)\n elif model == \"curl\":\n embeddings = compute_embeddings_curl(loader, enc.encode)\n\n print(trajectories.shape)\n print(embeddings.shape)\n if enc.type == \"index\":\n index_map(trajectories, embeddings, enc, model)\n elif enc.type == \"reward\":\n reward_map(trajectories, embeddings, enc, model, loader)\n elif enc.type == \"embed\":\n images = get_images(loader) + 0.5\n embed_map(embeddings, images, enc.experiment)\n else:\n raise NotImplementedError()\n\ndef index_map(trajectories, embeddings, enc, model):\n\n print(\"Get index from all data points...\")\n values = pd.DataFrame(columns=['x', 'y', 'Code:'])\n for i, (e, p) in enumerate(zip(embeddings, trajectories)):\n x = float(p[2])\n y = float(p[0])\n e = torch.from_numpy(e).cuda()\n k = enc.compute_argmax(e.unsqueeze(dim=0))\n values = values.append({'x': x, 'y': y, 'Code:': int(k)}, ignore_index=True)\n values['Code:'] = values['Code:'].astype('int32')\n palette = sns.color_palette(\"Paired\", n_colors=len(list(values['Code:'].unique())))\n plot_idx_maps(values, palette, \"brief\", model)\n\n\ndef reward_map(trajectories, embeddings, enc, model, loader): # loader parameter added: it was referenced below but never passed in\n print(\"Get index from all data points...\")\n data_list = []\n for g in range(enc.num_clusters):\n print(f\"Comparing data points with goal state {g}\", end=\"\\r\")\n values = pd.DataFrame(columns=['x', 'y', 'reward'])\n for i, (e, p) in enumerate(zip(embeddings, trajectories)):\n x = float(p[2])\n y = float(p[0])\n e = torch.from_numpy(e).cuda()\n\n coord = None\n if not enc.conf[\"data_type\"] == \"pixel\":\n coord = np.array(p, dtype=np.float32)\n mu = loader.dataset.coord_mean\n std = loader.dataset.coord_std\n coord = (coord-mu)/std\n\n\n r = enc.compute_reward(e.unsqueeze(dim=0), g, coord)\n\n\n values = values.append({'x': x, 'y': y, 'reward': r}, ignore_index=True)\n\n\n data_list.append(values)\n \n #experiment = enc.test['path_weights'].split('/')[0]\n \n plot_reward_maps(data_list, \"vqvae\")\n\n\n\"\"\"\n##### CURL SPARSE \ndef reward_map(trajectories, embeddings, enc, model):\n print(\"Get index from all data points...REWARD\")\n data_list = []\n for g in range(enc.num_clusters):\n print(f\"Comparing data points with goal state {g}\", end=\"\\r\")\n values = pd.DataFrame(columns=['x', 'y', 'reward'])\n for i, (e, p) in enumerate(zip(embeddings, trajectories)):\n x = float(p[2])\n y = float(p[0])\n e = torch.from_numpy(e).cuda()\n logits = enc.compute_logits(e.unsqueeze(dim=0))\n r = 0\n if k == g:\n r = 1 \n #embed()\n max = logits[0][k].detach().cpu().item()\n #logits[0][k] = 0\n #k2 = torch.argmax(logits).cpu().item()\n #sec_max = logits[0][k2].detach().cpu().item()\n #r = (max-sec_max)/max\n \n values = values.append({'x': x, 'y': y, 'reward': r}, ignore_index=True)\n data_list.append(values)\n\n plot_reward_maps(data_list, model)\n\"\"\"\n\ndef embed_map(embeddings, images, exp):\n import tensorflow\n from 
torch.utils.tensorboard import SummaryWriter\n import tensorboard\n\n tensorflow.io.gfile = tensorboard.compat.tensorflow_stub.io.gfile\n writer = SummaryWriter(log_dir=os.path.join(\"./results\", exp))\n writer.add_embedding(embeddings, label_img=images)\n writer.close()\n\nfrom random import shuffle # shuffle was used below but never imported\n\ndef trainValSplit(traj_list, split):\n num_traj = len(traj_list)\n if split == 1:\n return traj_list, []\n else:\n # Since we can mix trajectories from different tasks, we want to shuffle them\n # e.g: otherwise we could have all treechop trajectories as validation\n shuffle(traj_list)\n return traj_list[:int(split*num_traj)], traj_list[int(split*num_traj):]\n\ndef get_train_val_split(t, split):\n path = Path('../results')\n total_t = []\n #items = sorted(os.listdir(path / t[0]))\n items = sorted(os.listdir(path / t[0]), key=lambda x: int(x.split('.')[0].split('_')[1]))\n items = [path / t[0] / x for x in items]\n total_t.extend(items)\n return trainValSplit(total_t, split)\n","sub_path":"src/main/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"571572285","text":"import uvicorn\nfrom fastapi import FastAPI\n\nfrom logger import init_logger\nfrom db import database, engine, metadata\nfrom api.routes import stats, users, auth\n\n\napp = FastAPI()\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await database.connect()\n await init_logger()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await database.disconnect()\n\n\napp.include_router(stats.router, tags=[\"statistics\"])\napp.include_router(auth.router, prefix=\"/auth\", tags=[\"auth\"])\napp.include_router(users.router, prefix=\"/users\", tags=[\"users\"])\n\n\nif __name__ == \"__main__\":\n uvicorn.run(\"main:app\", host='0.0.0.0', port=8000, reload=True, debug=True, workers=1)\n","sub_path":"src/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"392701683","text":"from django.core.cache import cache\nfrom django.utils.deprecation import MiddlewareMixin\n\nfrom user_app.constant import NEED_LOGIN\nfrom user_app.logic import render_json\nfrom user_app.models import User\n\n\nclass AuthMiddleware(MiddlewareMixin):\n def process_request(self, request):\n path = request.path\n need_login_path = ['/user/update_self_data', '/user/issue', '/user/get_self_issue','/user/delete_self_issue']\n if path in need_login_path:\n token = request.GET.get('token')\n uid = cache.get(token)\n if uid:\n user = User.objects.filter(pk=uid).first()\n request.user = user\n else:\n return render_json('need login', NEED_LOGIN)\n else:\n return\n","sub_path":"confession/middleware/authMiddleware.py","file_name":"authMiddleware.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"168621798","text":"import json\nimport requests\nimport secrets\nimport csv\nimport argparse\n\nsecretsVersion = input('To edit production server, enter the name of the secrets file: ')\nif secretsVersion != '':\n try:\n secrets = __import__(secretsVersion)\n print('Editing Production')\n except ImportError:\n print('Editing Development')\nelse:\n print('Editing Development')\n\ntypes = []\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-k', '--keyword', help='Keyword to retreive. optional - if not provided, the script will ask for input')\nparser.add_argument('-t', '--type', choices=['accession', 'resource', 'subject', 'agent', 'location', 'archival_object'], help='What type of records do you want to search? Type out which of the following you want: accession, resource, subject, agent, location, archival_object. optional - if not provided, the script will ask for input')\nargs = parser.parse_args()\n\nif args.keyword:\n keyword = args.keyword\nelse:\n keyword = input('Enter keyword to search: ')\nif args.type:\n type = args.type\n type = type.split(',')\n for item in type:\n item = item.strip()\n item = '&type[]='+item\n types.append(item)\nelse:\n type = input('What type of records do you want to search? Type out which of the following you want in a list: accession, resource, subject, agent, location, archival_object. ')\n type = type.split(',')\n for item in type:\n item = item.strip()\n item = '&type[]='+item\n types.append(item)\n\ntypes = ''.join(types)\nprint(types)\n\n\nbaseURL = secrets.baseURL\nuser = secrets.user\npassword = secrets.password\nrepository = secrets.repository\n\nauth = requests.post(baseURL + '/users/'+user+'/login?password='+password).json()\nsession = auth[\"session\"]\nheaders = {'X-ArchivesSpace-Session': session, 'Content_Type': 'application/json'}\n\n\nendpoint = '/repositories/3/search?q='+keyword+types+'&page_size=2000&page=1'\n\nresults = requests.get(baseURL + endpoint, headers=headers).json()\nresults = results['results']\n\nf = csv.writer(open(keyword+'Search.csv', 'w'))\nf.writerow(['uri'])\n\nfor result in results:\n uri = result['uri']\n f.writerow([uri])\n\nprint(len(results))\n","sub_path":"getURIsFromKeywordSearch.py","file_name":"getURIsFromKeywordSearch.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"653402675","text":"\n# ITERATIVE SOLUTION / DFS APPROACH\n\n# ===> Solution : BETTER than creating a list out of the inorder traversal of tree and then checking if it is in increasing order as this solution returns False immediately when a breach happens.\n\n# Time Complexity: O(N), WORST CASE of perfect BST, it has to traverse through all nodes to return True\n# Space Complexity: No additional space, except the Recursive stack\n# Did problem run on Leetcode: yes\n\nclass TreeNode:\n def __init__(self, key):\n self.key = key\n self.left = None\n self.right = None\n\nclass Solution:\n\n def isValidBST(self, root):\n return self.helper(root, None, None)\n\n def helper(self, root, min, max):\n # BASE CASE\n if not root:\n return True\n\n # LOGIC\n if min != None and root.key <= min:\n return False\n if max != None and root.key >= max:\n return False\n return self.helper(root.left, min, root.key) and self.helper(root.right, root.key, max)\n \n\n\na = TreeNode(20)\na.left = TreeNode(15)\na.right = TreeNode(25)\na.left.left = TreeNode(13)\na.left.right = TreeNode(18)\na.right.left = TreeNode(24)\na.right.right = TreeNode(27)\na.left.left.left = TreeNode(10)\na.left.left.right = TreeNode(14)\na.left.right.left = TreeNode(16)\na.left.right.right = TreeNode(19)\n\nobj = Solution()\nprint(obj.isValidBST(root = a))\n\n\n\n\n \n \n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"valid BST_iterative .py","file_name":"valid BST_iterative .py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"161181748","text":"\"\"\"\n Author: Dhananjay Mehta (mehta.dhananjay28@gmail.com)\n Version: v1.0\n\n -----------------------------------------------------------\n INSIGHT DATA ENGINEERING CODING CHALLENGE: DIGITAL WALLET\n -----------------------------------------------------------\n\n C H A L L A N G E S U M M A R Y -\n\n This programs has been implemented to detect fraudulent payment requests from untrusted users using digital payment\n wallet. This program has been implemented in following stages with CORE and ADDITIONAL features:\n\n ADDITIONAL FEATURES: These are additional features crafted.\n ------------------------------------------------------------\n\n FEATURE 1: Payment heat graph.\n -------------------------------\n As payments stream in, I am generating a graph of payments made during a 60 seconds sliding window.\n This heat graph will help us identify users that are making a very high volume of payments in a short window.\n\n FEATURE 2: Expired payments\n -------------------------------\n Check if the payment requested is ACTIVE or EXPIRED.\n\n FEATURE 3: Payments exceeding maximum limit.\n ----------------------------------------------\n Check if a payment amount is exceeding limit of maximum amount.\n\n\"\"\"\n\n\nclass AdditionalFeatures:\n \"\"\"\n AdditionalFeatures class implements the additional features implemented to detect fraudulent transaction.\n Firstly, it builds a heat graph of payments during a 60 seconds window from incoming payments' stream.\n It will then chek for suspicious payment based on features created and write status report for any doubtful payment.\n \"\"\"\n def __init__(self, h_graph=None):\n \"\"\"\n initializes a objects of class.\n :param h_graph: dictionary of payments made between users and count of number of transaction between them.\n It represent graph of payments made in a sliding window of 60 seconds,\n \"\"\"\n if h_graph is None:\n h_graph = {}\n self.__h_graph = h_graph\n\n self.payments_in_60sec = {} # dictionary of payments in 60 seconds window with timestamp(ts) as key.\n self.timestamp_in_60sec = [] # list of timestamps in the 60 seconds sliding window.\n self.max_timestamp = -1 # timestamp of latest payments.\n self.active = None # payment is active initially\n self.suspected = False # considering payment initially is not supicious\n\n def update_heat_graph(self, ts, user1, user2):\n \"\"\"\n This function will update heat graph with new incoming payments. Heat graph will contain payment\n payments within last 60 seconds. Any payment before 60 seconds is purged.\n\n There are following conditions that need to be checked before the graph is updated:\n 1. If incoming payment appears in order of timestamp.\n 2. If incoming payment is out of order of timestamp.\n :param ts: ts of incoming payment\n :param user1: user making payment\n :param user2: user initiating payment\n\n :return: Status of a payment as active or expired and updated heat graph.\n \"\"\"\n # Step 1: check incoming payments:\n # --------------------------------\n # check if it is ACTIVE: i.e. 
if payment made in last two days\n # max timestamp - incoming timestamp < (2 * 86400) .\n if self.max_timestamp - ts < 172800:\n self.active = True\n \n # Check if payment arrives in order of timestamp-\n if ts >= self.max_timestamp:\n # Check if the timestamp is already in list - self.timestamp_in_60sec\n if ts != self.max_timestamp:\n # append if not in the list.\n self.timestamp_in_60sec.append(ts)\n # update self.max_timestamp\n self.max_timestamp = ts\n\n # check if already in payments_in_60Sec.\n if ts in self.payments_in_60sec:\n # append incoming payments at given time\n self.payments_in_60sec[ts].append([user1, user2])\n else:\n # add the new payments at given time\n self.payments_in_60sec[ts] = [[user1, user2]]\n\n # Step 2: Update the heat graph by adding new edge to h_graph\n # -------------------------------------------------------------\n self.add_graph_edge(\n users=[user1, user2]\n )\n\n # Step 3: Delete edge from heat_graph for payments older than 60 seconds.\n # -----------------------------------------------------------------------\n while self.max_timestamp - self.timestamp_in_60sec[0] > 60:\n # remove edge from heat map\n self.delete_edge_graph()\n # remove elements from payments_in_60sec for the timestamp\n self.payments_in_60sec.pop(self.timestamp_in_60sec[0])\n # remove the timestamp from list of last 60 seconds\n self.timestamp_in_60sec.pop(0)\n\n # If payment does not arrive in order:\n else:\n # Check if incoming payment is in last 60 Seconds\n if self.max_timestamp - ts <= 60:\n \n # find index to insert the new payment in the sliding window\n index = bisect.bisect_left(self.timestamp_in_60sec, ts)\n if (ts != self.timestamp_in_60sec[index]):\n self.timestamp_in_60sec.insert(index, ts)\n\n # Check if timestamp is in list of timestamps\n if ts not in self.timestamp_in_60sec:\n self.timestamp_in_60sec.append(ts)\n\n # Check if ts already in payments60Sec\n if ts in self.payments_in_60sec:\n self.payments_in_60sec[ts].append([user1, user2])\n else:\n self.payments_in_60sec[ts] = [[user1, user2]]\n\n # Update the heat graph for new payment.\n self.add_graph_edge(users=[user1, user2])\n\n # Check if incoming payment is older than 60 Seconds\n else:\n pass\n else:\n # if incoming payment is older than 2 days:\n self.active = False\n \n return self.active\n\n def add_graph_edge(self, users):\n \"\"\"\n This function adds edges to the heat graph.\n :param users: list of users (user1 and user2 ) that will form edge in the graph.\n\n :return:\n graph - updated heat graph with new edges from new payments by users.\n \"\"\"\n # for both the users in users list\n for user1 in users:\n for user2 in users:\n if user1 != user2:\n # if user1 exist for user2 in current graph\n if user1 in self.__h_graph:\n # check if user2 is connected to user1\n if user2 in self.__h_graph[user1]:\n # increase number of payments that connect user1 and user2\n self.__h_graph[user1][user2] += 1\n # if user2 does not exist for user1 in current graph\n else:\n self.__h_graph[user1][user2] = 1\n # if user1 does not exist in graph at all: Add user1 to graph\n else:\n self.__h_graph[user1] = {user2: 1}\n\n def delete_edge_graph(self):\n \"\"\"\n This function deletes edges from heat graph for payments made before 60 seconds.\n :return:\n graph - updated heat graph with edges from payments that happened before 60 seconds window.\n \"\"\"\n for users in self.payments_in_60sec[self.timestamp_in_60sec[0]]:\n # check if there are users for current timestamp to delete\n for user1 in users:\n for user2 in 
users:\n if user2 != user1:\n # reduce count of connecting edges between user1 and user2:\n self.__h_graph[user1][user2] -= 1\n # if user1 and user2 are no longer connected:\n if self.__h_graph[user1][user2] == 0:\n # remove user2 from connection of user1:\n self.__h_graph[user1].pop(user2)\n # if user1 is not connected to any other user:\n if self.__h_graph[user1] == {}:\n # remove user1 from heat graph:\n self.__h_graph.pop(user1)\n\n def check_if_suspicious(self, users):\n \"\"\"\n This function checks if an incoming payment looks suspicious.\n A payment is SUSPICIOUS if:\n - if user1 or user2 has a significantly large number of transactions with other users in the last 60 seconds.\n - if user1 and user2 have a large number of transactions between them in the last 60 seconds.\n NOTE: I have set the limit for suspicious transactions as >10 in 60 seconds; this can be increased or decreased\n as per requirement in future.\n\n :param users: list of two users [user1, user2] between whom the payment needs to be checked\n\n :return:\n suspected: boolean value indicating if a payment is suspicious or not.\n \"\"\"\n for user1 in users:\n for user2 in users:\n if user2 != user1:\n # if user 1 has any payment in last 60 seconds\n if user1 in self.__h_graph:\n # if user1 has many transactions in less than 60 seconds: e.g. 10 with different users \n if len(self.__h_graph[user1]) > 10:\n self.suspected = True\n # if user1 has large number of transactions with user2\n elif user2 in self.__h_graph[user1] and self.__h_graph[user1][user2] > 10:\n self.suspected = True\n\n return self.suspected","sub_path":"insight_testsuite/temp/src/addedfeatures.py","file_name":"addedfeatures.py","file_ext":"py","file_size_in_byte":10313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"373990614","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# IkaLog\n# ======\n# Copyright (C) 2015 Takeshi HASEGAWA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport cv2\nimport os\nimport sys\n\nsys.path.append('.')\nbase_dir = sys.argv[1]\n\nfrom ikalog.utils import IkaGlyphRecoginizer\n\nweapons = IkaGlyphRecoginizer()\nweapons.load_model_from_file(\"data/weapons.trained\")\n\nfor root, dirs, files in os.walk(base_dir):\n l = []\n results = {}\n for file in files:\n if file.endswith(\".png\"):\n f = os.path.join(root, file)\n img = cv2.imread(f)\n r, model = weapons.guessImage(img)\n #print(\" %s %d \" % (r['name'], r['score'], f))\n name = r['name']\n if not name in results:\n results[name] = []\n\n results[name].append( { 'img': f, 'score': r['score'] } )\n\nfor weapon in weapons.models:\n name = weapon['name']\n if not name in results:\n count = 0\n else:\n count = len(results[name])\n print(\"%s (%d)\" % (name, count))\n if count == 0:\n continue\n for e in results[name]:\n print(\" \" % (e['score'], e['img']))\n","sub_path":"tools/test_weapons.py","file_name":"test_weapons.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"41687180","text":"# 将前面的链表,封装成容器\n# 要求:\n# 1、提供__getitem__、__iter__、__setitem__方法\n# 2、使用一个列表,辅助完成上面的方法\n# 3、进阶:不使用列表,完成上面的方法\n# 本例未采用list,使用链表完成插入、删除,但是查询效率低\n\n\nclass Node: # 节点保存内容和前后节点信息\n def __init__(self, item, next=None, prev=None):\n self.item = item\n self.next = next\n self.prev = prev\n\n def __repr__(self):\n return \"({} <= {} => {})\".format(\n self.prev.item if self.prev else None,\n self.item,\n self.next.item if self.next else None)\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n self.__size = 0\n\n def __len__(self):\n return self.__size\n\n\n def append(self, item):\n node = Node(item)\n if self.head is None:\n self.head = node # 设置开头结点,以后不变\n else:\n self.tail.next = node # 当前最后一个结点关联下一个跳\n node.prev = self.tail # 前后关联\n self.tail = node # 更新结尾结点\n\n self.__size += 1\n return self\n\n def iternodes(self, reverse=False):\n current = self.head if not reverse else self.tail\n while current:\n yield current\n current = current.next if not reverse else current.prev\n\n def insert(self, index, item):\n if index < 0: # 不接受负数\n raise IndexError('Not negative index {}'.format(index))\n\n current = None\n for i, node in enumerate(self.iternodes()):\n if i == index:\n current = node\n break\n else:\n self.append(item)\n return\n\n node = Node(item) # 待加入的结点\n prev = current.prev # 前一个\n next = current # 后一个\n\n if prev is None: # 头部 i==0\n self.head = node\n else: # 不是首元素\n node.prev = prev\n prev.next = node\n node.next = next\n next.prev = node\n self.__size += 1\n\n def pop(self): # 尾部移除\n if self.tail is None: # 空\n raise Exception('Empty')\n\n node = self.tail\n item = node.item\n prev = node.prev\n\n if prev is None: # only one node\n self.head = None\n self.tail = None\n else:\n prev.next = None\n self.tail = prev\n\n self.__size -= 1\n return item\n\n def remove(self, index):\n if self.tail is None: # 空\n raise Exception('Empty')\n\n if index < 0:\n raise IndexError('Not negative index {}'.format(index))\n\n current = None\n for i, node in enumerate(self.iternodes()):\n if i == index:\n current = node\n break\n else: # not found\n raise IndexError('Wrong index {}'.format(index))\n\n prev = current.prev\n next = current.next\n\n # 4种情况\n if prev is None and next is None:\n self.head = None\n self.tail = None\n elif prev is None: # 头部\n self.head = next\n next.prev = None\n elif next is None:\n self.tail = prev\n prev.next = None\n else:\n prev.next = next\n next.prev = prev\n\n del current\n self.__size -= 1\n\n def __getitem__(self, index):\n reverse = True if index < 0 else False\n start = 1 if index < 0 else 0\n for i,node in enumerate(self.iternodes(reverse),start):\n if i == abs(index):\n return node\n else:\n raise IndexError\n\n def __setitem__(self, index, value):\n self[index].item = value\n\n __iter__ = iternodes\n\n\nl = LinkedList()\nl.append(1)\nl.append(2)\nl.append(3)\nl.append(4)\nl.append(5)\nl.remove(0)\nl.pop()\nl.insert(2, 5)\nprint(len(l))\nl[0] = 'f'\nfor i in l.iternodes():\n print(i)\n","sub_path":"day_21.py","file_name":"day_21.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"404559899","text":"# useful tool for copying partner work\nimport os\n\npartner_pd = 1\npartner_first = 'Yaru'\npartner_last = 'Lu'\nwork_name = '16_flasherit'\n\nif __name__ == '__main__':\n submodule_name = str(partner_pd) + '/' + partner_last.lower() + partner_first[0].upper()\n print('Getting [%s] from %s %s in period %d' % (work_name, partner_first, partner_last, partner_pd))\n os.system('git clone https://github.com/stuy-softdev/workshop19-20 temp/workshop/')\n os.chdir('temp/workshop')\n os.system('git submodule update --remote --init %s' % submodule_name)\n os.chdir('../..')\n os.system('rm -rf %s' % work_name)\n os.system('mkdir %s' % work_name)\n os.system('mv temp/workshop/%s/%s .' % (submodule_name, work_name))\n os.system('rm -rf temp')\n print('Done!')\n","sub_path":"Fall/15_login/copier.py","file_name":"copier.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"403356772","text":"from __future__ import print_function\nfrom __future__ import division\n\"\"\"\ntest: True\n\"\"\"\nfrom six.moves import range\nimport numpy as np\nimport sympy as sp\nimport mpi4py.MPI as mpi\nimport pyLBM\n\nX, Y, LA = sp.symbols('X, Y, LA')\nrho, qx, qy = sp.symbols('rho, qx, qy')\n\ndef bc_up(f, m, x, y, driven_velocity):\n m[qx] = driven_velocity\n\ndef vorticity(sol):\n #sol.f2m()\n qx_n = sol.m[qx]\n qy_n = sol.m[qy]\n vort = np.abs(qx_n[1:-1, 2:] - qx_n[1:-1, :-2]\n - qy_n[2:, 1:-1] + qy_n[:-2, 1:-1])\n return vort.T\n\ndef run(dx, Tf, generator=\"cython\", sorder=None, withPlot=True):\n \"\"\"\n Parameters\n ----------\n\n dx: double\n spatial step\n\n Tf: double\n final time\n\n generator: pyLBM generator\n\n sorder: list\n storage order\n\n withPlot: boolean\n if True plot the solution otherwise just compute the solution\n\n \"\"\"\n la = 1.\n rhoo = 1.\n mu = 1.e-4\n zeta = 1.e-4\n driven_velocity = 0.2 # velocity of the upper border\n dummy = 3.0/dx\n s1 = 1.0/(0.5+zeta*dummy)\n s2 = 1.0/(0.5+mu*dummy)\n s = [0.,0.,0.,s1,s1,s1,s1,s2,s2]\n Tf = 10.\n dummy = 1./(LA**2*rhoo)\n qx2 = dummy*qx**2\n qy2 = dummy*qy**2\n q2 = qx2+qy2\n qxy = dummy*qx*qy\n\n lid_cavity = {\n 'parameters':{LA: la},\n 'box':{'x':[0., 1.], 'y':[0., 1.], 'label':[0, 0, 0, 1]},\n 'space_step': dx,\n 'scheme_velocity':LA,\n 'schemes':[\n {\n 'velocities':list(range(9)),\n 'polynomials':[\n 1, LA*X, LA*Y,\n 3*(X**2+Y**2)-4,\n 0.5*(9*(X**2+Y**2)**2-21*(X**2+Y**2)+8),\n 3*X*(X**2+Y**2)-5*X, 3*Y*(X**2+Y**2)-5*Y,\n X**2-Y**2, X*Y\n ],\n 'relaxation_parameters':s,\n 'equilibrium':[\n rho,\n qx, qy,\n -2*rho + 3*q2,\n rho - 3*q2,\n -qx/LA, -qy/LA,\n qx2 - qy2, qxy\n ],\n 'conserved_moments': [rho, qx, qy],\n 'init': {rho: 1., qx: 0., qy: 0.},\n },\n ],\n 'relative_velocity': [qx/rho, qy/rho],\n 'boundary_conditions':{\n 0:{'method':{0: pyLBM.bc.Bouzidi_bounce_back}},\n 1:{'method':{0: pyLBM.bc.Bouzidi_bounce_back}, 'value':(bc_up, (driven_velocity,))}\n },\n 'generator': generator,\n }\n\n sol = pyLBM.Simulation(lid_cavity, sorder=sorder)\n\n if withPlot:\n # init viewer\n viewer = pyLBM.viewer.matplotlibViewer\n fig = viewer.Fig()\n ax = fig[0]\n image = ax.image(vorticity, (sol,), cmap='jet', clim=[0, .1])\n\n def update(iframe):\n nrep = 100\n for i in range(nrep):\n sol.one_time_step()\n\n image.set_data(vorticity(sol))\n ax.title = \"Solution t={0:f}\".format(sol.t)\n\n # run the simulation\n fig.animate(update, interval=1)\n fig.show()\n else:\n while sol.t < Tf:\n sol.one_time_step()\n\n return sol\n\nif __name__ == '__main__':\n dx = 1./256\n Tf = 10.\n run(dx, Tf)\n","sub_path":"demo/2D/lid_driven_cavity.py","file_name":"lid_driven_cavity.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"3777686","text":"\nimport pickle\nimport logging\n\nfrom ..task import call_later\n\n\nDEBUG = True\n_logger = logging.getLogger(__name__)\n__all__ = ['UDPActor', 'UDPPickleActor']\n\n\nclass UDPActor:\n \"\"\"\n UDPActor\n Actors are helper objects for a director.\n There is one actor for each peer.\n \"\"\"\n def __init__(self, director, peer):\n if DEBUG: _logger.debug(\"__init__ %r %r\", director, peer)\n self.director = director\n self.peer = peer\n # add a timer\n self.timeout = director.timeout\n if self.timeout > 0:\n self.timeout_handle = call_later(self.timeout, self.idle_timeout)\n else:\n self.timeout_handle = None\n # tell the director this is a new actor\n self.director.add_actor(self)\n\n def idle_timeout(self):\n if DEBUG: _logger.debug(\"idle_timeout\")\n # tell the director this is gone\n self.director.del_actor(self)\n\n def indication(self, pdu):\n if DEBUG: _logger.debug(\"indication %r\", pdu)\n # reschedule the timer\n if self.timeout_handle:\n self.timeout_handle.cancel()\n self.timeout_handle = call_later(self.timeout, self.idle_timeout)\n # put it in the outbound queue for the director\n self.director.send_request(pdu)\n\n def response(self, pdu):\n if DEBUG: _logger.debug(\"response %r\", pdu)\n # reschedule the timer\n if self.timeout_handle:\n self.timeout_handle.cancel()\n self.timeout_handle = call_later(self.timeout, self.idle_timeout)\n # process this as a response from the director\n self.director.response(pdu)\n\n def handle_error(self, error=None):\n if DEBUG: _logger.debug(\"handle_error %r\", error)\n # pass along to the director\n if error is not None:\n self.director.actor_error(self, error)\n\n\nclass UDPPickleActor(UDPActor):\n\n def __init__(self, *args):\n if DEBUG: _logger.debug(\"__init__ %r\", args)\n UDPActor.__init__(self, *args)\n\n def indication(self, pdu):\n if DEBUG: _logger.debug(\"indication %r\", pdu)\n # pickle the data\n pdu.pduData = pickle.dumps(pdu.pduData)\n # continue as usual\n UDPActor.indication(self, pdu)\n\n def response(self, pdu):\n if DEBUG: _logger.debug(\"response %r\", pdu)\n # unpickle the data\n try:\n pdu.pduData = pickle.loads(pdu.pduData)\n except:\n _logger.exception(\"pickle error\")\n return\n # continue as usual\n UDPActor.response(self, pdu)","sub_path":"bacpypes/transport/udp_actor.py","file_name":"udp_actor.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"566887893","text":"# KMP \n\ndef failure(p):\n lps=[0]*len(p)\n j=0\n i=1\n while iTest it on this page first!\r\n {injected} ''')\r\n\r\n@app.errorhandler(404)\r\ndef page_not_found(error):\r\n template = '''\r\n Oops! This page doesn't exist! \r\n %s does not work! \r\n ''' % (urllib.parse.unquote(request.url))\r\n return render_template_string(template)\r\n\r\n\r\nDATABASE = 'controllers/site.db'\r\nreturnedMessage = None\r\n\r\ndef query_db(query):\r\n db = sqlite3.connect(DATABASE)\r\n cur = db.cursor()\r\n cur.execute(query)\r\n rv = cur.fetchall()\r\n cur.close()\r\n return (rv if rv else None)\r\n\r\ndef insert_db(query):\r\n print(query)\r\n db = sqlite3.connect(DATABASE)\r\n cur = db.cursor()\r\n cur.execute(query)\r\n print('success')\r\n db.commit()\r\n\r\ndef get_reset_token(user, expires_sec=300):\r\n s = Serializer(current_app.config['SECRET_KEY'], expires_sec)\r\n return s.dumps({'user_id': user[0]}).decode('utf-8')\r\n\r\n\r\ndef verify_reset_token(token):\r\n s = Serializer(current_app.config['SECRET_KEY'])\r\n try:\r\n user_id = s.loads(token)['user_id']\r\n except:\r\n return None\r\n return user_id\r\n\r\n\r\n\r\n# \"\"\"\r\n# Functions \r\n# \"\"\"\r\n# Refreshes the json file if admin adds a new product in##\r\ndef refresh():\r\n with open('json_files/product.json', 'r+') as f:\r\n data = json.load(f)\r\n return data \r\n\r\ndef refreshEvents():\r\n with open('json_files/events.json', 'r+') as f:\r\n data = json.load(f)\r\n return data\r\n\r\ndef refreshAnalytics():\r\n with open('json_files/analytics.json', 'r+') as f:\r\n data = json.load(f)\r\n return data \r\n\r\n# ##Ensures that allowed images are accepted##\r\ndef allowed_image(filename):\r\n if not \".\" in filename:\r\n return False\r\n ext = filename.rsplit(\".\", 1)[1]\r\n if ext.upper() in app.config[\"ALLOWED_IMAGE_EXTENSIONS\"]:\r\n return True\r\n else:\r\n return True\r\n\r\n\r\n# \"\"\"\r\n# Home, contact-us, about pages \r\n# \"\"\"\r\n\r\n@app.route('/files', defaults={'req_path': ''})\r\n@app.route('/files')\r\ndef dir_listing(req_path):\r\n BASE_DIR = 'files'\r\n\r\n # Joining the base and the requested path\r\n abs_path = os.path.join(BASE_DIR, req_path)\r\n\r\n # Return 404 if path doesn't exist\r\n if not os.path.exists(abs_path):\r\n return abort(404)\r\n\r\n # Check if path is a file and serve\r\n if os.path.isfile(abs_path):\r\n return send_file(abs_path)\r\n\r\n # Show directory contents\r\n files = os.listdir(abs_path)\r\n return render_template('files.html', files=files)\r\n\r\n@app.route('/files/')\r\ndef fi(path):\r\n try:\r\n readme_file = open(f\"files/{path}\", \"r\")\r\n md_template_string = markdown.markdown(\r\n readme_file.read(), extensions=[\"fenced_code\"]\r\n )\r\n except:\r\n return send_file(f\"files/{path}\")\r\n return md_template_string\r\n@app.route('/')\r\ndef home():\r\n if \"user_id\" in session:\r\n orginalCartItems = query_db(f\"SELECT * FROM product WHERE user_id = {session['user_id']}\")\r\n if orginalCartItems == None:\r\n orginalCartItems = []\r\n else:\r\n orginalCartItems = []\r\n return render_template('homepage.html', cartItems=orginalCartItems)\r\n\r\n@app.route('/about')\r\ndef about():\r\n if \"user_id\" in session:\r\n orginalCartItems = query_db(f\"SELECT * FROM product WHERE user_id = {session['user_id']}\")\r\n if orginalCartItems == None:\r\n orginalCartItems = []\r\n else:\r\n orginalCartItems = []\r\n return render_template('aboutus.html', cartItems=orginalCartItems)\r\n\r\n@app.route('/contactUs', methods=['GET', 'POST'])\r\ndef 
contactUs():\r\n if \"user_id\" in session:\r\n orginalCartItems = query_db(f\"SELECT * FROM product WHERE user_id = {session['user_id']}\")\r\n if orginalCartItems == None:\r\n orginalCartItems = []\r\n else:\r\n orginalCartItems = []\r\n form = ContactUs()\r\n if request.method == 'POST':\r\n sendEmail(form.email.data, form.fullname.data, form.feedback.data, request.form['optradio'])\r\n flash(f'Message has been sent! Thank you for leaving a message :) ', 'success')\r\n return redirect(url_for('home'))\r\n else:\r\n pass\r\n return render_template(\"contactUs.html\", form=form, cartItems=orginalCartItems)\r\n\r\n# \"\"\"\r\n# ERROR ROUTE\r\n# \"\"\"\r\n# \"\"\"\r\n# Account Related Routes \r\n# \"\"\"\r\n@app.route('/register', methods=['GET', 'POST'])\r\ndef register():\r\n # if current_user.is_authenticated:\r\n # return redirect(url_for('registerStep2'))\r\n form = RegistrationForm()\r\n if request.method == 'POST' and form.validate_on_submit():\r\n # hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\r\n query = f\"\"\"INSERT INTO user(user_id, fullname, email, password, security_question, security_answer) VALUES(NULL, \\'{form.fullname.data}\\', \\'{form.email.data}\\' , \\'{form.password.data}\\', \\'{form.security_question.data}\\', \\'{form.security_answer.data}\\');\"\"\"\r\n insert_db(query)\r\n flash('Your account has been created! You are now able to log in', 'success')\r\n return redirect(url_for('login'))\r\n return render_template('register.html', title='Register', form=form)\r\n\r\n@app.route('/registerStep2', methods=['GET', 'POST'])\r\ndef registerStep2():\r\n form = Billing()\r\n if form.validate_on_submit():\r\n # exisr = AddressInfo.query.filter_by(address=form.address.data, user_id=current_user.id).first()\r\n # print(exisr)\r\n exisr = query_db(f\"SELECT * FROM address_info WHERE address= \\'{form.address.data}\\' AND user_id={session['user_id']}\")\r\n print(exisr)\r\n if exisr == None:\r\n address = form.address.data\r\n address = address.strip()\r\n country = form.country.data\r\n state = form.state.data\r\n postal = form.postal.data\r\n insert_db(f\"Update address_info set default_add = 0 WHERE user_id={session['user_id']}\")\r\n insert_db(f\"INSERT INTO address_info(addressID, address, country, state, postal, default_add, user_id) VALUES(NULL, \\'{address}\\', \\'{country}\\', \\'{state}\\', \\'{postal}\\', 1, {session['user_id']})\")\r\n return redirect(url_for('myAccount'))\r\n # elif str(exisr).find('SELECT') != None and exisr == None:\r\n # address = form.address.data\r\n # country = form.country.data\r\n # state = form.state.data\r\n # postal = form.postal.data\r\n # address = AddressInfo(address=address, country=country, state=state, postal=postal, user_id = current_user.id, default='True')\r\n # db.session.add(address)\r\n # db.session.commit()\r\n # return redirect(url_for('myAccount'))\r\n else:\r\n flash('Address already added! 
Please add a different address', 'danger')\r\n return render_template('registerStep2.html', form = form, data=[])\r\n\r\n@app.route('/editAddress', methods=['GET', 'POST'])\r\ndef editBilling():\r\n address = request.args.get('address')\r\n addressinfo = query_db(f\"SELECT * FROM address_info WHERE address= \\'{address}\\'\")[0]\r\n print(addressinfo[1])\r\n # addressinfo = AddressInfo.query.filter_by(address=address, user_id=current_user.id).first()\r\n form = UpdateBilling()\r\n if form.validate_on_submit() and request.method=='POST':\r\n exisr = query_db(f\"SELECT address FROM address_info WHERE address= \\'{form.address.data}\\' AND user_id = {session['user_id']}\")\r\n if exisr == None:\r\n print('new address')\r\n insert_db(f\"UPDATE address_info set country = \\'{form.country.data}\\' WHERE address=\\'{address}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE address_info set postal = \\'{form.postal.data}\\' WHERE address=\\'{address}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE address_info set state = \\'{form.state.data}\\' WHERE address=\\'{address}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE address_info set address = \\'{form.address.data}\\' WHERE address=\\'{address}\\' AND user_id = {session['user_id']}\")\r\n # addressinfo.address = form.address.data\r\n # addressinfo.country = form.country.data\r\n # addressinfo.state = form.state.data\r\n # addressinfo.postal = form.postal.data\r\n # db.session.commit()\r\n return redirect(url_for('myAccount'))\r\n elif addressinfo[1] == form.address.data:\r\n print(\"same address\")\r\n insert_db(f\"UPDATE address_info set address = \\'{form.address.data}\\' WHERE address=\\'{address}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE address_info set country = \\'{form.country.data}\\' WHERE address=\\'{address}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE address_info set postal = \\'{form.postal.data}\\' WHERE address=\\'{address}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE address_info set state = \\'{form.state.data}\\' WHERE address=\\'{address}\\' AND user_id = {session['user_id']}\")\r\n return redirect(url_for('myAccount'))\r\n else:\r\n flash('Address already added! 
Please add a different address', 'danger')\r\n return render_template('editAddress.html', form=form, data = addressinfo)\r\n return render_template('editAddress.html', form=form, data = addressinfo)\r\n \r\n@app.route('/editCardInfo', methods=['GET', 'POST'])\r\ndef editCardInfo():\r\n cardno = request.args.get('card')\r\n cardinfo = query_db(f\"SELECT * FROM card_info WHERE cardno= \\'{cardno}\\'\")[0]\r\n print(cardinfo[1])\r\n # addressinfo = AddressInfo.query.filter_by(address=address, user_id=current_user.id).first()\r\n form = UpdateCard()\r\n if form.validate_on_submit() and request.method=='POST':\r\n exisr = query_db(f\"SELECT cardno FROM card_info WHERE cardno= \\'{form.cardno.data}\\' AND user_id = {session['user_id']}\")\r\n if exisr == None:\r\n insert_db(f\"UPDATE card_info set month = \\'{form.exp.data}\\' WHERE cardno= \\'{cardno}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE card_info set year = \\'{form.year.data}\\' WHERE cardno= \\'{cardno}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE card_info set card_name = \\'{form.name.data}\\' WHERE cardno= \\'{cardno}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE card_info set cardno = \\'{form.cardno.data}\\' WHERE cardno= \\'{cardno}\\' AND user_id = {session['user_id']}\")\r\n # addressinfo.address = form.address.data\r\n # addressinfo.country = form.country.data\r\n # addressinfo.state = form.state.data\r\n # addressinfo.postal = form.postal.data\r\n # db.session.commit()\r\n return redirect(url_for('myAccount'))\r\n elif cardinfo[2] == form.cardno.data:\r\n insert_db(f\"UPDATE card_info set month = \\'{form.exp.data}\\' WHERE cardno= \\'{cardno}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE card_info set year = \\'{form.year.data}\\' WHERE cardno= \\'{cardno}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE card_info set card_name = \\'{form.name.data}\\' WHERE cardno= \\'{cardno}\\' AND user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE card_info set cardno = \\'{form.cardno.data}\\' WHERE cardno= \\'{cardno}\\' AND user_id = {session['user_id']}\")\r\n return redirect(url_for('myAccount'))\r\n else:\r\n flash('Card already added! Please add a different card', 'danger')\r\n return render_template('editCard.html', form=form, data = cardinfo) \r\n return render_template('editCard.html', form=form, data = cardinfo)\r\n\r\n@app.route('/registerStep3', methods=['GET', 'POST'])\r\ndef registerStep3():\r\n form = PaymentInfo()\r\n if form.validate_on_submit():\r\n # exisr = AddressInfo.query.filter_by(address=form.address.data, user_id=current_user.id).first()\r\n # print(exisr)\r\n exisr = query_db(f\"SELECT * FROM card_info WHERE cardno = \\'{form.cardno.data}\\' AND user_id={session['user_id']}\")\r\n print(exisr)\r\n if exisr == None:\r\n cardno = form.cardno.data\r\n cardno = cardno.strip()\r\n if cardno[0] == '5':\r\n card_type = \"master\"\r\n else:\r\n card_type = \"visa\"\r\n cardname = form.name.data\r\n exp = form.exp.data\r\n year = form.year.data\r\n insert_db(f\"Update card_info set default_CARD = 0 WHERE user_id={session['user_id']}\")\r\n insert_db(f\"INSERT INTO card_info(cardID, card_name, cardno, month, year, default_CARD, card_type,user_id) VALUES(NULL, \\'{cardname}\\', \\'{cardno}\\', \\'{exp}\\', \\'{year}\\', 1, \\'{card_type}\\',{session['user_id']})\")\r\n return redirect(url_for('myAccount'))\r\n else:\r\n flash('Card already added! 
Please add a different card', 'danger')\r\n return render_template('registerStep3.html', form = form, data=None)\r\n\r\n \r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login(): \r\n form = LoginForm()\r\n if form.validate_on_submit():\r\n # emailExist = (f\"SELECT email FROM user WHERE email = '{form.email.data}' and password = '{form.password.data}';\")\r\n user = f\"SELECT * FROM user WHERE email = '{form.email.data}' AND password = \\'{form.password.data}\\';\"\r\n email = query_db(f\"SELECT * FROM user WHERE email = '{form.email.data}'\")[0]\r\n if email == None:\r\n flash('Invalid email. Please input a valid email', 'danger')\r\n else:\r\n if query_db(user) != None:\r\n # print(user)\r\n # passwordExist = query_db(f\"SELECT password FROM user WHERE email = \\'{form.email.data}\\';\")\r\n # if passwordExist[0][0] == form.password.data:\r\n activeCheck = query_db(f\"SELECT status FROM user WHERE email = \\'{form.email.data}\\'\")[0][0]\r\n adminMail = email[3]\r\n session['user_id'] = query_db(user)[0][0]\r\n # id = query_db(user)[0][0]\r\n if activeCheck == \"Inactive\":\r\n return redirect(url_for('activateask'))\r\n elif \"@prestigium.com\" in adminMail:\r\n return redirect(url_for('admin'))\r\n print(query_db(user))\r\n return redirect(url_for('home'))\r\n else:\r\n flash('Wrong password. Please check your password', 'danger')\r\n return render_template('login.html', title='Login', form=form)\r\n \r\n\r\n@app.route('/myAccount', methods=['GET', 'POST'])\r\ndef myAccount():\r\n orginalCartItems = query_db(f\"SELECT * FROM product WHERE user_id = {session['user_id']}\")\r\n if orginalCartItems == None:\r\n orginalCartItems = []\r\n removeConfirmation = request.args.get('delete')\r\n removeAddress = request.args.get('address')\r\n removeCard = request.args.get('card')\r\n removeReview = request.args.get('name')\r\n if removeAddress != None and removeConfirmation == 'true':\r\n # address = AddressInfo.query.filter_by(address=removeAddress, user_id = current_user.id).first()\r\n insert_db(f\"DELETE FROM address_info WHERE address= \\'{removeAddress}\\' AND user_id= {session['user_id']}\")\r\n addresses = query_db(f\"SELECT * FROM address_info WHERE user_id = {session['user_id']}\")[-1][1]\r\n insert_db(f\"UPDATE address_info set default_add = {1} WHERE address=\\'{addresses}\\' AND user_id= {session['user_id']}\")\r\n return redirect(url_for('myAccount'))\r\n elif removeReview != None and removeConfirmation == 'true': \r\n # review = Review.query.filter_by(prod_name=removeReview, user_id=current_user.id).first()\r\n insert_db(f\"DELETE FROM review WHERE prod_name= \\'{removeReview}\\' AND user_id= {session['user_id']}\")\r\n elif removeCard != None and removeConfirmation == 'true':\r\n # cards = CardInfo.query.filter_by(user_id=current_user.id)\r\n insert_db(f\"DELETE FROM card_info WHERE cardno = \\'{removeCard}\\' AND user_id = {session['user_id']} \")\r\n cards = query_db(F\"SELECT * FROM card_info WHERE user_id= {session['user_id']}\")\r\n insert_db(f\"UPDATE card_info set default_CARD = {1} WHERE cardno = \\'{cards[-1][2]}\\' AND user_id = {session['user_id']}\")\r\n return redirect(url_for('myAccount'))\r\n old = request.args.get('old')\r\n new = request.args.get('new')\r\n if old != None:\r\n old_password = query_db(f\"SELECT password FROM user WHERE user_id= {session['user_id']}\")[0][0]\r\n if old_password != old:\r\n return \"wrong\"\r\n if new != None:\r\n insert_db(f\"UPDATE user set password = \\'{new}\\' WHERE user_id = {session['user_id']}\")\r\n return 
redirect(url_for('myAccount'))\r\n form = UpdateAccountForm()\r\n if form.submit.data and form.validate_on_submit():\r\n print('no')\r\n image = request.files['image']\r\n filename = request.files['image'].filename \r\n print(filename) \r\n if filename:\r\n image.save(os.path.join(app.config[\"PROFILE_UPLOADS\"], filename))\r\n insert_db(f\"UPDATE user set image_file = \\'../static/img/profile_pic/{filename}\\' WHERE user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE user set fullname = \\'{form.fullname.data}\\' WHERE user_id = {session['user_id']}\")\r\n insert_db(f\"UPDATE user set email = \\'{form.email.data}\\' WHERE user_id = {session['user_id']}\")\r\n # flash('Your account has been updated!', 'success')\r\n return redirect(url_for('myAccount'))\r\n elif request.method == 'GET':\r\n form.fullname.data = query_db(f\"SELECT * FROM user WHERE user_id={session['user_id']}\")[0][2]\r\n form.email.data = query_db(f\"SELECT * FROM user WHERE user_id={session['user_id']}\")[0][3]\r\n tran_list = []\r\n card_list = []\r\n image_file = query_db(f\"SELECT image_file from user WHERE user_id={session['user_id']}\")\r\n address = query_db(f\"SELECT * FROM address_info WHERE user_id={session['user_id']}\")\r\n cards = query_db(f\"SELECT * FROM card_info WHERE user_id={session['user_id']}\")\r\n print(cards)\r\n if cards == None:\r\n cards = []\r\n else:\r\n for i in cards:\r\n print(i[0])\r\n card_list.append({'id':i[0],'card_name':i[1], 'cardno':i[2], 'exp':i[3], 'year':i[4], \"card_type\":i[6], \"default\":i[5]})\r\n previous_transactions = query_db(f\"SELECT * FROM previous_transactions WHERE user_id={session['user_id']}\") \r\n reviews = query_db(f\"SELECT * FROM review WHERE user_id={session['user_id']}\")\r\n if previous_transactions == None:\r\n pass\r\n else:\r\n for i in previous_transactions:\r\n total = 0\r\n date = i[2]\r\n items = ast.literal_eval(i[1])\r\n for j in items:\r\n total += j['prod_price']*j['prod_quantity']\r\n tran_list.append({'id':i[0], 'total':total ,'date': str(date), 'status': i[3],'items':ast.literal_eval(i[1])})\r\n user = query_db(f\"SELECT * FROM user WHERE user_id = {session['user_id']}\")[0] \r\n if address == None:\r\n address = [] \r\n if reviews == None:\r\n reviews = [] \r\n return render_template('myAccount.html', title='Account', image_file=image_file, form=form,accountInfo = address, previous_transactions = tran_list, review=reviews, card=card_list, user=user, 
cartItems=orginalCartItems)\r\n\r\n\r\n@app.route(\"/disable\", methods=[\"GET\", \"POST\"])\r\ndef disable():\r\n form = Disable()\r\n if form.validate_on_submit():\r\n password = query_db(f\"SELECT password FROM user WHERE user_id={session['user_id']}\")[0][0]\r\n if password != None and form.password.data == password:\r\n inactive_user = (f\"UPDATE user SET status = 'Inactive' WHERE user_id='{session['user_id']}'\")\r\n insert_db(inactive_user)\r\n session.pop('user_id', None)\r\n return redirect(url_for('home'))\r\n else:\r\n flash('Password is incorrect. Please retype your password.', 'danger')\r\n return redirect(url_for('disable'))\r\n return render_template('disable.html', form=form)\r\n # if user and bcrypt.check_password_hash(user.password, form.password.data):\r\n # current_user.active = \"Inactive\"\r\n # db.session.commit()\r\n # logout_user()\r\n # return redirect(url_for('home'))\r\n # else:\r\n # flash('Password is incorrect. Please retype your password.', 'danger')\r\n # return redirect(url_for('disable'))\r\n # return render_template('disable.html', form=form)\r\n\r\n@app.route(\"/activate\", methods=[\"GET\", \"POST\"])\r\ndef activate():\r\n form = Activate()\r\n if form.validate_on_submit():\r\n password = query_db(f\"SELECT password FROM user WHERE email = '{form.email.data}'\")[0][0]\r\n if password != None and form.password.data == password:\r\n #LOGIN\r\n user = query_db(f\"SELECT * FROM user WHERE email = \\'{form.email.data}\\'\")[0]\r\n session['user_id'] = user[0]\r\n active_user = (f\"UPDATE user SET status = 'active' WHERE user_id='{session['user_id']}'\")\r\n insert_db(active_user)\r\n return redirect(url_for('home'))\r\n else:\r\n flash('Password is incorrect. Please retype your password.', 'danger')\r\n return redirect(url_for('activate'))\r\n return render_template('activate.html', form=form)\r\n # user = User.query.filter_by(email=form.email.data).first()\r\n # if user and bcrypt.check_password_hash(user.password, form.password.data):\r\n # login_user(user)\r\n # user.active = \"Active\"\r\n # db.session.commit()\r\n # return redirect(url_for('home'))\r\n # else:\r\n # flash('Password is incorrect. 
Please retype your password.', 'danger')\r\n # return redirect(url_for('activate'))\r\n # return render_template('activate.html', form=form)\r\n\r\n@app.route(\"/activateask\", methods=[\"GET\", \"POST\"])\r\ndef activateask():\r\n return render_template('activateask.html')\r\n\r\n\r\n@app.route(\"/logout\")\r\ndef logout():\r\n session.pop('user_id', None)\r\n return redirect(url_for('home'))\r\n\r\n@app.route('/review', methods=[\"GET\", \"POST\"])\r\ndef review():\r\n if request.method == 'GET': \r\n edit = request.args.get('edit')\r\n name = request.args.get('name')\r\n rating = request.args.get('rating')\r\n message = request.args.get('message')\r\n transaction_id = request.args.get('id')\r\n print(\"trans_id\", transaction_id)\r\n pre_tran = query_db(f\"SELECT * FROM previous_transactions WHERE Transaction_Id={transaction_id}\")[0]\r\n cartItems = ast.literal_eval(pre_tran[1])\r\n for i in cartItems: \r\n if i['prod_name'] == name:\r\n prod_name = i['prod_name']\r\n prod_quantity = i['prod_quantity']\r\n prod_price = i['prod_price']\r\n img = i['img']\r\n if edit == 'true':\r\n # review = Review.query.filter_by(prod_name = prod_name, transaction_id = transaction_id, user_id = current_user.id).first()\r\n # review.rating = rating \r\n # review.comment = message \r\n insert_db(f\"UPDATE review set rating = \\'{int(rating)}\\' WHERE prod_name= \\'{name}\\' AND transaction_id = {transaction_id}\")\r\n insert_db(f\"UPDATE review set comment = \\'{message}\\' WHERE prod_name= \\'{name}\\' AND transaction_id = {transaction_id}\")\r\n else:\r\n # review = Review(rating=rating, comment=message, prod_name = prod_name, prod_quantity = prod_quantity, prod_price = prod_price, prod_desc = 'null',img = img,\r\n # date_purchase = pre_tran.transaction_date, transaction_id = transaction_id, user_id = current_user.id)\r\n insert_db(f\"INSERT INTO REVIEW(review_id, rating, comment, prod_name, prod_qty, prod_price, prod_desc, date_purchase, img, Transaction_id, user_id) VALUES(NULL, \\'{rating}\\', \\'{message}\\', \\'{prod_name}\\', \\'{prod_quantity}\\', \\'{prod_price}\\', 'null', \\'{pre_tran[2]}\\' ,\\'{img}\\', \\'{transaction_id}\\', \\'{session['user_id']}\\')\")\r\n return redirect(url_for('myAccount'))\r\n\r\n@app.route('/defaultAddress', methods=['GET', 'POST'])\r\ndef defaultAddress():\r\n address = request.args.get('address')\r\n insert_db(f\"Update address_info set default_add = 0 WHERE user_id={session['user_id']}\")\r\n insert_db(f\"Update address_info set default_add = 1 WHERE address = \\'{address}\\' AND user_id={session['user_id']}\")\r\n \r\n@app.route('/defaultCard', methods=['GET', 'POST'])\r\ndef defaultCard():\r\n card = request.args.get('card')\r\n insert_db(f\"Update card_info set default_CARD = 0 WHERE user_id={session['user_id']}\")\r\n insert_db(f\"Update card_info set default_CARD = 1 WHERE cardno = \\'{card}\\' AND user_id={session['user_id']}\")\r\n \r\n# \"\"\"\r\n# Shop Related Routes \r\n# \"\"\"\r\n@app.route('/shop', methods=['GET', 'POST'])\r\ndef shop():\r\n global returnedMessage\r\n if \"user_id\" in session:\r\n orginalCartItems = query_db(f\"SELECT * FROM product WHERE user_id = {session['user_id']}\")\r\n if orginalCartItems == None:\r\n orginalCartItems = []\r\n id = request.args.get('id')\r\n itemName = request.args.get('name')\r\n quantity = request.args.get('quantity')\r\n delete = request.args.get('delete')\r\n # product = Product.query.filter_by(prod_name=itemName, user_id = current_user.id)\r\n if itemName != None:\r\n product = query_db(f\"SELECT * 
FROM product WHERE prod_name=\\'{itemName}\\' AND user_id= {session['user_id']}\")\r\n else:\r\n product = None\r\n if product != None and quantity != None:\r\n insert_db(f\"UPDATE product set prod_quantity = \\'{int(quantity)}\\' WHERE prod_name=\\'{itemName}\\' AND user_id= {session['user_id']}\")\r\n print('changed')\r\n elif product != None and delete == 'true':\r\n insert_db(f\"DELETE FROM product WHERE prod_name= \\'{itemName}\\' AND user_id= {session['user_id']}\")\r\n else:\r\n with open('json_files/product.json', 'r+') as f:\r\n data = json.load(f)\r\n for i in data: \r\n if str(i['id']) == id:\r\n current = i \r\n # singleProduct = Product(prod_quantity=1, prod_name=current['prod_name'], prod_price=current['prod_price'], prod_desc=current['prod_desc'], img=current['prod_img'], user_id = current_user.id)\r\n insert_db(f\"INSERT INTO product(productID, prod_quantity, prod_name, prod_price, prod_desc, img, user_id) VALUES(NULL, 1, \\'{current['prod_name']}\\', \\'{current['prod_price']}\\' , \\'{current['prod_desc']}\\',\\'{current['prod_img']}\\', {session['user_id']});\")\r\n if returnedMessage == None:\r\n data = refresh()\r\n for i in list(data): # iterate over a copy: remove() inside the loop would skip items\r\n if i['id'] > 12:\r\n data.remove(i)\r\n data.remove(data[-1])\r\n else: \r\n data = returnedMessage[\"data\"]\r\n returnedMessage = None\r\n current = session['user_id']\r\n else:\r\n if returnedMessage == None:\r\n data = refresh()\r\n for i in list(data): # iterate over a copy here as well\r\n if i['id'] > 12:\r\n data.remove(i)\r\n data.remove(data[-1])\r\n else: \r\n data = returnedMessage[\"data\"]\r\n returnedMessage = None\r\n orginalCartItems = []\r\n current = \"None\"\r\n return render_template(\"shop.html\", data = data, cartItems = orginalCartItems, current = current)\r\n\r\n\r\n@app.route('/searchProduct')\r\ndef search():\r\n name=request.args.get('q')\r\n global returnedMessage\r\n returnedMessage = {\"status\":\"success\", \"data\":[]}\r\n product = query_db(f\"SELECT * FROM store_product WHERE prod_name= '{name}' \") \r\n if product == None: \r\n products = query_db(f\"SELECT * FROM store_product\")\r\n filtered_list = []\r\n for i in products:\r\n if name.capitalize() in i[2]:\r\n filtered_list.append(query_db(f\"SELECT * FROM store_product WHERE prod_name = \\'{i[2]}\\'\")[0])\r\n for i in filtered_list:\r\n returnedMessage[\"data\"].append({\"id\":i[0], \"product_quantity\":i[1], \"prod_name\":i[2], \"prod_price\":i[3], \"prod_desc\":i[4], \"prod_img\":i[6], \"status\":i[5]})\r\n else: \r\n for i in product: \r\n returnedMessage[\"data\"].append({\"id\":i[0], \"product_quantity\":i[1], \"prod_name\":i[2], \"prod_price\":i[3], \"prod_desc\":i[4], \"prod_img\":i[6], \"status\":i[5]})\r\n return jsonify(returnedMessage)\r\n\r\n\r\n\r\n@app.route('/single_product/<id>')\r\ndef single_product(id):\r\n global returnedMessage\r\n if \"user_id\" in session:\r\n orginalCartItems = query_db(f\"SELECT * FROM product WHERE user_id = {session['user_id']}\")\r\n curr = session['user_id']\r\n if orginalCartItems == None:\r\n orginalCartItems = []\r\n product_id = request.args.get('id')\r\n itemName = request.args.get('name')\r\n quantity = request.args.get('quantity')\r\n delete = request.args.get('delete')\r\n if itemName != None:\r\n product = query_db(f\"SELECT * FROM product WHERE prod_name=\\'{itemName}\\' AND user_id= {session['user_id']}\")\r\n else:\r\n product = None\r\n if product != None and quantity != None:\r\n insert_db(f\"UPDATE product set prod_quantity = \\'{int(quantity)}\\' WHERE prod_name=\\'{itemName}\\' AND user_id= {session['user_id']}\")\r\n 
print('changed')\r\n elif product != None and delete == 'true':\r\n insert_db(f\"DELETE FROM product WHERE prod_name= \\'{itemName}\\' AND user_id= {session['user_id']}\")\r\n else:\r\n with open('json_files/product.json', 'r+') as f:\r\n data = json.load(f)\r\n for i in data: \r\n if str(i['id']) == id:\r\n current = i \r\n # singleProduct = Product(prod_quantity=1, prod_name=current['prod_name'], prod_price=current['prod_price'], prod_desc=current['prod_desc'], img=current['prod_img'], user_id = current_user.id)\r\n insert_db(f\"INSERT INTO product(productID, prod_quantity, prod_name, prod_price, prod_desc, img, user_id) VALUES(NULL, 1, \\'{current['prod_name']}\\', \\'{current['prod_price']}\\' , \\'{current['prod_desc']}\\',\\'{current['prod_img']}\\', {session['user_id']});\")\r\n else:\r\n orginalCartItems = []\r\n with open('json_files/product.json', 'r+') as f:\r\n data = json.load(f)\r\n for i in data: \r\n if i['id'] == id:\r\n current = i\r\n reviews = query_db(f'SELECT * FROM product_review WHERE prodID = \\'{int(id)}\\'')\r\n if reviews == None:\r\n reviews = []\r\n if \"user_id\" not in session: \r\n curr = \"None\"\r\n else:\r\n curr = session['user_id']\r\n returnedMessage = None\r\n return render_template(\"single_product.html\", data = current, cartItems = orginalCartItems, current2 = curr, reviews = reviews)\r\n\r\n@app.route('/productReview', methods=['GET', 'POST'])\r\ndef productReview():\r\n if request.method == 'GET': \r\n id = request.args.get('prodID')\r\n rating = request.args.get('rating')\r\n comments = request.args.get('comment')\r\n user = session['user_id']\r\n comments = comments.replace('',\"\")\r\n email = query_db(f\"SELECT email FROM user WHERE user_id = \\'{session['user_id']}\\' \")[0][0]\r\n insert_db(f\"INSERT INTO product_review(review_id, prodID, rating, comment, email, user_id) VALUES(NULL, \\'{id}\\', \\'{rating}\\', \\\"{comments}\\\", \\\"{email}\\\",\\'{user}\\')\")\r\n\r\n@app.route('/checkout', methods=['GET', 'POST'])\r\ndef checkout():\r\n if request.method == 'GET':\r\n card = None\r\n user = query_db(f\"SELECT * FROM user WHERE user_id = {session['user_id']}\")[0]\r\n cartItems = query_db(f\"SELECT * FROM product WHERE user_id = {session['user_id']}\")\r\n cardinfo = query_db(f\"SELECT * FROM card_info WHERE default_CARD = 1 AND user_id = {session['user_id']}\")\r\n if cardinfo == None:\r\n cardinfo = []\r\n else:\r\n cardinfo = query_db(f\"SELECT * FROM card_info WHERE default_CARD = 1 AND user_id = {session['user_id']}\")[0]\r\n print(cardinfo)\r\n try:\r\n card = {'card_name':cardinfo[1], 'cardno':cardinfo[2], 'exp':cardinfo[3], 'year':cardinfo[4]}\r\n except:\r\n pass\r\n address_info = query_db(f\"SELECT * FROM address_info WHERE default_add = 1 AND user_id = {session['user_id']}\")\r\n if address_info == None:\r\n address_info =[]\r\n else:\r\n address_info = query_db(f\"SELECT * FROM address_info WHERE default_add = 1 AND user_id = {session['user_id']}\")[0]\r\n if cartItems == None:\r\n cartItems = []\r\n elif request.method == 'POST':\r\n return redirect(url_for('myAccount'))\r\n return render_template('checkout.html', cartItems = cartItems, address = address_info, card = card, user=user)\r\n\r\n@app.route('/paymentConfirmation', methods=['GET', 'POST'])\r\ndef confirm():\r\n if request.method == 'GET':\r\n print('Successful Transaction')\r\n transaction_list = []\r\n li = []\r\n bought_products = query_db(f\"SELECT * FROM product WHERE user_id = {session['user_id']}\")\r\n data = refreshAnalytics()\r\n data2 = refresh()\r\n 
total = 0\r\n for index, i in enumerate(bought_products): \r\n transaction_list.append({'prod_name':i[2], 'prod_quantity':i[1], 'prod_price':i[3], 'img':i[5]})\r\n for y in data:\r\n if y['name'] == i[2]: \r\n y['stock'] -= int(i[1]) \r\n y['count'] += int(i[1])\r\n y['amount_earned'] += (int(i[1])*int(i[3]))\r\n for x in data2:\r\n if x['prod_name'] == i[2]: \r\n x['stock'] -= int(i[1])\r\n with open('json_files/analytics.json', 'w') as f:\r\n json.dump(data, f)\r\n with open('json_files/product.json', 'w') as f:\r\n json.dump(data2, f)\r\n unique = random.randint(100000000000,999999999999)\r\n insert_db(f\"INSERT INTO previous_transactions(Transaction_id, cartItems, transaction_date, user_id) VALUES(\\'{str(unique)}\\', \\\"{str(transaction_list)}\\\", \\'{datetime.now()}\\', \\'{session['user_id']}\\' )\")\r\n insert_db(f\"DELETE FROM product WHERE user_id = {session['user_id']}\")\r\n\r\n# \"\"\"\r\n# Admin Related Routes\r\n# \"\"\"\r\n\r\n# ## Admin Static Routes ##\r\n@app.route('/admin')\r\ndef admin():\r\n # if session['user_id'] == 3:\r\n # previousTransaction = PreviousTransactions.query.all()\r\n # user = query_db(f\"SELECT * FROM user WHERE user_id = {session['user_id']}\")[0]\r\n previousTransaction = query_db(f\"SELECT * FROM previous_transactions\")\r\n if previousTransaction == None:\r\n previousTransaction= []\r\n li = []\r\n for i in previousTransaction: \r\n if i[3] == 'Awaiting order':\r\n li.append(i)\r\n number = len(li)\r\n return render_template('admin/admin.html', previousTransaction = previousTransaction, number = number)\r\n # else:\r\n # return redirect(url_for('home'))\r\n\r\n@app.route('/adminAnalytics')\r\ndef analytics():\r\n # user = query_db(f\"SELECT * FROM user WHERE user_id = {session['user_id']}\")[0]\r\n data = refreshAnalytics()\r\n return render_template('admin/adminAnalytics.html', data = data)\r\n\r\n@app.route('/stats')\r\ndef stats():\r\n data = refreshAnalytics()\r\n return jsonify(data)\r\n\r\n@app.route('/downloadcsvs')\r\ndef downloadcsv():\r\n data = refreshAnalytics()\r\n return send_csv(data,\r\n \"data.csv\", [\"id\", \"name\",\"stock\",\"count\",\"amount_earned\"]) \r\n\r\n@app.route('/trans')\r\ndef trans():\r\n transactions = query_db(f\"SELECT * FROM previous_transactions\")\r\n if transactions == None:\r\n tran_list = []\r\n else:\r\n tran_list = [] \r\n for i in transactions:\r\n total = 0\r\n date = i[2]\r\n items = ast.literal_eval(i[1])\r\n for j in items:\r\n total += int(j['prod_price']*j['prod_quantity'])\r\n tran_list.append({'user_id':i[4],'id':i[0], 'total':total ,'date': str(date), 'status': i[3],'items':ast.literal_eval(i[1])})\r\n return render_template('admin/adminTranList.html', trans = tran_list)\r\n\r\n# @app.route('/adminIndvTran')\r\n# def indv():\r\n# id = request.args.get('id')\r\n# trans = PreviousTransactions.query.filter_by(transactionId=id).first()\r\n# transactions = []\r\n# transactions.append(trans)\r\n# tran_list = []\r\n# for i in transactions:\r\n# total = 0\r\n# date = i.transaction_date\r\n# items = ast.literal_eval(i.cartItems)\r\n# for j in items:\r\n# total += int(j['prod_price']*j['prod_quantity'])\r\n# tran_list.append({'user_id':i.user_id,'id':i.transactionId, 'total':total ,'date': str(date), 'status': i.status,'items':ast.literal_eval(i.cartItems)})\r\n# return render_template('admin/adminTransactions.html', trans = tran_list)\r\n@app.route('/Calendar')\r\ndef Calander(): \r\n if request.method == 'GET':\r\n name = request.args.get('ename')\r\n description = 
request.args.get('edesc')\r\n date = request.args.get('edate')\r\n className = request.args.get('ecolor')\r\n icon = request.args.get('eicon')\r\n new_dict = {\r\n \"title\": name,\r\n \"description\": description,\r\n \"start\": date,\r\n \"end\": date,\r\n \"className\": className,\r\n \"icon\" : icon\r\n }\r\n if name == None:\r\n pass\r\n else:\r\n with open('json_files/events.json', 'r') as f:\r\n data = json.load(f)\r\n data.append(new_dict)\r\n with open('json_files/events.json', 'w') as f:\r\n json.dump(data, f)\r\n return render_template('admin/calander.html')\r\n\r\n\r\n# @app.route('/announcement')\r\n# def announcement():\r\n# if request.method == 'GET':\r\n# name = request.args.get('ann_ename')\r\n# if name == '':\r\n# return redirect(url_for('Calander'))\r\n# description = request.args.get('anouncements')\r\n# desc = request.args.get('edesc_ann')\r\n# start_date = request.args.get('edateStart')\r\n# end_date = request.args.get('edateEnd')\r\n# className = request.args.get('ecolor_ann')\r\n# icon = request.args.get('eicon_ann')\r\n# new_dict = {\r\n# \"title\": name,\r\n# \"anouncement\": description,\r\n# \"description\":desc,\r\n# \"start\": start_date,\r\n# \"end\": end_date,\r\n# \"className\": className,\r\n# \"icon\" : icon\r\n# }\r\n# if name == None:\r\n# pass\r\n# else:\r\n# with open('json_files/events.json', 'r') as f:\r\n# data = json.load(f)\r\n# data.append(new_dict)\r\n# with open('json_files/events.json', 'w') as f:\r\n# json.dump(data, f)\r\n# return redirect(url_for('Calander'))\r\n\r\n@app.route('/events')\r\ndef events():\r\n data = refreshEvents()\r\n return jsonify(data)\r\n\r\n# ## Admin User Section Routes##\r\n# @app.route('/viewUser')\r\n# def viewUser():\r\n# if current_user.email == 'admin@gmail.com':\r\n# return render_template('admin/viewUser.html')\r\n# else:\r\n# return redirect(url_for('home'))\r\n\r\n@app.route('/viewIndividualUser', methods=['GET', 'POST'])\r\ndef viewIndividualUser():\r\n id = request.args.get('id')\r\n user = query_db(f\"SELECT * FROM user WHERE user_id = {id}\")[0]\r\n address = query_db(f\"SELECT * FROM address_info WHERE user_id = {id}\")\r\n reviews = query_db(f\"SELECT * FROM review WHERE user_id = {id}\")\r\n previous_transactions = query_db(f\"SELECT * FROM previous_transactions WHERE user_id = {id}\")\r\n tran_list = []\r\n if previous_transactions == None:\r\n previous_transactions = []\r\n for i in previous_transactions:\r\n total = 0\r\n date = i[2]\r\n items = ast.literal_eval(i[1])\r\n for j in items:\r\n total += int(j['prod_price']*j['prod_quantity'])\r\n tran_list.append({'id':i[0], 'total':total ,'date': str(date), 'status': i[3],'items':ast.literal_eval(i[1])})\r\n if reviews == None:\r\n reviews = []\r\n if address == None: \r\n address = []\r\n return render_template('admin/viewIndividualUser.html', user=user, address=address, reviews=reviews, previous_transactions = tran_list)\r\n \r\n@app.route('/orderStatus', methods=['GET', 'POST'])\r\ndef orderStatus():\r\n if request.method == 'GET':\r\n id = request.args.get('id')\r\n transaction = query_db(f\"SELECT * FROM previous_transactions WHERE Transaction_id=\\'{id}\\'\")[0]\r\n return render_template('admin/orderStatus.html', transaction=transaction)\r\n else:\r\n option = request.form['options']\r\n id = request.form['id']\r\n insert_db(f\"UPDATE previous_transactions set status = \\'{option}\\' WHERE Transaction_id=\\'{id}\\'\")\r\n return redirect(url_for('admin'))\r\n\r\n@app.route('/listUser')\r\ndef listUser():\r\n users = query_db(f\"SELECT * FROM 
user\")\r\n for i in users:\r\n if i[3] == 'admin@gmail.com':\r\n users.remove(i)\r\n return render_template('admin/usersList.html', users = users)\r\n\r\n\r\n# @app.route('/deleteUser')\r\n# def deleteUser():\r\n# if current_user.email == 'admin@gmail.com':\r\n# id = request.args.get('userId')\r\n# delete = request.args.get('delete')\r\n# if id != None and delete == 'true':\r\n# user = User.query.filter_by(id=int(id)).first()\r\n# db.session.delete(user)\r\n# db.session.commit()\r\n# return redirect(url_for('listUser'))\r\n# else:\r\n# return redirect(url_for('home'))\r\n\r\n# ## Admin E-commerce Section Routes ##\r\n@app.route('/productList')\r\ndef productList():\r\n data = refresh()\r\n return render_template('admin/productList.html', data = data)\r\n\r\n\r\n@app.route('/adminViewproduct', methods=['GET', 'POST'])\r\ndef viewProduct():\r\n id = request.args.get('id')\r\n with open('json_files/product.json', 'r') as f:\r\n data = json.load(f)\r\n for i in data: \r\n if i['id'] == int(id):\r\n product = i \r\n break \r\n review = query_db(f'SELECT * FROM review WHERE prod_name = \\'{product[\"prod_name\"]}\\'')\r\n num = 0 if review == None else len(review) \r\n with open('json_files/analytics.json', 'r') as f:\r\n data = json.load(f)\r\n for i in data: \r\n if i['id'] == int(id):\r\n analytics = i \r\n break \r\n return render_template('admin/productDetail.html', product= product, review = review, analytics = analytics, num=num)\r\n\r\n@app.route('/adminAddproduct', methods=['GET', 'POST'])\r\ndef adminAdd():\r\n form = AdminAddProductForm()\r\n if request.method == \"POST\":\r\n image = request.files['image']\r\n filename = request.files['image'].filename\r\n print(filename)\r\n image.save(os.path.join(app.config[\"IMAGE_UPLOADS\"], filename))\r\n new_product_name = form.name.data\r\n new_product_price = form.price.data\r\n new_product_description = form.description.data\r\n new_product_id = form.id.data\r\n new_product_img = f\"../static/product_img/{filename}\"\r\n with open('json_files/product.json', 'r') as f:\r\n data = json.load(f)\r\n data.append({\"id\": int(new_product_id), \"prod_name\": new_product_name, \"prod_price\": new_product_price, \"prod_desc\": new_product_description, \"prod_img\": new_product_img})\r\n with open('json_files/product.json', 'w') as f:\r\n json.dump(data, f)\r\n return redirect(url_for('admin'))\r\n else:\r\n with open('json_files/product.json', 'r') as f: \r\n data = json.load(f)\r\n latest_id = len(data)\r\n return render_template('admin/adminAddProduct.html', latest_id = latest_id+1, form=form)\r\n \r\n@app.route('/adminUpdateproduct', methods=['POST', 'GET'])\r\ndef update():\r\n form = AdminUpdateProductForm()\r\n productId = request.args.get('id')\r\n if request.method == 'POST':\r\n item_id = form.id.data\r\n item_name = form.name.data \r\n item_desc = form.description.data\r\n item_price = form.price.data\r\n image = request.files['image']\r\n filename = request.files['image'].filename \r\n print(filename) \r\n if filename:\r\n image.save(os.path.join(app.config[\"IMAGE_UPLOADS\"], filename))\r\n imagesrc = f'../static/product_img/{filename}'\r\n with open('json_files/product.json', 'r') as f:\r\n data = json.load(f)\r\n for i in data:\r\n if i[\"id\"] == int(item_id):\r\n i['prod_img'] = imagesrc\r\n i['prod_name'] = item_name\r\n i['prod_price'] = int(item_price)\r\n i['prod_desc'] = item_desc\r\n break\r\n with open('json_files/product.json', 'w') as f:\r\n json.dump(data, f)\r\n return redirect(url_for('admin'))\r\n elif productId != 
None:\r\n with open('json_files/product.json') as f:\r\n data = json.load(f)\r\n for i in data: \r\n if i[\"id\"] == int(productId):\r\n product = i\r\n break\r\n f.close()\r\n return render_template('admin/adminUpdateProduct.html', product = product, form=form)\r\n else:\r\n with open('json_files/product.json') as f:\r\n data = json.load(f)\r\n for i in data: \r\n if i[\"id\"] == 1:\r\n product = i\r\n break\r\n return render_template('admin/adminUpdateProduct.html', product = product, form=form)\r\n\r\n\r\n@app.route('/addStock', methods=['POST', 'GET'])\r\ndef stock():\r\n if request.method == 'GET':\r\n id = request.args.get('id')\r\n data = refresh()\r\n for i in data: \r\n if i['id'] == int(id):\r\n current = i\r\n break\r\n return render_template('admin/adminStock.html', data = current)\r\n else:\r\n with open('json_files/product.json', 'r') as f:\r\n data = json.load(f)\r\n productId = request.args.get('id')\r\n cun = request.form['quant[1]']\r\n for i in data: \r\n if i[\"id\"] == int(productId):\r\n i['stock'] += int(cun)\r\n break\r\n with open('json_files/product.json', 'w') as f:\r\n json.dump(data, f)\r\n\r\n with open('json_files/analytics.json', 'r') as f:\r\n data = json.load(f)\r\n productId = request.args.get('id')\r\n cun = request.form['quant[1]']\r\n for i in data: \r\n if i[\"id\"] == int(productId):\r\n i['stock'] += int(cun)\r\n break\r\n with open('json_files/analytics.json', 'w') as f:\r\n json.dump(data, f)\r\n return redirect(url_for('admin'))\r\n\r\n\r\n@app.route('/delete', methods=['POST', 'GET'])\r\ndef delete(): \r\n form = AdminUpdateProductForm()\r\n productId = request.args.get('id')\r\n if request.method == 'POST':\r\n with open('json_files/product.json', 'r') as f:\r\n data = json.load(f)\r\n productId = request.args.get('id')\r\n for i in data: \r\n if i[\"id\"] == int(productId):\r\n data.remove(i)\r\n break\r\n with open('json_files/product.json', 'w') as f:\r\n json.dump(data, f)\r\n return redirect(url_for('admin'))\r\n else:\r\n with open('json_files/product.json', 'r+') as f:\r\n data = json.load(f)\r\n productId = request.args.get('id')\r\n for i in data: \r\n if i[\"id\"] == int(productId):\r\n product = i\r\n break\r\n return render_template('admin/adminDeleteProduct.html', product=product, form=form)\r\n\r\n\r\n# \"\"\"Reset Password token routes\"\"\"\r\n\r\ndef send_reset_email(user):\r\n token = get_reset_token(user)\r\n print(token)\r\n msg = Message('Password Reset Request', sender='testemailnyp@gmail.com', recipients=[user[3]])\r\n msg.body = f'''To reset your password, visit the following link:\r\n{url_for('reset_token', token=token, _external=True)}\r\n\r\n# If you did not make this request then simply ignore this email and no changes will be made.\r\n# '''\r\n mail.send(msg)\r\n \r\n# @app.route(\"/reset_password\", methods=['GET', 'POST'])\r\n# def reset_request():\r\n# form = RequestResetForm()\r\n# if form.validate_on_submit():\r\n# user = query_db(f\"SELECT * FROM user WHERE email = \\'{form.email.data}\\'\")\r\n# if user == None:\r\n# return redirect(url_for('home'))\r\n# else:\r\n# user = user[0]\r\n# send_reset_email(user)\r\n# flash('An email has been sent with instructions to reset your password.', 'info')\r\n# return redirect(url_for('login'))\r\n# return render_template('reset_request.html', title='Reset Password', form=form)\r\n\r\n# @app.route(\"/reset_password/\", methods=['GET', 'POST'])\r\n# def reset_token(token):\r\n# user = verify_reset_token(token)\r\n# print(user)\r\n# if user is None:\r\n# flash('That is an 
invalid or expired token', 'danger')\r\n# return redirect(url_for('reset_request'))\r\n# form = ResetPasswordForm()\r\n# if form.validate_on_submit():\r\n# insert_db(f\"UPDATE user set password = \\'{form.password.data}\\' WHERE user_id=\\'{int(user)}\\'\")\r\n# flash('Your password has been updated! You are now able to log in', 'success')\r\n# return redirect(url_for('login'))\r\n# return render_template('reset_token.html', title='Reset Password', form=form)\r\n\r\n\r\n\r\n\r\n# @app.route('/resetpass', methods=['GET', 'POST'])\r\n# def resetpass():\r\n# form = PasswordForm()\r\n# return render_template('resetpass.html', form= form )\r\n\r\n#Reset Password Security Form\r\n@app.route('/ResetPassword', methods=['GET', 'POST'])\r\ndef resetpassword():\r\n form = ResetPassForm()\r\n if request.method == 'POST':\r\n email = form.email.data\r\n securityAnswer = form.securityAnswer.data\r\n new_password = form.password.data\r\n confirm_password = form.confirm_password\r\n valid_user = query_db(f\"SELECT * FROM user WHERE email= \\'{email}\\' AND security_answer=\\'{securityAnswer}\\'\")\r\n if valid_user != None:\r\n insert_db(f\"UPDATE user SET password = \\'{new_password}\\' WHERE email=\\'{email}\\' AND security_answer = \\'{securityAnswer}\\'\")\r\n return redirect(url_for('login'))\r\n else:\r\n flash('Wrong security answer', 'danger')\r\n return redirect(url_for('resetpassword'))\r\n else:\r\n email = request.args.get('email')\r\n if email != None:\r\n user = query_db(f\"SELECT * FROM user WHERE email = \\'{email}\\'\")\r\n if user == None:\r\n return \"None\"\r\n else:\r\n return user[0][6]\r\n return render_template('forgetPassword.html', form= form ) \r\n\r\n# \"\"\"Error Handling Routes\"\"\"\r\n\r\n# @app.errorhandler(404)\r\n# def page_not_found(e):\r\n# # note that we set the 404 status explicitly\r\n# return render_template('pageNotFound.html'), 404\r\n\r\n# @app.errorhandler(500)\r\n# def page_not_found500(x):\r\n# return render_template('feedbackError.html'), 500\r\n\r\n\r\n# @app.route('/forgetpass', methods=['GET', 'POST'])\r\n# def forgetpass():\r\n\r\n# #if current_user.is_authenticated:\r\n# #return redirect(url_for('home'))\r\n \r\n# form = PasswordForm()\r\n# if form.validate_on_submit():\r\n# user = User.query.filter_by(email=form.email.data).first()\r\n# if user:\r\n# port = 465 \r\n# smtp_server = \"smtp.gmail.com\"\r\n# sender_email = \"testemailnyp@gmail.com\" \r\n# receiver_email = form.email.data \r\n# password = \"Valentia01\"\r\n\r\n# message = MIMEMultipart(\"alternative\")\r\n# message[\"Subject\"] = \"multipart test\"\r\n# message[\"From\"] = sender_email\r\n# message[\"To\"] = receiver_email\r\n\r\n# text = \"\"\"\\\r\n# Hi,\r\n# How are you?\r\n# Real Python has many great tutorials:\r\n# www.realpython.com\"\"\"\r\n# html = \"\"\"\\\r\n# \r\n# \r\n# Hello! \r\n# You are receiving this email because we received a password reset \r\n# request for your account. \r\n# Reset Password \r\n#
\r\n# \r\n# \r\n# \"\"\"\r\n\r\n# part1 = MIMEText(text, \"plain\")\r\n# part2 = MIMEText(html, \"html\")\r\n\r\n# message.attach(part1)\r\n# message.attach(part2)\r\n \r\n# # message = \"\"\"\\\r\n# # Subject: Change your password\r\n\r\n# # Please click on this link to change your password.\"\"\"\r\n\r\n# context = ssl.create_default_context()\r\n# with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\r\n# server.login(sender_email, password)\r\n# server.sendmail(sender_email, receiver_email, message.as_string())\r\n# return render_template('forgetpass.html', title='Login', form=form)\r\n\r\n# @app.route('/resetpass', methods=['GET', 'POST'])\r\n# def resetpass():\r\n# form = ResetForm()\r\n# if form.validate_on_submit():\r\n# hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\r\n# current_user.password = hashed_password\r\n# db.session.commit()\r\n\r\n# return render_template('resetpass.html', form=form)\r\n\r\n\r\npath = os.getcwd()+\"/controllers\"\r\nlist_of_files = {}\r\n\r\n@app.route('/list')\r\ndef tree():\r\n for filename in os.listdir(path):\r\n list_of_files[filename] = \"http://127.0.0.1:5000/\"+filename\r\n return list_of_files\r\n\r\n\r\n\r\n\r\n","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":56806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
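Every database call in the routes.py record above splices request data straight into SQL through f-strings handed to `query_db`/`insert_db` (the 404 handler similarly renders the raw request URL through `render_template_string`), which is the textbook injection pattern. A minimal sketch of the parameterized alternative, using sqlite3's own placeholder binding; the variable names below are illustrative, not part of the original app:

```python
import sqlite3

DATABASE = 'controllers/site.db'

def query_db(query, args=()):
    # '?' placeholders are bound by the sqlite3 driver itself, so user
    # input is treated as data, never as SQL text.
    db = sqlite3.connect(DATABASE)
    try:
        cur = db.execute(query, args)
        return cur.fetchall() or None
    finally:
        db.close()

def insert_db(query, args=()):
    db = sqlite3.connect(DATABASE)
    try:
        db.execute(query, args)
        db.commit()
    finally:
        db.close()

# Instead of f"SELECT * FROM user WHERE email = '{form.email.data}'":
email = "alice@example.com"  # stands in for form.email.data
rows = query_db("SELECT * FROM user WHERE email = ?", (email,))
```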
+{"seq_id":"170990353","text":"from Elements import *\nfrom Scheduler import Scheduler\nfrom Generation import Generation\nclass Model:\n\tdef __init__(self, bin_sum_size, load_ratio):\n\t\t'''\n\t\tArgs:\n\t\t\tbin_set: \t\t\ttype list of bin, takes in the bins\n\t\t\titem_set: \t\t\ttype list of items, takes in the items, assume items inside sorted by the arrival time already\n\t\t\tstate_now:\t\t\ttype list of float, the state of the system now, includes the capacity of bins and the size of next item\n\t\t\tis_schedulable:\t\ttype bool, the state of the system now, showing whether the system misses any deadline or not\n\t\t\ttotal_item:\t\t\ttype int, number of items processed\n\t\t\titem_success:\t\ttype int, number of items successfully mapped\n\t\t\titem_counter:\t\ttype int, the index of next item\n\t\t\tload_ratio:\t\t\ttype float, the sum of sizes of items/ the sum of sizes of bins\n\t\t\tbin_sum_size\t\ttype float, the sum of sizes of bins\n\t\t\tstate_size\t\t\ttype int, the size of the state\n\t\t\taction_size\t\t\ttype int, the range of the action\n\t\t'''\n\t\tself._load_ratio = load_ratio\n\t\tself._bin_sum_size = bin_sum_size\n\t\tg = Generation()\n\t\t#self._bin_set = g.generate_bins(bin_sum_size) #randomly generate a bin_set that does not change during the learning\n\t\tself._bin_set, self._bin_sum_size = g.generate_n_bins(5)\n\t\tself._item_set = []\n\t\tself._state_now = []\n\t\tself._item_counter = 0\n\t\tself._scheduler = Scheduler('best_fit')\n\t\tself._action_size = len(self._bin_set)\n\t\tself._state_size = self._action_size+1\n\n\tdef reset(self):\n\t\tg = Generation()\n\t\tself._item_set = g.generate_items(self._bin_sum_size*self._load_ratio)#reset the item set\n\t\tself._state_now = []\n\t\tself._item_counter = 0\n\t\tfor i in range(len(self._bin_set)):\n\t\t\tself._bin_set[i]._capacity = self._bin_set[i]._size\n\t\t\tself._state_now.append(self._bin_set[i]._capacity)\n\t\tself._state_now.append(self._item_set[self._item_counter]._size)\n\t\tself._item_counter+=1\n\t\treturn self._state_now\n\n\tdef step(self, action):\n\t\tif action<0 or action >= len(self._bin_set): # action must index an existing bin\n\t\t\treturn self._state_now, -1, True, \"Ends\"\n\t\titem_now = self._item_set[self._item_counter]\n\t\tself._item_counter += 1\n\t\treward = 0\n\t\tif self._bin_set[action]._capacity < item_now._size:\n\t\t\treward = -10000\n\t\t\tmessage = \"A non-fittable choice is made.\"\n\t\t\treturn self._state_now, reward, True, message\n\t\telse:\n\t\t\tbf_choice = self._scheduler.schedule(item_now, self._bin_set)\n\t\t\tdone = False\n\t\t\tif bf_choice == action:\n\t\t\t\treward = 1\n\t\t\telse:\n\t\t\t\treward = -1\n\t\t\tmessage = 'Item assigned to Bin '+str(action)\n\t\t\tprint(message)\n\t\t\tself._bin_set[action]._capacity -= item_now._size\n\t\t\tself._state_now[action] -= item_now._size\n\t\t\tif self._item_counter>= len(self._item_set):\n\t\t\t\tdone = True\n\t\t\telse:\n\t\t\t\tself._state_now[len(self._state_now)-1] = self._item_set[self._item_counter]._size\n\t\t\treturn self._state_now, reward, done, message\n\tdef get_state_size(self):\n\t\treturn self._state_size\n\tdef get_action_size(self):\n\t\treturn self._action_size\n","sub_path":"PolicyGradient/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
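The environment above follows the Gym-style `reset`/`step` convention, so an episode loop drives it directly. A short sketch with a random policy as a stand-in for the policy-gradient agent; it assumes the sibling `Elements`, `Scheduler`, and `Generation` modules are importable, and the constructor arguments are illustrative (`bin_sum_size` is in fact overwritten by `generate_n_bins(5)`):

```python
import random
from Model import Model  # the PolicyGradient/Model.py above

env = Model(bin_sum_size=100.0, load_ratio=0.8)
state = env.reset()
done, total_reward = False, 0
while not done:
    action = random.randrange(env.get_action_size())  # random stand-in policy
    state, reward, done, message = env.step(action)
    total_reward += reward
print("episode finished:", message, "total reward:", total_reward)
```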
+{"seq_id":"228467556","text":"\"\"\"invitr\n\na bottle project for auto-invitations to slack groups\nforked from invitr https://github.com/coolharsh55/invitr\n\nUses the Slack Web API to send invites to the specified group.\nThe app page displays the current number of users, which can be\ncontrolled using env vars.\nUpon entering a valid email address, the app will attempt to send\nthe invitation from the server-side. Any errors or status messages\nreceived from slack are shown to the user such as\n\"invitation sent\", \"exists in team\", etc.\n\nEnvironmental variables:\n GROUP_NAME : djenesis\n SLACK_GROUP : djenesis\n TOKEN : env API token obtained from slack\n SHOW_USERS : True\n BG_COLOR : env background color of the app page\n\"\"\"\n\nfrom json import loads as json_load\nfrom os import environ as env\nfrom requests import post as request_post\nfrom re import match\n\nfrom bottle import post\nfrom bottle import request\nfrom bottle import response\nfrom bottle import route\nfrom bottle import run\nfrom bottle import template\n\n# TOKEN is the API token obtained from Slack\ntoken = env.get('TOKEN')\n# SHOW USERS is a python-flag used to determine if no of users should be\n# shown on the app page\nshow_users = env.get('SHOW_USERS')\n\n# BG COLOR is the background color of the page\nbg_color = env.get('BG_COLOR', '#0099CC')\n# check and format the bg color\nif not match('^#?([a-fA-F0-9]{3}){1,2}$', bg_color):\n bg_color = '#0099CC'\nelse:\n if not bg_color.startswith('#'):\n bg_color = '#' + bg_color\n\n\n@route('/')\ndef home():\n \"\"\"homepage of the app\n gui:\n shows the group name as title along with an input field for emails\n shows number of users in group, if specified in flag (see SHOW_USERS)\n view:\n sends a POST request to slack to get the number of users\n entering a valid email results in a POST request to /invite\n \"\"\"\n # get number of users if flag is True\n if show_users:\n url = 'https://djenesis.slack.com/api/users.list'\n data = {\n 'token': token,\n }\n r = request_post(url, data)\n data = json_load(r.text)\n users = len(data['members'])\n else:\n users = ''\n # render the template 'homepage.tpl'\n return template(\n 'homepage',\n show_users=show_users,\n users=users,\n bg_color=bg_color,\n )\n\n\n@post('/invite')\ndef invite():\n \"\"\"invite\n REST endpoint for sending invites to slack from homepage\n retrieves email id from form, and sends it to slack\n uses the slack web API along with supplied token to authenticate user\n returns response as json to hompeage to be displayed to the user\n \"\"\"\n # retrieve email field from form\n email = request.POST.allitems()[0][1]\n # send invite request to slack\n url = 'https://djenesis.slack.com/api/users.admin.invite'\n data = {\n 'email': email,\n 'token': token,\n 'set_active': True,\n }\n # set response as json, and return response\n response.content_type = 'application/json'\n return request_post(url, data).text\n\n\n# run bottle project\n# run(host='localhost', port=8080, debug=True)\nif __name__ == '__main__':\n run(server='gunicorn', host='0.0.0.0', port=int(env.get(\"PORT\", 5000)))\n","sub_path":"invitr.py","file_name":"invitr.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
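Once invitr is running with a valid `TOKEN`, the `/invite` endpoint can be exercised from any HTTP client; it relays whatever status JSON Slack returns. A small client-side sketch, assuming the app is serving locally on the default port from `env.get("PORT", 5000)`:

```python
import requests

# The handler reads the first form field it finds, so a plain 'email'
# field is enough.
resp = requests.post('http://localhost:5000/invite',
                     data={'email': 'new.member@example.com'})
print(resp.json())  # e.g. {"ok": true} or {"ok": false, "error": "already_in_team"}
```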
+{"seq_id":"147864196","text":"\n# import pickle \n# import loadmodel as LM\n\n#metal gear solid :its result: [2713469, 3855447, 6582527]\n#next annotated entity : konami\n#126: [' ', 45700, ' ']\n#row in the table: psx\tkonami\tmetal gear solid\t2.81\n\n# model = LM.Model.load(models_directory=\"/home/yasamin/Codes/WebTableAnnotation/data/models/Model_Creation\",\n# filename=\"3-wikidata-20190229-truthy-BETA-cbow-size=100-window=1-min_count=1\")\n\n# try:\n# print(\"Similarity of konami with 2713469:\",model.similarity(\"Q45700\", \"Q2713469\" ))\n# except Exception:\n# print(\"id not found\")\n# try:\n# print(\"Similarity of konami with 2713469:\",model.similarity(\"Q45700\", \"Q3855447\" ))\n# except Exception:\n# print(\"id not found\")\n# try:\n# print(\"Similarity of konami with 2713469:\",model.similarity(\"Q45700\", \"Q6582527\" )) #correct one!, not annotated now!\n# except Exception:\n# print(\"id not found\")\n\n\n#========---------========----------==========---------==========\n# from collections import defaultdict\n# import pandas as pd\n\n\n# T = pd.DataFrame({'Country': [1,2,3,4],\n# \t 'Population': [1000,2000,3000,4000],\n# \t 'Capital': [100,200,300,400]})\n\n# mydict = defaultdict(list)\n\n# mydict = {'a':[1,'',3],'b':[4,'',''],'c':['','5','6']}\n# LabelColumn = 2\n\n# for k in mydict:\n# if(T[k][LabelColumn]==)\n\n#========---------========----------==========---------==========\n# from collections import defaultdict\n\n# TPrimeIsAnnotated = defaultdict(list) \n# TPrimeIsAnnotated = {0: [True, False, False, False], 1: [True, False, False, False], 2: [True, False, False, False], 3: [True, False, False, False], 4: [True, False, False, False], 5: [True, False, False, False]}\n# TPrimeAnnotation = defaultdict(list)\n# TPrimeAnnotation = {0: [1419, ' ', ' ', ' '], 1: [1421, ' ', ' ', ' '], 2: [1423, ' ', ' ', ' '], 3: [1645078, ' ', ' ', ' '], 4: [1425, ' ', ' ', ' '], 5: [1427, ' ', ' ', ' ']}\n# labelColumn = 0\n# RefBased_TPrimeAnnotation = defaultdict(lambda: defaultdict(list))\n\n# RefBased_TPrimeAnnotation = {2:{0: [1419, 1421, ' ', ' '], 1: [1421, ' ', ' ', ' '], 2: [1423, ' ', ' ', ' '], 3: [1645078, ' ', ' ', ' '], 4: [1425, ' ', ' ', ' '], 5: [1427, ' ', ' ', ' ']},\n# 3:{0: [1419, ' ', ' ', ' '], 1: [1421, ' ', ' ', ' '], 2: [1423, ' ', ' ', ' '], 3: [1645078, ' ', ' ', ' '], 4: [1425, ' ', ' ', ' '], 5: [1427, ' ', ' ', ' ']}, \n# 4:{0: [1419, ' ', ' ', ' '], 1: [1421, ' ', ' ', ' '], 2: [1423, ' ', ' ', ' '], 3: [1645078, ' ', ' ', ' '], 4: [1425, ' ', ' ', ' '], 5: [1427, ' ', ' ', ' ']}\n# }\n# for annotation_key in TPrimeAnnotation:\n# TPrimeAnnotation[annotation_key] = \" \"\n# for isannotated_key in TPrimeAnnotation:\n# TPrimeIsAnnotated[isannotated_key] = False\n\n# for RefColIndex in RefBased_TPrimeAnnotation:\n# for Row_k in RefBased_TPrimeAnnotation[RefColIndex]:\n# if(TPrimeAnnotation[Row_k] == \" \"):\n# if not(RefBased_TPrimeAnnotation[RefColIndex][Row_k][labelColumn]== \" \"):\n# print(\"RefBased_TPrimeAnnotation[RefColIndex][Row_k][labelColumn]\",RefBased_TPrimeAnnotation[RefColIndex][Row_k][labelColumn])\n# TPrimeAnnotation[Row_k] = RefBased_TPrimeAnnotation[RefColIndex][Row_k][labelColumn]\n# print(\"TPrimeAnnotation[Row_k]\",TPrimeAnnotation[Row_k])\n# TPrimeIsAnnotated[Row_k] = True\n# else: \n# # we have annotation for the label column from previous Ref Col run\n# # If they want add model similarity!! 
now it is the first Ref Col val\n# continue\n# print(\"TPrimeAnnotation\",TPrimeAnnotation)\n# print(\"TPrimeIsAnnotated\",TPrimeIsAnnotated)\n#========---------========----------==========---------==========\n\n# # from collections import Counter\n\n# # c = Counter([])\n\n# # print(\"most common:\",c.most_common())\n\n# from collections import defaultdict\n\n# d = defaultdict(list)\n\n# d[\"a\"] = ['1']\n\n# print(\"d\",d)\n\n# d = defaultdict(list)\n\n# print(\"d2\",d)\n\n#========---------========----------==========---------==========\nimport pandas as pd\nimport copy\nEntities = []\nlabelColumn = 1\nRefColIndexList = [0,2]\nT = pd.DataFrame({0: ['a','b','c','d'],\n 1: ['Dominica','Tajikistan','Djibouti','Gabon'],\n\t 2: [1000,2000,3000,4000],\n\t 3: ['Roseau','Dushanbe','Djibouti','Libreville']})\n\nT = T.drop(0, axis=1)\nT = T.T.reset_index(drop=True).T\n\nprint(\"Table:\",T)\nfor RefColIndex in RefColIndexList:\n for index, row in T.iterrows():\n for e in row:\n if(e == row[labelColumn] or e == row[RefColIndex]):\n try:\n if(len(e)>3):\n # #just put label column entity \n # if(e == row[labelColumn]):\n Entities.append(e)\n except Exception:\n print(\"Cant get length\")\n continue\n\nprint(\"Entities\",Entities)\nprint(\"Hello outside\")\n","sub_path":"webTables/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":5039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"188380236","text":"# -*- coding: utf-8 -*-\nimport logging\nimport socket\nfrom Queue import Queue\n\nimport serial\n\nfrom zigbee_hub.http import setup_http_interface\nfrom zigbee_hub.serial_reader import SerialReader\nfrom zigbee_hub.telegesis.r3xx_layout import Etrx3Usb\n\n__author__ = 'Ruggero Ferretti '\n\n\nclass Hub(object):\n def __init__(self, serial_conf, http_conf):\n \"\"\"\n :param serial_conf: the serial port configuration\n :type serial_conf: SerialConf\n :param serial_conf: the http interface configuration\n :type serial_conf: HttpConf\n \"\"\"\n self.serial_conf = serial_conf\n self.http_conf = http_conf\n\n self.serial_reader = None\n self.http_interface = None\n\n def start(self):\n\n try:\n dongle = serial.Serial(self.serial_conf.port, self.serial_conf.baud_rate, timeout=self.serial_conf.timeout)\n at_queue = Queue()\n incoming_queue = Queue()\n\n coordinator = Etrx3Usb(dongle, at_queue)\n self.serial_reader = SerialReader(dongle, at_queue, incoming_queue)\n self.http_interface = setup_http_interface(coordinator)\n\n self.serial_reader.start()\n self.http_interface.run(host=self.http_conf.host, port=self.http_conf.port)\n\n return 0\n\n except serial.SerialException as serial_error:\n logging.error(str(serial_error))\n return 1\n\n except socket.error as socket_error:\n self.stop_serial_reader()\n logging.error(str(socket_error))\n return 2\n\n def stop(self):\n self.stop_serial_reader()\n # self.stop_http_interface()\n\n def stop_serial_reader(self):\n if self.serial_reader is not None:\n self.serial_reader.stop()\n\n def stop_http_interface(self):\n if self.http_interface is not None:\n self.http_interface.stop()\n","sub_path":"zigbee_hub/hub/hub.py","file_name":"hub.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"84818425","text":"\n\n#calss header\nclass _GIBBON():\n\tdef __init__(self,): \n\t\tself.name = \"GIBBON\"\n\t\tself.definitions = [u'a small ape with long arms that lives in trees in the forests of South Asia']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_gibbon.py","file_name":"_gibbon.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"475543987","text":"from flask import Flask, request\nimport json\n\nAPP = Flask(__name__)\n\nendpoint_hits = {}\nopsgenie_endpoint_hits = 0\nslack_endpoint_hits = 0\nkeybase_endpoint_hits = 0\n\n\n@APP.route(\"/\")\ndef show_endpoint_hits():\n return endpoint_hits\n\n\n@APP.route(\"/opsgenie\", methods=[\"POST\"])\ndef opsgenie_payload_verification():\n with open(\"/app/opsgenie_payload.json\", \"r\") as readfile:\n opsgenie_test_payload = json.load(readfile)\n if request.is_json is False:\n return (\"\", 500)\n payload = request.json\n if (\n payload.keys() != opsgenie_test_payload.keys()\n or payload.get(\"details\").keys() != opsgenie_test_payload.get(\"details\").keys()\n ):\n APP.logger.error(\"Received payload differs from expected payload.\")\n return (\"\", 500)\n global opsgenie_endpoint_hits\n opsgenie_endpoint_hits += 1\n endpoint_hits.update(\n {\"successful_requests_to_opsgenie_endpoint\": opsgenie_endpoint_hits}\n )\n return (\"\", 200)\n\n\n@APP.route(\"/slack\", methods=[\"POST\"])\ndef slack_payload_verification():\n with open(\"/app/slack_payload.json\", \"r\") as readfile:\n slack_test_payload = json.load(readfile)\n payload = request.json\n if (\n payload.keys() != slack_test_payload.keys()\n or payload[\"blocks\"][1].keys() != slack_test_payload[\"blocks\"][1].keys()\n ):\n APP.logger.error(\"Received payload differs from expected payload.\")\n return (\"\", 500)\n global slack_endpoint_hits\n slack_endpoint_hits += 1\n endpoint_hits.update({\"successful_requests_to_slack_endpoint\": slack_endpoint_hits})\n return (\"\", 200)\n\n\n@APP.route(\"/keybase\", methods=[\"POST\"])\ndef keybase_payload_verification():\n with open(\"/app/keybase_payload.json\", \"r\") as readfile:\n keybase_test_payload = json.load(readfile)\n payload = request.json\n if payload.keys() != keybase_test_payload.keys():\n APP.logger.error(\"Received payload differs from expected payload.\")\n return (\"\", 500)\n global keybase_endpoint_hits\n keybase_endpoint_hits += 1\n endpoint_hits.update(\n {\"successful_requests_to_keybase_endpoint\": keybase_endpoint_hits}\n )\n return (\"\", 200)\n\n\nif __name__ == \"__main__\":\n APP.run(host=\"0.0.0.0\", port=56243)\n","sub_path":"tests/integration/alerting/app/alert_checker.py","file_name":"alert_checker.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"400184215","text":"# -*- coding: utf-8 -*-\n\nfrom . import Filter\n\nimport numpy as np\nfrom scipy import optimize\nfrom math import exp\n\n\nclass Abspline(Filter):\n r\"\"\"\n Abspline Filterbank\n\n Inherits its methods from Filters\n\n Parameters\n ----------\n G : Graph\n Nf : int\n Number of filters from 0 to lmax (default = 6)\n lpfactor : int\n Low-pass factor lmin=lmax/lpfactor will be used to determine scales,\n the scaling function will be created to fill the lowpass gap.\n (default = 20)\n t : ndarray\n Vector of scale to be used (Initialized by default at\n the value of the log scale)\n\n Returns\n -------\n out : Abspline\n\n Examples\n --------\n >>> from pygsp import graphs, filters\n >>> G = graphs.Logo()\n >>> F = filters.Abspline(G)\n\n \"\"\"\n\n def __init__(self, G, Nf=6, lpfactor=20, t=None, **kwargs):\n super(Abspline, self).__init__(G, **kwargs)\n\n def kernel_abspline3(x, alpha, beta, t1, t2):\n M = np.array([[1, t1, t1**2, t1**3],\n [1, t2, t2**2, t2**3],\n [0, 1, 2*t1, 3*t1**2],\n [0, 1, 2*t2, 3*t2**2]])\n v = np.array([1, 1, t1**(-alpha) * alpha * t1**(alpha - 1),\n -beta*t2**(- beta - 1) * t2**beta])\n a = np.linalg.solve(M, v)\n\n r1 = x <= t1\n r2 = (x >= t1)*(x < t2)\n r3 = (x >= t2)\n\n if isinstance(x, np.float64):\n\n if r1:\n r = x[r1]**alpha * t1**(-alpha)\n if r2:\n r = a[0] + a[1] * x + a[2] * x**2 + a[3] * x**3\n if r3:\n r = x[r3]**(-beta) * t2**beta\n\n else:\n r = np.zeros(x.shape)\n\n x2 = x[r2]\n\n r[r1] = x[r1]**alpha * t1**(-alpha)\n r[r2] = a[0] + a[1] * x2 + a[2] * x2**2 + a[3] * x2**3\n r[r3] = x[r3]**(-beta) * t2 ** beta\n\n return r\n\n G.lmin = G.lmax / lpfactor\n\n if t is None:\n self.t = self.wlog_scales(G.lmin, G.lmax, Nf - 1)\n else:\n self.t = t\n\n gb = lambda x: kernel_abspline3(x, 2, 2, 1, 2)\n gl = lambda x: np.exp(-np.power(x, 4))\n\n lminfac = .4 * G.lmin\n\n self.g = [lambda x: 1.2 * exp(-1) * gl(x / lminfac)]\n for i in range(0, Nf - 1):\n self.g.append(lambda x, ind=i: gb(self.t[ind] * x))\n\n f = lambda x: -gb(x)\n xstar = optimize.minimize_scalar(f, bounds=(1, 2),\n method='bounded')\n gamma_l = -f(xstar.x)\n lminfac = .6 * G.lmin\n self.g[0] = lambda x: gamma_l * gl(x / lminfac)\n","sub_path":"pygsp/filters/abspline.py","file_name":"abspline.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"872199","text":"import numpy as np\nimport math\ndef calF(p,flag):\n x1 = np.array([], dtype=np.int64).reshape(0, len(a[0]))\n\n for i in p:\n v = i - 1;\n x1 = np.append(x1, np.array([a[v]]), axis=0)\n # print(v,a[v])\n\n # print(x1)\n\n # cal the entropy of this array\n res1 = 0\n res2 = 0\n for i in range(len(x1)):\n if x1[i][-1] == 0:\n res1 += 1;\n else:\n res2 += 1;\n\n # print(res1,res2)\n e = 0\n if res1 != 0:\n e += ((res1 / len(x1)) * math.log((1 / (res1 / len(x1))), 2))\n if res2 != 0:\n e += ((res2 / len(x1)) * math.log((1 / (res2 / len(x1))), 2))\n # print(\"entropy of\",p,\"is\",e)\n if e == 0:\n # print(\"no need to classify this partition further\", p)\n return 0;\n\n # now chk entropy for each attribute\n\n maxGain = -9999\n maxAttribute = 0\n col_len = len(x1[0]) - 1\n\n for i in range(col_len): # for each attribute\n ecal = np.array([[0, 0], [0, 0], [0, 0]])\n attr_entropy = 0\n attr_entropy1 = 0\n attr_entropy2 = 0\n attr_entropy3 = 0\n for j in range(len(x1)):\n if x1[j][i] == 0:\n ecal[0][x1[j][-1]] += 1\n elif x1[j][i] == 1:\n ecal[1][x1[j][-1]] += 1\n else:\n ecal[2][x1[j][-1]] += 1\n\n # print(ecal)\n if (ecal[0][0] + ecal[0][1]) != 0:\n if ecal[0][0] != 0:\n attr_entropy1 = (ecal[0][0] / (ecal[0][0] + ecal[0][1]) * math.log(\n (1 / ((ecal[0][0] / (ecal[0][0] + ecal[0][1])))), 2))\n if ecal[0][1] != 0:\n attr_entropy1 += (ecal[0][1] / (ecal[0][0] + ecal[0][1]) * math.log(\n (1 / ((ecal[0][1] / (ecal[0][0] + ecal[0][1])))), 2))\n attr_entropy1 *= ((ecal[0][0] + ecal[0][1]) / len(x1))\n # print(\"0\",attr_entropy1)\n\n if (ecal[1][0] + ecal[1][1]) != 0:\n if ecal[1][0] != 0:\n attr_entropy2 = (ecal[1][0] / (ecal[1][0] + ecal[1][1]) * math.log(\n (1 / ((ecal[1][0] / (ecal[1][0] + ecal[1][1])))), 2))\n if ecal[1][1] != 0:\n attr_entropy2 += (ecal[1][1] / (ecal[1][0] + ecal[1][1]) * math.log(\n (1 / ((ecal[1][1] / (ecal[1][0] + ecal[1][1])))), 2))\n attr_entropy2 *= ((ecal[1][0] + ecal[1][1]) / len(x1))\n # print(\"1\",attr_entropy2)\n\n if (ecal[2][0] + ecal[2][1]) != 0:\n if ecal[2][0] != 0:\n attr_entropy3 = (ecal[2][0] / (ecal[2][0] + ecal[2][1]) * math.log(\n (1 / ((ecal[2][0] / (ecal[2][0] + ecal[2][1])))), 2))\n if ecal[2][1] != 0:\n attr_entropy3 += (ecal[2][1] / (ecal[2][0] + ecal[2][1]) * math.log(\n (1 / ((ecal[2][1] / (ecal[2][0] + ecal[2][1])))), 2))\n attr_entropy3 *= ((ecal[2][0] + ecal[2][1]) / len(x1))\n # print(\"2\",attr_entropy3)\n attr_entropy = attr_entropy1 + attr_entropy2 + attr_entropy3\n if (e - attr_entropy) > maxGain:\n maxAttribute = i\n maxGain = max(maxGain, (e - attr_entropy))\n\n F = maxGain * (len(x1) / len(a))\n if flag == 1:\n a0 = []\n a1 = []\n a2 = []\n for j in range(len(x1)):\n if x1[j][maxAttribute] == 0:\n a0.append(p[j])\n elif x1[j][maxAttribute] == 1:\n a1.append(p[j])\n else:\n a2.append(p[j])\n i = 0\n if len(a0) != 0:\n f.write(str(key)+str(i)+\" \"+str(a0)+\"\\n\")\n i += 1\n if len(a1) != 0:\n f.write(str(key)+str(i)+\" \"+str(a1)+\"\\n\")\n i += 1\n if len(a2) != 0:\n f.write(str(key)+str(i)+\" \"+str(a2)+\"\\n\")\n return F\n # print(\"maxGain is: \", maxGain, \"maxAttr: \",maxAttribute, \"F: \", maxGain*(len(x1)/len(a)))\n\nprint(\"Enter names of the files dataset input-partition output-partition\")\ndataset = input()\ninputpartitn = input()\noutputpartitn = input()\n\nwith open(dataset) as f:\n row, col = [int(x) for x in next(f).split()] # read first line\n a = []\n for line in f: # read rest of lines\n a.append([int(x) for x in line.split()])\n\nd = {}\nwith open(inputpartitn) as f:\n for line in f:\n 
temp = []\n key, val = line.rstrip().split(None,1)\n temp.append([int(x) for x in val.split()])\n d[key] = temp\n\nmaxF = 0\nmaxKey = \"\"\nfor key in d:\n # print(\"calF\",calF(d[key][0]))\n res = calF(d[key][0],0)\n if maxF < res:\n maxF = res\n maxKey = key\n\nwith open(outputpartitn,'w') as f:\n for k,v in d.items():\n if k != maxKey:\n f.write(str(k)+ \" \"+ str(v[0]))\n f.write(\"\\n\")\n else:\n res = calF(d[k][0],1)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Project1.py","file_name":"Project1.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"358369507","text":"from __future__ import division\n\n\nimport tensorflow as tf\nfrom keras.models import Model\nfrom keras.layers.convolutional import (\n Conv2D\n)\nfrom keras.layers.merge import add\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.layers import RepeatVector, Flatten, Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, \\\n ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation, concatenate, Lambda, Permute, Multiply,Conv1D,MaxPool1D,AvgPool1D\n\ndef _1D_bn_relu(input):\n \"\"\"Helper to build a BN -> relu block\n do batch normalization in channel axis\n \"\"\"\n norm = BatchNormalization(axis=2)(input) \n return Activation(\"relu\")(norm)\ndef _2D_bn_relu(input):\n \"\"\"Helper to build a BN -> relu block\n do batch normalization in height axis\n \"\"\"\n norm = BatchNormalization(axis=2)(input)\n return Activation(\"relu\")(norm)\n\n\ndef _conv_bn_relu(**conv_params):\n \"\"\"Helper to build a conv -> BN -> relu block\n \"\"\"\n filters = conv_params[\"filters\"]\n kernel_size = conv_params[\"kernel_size\"]\n strides = conv_params.setdefault(\"strides\", (1, 1))\n padding = conv_params.setdefault(\"padding\", \"same\")\n kernel_regularizer = conv_params.setdefault(\"kernel_regularizer\", l2(0.0003))\n\n def f(input):\n conv = Conv2D(filters=filters, kernel_size=kernel_size,\n strides=strides, padding=padding,\n kernel_regularizer=kernel_regularizer)(input)\n return _2D_bn_relu(conv)\n\n return f\n\n\n\n\ndef _shortcut(input, residual):\n \"\"\"Adds a shortcut between input and residual block and merges them with \"sum\"\n \"\"\"\n return add([input, residual])\n\n\n\nclass Nixae_Builder(object):\n @staticmethod\n def build(input_shape, num_outputs, C=50, brn=5):\n \"\"\"Builds a custom Nixae like architecture.\n\n Args:\n input_shape: The input shape in the form (packet_length)\n num_outputs: The number of outputs at final softmax layer\n C: The number of filters in the first 1D convolutional layer of the Inception block.\n brn: The number of branches to use\n Returns:\n The keras `Model`.\n \"\"\"\n\n if K.image_dim_ordering() == 'tf':\n input_shape = (input_shape[0])\n input = Input(shape=input_shape, dtype=tf.int32)\n\n cast_int = Lambda(lambda x: K.cast(x, dtype=tf.int32))\n my_input_one_hot = Lambda(lambda x: K.one_hot(cast_int(x), num_classes=256))\n output = my_input_one_hot(input)\n output = Reshape([1, input_shape[0], 256])(output)\n\n ###the 2D-conv in the first layer of 1*256\n conv1 = _conv_bn_relu(filters=32, kernel_size=(1, 256), strides=(1, 1), padding=\"valid\")(output)\n output_layer_1 = Reshape([input_shape[0],32])(conv1)\n\n\n ###begin of Inception\n\n first_brn_filter = C\n second_brn_filter = 32\n residuals = []\n for i in range(brn):\n output_1 = Conv1D(filters=first_brn_filter, kernel_size=i+1,strides=1,padding=\"same\",activation=\"relu\")(output_layer_1)\n output_1 = _1D_bn_relu(Conv1D(filters=second_brn_filter, kernel_size=1, strides=1, padding=\"same\")(output_1))\n if i==0:\n _, a, b = output_1.shape.as_list() # a=packet_length,b=C0\n output_1 = Reshape([a, b, 1])(output_1)\n residuals.append(output_1)\n ###end of Inception\n ###begin of attention\n my_sum_fn = Lambda(lambda x: K.sum(x, axis=2))\n my_reshape_length = Lambda(lambda x: K.reshape(x, [-1, brn]))\n residual = concatenate(residuals, axis=3)\n alpha = my_sum_fn(residual) # packet_length*brn\n alpha = my_reshape_length(alpha)\n Dense_layer_1 = Dense(brn * brn, 
activation=\"relu\", use_bias=False)\n Dense_layer_2 = Dense(units=brn, activation=\"softmax\")\n\n alpha = Dense_layer_1(alpha)\n alpha = Dense_layer_2(alpha) # (batch_size*packet_length)*brn\n my_reshape_length_2 = Lambda(lambda x: K.reshape(x, [-1, a, 1, brn]))\n alpha = my_reshape_length_2(alpha) # batch_size*packet_length*1*brn\n output = Multiply()([alpha, residual])\n finnal_sum = Lambda(lambda x: K.sum(x, axis=-1))\n output = finnal_sum(output)\n output_layer_2 = Reshape([a, b])(output)\n output_layer_2 = _shortcut(output_layer_1, output_layer_2)\n output_layer_2 = _1D_bn_relu(output_layer_2)\n ###end of attention\n\n\n output = AvgPool1D()(output_layer_2)\n\n flatten1 = Flatten()(output)\n flatten1 = Dropout(rate=0.5)(flatten1)\n dense = Dense(units=256, activation=\"relu\")(flatten1)\n dense = Dense(units=128, activation=\"relu\")(dense)\n dense = Dense(units=num_outputs, activation=\"softmax\")(dense)\n model = Model(inputs=input, outputs=dense)\n return model\n\n @staticmethod\n def build_model(input_shape, num_outputs, C, brn):\n return Nixae_Builder.build(input_shape, num_outputs, C, brn)\n","sub_path":"Nixae.py","file_name":"Nixae.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"134793692","text":"\n\nfrom scipy.stats import powerlaw\nimport numpy as np\nimport random\n\ndef rafaels_algorithm(array_sz, nsegs):\n i = 1\n start_idx_set = {0}\n while i < nsegs:\n idx = random.randint(0, array_sz-1)\n if idx not in start_idx_set:\n start_idx_set.add(idx)\n i = i+1\n start_idx_list = list(start_idx_set)\n start_idx_list.sort()\n start_idx_list.append(array_sz)\n l = [start_idx_list[i+1]-start_idx_list[i] for i in range(len(start_idx_list)-1)]\n # Sort the segments sizes\n l.sort(reverse=True)\n return l\n\n\ndef generate_uniform(array_sz, nsegs):\n sizes = np.random.uniform(low=0.0, high=1.0, size=(nsegs,))\n # Normalize the segments' sizes so that the total sum = array_sz\n total = sum(sizes)\n l = [int((i*array_sz)/total) for i in sizes]\n # Distribute the remaining elements\n remaining = array_sz - sum(l)\n i = 0\n while remaining > 0:\n l[i] += 1\n remaining -= 1\n i = (i+1) % len(l)\n # Sort the segments sizes\n l.sort(reverse=True)\n return l\n\ndef powerlaw(x0,x1,n):\n y = random.uniform(0, 1.0)\n return ((x1**(n+1) - x0**(n+1))*y + x0**(n+1))**(1/(n+1))\n\ndef generate_powerlaw(array_sz, nsegs,a):\n sizes = [powerlaw(0.0,1.0,a) for i in range(nsegs)]\n total = sum(sizes)\n l = [int((i*array_sz)/total) for i in sizes]\n # Distribute the remaining elements\n remaining = array_sz - sum(l)\n i = 0\n while remaining > 0:\n l[i] += 1\n remaining -= 1\n i = (i+1) % len(l)\n # Sort the segments sizes\n l.sort(reverse=True)\n return l\n\nimport matplotlib.pyplot as plt\n\ndef add_data(dataset,title,seg_sizes):\n if title not in dataset:\n dataset[title] = []\n dataset[title].append(seg_sizes)\n\ndef plot_array_sizes(array_sz,nsegs,N,filename=\"\",extra_plots={}):\n\n maxy = 0\n data = {}\n for i in range(N):\n add_data(data,'1:red:Power-law dist. (a=-0.9)',generate_powerlaw(array_sz,nsegs,-0.9))\n add_data(data,'2:blue:Power-law dist. (a=-0.8)',generate_powerlaw(array_sz,nsegs,-0.8))\n add_data(data,'3:orange:Rafael\\'s algoritm',rafaels_algorithm(array_sz,nsegs))\n add_data(data,'4:gray:Power-law dist. 
(a=-0.6)',generate_powerlaw(array_sz,nsegs,-0.6))\n add_data(data,'5:green:Uniform dist.',generate_uniform(array_sz,nsegs))\n\n for ep_title, ep_ds in extra_plots.items():\n ep_ds.sort(reverse=True)\n add_data(data,\"e:blue:{}\".format(ep_title),ep_ds)\n\n datal = list(data.items())\n datal.sort()\n for k,v in datal:\n id = k.split(\":\")[0]\n if id != \"e\":\n for i in v:\n m = max(i)\n if m > maxy: maxy = m\n\n fig, ax = plt.subplots(1,len(data),figsize=(15,5))\n fig.suptitle(\"Segment sizes distribution (Array Sz: {}, # of Segments {}, # of samples: {}\".format(array_sz,nsegs,N), fontsize=16)\n i=0\n for info,dataset in datal:\n inf = info.split(\":\")\n id = inf[0]\n chart_title = inf[2] \n line_color = inf[1]\n if id != \"e\":\n ax[i].set_ylim([0,maxy])\n ax[i].label_outer()\n\n ax[i].set_title(chart_title)\n if i == 0:\n ax[i].set(xlabel='Segments sorted by size', ylabel='Segment size')\n else:\n ax[i].set(xlabel='Segments sorted by size')\n\n ax[i].ticklabel_format(axis='y',style='plain')\n\n for d in dataset:\n ax[i].plot(d,\"tab:\"+line_color)\n\n i = i + 1\n\n if filename == \"\":\n plt.show()\n else:\n plt.savefig(filename)\n\n plt.close()\n\n\ndef plot_segments_sizes(N,fileprefix=\"\",extra_plots=None):\n array_szs = [2**i for i in [13,16,19,22,25]]\n nsegs = [2**i for i in range(2,19)]\n# array_szs = [2**i for i in [13,19,25]]\n# nsegs = [2**i for i in range(2,12)]\n for n in nsegs:\n for asz in array_szs:\n if n < asz :\n filename = \"{}-nsegs:{}-array_sz:{}.pdf\".format(fileprefix,n,asz)\n print(\"Generating\",filename,\"with 100 samples each.\")\n plot_array_sizes(asz,n,100,filename,extra_plots=extra_plots)\n\n\ndef readCit_Patents_Adj_List_Sizes():\n ajd_list_sizes = []\n with open(\"cit-Patents.nedges-per-node.txt\") as fh:\n while True:\n line = fh.readline()\n if not line:\n return ajd_list_sizes\n else:\n ajd_list_sizes.append(int(line))\n \n\nextra_plots = {}\nextra_plots[\"Cit-Patents Adj. List. Sizes\"] = readCit_Patents_Adj_List_Sizes()\nplot_segments_sizes(100,fileprefix=\"segments-sizes-distribution-extra\",extra_plots=extra_plots)\n","sub_path":"scripts/segment-sizes-distribution-analysis/plot-segments-sizes.py","file_name":"plot-segments-sizes.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"22342202","text":"# -*- coding: utf-8 -*\n'''\nimport numpy as np # 一般以np作为numpy的别名\n\na = np.array([2, 0, 1]) # 创建数组\nprint(a) # 输出数组\n# print(a[0:3])\n# 引用前三个数字(切片)\n# print(a.min())\n# 输出a的最小值\n# a.sort()\n# 将a的元素从小到大排序,此操作直接修改a,因此这时候a为[0, 1, 2, 5]\n# print(a)\nb = np.array([[1, 2, 3], [4, 5, 6]]) # 创建二维数组\nprint(b)\n# print(b * b)\n# 输出数组的平方阵,即[[1, 4, 9], [16, 25, 36]]\nprint(a*b)\n'''\n# -*- coding: utf-8 -*\n# 求解非线性方程组2x1-x2^2=1,x1^2-x2=2\nfrom scipy.optimize import fsolve # 导入求解方程组的函数\nfrom scipy import integrate # 导入积分函数\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# 求解方程组\ndef f(x): # 定义要求解的方程组\n x1 = x[0]\n x2 = x[1]\n return [2 * x1 - x2 ** 2 - 1, x1 ** 2 - x2 - 2]\n\n\nresult = fsolve(f, [1, 2]) # 输入初值[1, 1]并求解\nprint(result) # 输出结果,为array([ 1.91963957, 1.68501606])\n\n\n# 数值积分\ndef g(x): # 定义被积函数\n return (1 - x ** 2) ** 0.5\n\n\npi_2, err = integrate.quad(g, -1, 1) # 积分结果和误差\nprint(pi_2 * 2) # 由微积分知识知道积分结果为圆周率pi的一半\n\n\ny = np.linspace(-2, 2, num=20)\ny_int = integrate.cumtrapz(y)\nplt.plot(y_int, 'ro', y, 'b-')\nplt.show()","sub_path":"PythonLearning/Python_Introduction/test_plugin.py","file_name":"test_plugin.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"47989136","text":"'''\nLinear regression using Stochastic gradient descent from Scratch\n\n'''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndata = np.genfromtxt('./datasets/sgd_data.csv',delimiter=',')\n\n# create the metrices\n\nX = data[:, 0].reshape(-1,1) # -1 tells numpy to figure out the dimension by itself\nones = np.ones([X.shape[0], 1]) # create a array containing only ones \nX = np.concatenate([ones, X],1) # cocatenate the ones to X matrix\ny = data[:, 1].reshape(-1,1) # create the y matrix\n\n# Check the plot\n#plt.scatter(data[:, 0].reshape(-1,1), y)\n#plt.show()\n\n\n# Set the hyper parameters\n\n# set small alpha value\nalpha = 0.0001\niters = 1000\n\n# theta is a row vector\ntheta = np.array([[1.0, 1.0]])\n\n# Create the cost function\ndef computeCost(X,y, theta):\n\tinner = np.power(((X @ theta.T) - y), 2)\n\treturn np.sum(inner) / (2 * len(X))\n\n\n# Create the Gradient Descent function\n\n# We're going to use Gradient Descent to minimize cost function.\n\n\ndef gradientDescent(X, y, theta, alpha, iters):\n for i in range(iters):\n theta = theta - (alpha/len(X)) * np.sum((X @ theta.T - y) * X, axis=0)\n cost = computeCost(X, y, theta)\n # if i % 10 == 0: # just look at cost every ten loops for debugging\n # print(cost)\n return (theta, cost)\n\ncomputeCost(X, y, theta) # outputs 319.40631589398157\n\ng, cost = gradientDescent(X, y, theta, alpha, iters) \nprint(g, cost) # gives 56.0419737779817 that's huge decrease in cost\n\n\nplt.scatter(data[:, 0].reshape(-1,1), y)\naxes = plt.gca()\nx_vals = np.array(axes.get_xlim()) \ny_vals = g[0][0] + g[0][1]* x_vals #the line equation\nplt.plot(x_vals, y_vals, '--')\n#plt.show()\nplt.savefig(\"linear_reg_sgd_scratch.png\")\n\n","sub_path":"linear_reg_sgd_scratch.py","file_name":"linear_reg_sgd_scratch.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"184369540","text":"import sys\nimport traceback\n\nimport tornado\nimport tornado.web\nimport tornado.gen\nfrom tornado.ioloop import IOLoop\nfrom objects import glob\nfrom logger import log\nfrom raven.contrib.tornado import SentryMixin\n\nclass asyncRequestHandler(SentryMixin, tornado.web.RequestHandler):\n\t\"\"\"\n\tTornado asynchronous request handler\n\tcreate a class that extends this one (requestHelper.asyncRequestHandler)\n\tuse asyncGet() and asyncPost() instead of get() and post().\n\tDone. I'm not kidding.\n\t\"\"\"\n\t@tornado.web.asynchronous\n\t@tornado.gen.engine\n\tdef get(self, *args, **kwargs):\n\t\ttry:\n\t\t\tyield tornado.gen.Task(runBackground, (self.asyncGet, tuple(args), dict(kwargs)))\n\t\tfinally:\n\t\t\tif not self._finished:\n\t\t\t\tself.finish()\n\n\t@tornado.web.asynchronous\n\t@tornado.gen.engine\n\tdef post(self, *args, **kwargs):\n\t\ttry:\n\t\t\tyield tornado.gen.Task(runBackground, (self.asyncPost, tuple(args), dict(kwargs)))\n\t\tfinally:\n\t\t\tif not self._finished:\n\t\t\t\tself.finish()\n\n\tdef asyncGet(self, *args, **kwargs):\n\t\tself.send_error(405)\n\n\tdef asyncPost(self, *args, **kwargs):\n\t\tself.send_error(405)\n\n\tdef getRequestIP(self):\n\t\t\"\"\"\n\t\tReturn CF-Connecting-IP (request IP when under cloudflare, you have to configure nginx to enable that)\n\t\tIf that fails, return X-Forwarded-For (request IP when not under Cloudflare)\n\t\tif everything else fails, return remote IP\n\n\t\t:return: Client IP address\n\t\t\"\"\"\n\t\treturn self.request.headers.get(\"X-Real-IP\")\n\ndef runBackground(data, callback):\n\t\"\"\"\n\tRun a function in the background.\n\tUsed to handle multiple requests at the same time\n\n\t:param data: (func, args, kwargs)\n\t:param callback: function to call when `func` (data[0]) returns\n\t:return:\n\t\"\"\"\n\tfunc, args, kwargs = data\n\tdef _callback(result):\n\t\tIOLoop.instance().add_callback(lambda: callback(result))\n\tglob.pool.apply_async(func, args, kwargs, _callback)\n\ndef checkArguments(arguments, requiredArguments):\n\t\"\"\"\n\tCheck that every requiredArguments elements are in arguments\n\n\t:param arguments: full argument list, from tornado\n\t:param requiredArguments: required arguments list\n\t:return: True if all arguments are passed, False if not\n\t\"\"\"\n\tfor i in requiredArguments:\n\t\tif i not in arguments:\n\t\t\treturn False\n\treturn True\n\ndef printArguments(t):\n\t\"\"\"\n\tPrint passed arguments, for debug purposes\n\n\t:param t: tornado object (self)\n\t\"\"\"\n\tmsg = \"ARGS::\"\n\tfor i in t.request.arguments:\n\t\tmsg += \"{}={}\\r\\n\".format(i, t.get_argument(i))\n\tlog.debug(msg)\n","sub_path":"web/requestsManager.py","file_name":"requestsManager.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"161474789","text":"from contact import Contact\n\nclass Contacts:\n from session import Session\n\n def __init__(self, session = Session()):\n self.session = session\n self.response = session.request('GET', 'contacts')['Contacts']\n self.list = {}\n\n for item in self.response:\n contact = Contact(item, session)\n self.list[contact.ID] = contact\n\n def get(self, ID):\n return self.list[ID] if ID in self.list else None\n\n def find(self, email):\n for contact in self.list.values():\n if contact.email.lower() == email.lower():\n return contact\n\n return None\n\n def getlevel(self, *levels):\n contacts = {}\n\n for contact in self.list.values():\n if contact.level in levels:\n yield contact\n\n @property\n def levels(self):\n levels = set()\n\n for contact in self.list.values():\n if contact.level is not None:\n levels.add(contact.level)\n\n return levels\n","sub_path":"contacts.py","file_name":"contacts.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"541199600","text":"'''\r\n\r\nDefiniamo adiacenti di un pixel p di un immagine i pixel adiacenti a p in orizzontale o in verticale.\r\nSe un pixel e' sul bordo dell'immagine il suo vicinato non comprende i pixel non contenuti nell'immagine.\r\nIl pixel dell'immagine con coordinate(x,y) ha dunque come adiacenti i pixel \r\ncon coordinate (x-1,y),(x+1,y),(x,y-1),(x,y+1) appartenenti all'immagine. \r\n \r\nDefiniamo connessi due pixel se e' possibile dall'uno raggiungere l'altro spostandosi solo su \r\npixel adiacenti e dello stesso colore (ovviamente perche' cio' sia possobile e' necessario \r\nche i due pixel abbiano lo stesso colore).\r\n\r\nPer caricare e salvare immagini PNG usate le funzioni load e save che abbiamo preparato nel modulo immagini.py .\r\n\r\nScrivere una funzione ricolora(fname, lista, fnameout) che presi:\r\n- il percorso di un file che contiene un'immagine in formato PNG\r\n- una lista di quadruple del tipo (x,y,c1,c2) dove x e y sono coordinate di un pixel dell'immagine e c1 e c2 due triple colore RGB\r\n- il percorso di un file (fnameout) da creare\r\nlegge l'immagine in fname, esegue un'operazione di ricolorazione di alcuni pixel dell'immagine e \r\nregistra l'immagine ricolorata nel file fnameout.\r\n\r\nL'operazione di ricolorazione e' la seguente. Per ciascuna delle quadruple (x,y,c1,c2) della lista (nell'ordine), \r\n- tutti i pixel connessi al pixel di coordinate (x,y) nell'immagine vanno ricolorati col colore c1, \r\n- tutti i pixel del perimetro (che si trovano sul 'bordo') della zona che si e' appena colorata devono essere ricolorati col colore c2.\r\nIl perimetro della zona colorata è l'insieme dei pixel che non hanno tutti e 4 i vicini che fanno parte della zona ricolorata \r\n(ovvero almeno uno è di un colore diverso da quello che si sta ricolorando oppure almeno uno non esiste perchè sarebbe fuori dall'immagine)\r\n\r\nSi consideri ad esempio l'immagine 'I1.png', l'invocazione di ricolora('I1.png',[(10,10,(255,0,0), (0,0,255))],’OUT1.png')\r\nprodurra' l'immagine 'OUT1.png' identica all'immagine di partenza se non per il fatto che,\r\n tutti i pixel adiacenti al pixel di coordinate (10,10) (e di colore verde), verranno ricolorati \r\n di rosso ((255,0,0)), mentre i pixel sul bordo della zona inizialmente verde vengono ricolorati di blu.\r\n\r\nPer ciascuna area ricolorata bisogna inoltre calcolare area interna e perimetro, che sono definite come segue:\r\n- l'area interna e' il numero di pixel ricolorati con il colore c1\r\n- il perimetro è il numero di pixel ricolorati con il colore c2\r\n\r\nLa funzone deve tornare la lista di coppie (area interna, perimetro) nello stesso ordine in cui sono state colorate le aree.\r\n \r\nPer altri esempi vedere il file grade03.txt \r\n'''\r\n\r\nfrom immagini import *\r\n\r\ndef disegna(img, x, y, w, h, c):\r\n \r\n for colonna in range(y, y+h):\r\n for riga in range(x, x+w):\r\n img[riga][colonna] = c\r\n \r\n\r\ndef disegnasurface(img, x, y, w, h, c):\r\n \r\n for colonna in range(y, y+h):\r\n for riga in range(x, x+w):\r\n img[riga][colonna] = c\r\n \r\ndef ricolora(fname, lista, fnameout):\r\n\r\n img = load(fname)\r\n listRow = []\r\n listColumn = []\r\n listHeight = []\r\n listCs = []\r\n ordine = []\r\n listCp = []\r\n\r\n i=-1\r\n \r\n while i!= len(lista)-1:\r\n\r\n i+=1\r\n \r\n column = lista[i][0]\r\n row = lista[i][1]\r\n \r\n surfaceColor = lista[i][2]\r\n perimeterColor = lista[i][3]\r\n baseColor = img[row][column] \r\n newRow = row\r\n newColumn = column \r\n originRow = row\r\n 
originColumn = column\r\n height = 0\r\n width = 0\r\n \r\n while newRow > 0:\r\n newRow -= 1\r\n if img[newRow][column] == baseColor:\r\n originRow -= 1\r\n else: break\r\n \r\n while newColumn > 0:\r\n newColumn -= 1\r\n if img[row][newColumn] == baseColor:\r\n originColumn -= 1\r\n else: break\r\n \r\n newRow = originRow\r\n newColumn = originColumn\r\n \r\n while newRow < len(img) and img[newRow][newColumn] == baseColor:\r\n height += 1\r\n newRow += 1 \r\n \r\n listRow.append(originRow)\r\n listColumn.append(originColumn)\r\n listHeight.append(height)\r\n listCs.append(surfaceColor)\r\n listCp.append(perimeterColor)\r\n \r\n disegna(img, listRow[i],listColumn[i],listHeight[i],listHeight[i], listCp[i])\r\n disegnasurface(img, listRow[i]+1,listColumn[i]+1,listHeight[i]-2,listHeight[i]-2, listCs[i])\r\n \r\n heightp = height -1\r\n heighta= height-2\r\n \r\n if i>= 1 and listRow[i] == listRow[i-1] and listColumn[i] == listColumn[i-1] and listCs[i] == listCs[i-1]:\r\n heighta = 0\r\n \r\n misure = ((heighta)*(heighta),((heightp)*4)) \r\n ordine.append(misure)\r\n \r\n save(img,fnameout)\r\n \r\n return ordine\r\n\r\n \r\n \r\n\r\n \r\n\r\n\r\n","sub_path":"students/1795039/homework03/program03.py","file_name":"program03.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"407657011","text":"import argparse\nimport datetime\nimport os\n\nfrom pymei import XmlImport, XmlExport, MeiElement\nimport pymei\n\nclass MeiCombiner:\n '''\n Combines mei files created by the barline finding algorithm.\n '''\n\n def __init__(self, input_mei_paths):\n '''\n PARAMETERS\n ----------\n input_mei_paths {list}: list of mei paths to combine\n '''\n\n self._input_mei_paths = input_mei_paths\n if len(self._input_mei_paths):\n self._meidoc = pymei.read(self._input_mei_paths[0])\n else:\n self._meidoc = None\n\n def combine(self):\n if self._meidoc and len(self._input_mei_paths) > 1:\n base_facsimile = self._meidoc.getElementsByName('facsimile')[0]\n base_section = self._meidoc.getElementsByName('section')[0]\n for f in self._input_mei_paths[1:]:\n mei = pymei.read(f)\n\n # combine surface\n surface = mei.getElementsByName('surface')\n if len(surface):\n # have to remove the child from the old document in memory\n # or else pymei segfaults ...\n surface[0].getParent().removeChild(surface[0])\n base_facsimile.addChild(surface[0])\n\n # combine measures\n pb = MeiElement('pb')\n base_section.addChild(pb)\n\n # get last measure number\n measures = base_section.getChildrenByName('measure')\n last_measure_n = int(measures[-1].getAttribute('n').value)\n\n new_section = mei.getElementsByName('section')[0]\n music_elements = new_section.getChildren()\n\n for e in music_elements:\n if e.getName() == 'measure':\n last_measure_n += 1\n e.addAttribute('n', str(last_measure_n))\n\n base_section.addChild(e)\n\n # remove all musical elements from the old document or else pymei segfaults\n new_section.getParent().deleteAllChildren()\n\n self._add_revision()\n\n def _add_revision(self):\n # add a revision\n today = datetime.date.today().isoformat()\n change = MeiElement('change')\n\n # get last change number\n changes = self._meidoc.getElementsByName('change')\n if len(changes):\n last_change = int(changes[-1].getAttribute('n').value)\n\n change.addAttribute('n', str(last_change+1))\n resp_stmt = MeiElement('respStmt')\n corp_name = MeiElement('corpName')\n corp_name.setValue('Distributed Digital Music Archives and Libraries Lab (DDMAL)')\n change_desc = MeiElement('changeDesc')\n ref = MeiElement('ref')\n p = MeiElement('p')\n application = self._meidoc.getElementsByName('application')\n app_name = 'RODAN/barlineFinder'\n if len(application):\n ref.addAttribute('target', '#'+application[0].getId())\n ref.setValue(app_name)\n ref.setTail('.')\n p.addChild(ref)\n\n p.setValue('Combining individual page MEIs using ')\n date = MeiElement('date')\n date.setValue(today)\n\n revision_descs = self._meidoc.getElementsByName('revisionDesc')\n if len(revision_descs):\n revision_descs[0].addChild(change)\n change.addChild(resp_stmt)\n resp_stmt.addChild(corp_name)\n change.addChild(change_desc)\n change_desc.addChild(p)\n change.addChild(date)\n\n def write_mei(self, output_path):\n if self._meidoc:\n pymei.write(self._meidoc, output_path)\n\n def get_mei(self):\n return self._meidoc\n","sub_path":"old_code/old_jobs/mei_resources/meicombine.py","file_name":"meicombine.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"382919577","text":"# coding=utf-8\nimport six\nimport socket\nfrom collections import namedtuple\nfrom decimal import Decimal\nfrom xml.etree import ElementTree\nfrom contextlib import closing\n\nfrom . import constants\n\nProtocol = namedtuple('Protocol', ['result', 'points', 'compile_log', 'tests'])\nProtocolTest = namedtuple('ProtocolTest', ['name', 'result', 'time', 'details'])\n\n\nclass JudgeClient(object):\n\n def __init__(self, tester_id, tester_url, tester_port):\n \"\"\"Initializes JudgeClient instance.\n\n :param tester_id:\n :param tester_url:\n :param tester_port:\n :param storage:\n \"\"\"\n self.tester_id = tester_id\n self.tester_url = tester_url\n self.tester_port = tester_port\n\n def submit(self, submit_id, user_id, task_id, submission_content, language, priority=0):\n \"\"\"Submits a file to the judge system.\n\n :param submit_id: unique id of the submit.\n :param user_id: user id for the judge system in form of contestid-userid.\n :param task_id: task id for the judge system in form of contestid-taskid.\n :param submission_content: submission file content uploaded by user.\n :param language: programming language for the submission_file.\n :returns: submit_id\n \"\"\"\n\n header = self._create_header(submit_id, user_id, task_id, language, priority)\n self._send_data_to_server(header, submission_content)\n\n @staticmethod\n def parse_protocol(protocol_content, max_points):\n \"\"\"Parses protocol returned by judge system.\n\n If protocol contains it means there was a testing error.\n If any of tag in contains non OK result, the overall result is the first non OK result.\n The points are computed from percentage of maximum points stored in tag.\n\n :param protocol_content: content of the protocol.\n :param max_points: maximum possible points for the task.\n :return: testing_result, number_of_points\n \"\"\"\n try:\n tree = ElementTree.fromstring(protocol_content)\n except (SyntaxError, ElementTree.ParseError):\n raise ProtocolCorruptedError(\n 'Error while parsing protocol.', protocol_content)\n\n compile_log = tree.find(\"compileLog\")\n if compile_log is not None:\n return Protocol(\n points=0, # Returns zero points if there was error.\n result=constants.SUBMIT_RESPONSE_ERROR,\n compile_log=compile_log.text,\n tests=tuple())\n\n result = constants.SUBMIT_RESPONSE_OK\n tests = []\n run_log = tree.find(\"runLog\")\n for test in run_log:\n if test.tag != 'test':\n continue\n test_result = test[2].text\n details = test[4].text if len(test) > 4 else None\n tests.append(ProtocolTest(name=test[0].text,\n result=test_result,\n time=test[3].text,\n details=details))\n if test_result != constants.SUBMIT_RESPONSE_OK and result == constants.SUBMIT_RESPONSE_OK:\n result = test_result\n try:\n score = Decimal(tree.find(\"runLog/score\").text)\n except (ValueError, TypeError, AttributeError):\n raise ProtocolFormatError(\"Invalid score.\", protocol_content)\n points = (max_points * score) / Decimal(100)\n return Protocol(result=result, points=points, compile_log=None, tests=tests)\n\n def _create_header(self, submit_id, user_id, task_id, language, priority):\n \"\"\"Creates a raw header from submit parameters\"\"\"\n return 'submit1.3\\n%s\\n%s\\n%s\\n%s\\n%s\\n%s\\nmagic_footer\\n' % (\n self.tester_id,\n submit_id,\n user_id,\n task_id,\n language,\n priority,\n )\n\n def _encode(self, s):\n if isinstance(s, bytes):\n return s\n if not isinstance(s, six.string_types):\n if six.PY3:\n return six.text_type(s).encode('utf-8')\n return bytes(s)\n return s.encode('utf-8')\n\n 
def _send_data_to_server(self, header, submission_file_content):\n \"\"\"Sends submission to the judge system.\"\"\"\n try:\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n sock.connect((self.tester_url, self.tester_port))\n sock.sendall(self._encode(header))\n sock.sendall(self._encode(submission_file_content))\n except socket.error:\n raise JudgeConnectionError(\n 'Failed to connect to judge system (%s:%s)' % (self.tester_url, self.tester_port))\n\n\nclass DebugJudgeClient(JudgeClient):\n def __init__(self):\n super(DebugJudgeClient, self).__init__('TEST_ID', 'TEST_URL', 47)\n\n def _send_data_to_server(self, header, submission_file_content):\n print('Submit RAW:')\n print(self._encode(header))\n print(self._encode(submission_file_content))\n\n\nclass JudgeConnectionError(IOError):\n def __init__(self, message):\n self.message = message\n\n def __str__(self):\n return '%s: %s' % (self.__class__.__name__, repr(self.message))\n\n\nclass ProtocolError(ValueError):\n def __init__(self, message, protocol):\n self.message = message\n self.protocol = protocol\n\n def __str__(self):\n return '%s: %s\\n%s\\n' % (self.__class__.__name__, repr(self.message), self.protocol)\n\n\nclass ProtocolCorruptedError(ProtocolError):\n pass\n\n\nclass ProtocolFormatError(ProtocolError):\n pass\n","sub_path":"judge_client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"118943453","text":"from twisted.trial import unittest\n\nfrom methanal.imethanal import IEnumeration\nfrom methanal import enums, errors\n# Adapter registration side-effect.\nfrom methanal import view\n# To quell Pyflakes' fears.\nview\n\n\n\nclass EnumerationAdapterTests(unittest.TestCase):\n \"\"\"\n Tests for L{IEnumeration} adapters.\n \"\"\"\n def test_list(self):\n \"\"\"\n Adapting a C{list} to L{IEnumeration} results in an L{Enum} accurately\n representing the list.\n \"\"\"\n values = [\n (u'foo', u'Foo'),\n (u'bar', u'Bar')]\n enum = IEnumeration(values)\n self.assertEquals(enum.asPairs(), values)\n for value, desc in values:\n item = enum.get(value)\n self.assertEquals(item.value, value)\n self.assertEquals(item.desc, desc)\n\n\n def test_groupList(self):\n \"\"\"\n Adapting a C{list} of nested C{list}s, as used by\n L{methanal.view.GroupedSelectInput}, results in an L{Enum} with\n L{EnumItems} with a C{'group'} extra value the same as the first\n element in each C{tuple}. L{IEnumeration.asPairs} returns a flat\n C{list} for nested C{list}s adapted to L{IEnumeration}.\n \"\"\"\n values = [\n (u'Group', [\n (u'foo', u'Foo'),\n (u'bar', u'Bar')]),\n (u'Group 2', [\n (u'quux', u'Quux'),\n (u'frob', u'Frob')])]\n\n enum = IEnumeration(values)\n for groupName, innerValues in values:\n for value, desc in innerValues:\n item = enum.get(value)\n self.assertEquals(item.value, value)\n self.assertEquals(item.desc, desc)\n self.assertEquals(item.get('group'), groupName)\n\n pairs = sum(zip(*values)[1], [])\n self.assertEquals(enum.asPairs(), pairs)\n\n\n def test_notAdapted(self):\n \"\"\"\n Adapting C{tuple}, C{iter} or generator expression raises L{TypeError}.\n \"\"\"\n values = (n for n in xrange(5))\n self.assertRaises(TypeError, IEnumeration, tuple(values))\n self.assertRaises(TypeError, IEnumeration, iter(values))\n self.assertRaises(TypeError, IEnumeration, values)\n\n\n\nclass _EnumTestsMixin(object):\n \"\"\"\n Test case mixin for enumerations.\n \"\"\"\n def test_duplicateValues(self):\n \"\"\"\n Constructing an enumeration with duplicate values results in\n C{ValueError} being raised.\n \"\"\"\n values = [\n enums.EnumItem(u'foo', u'Foo'),\n enums.EnumItem(u'bar', u'Bar'),\n enums.EnumItem(u'foo', u'Not Foo', quux=u'frob')]\n self.assertRaises(ValueError, enums.Enum, 'Doc', values)\n\n pairs = [(e.value, e.desc) for e in values]\n self.assertRaises(ValueError, enums.Enum.fromPairs, 'Doc', pairs)\n\n\n def test_fromPairs(self):\n \"\"\"\n Construct an enumeration from an iterable of pairs.\n \"\"\"\n pairs = [\n (u'foo', u'Foo'),\n (u'bar', u'Bar')]\n enum = enums.Enum.fromPairs('Doc', pairs)\n self.assertEquals(enum.doc, 'Doc')\n self.assertEquals(enum.asPairs(), pairs)\n\n\n def test_asPairs(self):\n \"\"\"\n Representing an enumeration as a list of pairs.\n \"\"\"\n pairs = [(e.get('id', e.value), e.desc) for e in self.values]\n self.assertEquals(self.enum.asPairs(), pairs)\n\n\n def test_get(self):\n \"\"\"\n Getting an enumeration item by value returns the relevant\n L{methanal.enums.EnumItem} instance or raises L{InvalidEnumItem} in the\n case where no item is represented by the given value.\n \"\"\"\n for e in self.values:\n self.assertEquals(self.enum.get(e.value), e)\n self.assertRaises(errors.InvalidEnumItem,\n self.enum.get, u'DOESNOTEXIST')\n\n\n def test_getDesc(self):\n \"\"\"\n Getting an enumeration item's description by value returns the\n description or an empty C{unicode} string if no item is represented\n by the given value.\n \"\"\"\n for e in 
self.values:\n self.assertEquals(self.enum.getDesc(e.value), e.desc)\n self.assertEquals(self.enum.getDesc(u'DOESNOTEXIST'), u'')\n\n\n def test_hidden(self):\n \"\"\"\n Enumeration items that have their C{hidden} flag set are not listed in\n the result of L{methanal.eums.Enum.asPairs}.\n \"\"\"\n values = [\n enums.EnumItem(u'foo', u'Foo', hidden=True),\n enums.EnumItem(u'bar', u'Bar'),\n enums.EnumItem(u'pop', u'Pop')]\n enum = enums.Enum('Doc', values)\n enum.get(u'pop').hidden = True\n\n pairs = enum.asPairs()\n self.assertEquals(pairs, [(u'bar', u'Bar')])\n\n\n def test_find(self):\n \"\"\"\n Finding an enumeration item by extra value gets the first matching item\n or C{None} if there are no matches. Passing fewer or more than one\n query raises C{ValueError}.\n \"\"\"\n self.assertIdentical(self.enum.find(quux=u'hello'), self.values[0])\n self.assertIdentical(self.enum.find(frob=u'world'), self.values[0])\n self.assertIdentical(self.enum.find(quux=u'goodbye'), self.values[1])\n\n self.assertIdentical(self.enum.find(haha=u'nothanks'), None)\n self.assertRaises(ValueError, self.enum.find)\n self.assertRaises(ValueError, self.enum.find, foo=u'foo', bar=u'bar')\n\n\n def test_findAll(self):\n \"\"\"\n Finding all enumeration items by extra value gets an iterable of all\n matching items. Passing fewer or more than one\n query raises C{ValueError}.\n \"\"\"\n results = list(self.enum.findAll(frob=u'world'))\n self.assertEquals(len(results), 2)\n for res in results:\n self.assertIn(res, self.values)\n\n self.assertEquals(list(self.enum.findAll(asdf=u'qwer')), [])\n\n # Consume the generators to trigger the exception.\n self.assertRaises(ValueError, list, self.enum.findAll())\n self.assertRaises(ValueError, list, self.enum.findAll(foo=u'foo',\n bar=u'bar'))\n\n\n def test_iterator(self):\n \"\"\"\n L{Enum} implements the iterator protocol and will iterate over\n L{EnumItem}s in the order originally specified, omitting L{EnumItem}s\n that are marked as hidden.\n \"\"\"\n items = [enums.EnumItem(u'foo', u'Foo'),\n enums.EnumItem(u'bar', u'Bar'),\n enums.EnumItem(u'baz', u'Baz', hidden=True)]\n enum = enums.Enum('Doc', items)\n\n # The hidden Enum is omitted.\n self.assertEquals(len(list(enum)), 2)\n\n for expected, item in zip(items, enum):\n self.assertIdentical(expected, item)\n\n\n\nclass EnumTests(_EnumTestsMixin, unittest.TestCase):\n \"\"\"\n Tests for L{methanal.enums.Enum}.\n \"\"\"\n def setUp(self):\n self.values = [\n enums.EnumItem(u'foo', u'Foo', quux=u'hello', frob=u'world'),\n enums.EnumItem(u'bar', u'Bar', quux=u'goodbye'),\n enums.EnumItem(u'doh', u'Doh', frob=u'world')]\n self.enum = enums.Enum('Doc', self.values)\n\n\n def test_getExtra(self):\n \"\"\"\n Getting an enumeration item extra value by enumeration value returns\n the extra's value or a default value, defaulting to C{None}.\n \"\"\"\n self.assertEquals(self.enum.getExtra(u'foo', 'quux'), u'hello')\n self.assertEquals(self.enum.getExtra(u'foo', 'frob'), u'world')\n self.assertEquals(self.enum.getExtra(u'bar', 'quux'), u'goodbye')\n\n self.assertEquals(self.enum.getExtra(u'bar', 'nope'), None)\n self.assertEquals(self.enum.getExtra(u'bar', 'nope', u''), u'')\n\n\n def test_extra(self):\n \"\"\"\n Extra parameters are retrieved by L{methanal.enums.EnumItem.get} if they\n exist otherwise a default value is returned instead. 
Extra parameters\n can also be accessed via attribute access but C{AttributeError} is\n raised if no such extra parameter exists.\n \"\"\"\n self.assertEquals(self.enum.get(u'foo').get('quux'), u'hello')\n self.assertEquals(self.enum.get(u'foo').get('frob'), u'world')\n self.assertEquals(self.enum.get(u'bar').get('quux'), u'goodbye')\n\n self.assertEquals(self.enum.get(u'bar').get('boop'), None)\n self.assertEquals(self.enum.get(u'bar').get('beep', 42), 42)\n\n self.assertEquals(self.enum.get(u'foo').quux, u'hello')\n self.assertEquals(self.enum.get(u'foo').frob, u'world')\n self.assertEquals(self.enum.get(u'bar').quux, u'goodbye')\n\n e = self.assertRaises(AttributeError,\n getattr, self.enum.get(u'bar'), 'boop')\n self.assertIn('boop', str(e))\n e = self.assertRaises(AttributeError,\n getattr, self.enum.get(u'bar'), 'beep')\n self.assertIn('beep', str(e))\n\n\n def test_reprEnum(self):\n \"\"\"\n L{methanal.enums.Enum} has a useful representation that contains the\n type name and the enumeration description.\n \"\"\"\n self.assertEquals(\n repr(enums.Enum('Foo bar', [])),\n '')\n\n lorem = \"\"\"\n Lorem ipsum dolor sit amet, consectetur adipiscing elit. In vitae sem\n felis, sit amet tincidunt est. Cras convallis, odio nec accumsan\n vestibulum, lectus dolor feugiat magna, sit amet tempus lorem diam ac\n enim. Curabitur nisl nibh, bibendum ac tempus non, blandit ac turpis.\n \"\"\"\n\n self.assertEquals(\n repr(enums.Enum(lorem, [])),\n '')\n\n\n def test_reprEnumUndocumented(self):\n \"\"\"\n L{methanal.enums.Enum} has a useful representation even when the\n enumeration has no description.\n \"\"\"\n self.assertEquals(\n repr(enums.Enum('', [])),\n '')\n\n\n\nclass EnumItemTests(unittest.TestCase):\n \"\"\"\n Tests for L{methanal.enums.EnumItem}.\n \"\"\"\n def test_reprEnumItem(self):\n \"\"\"\n L{methanal.enums.EnumItem} has a useful representation that contains\n the value, description and hidden state.\n \"\"\"\n self.assertEquals(\n repr(enums.EnumItem(u'foo', u'Foo')),\n \"\")\n\n self.assertEquals(\n repr(enums.EnumItem(u'foo', u'Foo', hidden=True)),\n \"\")\n\n\n\nclass ObjectEnumTests(_EnumTestsMixin, unittest.TestCase):\n \"\"\"\n Tests for L{methanal.enums.ObjectEnum}.\n \"\"\"\n def setUp(self):\n self.object1 = object()\n self.object2 = object()\n self.object3 = object()\n self.values = [\n enums.EnumItem(self.object1, u'Foo', quux=u'hello', frob=u'world'),\n enums.EnumItem(self.object2, u'Bar', quux=u'goodbye'),\n enums.EnumItem(self.object3, u'Doh', frob=u'world', id=u'chuck')]\n self.enum = enums.ObjectEnum('Doc', self.values)\n\n\n def test_idExtra(self):\n \"\"\"\n L{methanal.enums.ObjectEnum} automatically creates an C{'id'} EnumItem\n extra value, based on the result of C{id}, if one does not already\n exist.\n \"\"\"\n expected = [\n unicode(id(self.object1)),\n unicode(id(self.object2)),\n u'chuck']\n\n self.assertEquals(\n expected,\n [value.id for value in self.values])\n\n\n def test_getExtra(self):\n \"\"\"\n Getting an enumeration item extra value by enumeration value returns\n the extra's value or a default value, defaulting to C{None}.\n \"\"\"\n self.assertEquals(self.enum.getExtra(self.object1, 'quux'), u'hello')\n self.assertEquals(self.enum.getExtra(self.object1, 'frob'), u'world')\n self.assertEquals(self.enum.getExtra(self.object2, 'quux'), u'goodbye')\n\n self.assertEquals(self.enum.getExtra(u'bar', 'nope'), None)\n self.assertEquals(self.enum.getExtra(u'bar', 'nope', u''), u'')\n\n\n def test_extra(self):\n \"\"\"\n Extra parameters are retrieved by 
L{methanal.enums.EnumItem.get} if they\n exist otherwise a default value is returned instead. Extra parameters\n can also be accessed via attribute access but C{AttributeError} is\n raised if no such extra parameter exists.\n \"\"\"\n self.assertEquals(self.enum.get(self.object1).get('quux'), u'hello')\n self.assertEquals(self.enum.get(self.object1).get('frob'), u'world')\n self.assertEquals(self.enum.get(self.object2).get('quux'), u'goodbye')\n\n self.assertEquals(self.enum.get(self.object2).get('boop'), None)\n self.assertEquals(self.enum.get(self.object2).get('beep', 42), 42)\n\n self.assertEquals(self.enum.get(self.object1).quux, u'hello')\n self.assertEquals(self.enum.get(self.object1).frob, u'world')\n self.assertEquals(self.enum.get(self.object2).quux, u'goodbye')\n\n e = self.assertRaises(AttributeError,\n getattr, self.enum.get(self.object2), 'boop')\n self.assertIn('boop', str(e))\n e = self.assertRaises(AttributeError,\n getattr, self.enum.get(self.object2), 'beep')\n self.assertIn('beep', str(e))\n\n\n def test_reprEnum(self):\n \"\"\"\n L{methanal.enums.ObjectEnum} has a useful representation that contains the\n type name and the enumeration description.\n \"\"\"\n self.assertEquals(\n repr(enums.ObjectEnum('Foo bar', [])),\n '')\n\n lorem = \"\"\"\n Lorem ipsum dolor sit amet, consectetur adipiscing elit. In vitae sem\n felis, sit amet tincidunt est. Cras convallis, odio nec accumsan\n vestibulum, lectus dolor feugiat magna, sit amet tempus lorem diam ac\n enim. Curabitur nisl nibh, bibendum ac tempus non, blandit ac turpis.\n \"\"\"\n\n self.assertEquals(\n repr(enums.ObjectEnum(lorem, [])),\n '')\n","sub_path":"methanal/test/test_enums.py","file_name":"test_enums.py","file_ext":"py","file_size_in_byte":14240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"469622602","text":"from .basic import *\nfrom .residual import *\nfrom .convlstm import ConvLSTM\nfrom .convlstmcenter import ConvLSTMCenter\nfrom .slicelstm import SliceLSTM\nfrom .slicelstmconv import SliceLSTMConv\nfrom .convLSTMCell3D import ConvLSTMCell3D\nfrom .aspp_3d import ASPP\n\n__all__ = ['conv2d_pad',\n 'conv2d_bn_non',\n 'conv2d_bn_elu',\n 'conv2d_bn_relu',\n 'conv3d_pad',\n 'conv3d_bn_non',\n 'conv3d_bn_elu',\n 'conv3d_bn_relu',\n 'residual_block_2d',\n 'residual_block_3d',\n 'bottleneck_dilated_2d',\n 'bottleneck_dilated_3d',\n 'dilated_fusion_block',\n 'squeeze_excitation_2d',\n 'squeeze_excitation_3d',\n 'ConvLSTMCenter',\n 'ConvLSTM',\n 'SliceLSTM',\n 'SliceLSTMConv',\n 'ConvLSTMCell3D',\n 'ASPP']\n","sub_path":"torch_connectomics/model/blocks/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"619905297","text":"import os\nimport sys\nimport urllib\nfrom httplib import HTTPException\n\nfrom CRABClient import __version__\nfrom RESTInteractions import HTTPRequests\nfrom CRABClient.UserUtilities import getFileFromURL\nfrom CRABClient.Commands.SubCommand import SubCommand\nfrom CRABClient.Commands.getcommand import getcommand\nfrom CRABClient.ClientUtilities import colors\nfrom CRABClient.ClientExceptions import ConfigurationException, RESTCommunicationException, ClientException, MissingOptionException\nimport CRABClient.Emulator\n\n\nclass getlog(getcommand):\n \"\"\"\n Retrieve the log files of a number of jobs specified by the -q/--quantity option.\n -q logfiles per exit code are returned if transferLogs = False; otherwise all the log files\n collected by the LogCollect job are returned. The task is identified by the -d/--dir option.\n \"\"\"\n name = 'getlog'\n shortnames = ['log']\n visible = True #overwrite getcommand\n\n def __call__(self):\n if self.options.short:\n inputlist = {'subresource': 'webdir', 'workflow': self.cachedinfo['RequestName']}\n serverFactory = CRABClient.Emulator.getEmulator('rest')\n server = serverFactory(self.serverurl, self.proxyfilename, self.proxyfilename, version=__version__) \n uri = self.getUrl(self.instance, resource = 'task')\n dictresult, status, reason = server.get(uri, data = inputlist)\n self.logger.info('Server result: %s' % dictresult['result'][0])\n dictresult = self.processServerResult(dictresult)\n if status != 200:\n msg = \"Problem retrieving information from the server:\\ninput:%s\\noutput:%s\\nreason:%s\" % (str(inputlist), str(dictresult), str(reason))\n raise RESTCommunicationException(msg)\n self.setDestination()\n self.logger.info(\"Setting the destination to %s \" % self.dest)\n self.logger.info(\"Retrieving...\")\n success = []\n failed = [] \n for item in self.options.jobids:\n jobid = str(item[1])\n filename = 'job_out.'+jobid+'.0.txt'\n url = dictresult['result'][0]+'/'+filename\n try:\n getFileFromURL(url, self.dest+'/'+filename)\n self.logger.info ('Retrieved %s' % (filename))\n success.append(filename)\n retry = 1\n #To retrieve retried joblog, if there is any.\n while urllib.urlopen(dictresult['result'][0]+'/'+'job_out.'+jobid+'.'+str(retry)+'.txt').getcode() == 200:\n filename = 'job_out.'+jobid+'.'+str(retry)+'.txt'\n url = dictresult['result'][0]+'/'+filename\n getFileFromURL(url, self.dest+'/'+filename)\n self.logger.info ('Retrieved %s' % (filename))\n success.append(filename)\n retry = retry + 1\n except ClientException as ex:\n self.logger.debug(str(ex))\n failed.append(filename)\n if failed:\n msg = \"%sError%s: Failed to retrieve the following files: %s\" % (colors.RED,colors.NORMAL,failed)\n self.logger.info(msg)\n else:\n self.logger.info(\"%sSuccess%s: All files successfully retrieved.\" % (colors.GREEN,colors.NORMAL))\n returndict = {'success': success, 'failed': failed}\n else:\n returndict = getcommand.__call__(self, subresource = 'logs')\n if ('success' in returndict and not returndict['success']) or \\\n ('failed' in returndict and returndict['failed']):\n msg = \"You can use the --short option to retrieve a short version of the log files from the Grid scheduler.\"\n self.logger.info(msg)\n \n return returndict\n\n def setOptions(self):\n \"\"\"\n __setOptions__\n\n This allows to set specific command options\n \"\"\"\n self.parser.add_option( '--quantity',\n dest = 'quantity',\n help = 'The number of logs you want to retrieve (or \"all\"). Ignored if --jobids is used.' 
)\n self.parser.add_option( '--parallel',\n dest = 'nparallel',\n help = 'Number of parallel download, default is 10 parallel download.',)\n self.parser.add_option( '--wait',\n dest = 'waittime',\n help = 'Increase the sendreceive-timeout in second.',)\n self.parser.add_option( '--short',\n dest = 'short',\n default = False,\n action = 'store_true',\n help = 'Get the short version of the log file. Use with --dir and --jobids.',)\n getcommand.setOptions(self)\n\n\n def validateOptions(self):\n getcommand.validateOptions(self)\n if self.options.short:\n if self.options.jobids is None:\n msg = \"%sError%s: Please specify the job ids for which to retrieve the logs.\" % (colors.GREEN, colors.NORMAL)\n msg += \" Use the --jobids option.\"\n ex = MissingOptionException(msg)\n ex.missingOption = \"jobids\"\n raise ex\n","sub_path":"src/python/CRABClient/Commands/getlog.py","file_name":"getlog.py","file_ext":"py","file_size_in_byte":5411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"148334597","text":"from time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\n\ndriver: WebDriver = webdriver.Chrome()\ndriver.maximize_window()\ndriver.implicitly_wait(20)\n\nSEARCH_STRING = (By.ID, \"searchval\")\nSEARCH_BTN = (By.CSS_SELECTOR, \"button.btn.btn-info.banner-search-btn\")\nITEMS_DESCRIPTIONS = (By.CSS_SELECTOR, \"a.description\")\nITEMS_TO_CHOOSE = (By.CSS_SELECTOR, \"input.btn.btn-cart.btn-small\")\nPRODUCTS = (By.CSS_SELECTOR, '.ag-item.gtm-product')\nCART_BTN = (By.NAME, \"cart\")\nEMPTY_CROSS_SIGN = (By.CSS_SELECTOR, \"a.deleteCartItemButton.close\")\nCART_ITEM = (By.ID, \"cartItemCountSpan\")\n\n# I am on Homepage\ndriver.get( 'https://www.webstaurantstore.com/' )\n\n\n# Input item into search string\nsearch = driver.find_element(*SEARCH_STRING)\nsearch.clear()\nsearch.send_keys('stainless work table')\n\n\n# Click search button\ndriver.find_element(*SEARCH_BTN).click()\n\n\n# Verify all items with Table in the title are here\nprint('There are : ', len(driver.find_elements( *ITEMS_TO_CHOOSE )), 'items.')\nprint('There are : ', len(driver.find_elements( *ITEMS_DESCRIPTIONS)), 'descriptions.')\nproducts = driver.find_elements( *PRODUCTS )\nfor product in list(products):\n title = product.find_element( *ITEMS_DESCRIPTIONS )\nprint('Title:', title.text, '.')\nassert 'Table' in title.text\n\n\n# Add the last of found items to cart\nsleep(8)\n# target = driver.find_element(By.CSS_SELECTOR, \"input.btn.btn-cart.btn-small\")\n# actions = ActionChains(driver)\n# actions.move_to_element(target)\n# sleep(2)\n# actions.click(target)\n# actions.perform()\ndriver.find_elements( *ITEMS_TO_CHOOSE )[-1].click()\n# wait until pop-up desappears\nsleep(8)\n\n\n# Click on cart button\ntarget = driver.find_element(By.NAME, \"cart\")\nactions = ActionChains(driver)\nactions.move_to_element(target)\nsleep(2)\nactions.click(target)\nactions.perform()\ndriver.find_element( *CART_BTN ).click()\nsleep(8)\n\n\n# Click on cross symbol empty cart\ntarget = driver.find_element(By.CSS_SELECTOR, \"a.deleteCartItemButton.close\")\nactions = ActionChains(driver)\nactions.move_to_element(target)\nsleep(2)\nactions.click(target)\nactions.perform()\n# driver.find_element( *EMPTY_CROSS_SIGN ).click()\n# wait until cart is empthy\nsleep(4)\n\n\n# Verify cart is empty\nprint('Text in the cart button: ', str(driver.find_element( *CART_ITEM ).text),'.')\n\n# Exit\ndriver.quit()","sub_path":"TDD/test_dt_19_aug_2020.py","file_name":"test_dt_19_aug_2020.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"593785140","text":"\"\"\"\nCopyright (c) 2015 Red Hat, Inc\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. See the LICENSE file for details.\n\n\nPre build plugin which injects custom yum repository in dockerfile.\n\"\"\"\nimport os\nimport re\nimport uuid\nfrom dock.plugin import PreBuildPlugin\n\n\ndef alter_yum_commands(df, wrap_str):\n regex = re.compile(r\"RUN\\s+(?Pyum((\\s.+\\\\\\n)+)?(.+))\", re.MULTILINE)\n sub_func = lambda match: wrap_str % {'yum_command': match.group('yum_command').rstrip()}\n return regex.sub(sub_func, df)\n\n\nclass InjectYumRepoPlugin(PreBuildPlugin):\n key = \"inject_yum_repo\"\n\n def __init__(self, tasker, workflow):\n \"\"\"\n constructor\n\n :param tasker: DockerTasker instance\n :param workflow: DockerBuildWorkflow instance\n \"\"\"\n # call parent constructor\n super(InjectYumRepoPlugin, self).__init__(tasker, workflow)\n self.yum_repos_dir = '/etc/yum.repos.d/'\n self.repofile_name = 'dock-injected.repo'\n self.repo_path = os.path.join(self.yum_repos_dir, self.repofile_name)\n\n def run(self):\n \"\"\"\n run the plugin\n \"\"\"\n def escape_dollar(v):\n if isinstance(v, str):\n return v.replace('$', '\\$')\n else:\n return v\n\n rendered_repos = ''\n for repo in self.workflow.repos.get('yum', []):\n repo.setdefault(\"name\", str(uuid.uuid4().hex[:6]))\n rendered_repo = ''\n for key, value in repo.items():\n rendered_repo += r\"%s=%s\\n\" % (key, escape_dollar(value))\n rendered_repo = r'[%(name)s]\\n' % repo + rendered_repo\n rendered_repos += rendered_repo\n\n wrap_cmd = 'RUN printf \"%s\"' % rendered_repos + \\\n ' >%(repo_path)s && %%(yum_command)s && yum clean all && rm -f %(repo_path)s' \\\n % {'repo_path': self.repo_path}\n\n with open(self.workflow.builder.df_path, \"r+\") as fd:\n df = fd.read()\n out = alter_yum_commands(df, wrap_cmd)\n fd.seek(0)\n fd.truncate()\n fd.write(out)\n","sub_path":"dock/plugins/pre_inject_yum_repo.py","file_name":"pre_inject_yum_repo.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"59321896","text":"import numpy as np\nfrom algotimos_otimizacao.funcao_AG import func_obj\n\nclass Selection:\n def __init__(self, fo = func_obj):\n self.fo = fo\n # dim_pop = dimensão da populção\n # sim_pop dimensão de uma unica solução\n # Retorna matriz com dim_pop x dim_s, onde cada valor da solução tem chance de ser um random entre serch_interval\n def RandomPopulation(self, dim_pop = 100, dim_s = 6, search_interval = [-1.0, 1.0], neuralnet = False, env = False, params_nn = False, onehot_encode = False):\n pop = np.random.uniform(search_interval[0], search_interval[1], size = (dim_pop, dim_s+1))\n for i in range(pop.shape[0]):\n pop[i][0] = self.fo(pop[i][1:], neuralnet, env, params_nn, onehot_encode) \n return pop\n\n # seleciona metade da população com maior fo\n def Select_kMax(self, pop):\n half_number = int(pop.shape[0]/2)\n selected_fo = sort(pop[:][0])[half_number:]\n\n list_selectd_s = []\n for i in range(pop.shape[0]):\n if pop[i,0] in selected_fo:\n list_selectd_s.append(pop[i])\n if(len(list_selectd_s) == half_number):\n return np.array(list_selectd_s)\n\n return np.array(list_selectd_s)\n\n # metodo de seleção por torneio, retorna sempre a metade da população\n def Select_Tournament(self, pop):\n candidates = list(range(0, pop.shape[0]))\n half_number = int(pop.shape[0]/2)\n selected_candidates = []\n\n for i in range(half_number):\n\n value1 = np.random.randint(0, len(candidates))\n candidate1 = candidates[value1]\n candidates.remove(candidate1)\n\n value2 = np.random.randint(0, len(candidates))\n candidate2 = candidates[value2]\n candidates.remove(candidate2)\n\n if(pop[candidate1][0] < pop[candidate2][0]):\n selected_candidates.append(candidate1)\n else:\n selected_candidates.append(candidate2)\n\n new_pop = []\n for s in selected_candidates:\n new_pop.append(pop[s])\n #print('Fim seleção')\n return np.array(new_pop)\n\n\n def Select_Roulette(self, pop):\n candidates = list(range(0, pop.shape[0]))\n pop_size = int(pop.shape[0]/2)\n selected_candidates = []\n #print('Selecao: ')\n\n sum_fitnes = np.sum(pop[:,1])\n if (sum_fitnes == 0.0):\n #print(\"FITNESS 0 DE POPULAÇÃO\")\n sum_fitnes = 0.000000000001\n addeds = []\n roulette = pop[:,1]/sum_fitnes\n #print(pop)\n for i in range(pop_size):\n value = np.random.random()\n counter = 0.0\n for j in range(len(roulette)):\n counter += roulette[j]\n # mais elitismo - sem retirar ja sorteado\n\n if counter > pop[j][0]:\n selected_candidates.append(pop[j])\n addeds.append(j)\n break\n #print('Fim Selecao: ')\n return np.array(selected_candidates) \n\n\n def Select_Roulette2(self, pop):\n candidates = list(range(0, pop.shape[0]))\n pop_size = int(pop.shape[0]/2)\n selected_candidates = []\n #print('Selecao: ')\n\n sum_fitnes = np.sum(pop[:,0])\n if (sum_fitnes == 0.0):\n #print(\"FITNESS 0 DE POPULAÇÃO\")\n sum_fitnes = 0.000000000001\n addeds = []\n roulette = pop[:,0]/sum_fitnes\n #print(pop)\n for i in range(pop_size):\n value = np.random.random()\n counter = 0.0\n for j in range(len(roulette)):\n counter += roulette[j]\n # mais elitismo - sem retirar ja sorteado\n if j not in addeds:\n if counter > pop[j][0]:\n selected_candidates.append(pop[j])\n addeds.append(j)\n break\n #print('Fim Selecao: ')\n return np.array(selected_candidates) \n","sub_path":"AG/algotimos_otimizacao/selectionOperator.py","file_name":"selectionOperator.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"302988453","text":"import pandas as pd\r\nfrom openpyxl import load_workbook\r\nfrom textblob import TextBlob\r\n\r\ndef add_data(strr):\r\n \r\n data=pd.read_excel(\"App-data.xlsx\")\r\n\r\n filepath='App-data.xlsx'\r\n \r\n wb=load_workbook(filepath)\r\n \r\n sheet=wb.active\r\n \r\n #enter=['PUBG','GAME','NAN']\r\n \r\n maxx=(len(data['App']))+1\r\n #print(maxx)\r\n \r\n c=1\r\n for item in range(len(strr)):\r\n sheet.cell(row=maxx+1, column=c).value=strr[item]\r\n c=c+1 \r\n \r\n wb.save(filepath)\r\n \r\n lst=[]\r\n lst.append(\"The data is sucessfully added\")\r\n return lst\r\n\r\ndef user_review(appnreview):\r\n strr=[]\r\n for k,v in appnreview.items():\r\n analysis=TextBlob(v)\r\n print(analysis.sentiment)\r\n strr.append(k)\r\n strr.append(v)\r\n if analysis.sentiment.polarity<0:\r\n strr.append(\"Negative\")\r\n elif analysis.sentiment.polarity==0:\r\n strr.append(\"Neutral\")\r\n elif analysis.sentiment.polarity>0:\r\n strr.append(\"Positive\")\r\n else:\r\n strr.append(\"nan\")\r\n strr.append(analysis.sentiment.polarity)\r\n strr.append(analysis.sentiment.subjectivity)\r\n print(strr)\r\n data=pd.read_excel(\"user_reviews.xlsx\")\r\n\r\n filepath='user_reviews.xlsx'\r\n wb=load_workbook(filepath)\r\n \r\n sheet=wb.active\r\n \r\n #enter=['PUBG','GAME','NAN']\r\n \r\n maxx=(len(data['App']))+1\r\n #print(maxx)\r\n \r\n c=1\r\n for item in range(len(strr)):\r\n sheet.cell(row=maxx+1, column=c).value=strr[item]\r\n c=c+1 \r\n \r\n wb.save(filepath)\r\n \r\n lst=[]\r\n lst.append(\"The data is sucessfully added\")\r\n return lst\r\n\r\n ","sub_path":"prob18.py","file_name":"prob18.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"138143980","text":"import sys\nimport math\n\nfirst_input_length = 2\n\ndef validateInput(inp, length):\n\tif len(inp) != length: # check argument count\n\t\tprint('You should enter {} numbers'.format(length))\n\t\tsys.exit(1)\n\n\tfor data in inp: # make sure all values are integers\n\t\ttry:\n\t\t\tdata = int(data)\n\t\texcept Exception as e:\n\t\t\tprint('You should enter integers')\n\t\t\tsys.exit(1)\n\n\nfirst_input = input('Enter HH:MM: ').split(':')\nvalidateInput(first_input, first_input_length)\n\n(hours, minutes) = int(first_input[0]), int(first_input[1])\n\nif (hours < 1 or hours > 12 or minutes < 0 or minutes > 59):\n\tprint('0< HH < 13 and 0<= MM < 60');\n\tsys.exit()\n\nangle_per_hour = 360 / 12\nangle_per_minute = 360 / 60\nangle_for_hour_minute = 0.5\n\nhours_angle = hours * angle_per_hour\n\nif minutes > 0:\n\thours_angle += minutes * angle_for_hour_minute\n\nminutes_angle = minutes * angle_per_minute\n\ny = abs(hours_angle - minutes_angle)\n\nif (360 - y < y):\n\ty = 360 - y\n\nprint(\"{0:.3f}\".format(y))\n","sub_path":"fast_tasks/clock/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"512550076","text":"import locale\nimport os\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nplt.style.use(['seaborn-whitegrid', 'ggplot', 'fast'])\nsns.set_theme()\nwarnings.filterwarnings('ignore')\nlocale.setlocale(locale.LC_ALL, 'pt_BR')\n\n\ndef get_dataframe():\n lista_df = []\n for dirname, _, filenames in os.walk('D:/data/dataset/covid/', topdown=False):\n for filename in filenames:\n if filename.endswith('.csv'):\n df = pd.read_csv(os.path.join(dirname, filename), sep=';', encoding='utf-8')\n df.rename(columns=str.lower, inplace=True)\n df.rename(columns=str.strip, inplace=True)\n df['dt_notific'] = pd.to_datetime(df['dt_notific'], format=\"%d/%m/%Y\")\n df['dt_evoluca'] = pd.to_datetime(df['dt_evoluca'], format=\"%d/%m/%Y\")\n df['dt_encerra'] = pd.to_datetime(df['dt_encerra'], format=\"%d/%m/%Y\")\n df['hospital'] = df['hospital'].fillna(9)\n lista_df.append(df)\n\n if lista_df:\n df = pd.concat(lista_df, axis=0, ignore_index=True)\n df = df.drop_duplicates()\n return df\n return None\n\n\ndef gera_grafico(df):\n # Ajusta os valores no Dataframe.\n df1 = df[['nu_idade_n', 'hospital', 'dt_notific', 'dt_evoluca', 'dt_encerra']]\n # Mantém somente linhas com internação.\n df1 = df1.query('(hospital == 1) and (dt_evoluca == dt_evoluca or dt_encerra == dt_encerra)')\n # Remove linhas com valores nulos.\n df1 = df1.dropna()\n # Ajustando as faixas etárias.\n df1.loc[df1['nu_idade_n'] < 20, 'faixa_etaria'] = '0-19 anos'\n df1.loc[np.logical_and(df1['nu_idade_n'] > 19, df1['nu_idade_n'] < 60), 'faixa_etaria'] = '20-59 anos'\n df1.loc[df1['nu_idade_n'] > 59, 'faixa_etaria'] = '60+ anos'\n # Calcula o número de dias internado.\n df1['dt_fim'] = np.where(df1['dt_evoluca'].notnull(), df1['dt_evoluca'], df1['dt_encerra'])\n df1['dias_internacao'] = (df1['dt_fim'] - df1['dt_notific']).dt.days.astype('int16')\n # Criando as colunas de faixas de internação.\n df1['Até 14 dias'] = np.where(df1['dias_internacao'] < 15, 1, 0)\n df1['15-30 dias'] = np.where(np.logical_and(df1['dias_internacao'] > 14, df1['dias_internacao'] < 31), 1, 0)\n df1['31-60 dias'] = np.where(np.logical_and(df1['dias_internacao'] > 30, df1['dias_internacao'] < 61), 1, 0)\n df1['Mais de 60 dias'] = np.where(df1['dias_internacao'] > 60, 1, 0)\n df1 = df1.drop(['nu_idade_n', 'hospital', 'dt_notific', 'dt_evoluca', 'dt_encerra', 'dt_fim', 'dias_internacao'],\n axis=1)\n df1 = df1.groupby('faixa_etaria').agg(soma1=pd.NamedAgg(column='Até 14 dias', aggfunc=sum),\n soma2=pd.NamedAgg(column='15-30 dias', aggfunc=sum),\n soma3=pd.NamedAgg(column='31-60 dias', aggfunc=sum),\n soma4=pd.NamedAgg(column='Mais de 60 dias', aggfunc=sum)).reset_index()\n df1.columns = ['Faixa etária', '1-14 dias', '15-30 dias', '31-60 dias', '61+ dias']\n df1 = df1.set_index('Faixa etária')\n # Monta e gera o gráfico.\n ax = df1.plot.area(stacked=False)\n fig = ax.get_figure()\n fig.savefig(\"C:/Users/jorge/Downloads/Imagens/04_grafico_area.png\")\n plt.show()\n\n\ndef main():\n # Obtém os dados da API.\n df = get_dataframe()\n # Gera os gráficos.\n gera_grafico(df)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"grafico_area.py","file_name":"grafico_area.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"429109716","text":"# coding: utf-8\nimport sys\nimport xlsxwriter\n\nimport models\n\n\ndef jcatalog():\n # Cria a pasta Excel e adiciona uma planilha\n workbook = xlsxwriter.Workbook('output/pde_not_respond.xlsx')\n worksheet = workbook.add_worksheet()\n\n # format_date = workbook.add_format({'num_format': 'dd/mm/yyyy'})\n\n # Header\n row = 0\n col = 0\n\n wrap = workbook.add_format({'text_wrap': True})\n\n headers = ['issn',\n 'title',\n 'indexado WoS',\n 'citações SciELO no SciELO CI/WoS 2017',\n 'citações WoS ALL Databases no SciELO CI/WoS 2017',\n 'Indexado JCR',\n 'Fator de Impacto JCR 2017',\n 'indexado Scopus',\n 'CiteScore 2017',\n 'Google H5 2017',\n 'Google M5 2017']\n\n for h in headers:\n worksheet.write(0, col, h, wrap)\n col += 1\n\n # Extraction date - get date from SciELO collection\n # extraction_date = models.Scielo.objects.first().extraction_date\n\n row = 1\n\n with open('data/scielo/pde/issns_nao_responderam.txt') as f:\n content = f.readlines()\n # you may also want to remove whitespace characters like `\\n` at the end\n # of each line\n issns = [x.strip() for x in content]\n\n for i in issns:\n\n doc = models.Scielofapesp.objects.filter(issn_scielo=i)[0]\n print(str(doc.issn_list))\n\n col = 0\n\n # Issn SciELO\n worksheet.write(row, col, doc.issn_scielo)\n col += 1\n\n # titulo\n worksheet.write(row, col, doc.title)\n col += 1\n\n # --- indexação WoS, 0, 1\n worksheet.write(row, col, doc.is_wos)\n col += 1\n\n # --- citações SciELO no SciELO CI/WoS\n worksheet.write(row, col, doc['scieloci']['scieloci_cited_2017'] or 0)\n col += 1\n\n # --- citações WoS ALL Databases no SciELO CI/WoS\n worksheet.write(row, col, doc['scieloci'][\n 'scieloci_wos_cited_2017'] or 0)\n col += 1\n\n # --- indexação JCR, 0,1\n worksheet.write(row, col, doc.is_jcr)\n col += 1\n\n # --- Fator de Impacto JCR\n if doc.is_jcr == 1:\n docjcr = models.Jcr.objects(id=str(doc.jcr_id))[0]\n year = 2017\n if hasattr(docjcr, str(year)):\n if 'journal_impact_factor' in docjcr[str(year)]:\n worksheet.write(row, col, docjcr[str(year)][\n 'journal_impact_factor'])\n col += 1\n\n # --- Indexação Scopus, 0,1\n worksheet.write(row, col, doc.is_scopus)\n col += 1\n\n # --- CiteScore\n if doc.is_scopus == 1:\n docscopus = models.Scopus.objects(id=str(doc.scopus_id))[0]\n year = 2017\n if hasattr(docscopus, str(year)):\n if 'citescore' in docscopus[str(year)]:\n worksheet.write(row, col, docscopus[\n str(year)]['citescore'])\n col += 1\n\n ndoc = models.Scielo.objects.filter(issn_scielo=i)[0]\n # Google H5 M5\n if 'google_scholar_h5_2017' in ndoc:\n worksheet.write(row, col, ndoc['google_scholar_h5_2017'])\n col += 1\n\n if 'google_scholar_m5_2017' in ndoc:\n worksheet.write(row, col, ndoc['google_scholar_m5_2017'])\n col += 1\n\n # Avançar linha - prox. documento\n row += 1\n\n # Grava planilha Excel\n try:\n workbook.close()\n except IOError as e:\n print(e)\n sys.exit(1)\n\n\ndef main():\n jcatalog()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"jcatalog/reports/pde_not_respond.py","file_name":"pde_not_respond.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"28069985","text":"from threading import Thread\nfrom tornado.ioloop import IOLoop\nimport tornado.web\nimport time\nimport collections\nimport tornado.gen\nimport itertools\n\nDATA = open('static/28k.html').read()\n\nclass MainHandler(tornado.web.RequestHandler):\n @tornado.web.asynchronous\n @tornado.gen.engine\n def get(self):\n self.set_status(200)\n self.set_header('Content-Type', 'text/html; charset=utf-8')\n self.write(DATA)\n\n try:\n delay = float(self.get_argument('delay', 0))\n except ValueError:\n delay = 0\n\n try:\n num = int(self.get_argument('num', 0))\n except ValueError:\n num = 0\n\n self.write('%02d' % num)\n\n if delay:\n yield tornado.gen.Task(IOLoop.instance().add_timeout, time.time() + delay)\n\n self.finish()\n\ndef main():\n app = tornado.web.Application([\n (r\"^.*\", MainHandler),\n ])\n app._listen_port = 9000\n app.listen(9000)\n\n tornado.ioloop.IOLoop.instance().start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"476571269","text":"from __future__ import annotations\n\nimport itertools\nimport re\nfrom abc import ABC, abstractmethod\nfrom typing import (\n Any,\n Callable,\n Iterator,\n Mapping,\n MutableMapping,\n MutableSequence,\n Optional,\n Pattern,\n Sequence,\n Tuple,\n TypedDict,\n TypeVar,\n)\n\nfrom snuba.clickhouse.formatter.nodes import FormattedQuery\n\nColumn = TypedDict(\"Column\", {\"name\": str, \"type\": str})\nRow = MutableMapping[str, Any]\nResult = TypedDict(\n \"Result\",\n {\"meta\": Sequence[Column], \"data\": MutableSequence[Row], \"totals\": Row},\n total=False,\n)\n\n\ndef iterate_rows(result: Result) -> Iterator[Row]:\n if \"totals\" in result:\n return itertools.chain(result[\"data\"], [result[\"totals\"]])\n else:\n return iter(result[\"data\"])\n\n\ndef transform_rows(result: Result, transformer: Callable[[Row], Row]) -> None:\n \"\"\"\n Transforms the Result dictionary in place replacing each Row object\n with the one returned by the transformer function.\n \"\"\"\n for index, row in enumerate(result[\"data\"]):\n result[\"data\"][index] = transformer(row)\n\n if \"totals\" in result:\n result[\"totals\"] = transformer(result[\"totals\"])\n\n\nNULLABLE_RE = re.compile(r\"^Nullable\\((.+)\\)$\")\n\n\ndef unwrap_nullable_type(type: str) -> Tuple[bool, str]:\n match = NULLABLE_RE.match(type)\n if match is not None:\n return True, match.groups()[0]\n else:\n return False, type\n\n\nT = TypeVar(\"T\")\nR = TypeVar(\"R\")\n\n\ndef transform_nullable(\n function: Callable[[T], R]\n) -> Callable[[Optional[T]], Optional[R]]:\n def transform_column(value: Optional[T]) -> Optional[R]:\n if value is None:\n return value\n else:\n return function(value)\n\n return transform_column\n\n\ndef build_result_transformer(\n column_transformations: Sequence[Tuple[Pattern[str], Callable[[Any], Any]]],\n) -> Callable[[Result], None]:\n \"\"\"\n Builds and returns a function that can be used to mutate a ``Result``\n instance in-place by transforming all values for columns that have a\n transformation function specified for their data type.\n \"\"\"\n\n def transform_result(result: Result) -> None:\n for column in result[\"meta\"]:\n is_nullable, type = unwrap_nullable_type(column[\"type\"])\n\n transformer = next(\n (\n transformer\n for pattern, transformer in column_transformations\n if pattern.match(type)\n ),\n None,\n )\n\n if transformer is None:\n continue\n\n if is_nullable:\n transformer = transform_nullable(transformer)\n\n name = column[\"name\"]\n for row in iterate_rows(result):\n row[name] = transformer(row[name])\n\n return transform_result\n\n\nclass Reader(ABC):\n @abstractmethod\n def execute(\n self,\n query: FormattedQuery,\n settings: Optional[Mapping[str, str]] = None,\n with_totals: bool = False,\n robust: bool = False,\n ) -> Result:\n \"\"\"Execute a query.\"\"\"\n raise NotImplementedError\n","sub_path":"snuba/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"325252812","text":"# https://www.jiuzhang.com/solutions/reverse-linked-list\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n rev, last = None, head\n while last:\n head = last\n last = last.next\n head.next = rev\n rev = head\n return head\n\n","sub_path":"leetcode/lc206_Reverse_Linked_List.py","file_name":"lc206_Reverse_Linked_List.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"582943110","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nimport os, sys\nimport time\nimport calendar\nimport datetime\nimport openpyxl\nimport logging\nsys.path.append(\"/root\")\nfrom pyzabbix import ZabbixAPI\nfrom openpyxl.styles import Border, Side, PatternFill, Font, GradientFill, Alignment\nfrom openpyxl import Workbook\nfrom settings import *\n\nlogging.basicConfig(level=logging.ERROR,\n format='%(asctime)s %(name)s %(levelname)s %(message)s',\n datefmt='%d-%m %H:%M:%S',\n filename='./logs/log_hist')\nlogger = logging.getLogger('-')\nlogger.setLevel(logging.INFO)\n\nthin = Side(border_style=\"thin\", color=\"000000\")\ndouble = Side(border_style=\"double\", color=\"ff0000\")\nborder = Border(top=double, left=thin, right=thin, bottom=double)\nfill = PatternFill(\"solid\", fgColor=\"DDDDDD\")\n#fill = GradientFill(stop=(\"000000\", \"FFFFFF\"))\nfont = Font(b=True, color=\"000000\")\nal = Alignment(horizontal=\"center\", vertical=\"center\")\npathe = '/mnt/prin/print.xlsx'\n\nserver = 'http://127.0.0.1/zabbix'\nzapi = ZabbixAPI(server)\nzapi.login(login, pswd)\n\nprint(datetime.datetime.now())\ndatyear = datetime.datetime.today().strftime(\"%Y\")\ndatmonth = datetime.datetime.today().strftime(\"%m\")\ndatmoday = datetime.datetime.today().strftime(\"%d\")\ntime_till = time.mktime(datetime.datetime.now().timetuple())\n\ndef histry (item_id, s_date, s_month, s_year, s_time, e_date, e_month, e_year, e_time):\n global datt, vall, history\n dat = str(s_date) + \"/\" + str(s_month) + \"/\" + str(s_year) + str(s_time)\n time_start = time.mktime(datetime.datetime.strptime(dat, \"%d/%m/%Y %H:%M\").timetuple())\n time_start = (str(time_start)).split('.')[0]\n logger.info(\"histry - time_start: \" + str(time_start))\n dat = str(e_date) + \"/\" + str(e_month) + \"/\" + str(e_year) + str(e_time)\n time_end = time.mktime(datetime.datetime.strptime(dat, \"%d/%m/%Y %H:%M\").timetuple())\n time_end = (str(time_end)).split('.')[0]\n logger.info(\"histry - time_end: \" + str(time_end))\n history = zapi.history.get(itemids=[item_id],\n time_from=time_start,\n time_till=time_end,\n output='extend',\n limit='1',\n )\n if history:\n datt = format(datetime.datetime.fromtimestamp(int(history[0]['clock'])).strftime(\"%x %X\")) #datetime\n vall = history[0]['value']\n logger.info(\"Дата: \" + datt + \" Распечатано на дату: \" + vall)\n return datt, vall, history\n\nwb = openpyxl.load_workbook(filename = pathe)\nsheetsnames = wb.get_sheet_names()\nsheetsnames.reverse()\npoint = sheetsnames[0]\nif point != datyear:\n sheet = wb.create_sheet(title=datyear)\n sheet.cell(row=1, column=1).value = 'Подразделение'\n sheet.cell(row=1, column=2).value = 'Кабинет'\n sheet.cell(row=1, column=3).value = 'ID'\n sheet.cell(row=1, column=4).value = 'Модель'\n sheet.cell(row=1, column=5).value = 'IP'\n sheet.cell(row=1, column=6).value = 'MAK'\n sheet.cell(row=1, column=7).value = 'Январь'\n sheet.cell(row=1, column=8).value = 'Февраль'\n sheet.cell(row=1, column=9).value = 'Март'\n sheet.cell(row=1, column=10).value = 'Апрель'\n sheet.cell(row=1, column=11).value = 'Май'\n sheet.cell(row=1, column=12).value = 'Июнь'\n sheet.cell(row=1, column=13).value = 'Июль'\n sheet.cell(row=1, column=14).value = 'Август'\n sheet.cell(row=1, column=15).value = 'Сентябрь'\n sheet.cell(row=1, column=16).value = 'Октябрь'\n sheet.cell(row=1, column=17).value = 'Ноябрь'\n sheet.cell(row=1, column=18).value = 'Декабрь'\n sheet.cell(row=1, column=19).value = 'Итого за год'\n sheet.cell(row=1, column=20).value = 
'Всего'\n cel = 2\n y = 1\n while y < 21:\n sheet.cell(row=1, column=y).alignment = al\n sheet.cell(row=1, column=y).font = font\n sheet.cell(row=1, column=y).fill = fill\n y = y + 1.\n while cel < 1002:\n y = 1\n sheet.cell(row=cel, column=3).value = 'U-0' + '{:03}'.format(cel - 1)\n while y < 21:\n sheet.cell(row=cel, column=y).alignment = al\n y = y + 1\n cel = cel + 1\nelif point == datyear and datmonth != \"01\":\n sheet = wb[datyear]\nelif point == datyear and datmonth == \"01\":\n datyear_december = str(int(datyear)-1)\n sheet = wb[datyear_december]\n\nif pathe:\n hosts = zapi.host.get(groupids=[11,12,14,19],\n output=[\"hostid\", \"name\"]\n )\n # Получение данных по принтерам (id принтера, кол-во отпечатанных страниц, местоположение, модель, mac адрес, ip адрес) с db Zabbix\n for point in hosts:\n hostname = point['name']\n print(hostname)\n hid = point['hostid']\n item = zapi.item.get(hostids=hid,\n output=[\"itemid\", \"name\"],\n filter={'name':'Отпечатанно страниц'}\n )\n iten = zapi.item.get(hostids=hid,\n output=[\"lastvalue\", \"name\"],\n filter={'name':'Местоположение'}\n )\n location = iten[0]['lastvalue']\n iten = zapi.item.get(hostids=hid,\n output=[\"lastvalue\", \"name\"],\n filter={'name':'Модель'}\n )\n mdel = iten[0]['lastvalue']\n iten = zapi.item.get(hostids=hid,\n output=[\"lastvalue\", \"name\"],\n filter={'name':'MAC адрес'}\n )\n maca = str(iten[0]['lastvalue'])\n ips = zapi.hostinterface.get(hostids=hid,\n output=[\"ip\"],\n filter={'name':'ip'}\n )\n ip = ips[0]['ip']\n\n m = j = 2\n hos = sheet.cell(row=m, column=3).value\n\n logger.info(\"--- Месяц \" + datmonth + \" -------------------------------------------------------------------------------\")\n logger.info(\"МФУ: \" + hostname)\n logger.info(\"Модель: \" + mdel)\n logger.info(\"MAC: \" + maca)\n logger.info(\"IP: \" + ip)\n logger.info(\"Местоположение: \" + location)\n logger.info(\"------------------------------\")\n\n # Получение данных по печати за прошлый месяц\n for point in item:\n item_id = point['itemid']\n if datmonth != \"01\":\n lastm = calendar.monthrange(int(datyear),int(datmonth)-1)[1]\n histry (item_id, 1, str(int(datmonth)-1), datyear, \" 00:00\", str(lastm), str(int(datmonth)-1), datyear, \" 23:30\")\n datt1 = datt\n vall1 = vall\n if not history:\n vall = 0\n histry (item_id, lastm, str(int(datmonth)-1), datyear, \" 18:00\", str(lastm), str(int(datmonth)-1), datyear, \" 23:30\")\n datt31 = datt\n vall31 = vall\n if not history:\n while not history and lastm != 1:\n lastm-=1\n histry (item_id, lastm, str(int(datmonth)-1), datyear, \" 18:00\", str(lastm), str(int(datmonth)-1), datyear, \" 23:30\")\n if history:\n datt31 = datt\n vall31 = vall\n if lastm == 1:\n vall = 0\n \n if datmonth == \"01\":\n lastm = calendar.monthrange(int(datyear)-1,12)[1]\n histry (item_id, 1, 12, str(int(datyear)-1), \" 00:00\", str(lastm), 12, str(int(datyear)-1), \" 23:30\")\n datt1 = datt\n vall1 = vall\n if not history:\n vall = 0\n histry (item_id, lastm, 12, str(int(datyear)-1), \" 18:00\", str(lastm), 12, str(int(datyear)-1), \" 23:30\")\n datt31 = datt\n vall31 = vall\n if not history:\n while not history and lastm != 1:\n lastm-=1\n histry (item_id, lastm, 12, str(int(datyear)-1), \" 18:00\", str(lastm), 12, str(int(datyear)-1), \" 23:30\")\n if history:\n datt31 = datt\n vall31 = vall\n if lastm == 1:\n vall = 0\n\n # Сохранение полученной информации в Excel книгу\n while hos and j != 1:\n if hos == hostname:\n vall_r = int(vall31) - int(vall1)\n if (datmonth != \"01\"):\n sheet.cell(row=m, 
column=(int(datmonth) + 5)).value = vall_r\n elif (datmonth == \"01\"):\n sheet.cell(row=m, column=18).value = vall_r\n i = 7\n vall_sum = 0\n while i < 18:\n vall_s = sheet.cell(row=m, column=i).value\n if vall_s is None:\n vall_s = 0\n vall_sum = int(vall_s) + int(vall_sum)\n i = i + 1\n sheet.cell(row=m, column=19).value = vall_sum\n sheet.cell(row=m, column=20).value = vall31\n if (location != \"0\" and location != None and location != 0):\n sheet.cell(row=m, column=2).value = location\n if (mdel != \"0\" and mdel != None and mdel != 0):\n sheet.cell(row=m, column=4).value = mdel\n HTTP = \"http://\" + ip\n sheet.cell(row=m, column=5).value = HTTP\n if (maca != \"0\" and maca != None and maca != 0):\n sheet.cell(row=m, column=6).value = maca\n j = 1\n logger.info(\"------------------------------\")\n logger.info(\"Распечатано за месяц: \" + str(vall_r))\n logger.info(\"Распечатано за год: \" + str(vall_sum))\n m = m + 1\n hos = sheet.cell(row=m, column=3).value\n vall = 0\n datt = 0\n vall_end = 0\n datt_end = 0\n\nsheet.column_dimensions['A'].width = 63\nsheet.column_dimensions['C'].width = 8\nsheet.column_dimensions['D'].width = 20\nsheet.column_dimensions['E'].width = 18\nsheet.column_dimensions['F'].width = 16\nsheet.column_dimensions['S'].width = 15\nwb.save(pathe)","sub_path":"z_hist.py","file_name":"z_hist.py","file_ext":"py","file_size_in_byte":10738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"410562641","text":"\"\"\"anote_rest URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^api_1/', include('rest_framework.urls', namespace='api_admin')),\n url(r'^api_1/kegiatan/', include('anote_rest_apl.urls', namespace='api_1_kegiatan')),\n url(r'^api_1/butir/', include('anote_rest_butir.urls', namespace='api_1_butir')),\n url(r'^api_1/profil/', include('anote_rest_profil.urls', namespace='api_1_profil')),\n url(r'^api_1/utiliti/', include('anote_rest_utiliti.urls', namespace='api_1_utiliti')),\n]\n","sub_path":"anote_rest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"28721313","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 11 15:52:37 2018\n\n@author: ywanggp\n\"\"\"\nfrom heuristicSquareAlg import *\nfrom showPoint import *\nimport numpy as np\nfrom fillPath import *\nfrom obtainScore import *\n\ndef givePath(windGraph, Departure, destination, xsize, ysize, xCity, yCity):\n iniLoc = Departure.copy()\n newLoc = iniLoc.copy()\n hourNum = 18\n timePieceMin = 60\n locList = newLoc \n for i in range(hourNum):\n lastLoc = newLoc.copy()\n windGra = windGraph[i,:,:]\n (newLoc, flag) = heuristicSquareAlg(windGra, lastLoc.copy(), destination, timePieceMin)\n# print(windGra[newLoc[0], newLoc[1]])\n Fillpath = fillPath(lastLoc, newLoc)\n if(i == 0):\n locList = np.concatenate((np.asarray([locList]), Fillpath), axis = 0)\n else:\n locList = np.concatenate((locList, Fillpath), axis = 0) \n showPoint(windGra, xCity, yCity, locList)\n if(flag == True):\n break \n return(locList,obtainScore(locList,windGraph))","sub_path":"Functions/Linlong/givePath.py","file_name":"givePath.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"318046497","text":"# https://www.hackerrank.com/challenges/finding-the-percentage\nfrom functools import reduce\n\n\ndef student_input():\n s_list = [elem for elem in input().split()]\n s_list[1] = float(s_list[1])\n s_list[2] = float(s_list[2])\n s_list[3] = float(s_list[3])\n return s_list\n\n\ndef avg_grade(s_list):\n return reduce(lambda a, x: a + x, s_list) / len(s_list)\n\n\nstudent_num = int(input())\nstudent_dict = {}\nfor i in range(student_num):\n s_info = student_input()\n name = s_info[0]\n del s_info[0]\n student_dict[name] = s_info\n\nselected_student_name = input()\nprint(\"{0:.2f}\".format(avg_grade(student_dict[selected_student_name])))","sub_path":"hackerrank/python_tutorial/finding_the_percentage.py","file_name":"finding_the_percentage.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"279486094","text":"\"\"\"Plugins for CMS\"\"\"\n# import itertools\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.staticfiles.storage import staticfiles_storage\n\n# from tagging.models import TaggedItem\n\nfrom cms.plugin_pool import plugin_pool\nfrom cms.plugin_base import CMSPluginBase\n\n#from courses.models import Venue, Teacher, Course\n\nfrom cmsplugin_courses.models import BackgroundImage\n\n\nclass CoursesCMSPluginBase(CMSPluginBase):\n module = 'Courses'\n text_enabled = True\n\n def icon_src(self, instance):\n \"\"\"\n Base icon for Courses' plugins\n \"\"\"\n return staticfiles_storage.url('cmsplugin_courses/img/plugin.png')\n\n\nclass BackgroundImagePlugin(CMSPluginBase):\n model = BackgroundImage\n name = _(\"Background Image\")\n render_template = \"cmsplugin_courses/background_image.html\"\n\n def render(self, context, instance, placeholder):\n context.update({'object': instance})\n return context\n\n\nclass ChildListPlugin(CMSPluginBase):\n name = _('Child Elements')\n render_template = 'cmsplugin_courses/child_elements.html'\n\n def render(self, context, instance, placeholder):\n \"\"\"\n Update the context with children data\n \"\"\"\n context = super(ChildListPlugin, self).render(\n context, instance, placeholder)\n # refactor the next line, if there is a better way of accessing the page\n context['children'] = context.dicts[1].dicts[11]['current_page'].children.all()\n return context\n\n'''\nclass CMSTeacherListPlugin(CoursesCMSPluginBase):\n name = _('Teacher list')\n render_template = 'cmsplugin_courses/teacher_list.html'\n\n def render(self, context, instance, placeholder):\n \"\"\"\n Update the context with plugin's data\n \"\"\"\n context = super(CMSTeacherListPlugin, self).render(\n context, instance, placeholder)\n context['teachers'] = Teacher.objects.all()\n return context\n\n\nclass CMSVenueListPlugin(CoursesCMSPluginBase):\n name = _('Venue list')\n render_template = 'cmsplugin_courses/venue_list.html'\n\n def render(self, context, instance, placeholder):\n \"\"\"\n Update the context with plugin's data\n \"\"\"\n context = super(CMSVenueListPlugin, self).render(\n context, instance, placeholder)\n context['venues'] = Venue.objects.all()\n return context\n\n\nclass CMSCourseListPlugin(CoursesCMSPluginBase):\n name = _('Course list')\n render_template = 'cmsplugin_courses/course_list.html'\n\n def render(self, context, instance, placeholder):\n \"\"\"\n Update the context with plugin's data\n \"\"\"\n context = super(CMSCourseListPlugin, self).render(\n context, instance, placeholder)\n context['courses'] = Course.objects.select_related(\n \"teachers\", \"venue\", \"date_set\")\n return context\n'''\n\nplugin_pool.register_plugin(BackgroundImagePlugin)\nplugin_pool.register_plugin(ChildListPlugin)\n\n'''\nplugin_pool.register_plugin(CMSVenueListPlugin)\nplugin_pool.register_plugin(CMSTeacherListPlugin)\nplugin_pool.register_plugin(CMSCourseListPlugin)\n'''\n","sub_path":"cmsplugin_courses/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"313315923","text":"import cv2\nimport numpy as np\nimg=cv2.imread('flower.jpg')\n\n\ncv2.imshow('Original',img)\ncv2.waitKey(0)\nM=np.ones(img.shape,dtype=\"uint8\")*150\n\nadded=cv2.add(img,M)\ncv2.imshow('Added',added)\n\nsubtracted=cv2.subtract(img,M)\ncv2.imshow('Subtracted',subtracted)\n\nmultiply=cv2.multiply(img,M)\ncv2.imshow('Multiply',multiply)\n\n\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"aiet26.py","file_name":"aiet26.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"280677618","text":"from __future__ import absolute_import, division, print_function\n\nimport ConfigParser\nimport contextlib\nimport functools\nimport inspect\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport textwrap\nimport traceback\nimport unittest\n\nfrom optoolbox import optest\nfrom optoolbox.ciservers.executor import testconfig as ttf\n\nlogger = logging.getLogger(__name__)\n\n@optest.testConfig()\nclass TestTTConf(optest.TestCase):\n @optest.testConfig()\n def test_replace_ext(self):\n # self.assertFalse(True)\n replaced = ttf._replace_ext(r\"c:\\a\\b.cd\", \".cd\", \".ef\")\n self.assertEqual(replaced, r\"c:\\a\\b.ef\")\n self.failUnlessRaises(AssertionError, ttf._replace_ext, r\"c:\\a\\b.cd\", \"cd\", \".ef\")\n\n @optest.testConfig()\n def test_rules_list_to_dict(self):\n rules = ttf._rules_list_to_dict([(\"rule1\", \"val1\"), (\"rule2\", \"val2\")])\n self.assertEqual(rules[\"rule1\"], \"val1\")\n self.assertEqual(rules[\"rule2\"], \"val2\")\n # This should also warn, we can check for that in python 3\n rules = ttf._rules_list_to_dict([(\"rule1\", \"val1\"), (\"rule1\", \"val2\")])\n self.assertEqual(rules, {})\n\n @optest.testConfig()\n def test_apply_exclude(self):\n value, exclude = \"one;;two;three\", \".*\"\n self.assertEqual(ttf._apply_exclude(exclude, value), \"\")\n value = \"one;two;three;four;\"\n self.assertEqual(ttf._apply_exclude(exclude, value), \"\")\n exclude = \"t(w|h)\"\n self.assertEqual(ttf._apply_exclude(exclude, value), \"one;four\")\n value, exclude = r\"c:\\some\\path\\to;d:\\some\\other\\path\", r\"some\\\\path\"\n expected = r\"d:\\some\\other\\path\"\n self.assertEqual(ttf._apply_exclude(exclude, value), expected)\n\n @optest.testConfig()\n def test_apply_env_var_config(self):\n value = r\"c:\\some\\path\\to;d:\\some\\other\\path\"\n rules_list = [(\"exclude\", r\"some\\\\path\"), (\"prepend\", r\"d:\\my\\prepended\")]\n expected = r\"d:\\my\\prepended;d:\\some\\other\\path\"\n result = ttf._apply_env_var_config(value, rules_list)\n self.assertEqual(expected, result)\n\n value = r\";;c:\\some\\pth;c:\\OPAL-RT\\RT-LAB\\v11.1.1.1\\common;\" \\\n \"c:\\my\\other\\path;c:\\last\\one;;;\"\n rules_list = [(\"exclude\", r\"(opal-rt\\\\rt-lab|rt-lab\\\\build)\"),\n (\"prepend\", r\"d:\\my\\prepended\")]\n expected = r\"d:\\my\\prepended;c:\\some\\pth;c:\\my\\other\\path;c:\\last\\one\"\n result = ttf._apply_env_var_config(value, rules_list)\n self.assertEqual(expected, result)\n\n @optest.testConfig()\n def test_create_env_from_cfgprsr(self):\n cfgprsr = ConfigParser.SafeConfigParser({\"containing_dir\": r\"c:\\rando\"})\n cfgprsr.add_section(\"TESTVAR\")\n cfgprsr.set(\"TESTVAR\", \"exclude\", r\"dis\\\\allow-ed|not-per\\\\mitted\")\n cfgprsr.set(\"TESTVAR\", \"prepend\", r\"%(containing_dir)s\\m\")\n os.environ[\"TESTVAR\"] = r\"c:\\a\\good\\dir;c:\\my\\dis\\allow-ed\\dir;\" \\\n r\"c:\\another\\ok\\dir;c:\\not-per\\mitted\\folder\"\n try:\n env = ttf._create_env_from_cfgprsr(cfgprsr)\n expected = r\"c:\\rando\\m;c:\\a\\good\\dir;c:\\another\\ok\\dir\"\n self.assertEqual(expected, env[\"TESTVAR\"])\n finally:\n del os.environ[\"TESTVAR\"]\n self.failIf(\"TESTVAR\" in os.environ, \"TESTVAR key is still in os.environ after deleting it.\")\n\n @optest.testConfig()\n def test_get_env_config(self):\n @contextlib.contextmanager\n def close_and_unlink(fo):\n try:\n yield\n finally:\n fo.close()\n os.unlink(fo.name)\n\n fconf = tempfile.NamedTemporaryFile(delete=False)\n fenvconf = 
tempfile.NamedTemporaryFile(delete=False)\n with close_and_unlink(fconf):\n with close_and_unlink(fenvconf):\n fconf.write(\"[DEFAULT]\\n\")\n fconf.write(\"name: ephasorsim_testtask\\n\")\n fconf.write(\"\\n\")\n fconf.write(\"[env]\\n\")\n fconf.write(\"conf_file: %s\\n\" % fenvconf.name)\n fconf.close()\n fenvconf.write(\"[TESTVAR]\\n\")\n fenvconf.write(r\"exclude: (opal-rt\\\\rt-lab|rt-lab\\\\build)\" \"\\n\")\n relpath = r\"\\..\\..\\build\\debug\\win32\\common\\python\"\n fenvconf.write(r\"prepend: %%(containing_dir)s%s\" \"\\n\" % relpath)\n fenvconf.close()\n os.environ[\"TESTVAR\"] = r\";;c:\\some\\pth;c:\\OPAL-RT\\RT-LAB\\v11.1.1.1\\common;\" \\\n r\"c:\\my\\other\\path;c:\\last\\one;;;\"\n env = ttf._create_env_from(fconf.name)\n expected_dir = os.path.dirname(fenvconf.name) + relpath\n expected = expected_dir + r\";c:\\some\\pth;c:\\my\\other\\path;c:\\last\\one\"\n self.assertEqual(expected, env[\"TESTVAR\"])\n self.assertFalse(os.path.isfile(fconf.name))\n self.assertFalse(os.path.isfile(fenvconf.name))\n\n\nclass TestVirtualEnv(optest.TestCase):\n def test_create_venv_with_reqs(self):\n libname = \"wrapt\"\n _ensure_nonvenv_import_fails = functools.partial(\n _ensure_bad_import, sys.executable, libname)\n _ensure_nonvenv_import_fails()\n with make_fake_testtask(FakeTestTaskWithReqs, reqs_content=libname) as tt:\n venv_python = ttf.create_venv(tt.test_fpath)\n _do_an_import(interpreter=venv_python, libname=libname)\n _ensure_nonvenv_import_fails()\n\n def test_create_venv(self):\n with make_fake_testtask(DefaultFakeTestTask) as testtask:\n venv_python = ttf.create_venv(testtask.test_fpath)\n venv_python_fname = os.path.basename(venv_python)\n assert venv_python_fname.startswith(\"python\")\n assert os.path.isfile(venv_python)\n\n def test_create_duplicate_venv(self):\n with make_fake_testtask(DefaultFakeTestTask) as testtask:\n venv_python = ttf.create_venv(testtask.test_fpath)\n venv_python2 = ttf.create_venv(testtask.test_fpath)\n assert venv_python == venv_python2\n\n\nclass TempDirDeleter(object):\n def __init__(self, faketesttask):\n self.faketesttask = faketesttask\n\n def __enter__(self):\n return self.faketesttask\n\n def __exit__(self, exc_type, exc_value, traceback):\n logger.debug(\"Removing tempdir: %s\" % self.faketesttask.dirname)\n shutil.rmtree(self.faketesttask.dirname)\n\n\nclass FakeTestTask(object):\n def __init__(self, conf=None, dirname=None, name=None):\n assert conf is not None\n assert dirname is not None\n assert name is not None\n self.conf = conf\n self.dirname = dirname\n self.name = name\n logger.debug(\"Created %s\" % repr(FakeTestTask))\n\n @property\n def test_fname(self):\n return \"%s.py\" % self.name\n\n @property\n def conf_fname(self):\n return \"%s.conf\" % self.name\n\n @property\n def test_fpath(self):\n return os.path.join(self.dirname, self.test_fname)\n\n @property\n def conf_fpath(self):\n return os.path.join(self.dirname, self.conf_fname)\n\n def __repr__(self):\n args = (self.conf, self.dirname, self.name)\n return \"%s%s\" % (self.__class__.split(\".\")[-1], args)\n\n def make(self):\n with open(self.test_fpath, \"w\") as f:\n logger.debug(\"Created testtask: %s\" % self.test_fpath)\n os.utime(self.test_fpath, None)\n with open(self.conf_fpath, \"w\") as f:\n logger.debug(\"Created testtask conf: %s\" % self.conf_fpath)\n f.write(self.conf)\n\n\ndefault_conf = textwrap.dedent(\"\"\"\\\n [virtualenv]\n name: venv\n\"\"\")\n\nclass DefaultFakeTestTask(FakeTestTask):\n def __init__(self, conf=default_conf, dirname=None, 
name=None):\n assert dirname is not None\n assert name is not None\n super(DefaultFakeTestTask, self).__init__(\n conf=conf, dirname=dirname, name=name\n )\n\n\nclass FakeTestTaskWithReqs(FakeTestTask):\n \"\"\"A fake test task that uses a virtual environment with some dependencies.\"\"\"\n reqs_fname = \"requirements.txt\"\n default_conf = textwrap.dedent(\"\"\"\\\n [virtualenv]\n name: venv\n requirements_file: %s\n \"\"\" % reqs_fname)\n default_reqs_content = \"wrapt\"\n\n def __init__(self, **kwargs):\n \"\"\"Accepts two mandatory keyword arguments: `dirname' and `name', and\n two optional keyword arguments: `conf' and `reqs_content'.\n\n \"\"\"\n conf = kwargs.pop(\"conf\", FakeTestTaskWithReqs.default_conf)\n dirname = kwargs.pop(\"dirname\", None)\n name = kwargs.pop(\"name\", None)\n self.reqs_content = kwargs.pop(\n \"reqs_content\", FakeTestTaskWithReqs.default_reqs_content\n )\n if kwargs:\n raise TypeError(\"Too many arguments: %s\" % kwargs)\n super(FakeTestTaskWithReqs, self).__init__(\n conf=conf, dirname=dirname, name=name\n )\n\n @property\n def reqs_fpath(self):\n return os.path.join(self.dirname, self.reqs_fname)\n\n def make(self):\n super(FakeTestTaskWithReqs, self).make()\n with open(self.reqs_fpath, \"w\") as f:\n logger.debug(\"Created testtask conf: %s\" % self.reqs_fpath)\n f.write(self.reqs_content)\n\n\ndef make_fake_testtask(testtaskclass, *args, **kwargs):\n tempdir = tempfile.mkdtemp()\n logger.debug(\"Tempdir created: %s\" % tempdir)\n try:\n kwargs_ = {\"name\": \"fake_testtask\", \"dirname\": tempdir}\n kwargs_.update(kwargs)\n faketesttask = testtaskclass(*args, **kwargs_)\n faketesttask.make()\n return TempDirDeleter(faketesttask)\n except Exception as e:\n shutil.rmtree(tempdir)\n raise\n\ndef _do_an_import(interpreter, libname, output=True):\n run_import_fmt = '%s -c \"import %s\"'\n cmd = run_import_fmt % (interpreter, libname)\n logger.debug(\"Executing `%s`\" % cmd)\n subprocess.check_call(cmd)\n\ndef _ensure_bad_import(interpreter, libname):\n try:\n _do_an_import(interpreter=interpreter, libname=libname)\n except Exception:\n pass\n else:\n raise Exception(\"Import of `%s' succeeded but shouldn't have\" % libname)\n\nif __name__ == \"__main__\":\n optest.main()\n","sub_path":"tests/executor/test_testconfig.py","file_name":"test_testconfig.py","file_ext":"py","file_size_in_byte":10219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"292854305","text":"import unittest\nfrom getsub.downloader.zimuku import ZimukuDownloader\n\n\nclass TestZimuku(unittest.TestCase):\n def test_season_filter(self):\n video_name = \"supernatural.s08.mkv\"\n results = ZimukuDownloader().get_subtitles(video_name, sub_num=1)\n self.assertEqual(len(results), 1)\n\n def test_shooter_page(self):\n video_name = \"supernatural.s08e10.mkv\"\n ZimukuDownloader.search_url = ZimukuDownloader.site_url + \"/search?t=onlyst&q=\"\n results = ZimukuDownloader().get_subtitles(video_name, sub_num=1)\n self.assertEqual(len(results), 1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/integration/test_zimuku.py","file_name":"test_zimuku.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"651228663","text":"# Copyright 2019 The TensorNetwork Authors\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom experiments.MPS_classifier import classifier\r\ntf.enable_v2_behavior()\r\n\r\n\r\ndef test_random_initializer_shapes():\r\n x = classifier.random_initializer(2, 3)\r\n assert x.shape == (2, 3, 3)\r\n y = classifier.random_initializer(2, 3, boundary=True)\r\n assert y.shape == (2, 3)\r\n\r\ndef test_environment_shapes():\r\n env = classifier.Environment(5, 2, 7)\r\n assert list(env.vector.shape) == [2, 7]\r\n assert list(env.matrices.shape) == [4, 2, 7, 7]\r\n\r\ndef test_environment_network():\r\n env = classifier.Environment(5, 2, 7)\r\n data = tf.cast(np.random.random([20, 5, 2]), dtype=tf.float32)\r\n net, var_nodes, data_nodes = env.create_network(data[:, 1:], data[:, 0])\r\n net.check_correct()\r\n np.testing.assert_allclose(data_nodes[0].tensor.numpy(), data[:, 0])\r\n np.testing.assert_allclose(data_nodes[1].tensor.numpy(), data[:, 1:])\r\n\r\ndef test_environment_predict():\r\n env = classifier.Environment(5, 2, 7)\r\n data = tf.cast(np.random.random([20, 5, 2]), dtype=tf.float32)\r\n # Compare with serialized contraction\r\n pred = tf.einsum(\"bs,sr->br\", data[:, 0], env.vector)\r\n for i in range(4):\r\n pred = tf.einsum(\"bl,bs,slr->br\", pred, data[:, i + 1], env.matrices[i])\r\n np.testing.assert_allclose(env.predict(data[:, 1:], data[:, 0]).numpy(),\r\n pred.numpy(), atol=1e-5)\r\n\r\ndef test_mps_shapes():\r\n mps = classifier.MatrixProductState(11, 10, 2, 7)\r\n assert list(mps.tensors[0].shape) == [2, 7]\r\n assert list(mps.tensors[1].shape) == [4, 2, 7, 7]\r\n assert list(mps.tensors[2].shape) == [10, 7, 7]\r\n assert list(mps.tensors[3].shape) == [4, 2, 7, 7]\r\n assert list(mps.tensors[4].shape) == [2, 7]\r\n\r\ndef test_calculate_flx():\r\n mps = classifier.MatrixProductState(11, 10, 2, 7)\r\n data = tf.cast(np.random.random([20, 10, 2]), dtype=mps.dtype)\r\n # Compare with serialized contraction\r\n flx_left = tf.einsum(\"bs,sr->br\", data[:, 0], mps.tensors[0])\r\n flx_right = tf.einsum(\"bs,sr->br\", data[:, -1], mps.tensors[4])\r\n for i in range(4):\r\n flx_left = tf.einsum(\"bl,bs,slr->br\", flx_left, data[:, i + 1],\r\n mps.tensors[1][i])\r\n flx_right = tf.einsum(\"bl, bs,slr->br\", flx_right, data[:, 8 - i],\r\n mps.tensors[3][i])\r\n flx = tf.einsum(\"bl,olr,br->bo\", flx_left, mps.tensors[2], flx_right)\r\n np.testing.assert_allclose(flx.numpy(), mps.flx(data).numpy(), atol=1e-5)\r\n","sub_path":"experiments/MPS_classifier/classifier_test.py","file_name":"classifier_test.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"130715967","text":"BACKENDS = [\"numpy\", \"bohrium\"]\n\n\ndef get_backend(backend_name):\n if backend_name not in BACKENDS:\n raise ValueError(\"unrecognized backend {} (must be either of: {!r})\"\n .format(backend_name, BACKENDS))\n\n backend_modules = {backend: None for backend in BACKENDS}\n\n import numpy\n import warnings\n\n if numpy.__name__ == \"bohrium\":\n warnings.warn(\"Running veros with 'python -m bohrium' is discouraged \"\n \"(use '--backend bohrium' instead)\")\n import numpy_force\n numpy = numpy_force\n\n backend_modules[\"numpy\"] = numpy\n\n try:\n import bohrium\n backend_modules[\"bohrium\"] = bohrium\n except ImportError:\n warnings.warn(\"Could not import Bohrium\")\n\n if backend_modules[backend_name] is None:\n raise ValueError(\"Backend '{}' failed to import\".format(backend_name))\n return backend_modules[backend_name], backend_name\n\n\ndef get_vector_engine(np):\n try:\n runtime_info = np.bh_info.runtime_info()\n except AttributeError:\n return None\n if \"OpenCL\" in runtime_info:\n return \"opencl\"\n if \"CUDA\" in runtime_info:\n return \"cuda\"\n return \"openmp\"\n\n\ndef flush(vs):\n if vs.backend_name == \"numpy\":\n pass\n\n elif vs.backend_name == \"bohrium\":\n vs.backend.flush()\n\n else:\n raise RuntimeError(\"Unrecognized backend %r\" % vs.backend_name)\n","sub_path":"veros/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"536447068","text":"import osmnx as ox\nimport networkx as nx\nimport csv\nimport math\nimport sys, getopt\n\n\ndef ox_shortest_path(midmap, origin, destination):\n \"\"\"osmnx计算最短路径\"\"\"\n \"\"\"midmap:地图中点经纬度wgs64 origin:起始点经纬度wgs64 destination:终点经纬度wgs64\"\"\"\n # city = ox.gdf_from_place('闵行区,Shanghai,China')\n # ox.plot_shape(ox.project_gdf(city))\n\n # ox.plot_graph(ox.graph_from_place(\"闵行区,上海市,中国\"))\n\n G = ox.graph_from_point(midmap, distance=700, network_type='drive_service', \\\n simplify=False) # 取消简化 即显示非相交的节点 为了获取弯道的多个节点\n # ox.plot_graph(G)\n\n # 接著我們給定原點跟目的地的坐標,然後計算其node的編號\n origin_node = ox.get_nearest_node(G, origin)\n destination_node = ox.get_nearest_node(G, destination)\n\n # 计算最短路径\n route = nx.shortest_path(G, origin_node, destination_node)\n # ox.plot_graph_route(G, route)\n distance = nx.shortest_path_length(G, origin_node, destination_node)\n # print(route)\n # print(len(route))\n # x = G.nodes[route[0]] # 确定节点属性 'y': 31.0249842, 'x': 121.4355697, 'osmid': 1439718495, 'highway': 'crossing'}\n # path = [origin]\n path = []\n for i in range(len(route)):\n x = G.nodes[route[i]]['x']\n y = G.nodes[route[i]]['y']\n path.append((y, x))\n # path.append(destination)\n # print(path)\n # print(len(path))\n return path\n\n\ndef B2A_distance(latA, latB, lngA, lngB):\n \"\"\"计算B点到A点的距离\"\"\"\n cc = math.sin(90 - latA) * math.sin(90 - latB) * math.cos(lngB - lngA) + math.cos(90 - latA) * math.cos(\n 90 - latB) # 求出cos(c)\n distance = 6371000 * math.acos(cc) * (math.pi / 180) # 地球半径m * 弧度(acos(cos(c))* pi/180)\n\n return distance\n\n\ndef w2csv(path_lat, path_lng):\n \"\"\"将计算好的路径点写入csv\"\"\"\n fileHeader = [\"id\", \"lat\", \"lng\"]\n\n csvFile = open(\"./planned_path.csv\", \"w\", newline='') # newline='' 取消空行\n # writer = csv.writer(csvFile)\n\n # #写入的内容都是以列表的形式传入函数\n # writer.writerow(fileHeader)\n # for i in range(len(broad.M1)):\n # x = [i, broad.M1[i], broad.N2[i]]\n # writer.writerow(x)\n # 写入的内容都是以字典的形式传入函数\n dict_writer = csv.DictWriter(csvFile, fileHeader)\n dict_writer.writeheader()\n for i in range(len(path_lat)):\n x = ({\"id\": i, \"lat\": path_lat[i], \"lng\": path_lng[i]})\n dict_writer.writerow(x)\n csvFile.close()\n\n\ndef sp(midmap, origin, destination):\n \"\"\"处理osmnx收尾节点和起始点终点的优化问题\"\"\"\n path = ox_shortest_path(midmap, origin, destination)\n olat, olng = origin\n deslat, deslng = destination\n plat1, plng1 = path[0]\n plat2, plng2 = path[1]\n plat3, plng3 = path[-1]\n plat4, plng4 = path[-2]\n # 起点部分\n # 计算起点坐标到第二个节点(osmnx返回的最短路径节点数组不包含起始点终点坐标)的距离和第一个节点到第二个节点作比较\n start_d1 = B2A_distance(plat2, olat, plng2, olng)\n start_d2 = B2A_distance(plat2, plat1, plng2, plng1)\n # print(start_d1)\n # print(start_d2)\n # 若长则直接在头插入起点\n # 若短将第一个节点替换成起点\n if start_d1 > start_d2:\n path.insert(0, origin)\n else:\n path[0] = origin\n\n # 终点\n # 计算倒数第二个节点到终点的距离和倒数第二个节点到最后一个点的距离\n des_d1 = B2A_distance(deslat, plat4, deslng, plng4)\n des_d2 = B2A_distance(plat3, plat4, plng3, plng4)\n # print(des_d1)\n # print(des_d2)\n # 若长则直接在最后添加终点\n # 若短则将最后一个节点替换成终点\n if des_d1 > des_d2:\n path.append(destination)\n else:\n path[-1] = destination\n\n return path\n\n\ndef arg_change(arg):\n# a, b = arg.split(',')\n a = arg[0]\n b = arg[1]\n return (float(a), float(b))\n\n\ndef main(argv):\n\n #mlat = mlng = olat = olng = dlat = dlng = 1.11\n # 命令行执行.py文件 的预处理\n try:\n opts, args = getopt.getopt(argv, \"hm:n:o:p:d:e:\", [\"midmaplat=\", \"midmaplng=\", \"originlat=\", \"originlng=\", \"destinationlat=\", \"destinationlng=\"])\n except getopt.GetoptError:\n 
print('osmnx_test.py -m -n -o -p -d -e ')\n sys.exit(2)\n for opt, arg in opts:\n # print(opts)\n # print(arg)\n # print(opt)\n if opt == '-h':\n print('osmnx_test.py -m -n -o -p -d -e ')\n sys.exit()\n elif opt in ('-m', '--midmaplat'):\n mlat = float(arg)\n elif opt in ('-n', '--midmaplng'):\n mlng = float(arg)\n elif opt in ('-o', '--originlat'):\n olat = float(arg)\n elif opt in ('-p', '--originlng'):\n olng = float(arg)\n elif opt in ('-d', '--destinationlat'):\n dlat = float(arg)\n elif opt in ('-e', '--destinationlng'):\n dlng = float(arg)\n \n # origin = (31.0259956642637, 121.43515693151937)\n # destination = (31.03037729233816, 121.44042251662931)\n # midmap = (31.028714411736857, 121.43916318394845)\n # path = ox_shortest_path(midmap, origin, destination)\n midmap = (mlat, mlng)\n origin = (olat, olng)\n destination = (dlat, dlng)\n \n #print(type(mlat))\n #print(mlat)\n #print(midmap)\n #print(type(midmap))\n spath = sp(midmap, origin, destination)\n\n w2csv([i for i, j in spath], [j for i, j in spath])\n\n print(len(spath))\n print(spath)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","sub_path":"后端/backend-of-unmanned-vehicle/planned_path/osmnx_test.py","file_name":"osmnx_test.py","file_ext":"py","file_size_in_byte":5827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"451460899","text":"from sklearn.datasets import fetch_mldata\nmnist = fetch_mldata('MNIST original')\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\nimport numpy as np\n\nX = mnist.data\ny = mnist.target\nX4 = X[y==4,:]\nX9 = X[y==9,:]\ny4= y[y==4,]\ny9= y[y==9,]\nXtrain=np.concatenate((X4[0:4000,],X9[0:4000,]), axis=0)\nytrain=np.concatenate((y4[0:4000,],y9[0:4000,]), axis=0)\nXt=np.concatenate((X4[0:2000,],X9[0:2000,]), axis=0)\nyt=np.concatenate((y4[0:2000,],y9[0:2000,]), axis=0)\nXh=np.concatenate((X4[2000:4000,],X9[2000:4000,]), axis=0)\nyh=np.concatenate((y4[2000:4000,],y9[2000:4000,]), axis=0)\nXtest=np.concatenate((X4[4000:,],X9[4000:,]), axis=0)\nytest=np.concatenate((y4[4000:,],y9[4000:,]), axis=0)\nC=np.logspace(-10,5, num=10)\nP=np.zeros(len(C))\nm=2\nn=3\nprint (\"m/n= \", int(m/n))\n\nP=np.zeros(len(C))\ni=0\nprint (\"Entering Training For Loop for Polynomial Kernal Degree 1\")\n\nclf = svm.SVC(C=0.000464,kernel='poly', degree=1)\nclf.fit(Xt,yt)\nPe = 1 - clf.score(Xh,yh)\n#print(\"i= \", i, \" Pe= \", Pe, \" C= \", c, \"clf.n_support_\", clf.n_support_)\nP[i]=Pe\n\ni=np.argmin(P)\nC_opt=0.000464\nclf = svm.SVC(C=0.000464,kernel='poly', degree=1)\nclf.fit(Xtrain,ytrain)\nPe=1-clf.score(Xtest,ytest)\nsv = clf.support_vectors_\nprint (\"For Polynomial Kernel of Degree 1 optimal Value of C is \", C_opt,\" with Pe (for Test dataset) \", Pe)\nprint (\"Number of Support Vectors for each class is given by \", clf.n_support_)\n\nplt.figure(1)\nd = clf.decision_function(sv)\nd_sorted = np.argsort(np.abs(d))\nd_sorted = d_sorted[-16:]\nSV_top=sv[d_sorted,:]\noid=clf.support_\ny_sv=ytrain[oid[d_sorted]]\n\nf, axarr = plt.subplots(4, 4) \nfor p in range(4):\n for q in range(4):\n axarr[p, q].imshow(SV_top[p*4+q].reshape((28,28)), cmap='gray') \n axarr[p, q].set_title('{label}'.format(label=int(y_sv[p*4+q]))) \n \nplt.show()\n","sub_path":"mnist_test.py","file_name":"mnist_test.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"67204794","text":"from Agents.Government.Policy import Expenditure, Revenue\n\n\n# This class will be instantiated once and will populate its\n# revenues and expenditures list with objects of the taxes and expenditures\n# these are used in order to contain their own methods of determining the\n# outcome of a change in tax or expenditure\nclass Government:\n def __init__(self, SimulationThread, FormControls):\n self.revenues = []\n self.expenditures = []\n\n self.revenue = 0\n self.expenditure = 0\n\n # Sends status of generation back to user GUI\n def update_progress_bar():\n FormControls.progress_status += 1\n FormControls.progress_var.set(FormControls.progress_status)\n FormControls.progress.update()\n\n for index, value in enumerate(SimulationThread.revenue.items()):\n identifier = value[0]\n amount = value[1]\n\n if identifier not in ('Borrowing', 'Other'):\n if identifier == 'Income tax':\n self.revenues.append(Revenue.IncomeTax(percentage=amount))\n elif identifier == 'VAT':\n self.revenues.append(Revenue.VAT(percentage=amount))\n elif identifier == 'Corporation tax':\n self.revenues.append(Revenue.CorporationTax(percentage=amount))\n elif identifier == 'National insurance':\n self.revenues.append(Revenue.NationalInsurance(percentage=amount))\n else:\n self.revenues.append(Revenue.Tax(identifier, amount))\n else:\n self.revenues.append(Revenue.Revenue(identifier, amount=amount))\n\n SimulationThread.revenue[identifier] = int(self.revenues[index].get_revenue())\n\n if identifier == 'Borrowing':\n gen_status = 'Borrowing £%sbn to finance spending' % amount\n FormControls.generation_status.set(gen_status)\n elif identifier not in ('Excise duties', 'Other', 'Council tax'):\n gen_status = 'Applying {} at {}%'.format(identifier, amount)\n FormControls.generation_status.set(gen_status)\n\n if identifier != 'Borrowing':\n self.revenue += SimulationThread.revenue[identifier]\n\n update_progress_bar()\n\n for index, value in enumerate(SimulationThread.expenditure.items()):\n identifier = value[0]\n amount = value[1]\n\n demand_side = ('Public order & safety', 'Personal social services', 'Social protection including tax credits', 'Other', 'Defence', 'Health')\n supply_side = ('Education', 'Transport', 'Industry, agriculture & employment', 'Housing & environment')\n\n if identifier in demand_side:\n self.expenditures.append(Expenditure.DemandSide(identifier, amount))\n gen_status = 'Gov. spending on {}: £{}bn'.format(identifier, amount)\n FormControls.generation_status.set(gen_status)\n elif identifier in supply_side:\n self.expenditures.append(Expenditure.SupplySide(identifier, amount))\n gen_status = 'Gov. 
investment in {}: £{}bn'.format(identifier, amount)\n FormControls.generation_status.set(gen_status)\n elif (identifier == 'Debt interest') or (identifier == 'Debt repayment'):\n self.expenditures.append(Expenditure.Expenditure(identifier, amount))\n\n self.expenditure += int(self.expenditures[index].amount)\n update_progress_bar()\n\n if self.expenditure > self.revenue:\n self.borrowing = self.expenditure - self.revenue\n self.revenues.append(Revenue.Revenue('Borrowing', self.borrowing))\n SimulationThread.revenue['Borrowing'] = self.borrowing\n SimulationThread.expenditure['Debt repayment'] = 0\n try:\n assert self.expenditure - self.revenue == self.borrowing\n print('Government borrowing: £{}bn'.format(self.borrowing))\n except AssertionError:\n print('Error calculating Government Borrowing')\n elif self.expenditure < self.revenue:\n self.borrowing = 0\n for i in range(len(self.revenues)):\n if self.revenues[i].identifier == 'Borrowing':\n self.revenues[i].amount = 0\n SimulationThread.revenue['Borrowing'] = 0\n SimulationThread.expenditure['Debt repayment'] = self.revenue - self.expenditure\n\n def calc_revenue(self, SimulationThread):\n self.revenue = 0\n\n for index, value in enumerate(SimulationThread.revenue.items()):\n identifier = value[0]\n amount = value[1]\n if identifier == 'Borrowing':\n self.revenues[index].amount = 0\n SimulationThread.revenue[identifier] = 0\n else:\n self.revenues[index].percent = amount\n SimulationThread.revenue[identifier] = int(self.revenues[index].get_revenue())\n self.revenue += SimulationThread.revenue[identifier]\n\n def calc_expenditure(self, SimulationThread):\n self.expenditure = 0\n for index, value in enumerate(SimulationThread.expenditure.items()):\n identifier = value[0]\n amount = value[1]\n if identifier == 'Debt repayment':\n self.expenditures[index].amount = 0\n SimulationThread.expenditure['Debt repayment'] = 0\n else:\n self.expenditures[index].amount = amount\n self.expenditure += int(self.expenditures[index].amount)\n\n def calc_borrowing(self, SimulationThread):\n\n if self.expenditure > self.revenue:\n self.borrowing = self.expenditure - self.revenue\n for i in range(len(self.revenues)):\n if self.revenues[i].identifier == 'Borrowing':\n self.revenues[i].amount = self.borrowing\n\n SimulationThread.revenue['Borrowing'] = self.borrowing\n try:\n assert self.expenditure - self.revenue == self.borrowing\n print('Government borrowing: £{}bn'.format(self.borrowing))\n except AssertionError:\n print('Error calculating Government Borrowing')\n\n elif self.expenditure < self.revenue:\n self.borrowing = 0\n for i in range(len(self.revenues)):\n if self.revenues[i].identifier == 'Borrowing':\n self.revenues[i].amount = 0\n for i in range(len(self.expenditures)):\n if self.expenditures[i].identifier == 'Debt repayment':\n self.expenditures[i].amount = self.revenue - self.expenditure\n SimulationThread.expenditure['Debt repayment'] = self.revenue - self.expenditure\n","sub_path":"Agents/Government/Government.py","file_name":"Government.py","file_ext":"py","file_size_in_byte":6819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"548878088","text":"# Copyright (c) 2019 NVIDIA Corporation\nimport sys\nimport os\nimport pandas as pd\nimport subprocess\nimport json\nimport argparse\n\nfrom tools.System.config import cfg\nfrom tools.filetools import file_exists\n\ndef tsv_to_dataset(path, tsv_files, manifest_file, prefix):\n manifests = []\n for tfile in tsv_files:\n tsv_file = os.path.join(path, tfile)\n assert(file_exists(tsv_file))\n print('Processing: {0}'.format(tsv_file))\n dt = pd.read_csv(tsv_file, sep='\\t')\n for index, row in dt.iterrows():\n try:\n entry = {}\n wav_dir = os.path.join(path, \"wavs\", prefix)\n os.system(\"mkdir -p {0}\".format(wav_dir))\n\n mp3_file = os.path.join(path,\"clips\",row['path'])\n wav_file = os.path.join(wav_dir, row['path'].replace(\".mp3\",\".wav\"))\n\n if not os.path.exists(wav_file):\n subprocess.check_output(\"sox -v 0.98 {0} -c 1 -r 16000 {1}\".format(\n mp3_file, wav_file), shell=True)\n duration = subprocess.check_output(\"soxi -D {0}\".format(wav_file),\n shell=True)\n entry['audio_filepath'] = wav_file\n entry['duration'] = float(duration)\n entry['text'] = row['sentence'].lower()\n manifests.append(entry)\n except:\n print(\"SOMETHING WENT WRONG - IGNORING ENTRY\")\n\n manifest_file = os.path.join(path, manifest_file)\n print(\"Saving dataset to {}\".format(manifest_file))\n with open(manifest_file, 'w') as fout:\n for m in manifests:\n fout.write(json.dumps(m) + '\\n')\n print('Done!')\n\ndef main():\n parser = argparse.ArgumentParser(description='Build NeMo ready dataset from tsv and mp3 files')\n parser.add_argument('--path', type=str, required=True,\n help='Directory of dataset files')\n parser.add_argument('--tsv_files', type=str, required=True,\n help='List of tsv files to convert')\n parser.add_argument('--output', type=str, required=True,\n help='Output dataset (.json) filename')\n parser.add_argument('--id', type=str, required=True,\n help='Prefix name to id dataset')\n args = parser.parse_args()\n\n tsvs=args.tsv_files.split(\",\")\n tsv_to_dataset(args.path, tsvs, args.output, args.id)\n\nif __name__ == \"__main__\":\n main()","sub_path":"tools/NeMo/.ipynb_checkpoints/create_common_voice_dataset-checkpoint.py","file_name":"create_common_voice_dataset-checkpoint.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"589552720","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 7 08:19:56 2018\n\n@author: dolson\n\"\"\"\n###################################################################################################################################\n# Import Libraries\n###################################################################################################################################\nimport arcpy\nimport time\n# import psutil\n\n###################################################################################################################################\n# Mosaic Images\n###################################################################################################################################\n# Get the start time\nstart_time = time.time()\n\n# set the workspace\narcpy.env.workspace = r\"\\\\166.2.126.25\\teui1\\4_Derek\\Region_8_Topographic_Derivatives\\AR\\AR_DEMs\"\n\n# Set location of rasters to mosaic\npath = r\"\\\\166.2.126.25\\teui1\\4_Derek\\Region_8_Topographic_Derivatives\\AR\\AR_DEMs\"\n\n# Get a list of the rasters to mosaic\nrasterList = arcpy.ListRasters(\"*\", \"TIF\")\n\n# set the out raster name\noutRas = \"AR_mosaic.img\"\n\n# Mosaic the rasters\narcpy.MosaicToNewRaster_management(input_rasters = rasterList, output_location = path, raster_dataset_name_with_extension = outRas, pixel_type = \"32_BIT_Float\", cellsize= \"1\", number_of_bands= \"1\")\n\n# Get the total time in minutes and seconds\nend_time = time.time()\ntime_minutes = (end_time - start_time) / 60\nprint(time_minutes, \"minutes\")\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n# ###################################################################################################################################\n# # Clip to HT admin bound\n# ###################################################################################################################################\n# # Get the start time\n# start_time2 = time.time()\n#\n# # Get the raster to be clipped\n# clipRas = path + outRas\n#\n# # Get the admin bound\n# clipShp = \"F:/HT_LTA_Development/Imagery/Landsat/AdminBound/AdminBound.shp\"\n#\n# # Set the output image name\n# clipOut = clipRas.split(\".\")[0] + \"adminBound.img\"\n#\n# # Clip the image\n# arcpy.Clip_management(clipRas, \"#\" , clipOut, clipShp, \"#\", \"ClippingGeometry\", \"NO_MAINTAIN_EXTENT\")\n#\n# # Get the total time in minutes and seconds\n# end_time2 = time.time()\n# time_minutes = (end_time2 - start_time2) / 60\n# print(time_minutes, \"minutes\")\n# print(\"--- %s seconds ---\" % (time.time() - start_time2))\n#\n# ###################################################################################################################################\n# # mosaic with gdal\n# ###################################################################################################################################\n# # Import libraries\n# import gdal, glob, os, subprocess, time\n# from subprocess import call\n#\n# # Get the start time\n# start_time = time.time()\n#\n# outNameList = list()\n# fileList = list()\n# for root, dirs, files in os.walk('F:/HT_LTA_Development/Imagery/Landsat/ToMosaic'):\n# for file in files:\n# if file.endswith('.tif'):\n# fileName = os.path.join(root, file)\n# fileList.append(fileName)\n#\n#\n# driver = gdal.GetDriverByName(\"VRT\")\n#\n# vrt_options = gdal.BuildVRTOptions(resampleAlg='nearest', addAlpha=False)\n# vrt = gdal.BuildVRT('F:/HT_LTA_Development/Imagery/Landsat/ToMosaic/temp.vrt', 
glob.glob(\"F:/HT_LTA_Development/Imagery/Landsat/ToMosaic/*.img\"), options = vrt_options)\n#\n# gdal.Info(vrt)\n#\n# driver.CreateCopy('E:/Data/HT_Landsat_Mosaic_Data_from_GEE/temp4.vrt', vrt)\n# gdalTranslate = r'C:\\OSGeo4W64\\bin\\gdal_translate.exe'\n# vrt = \"F:/HT_LTA_Development/Imagery/Landsat/ToMosaic/temp.vrt\"\n# dst = \"F:/HT_LTA_Development/Imagery/Landsat/ToMosaic/gdalMosaic.tif\"\n# cmd = \"-ot int16 -outsize 30 30\"\n#\n#\n# def youCanQuoteMe(item):\n# return \"\\\"\" + item + \"\\\"\"\n#\n#\n# fullCmd = ' '.join([gdalTranslate, youCanQuoteMe(vrt), youCanQuoteMe(dst)])\n#\n# subprocess.call(fullCmd)\n#\n# # Get the total time in minutes and seconds\n# end_time = time.time()\n# time_minutes = (end_time - start_time) / 60\n# print(time_minutes, \"minutes\")\n# print(\"--- %s seconds ---\" % (time.time() - start_time))\n#\n# # Write times to text file\n# text_file = open(\"F:/HT_LTA_Development/Imagery/Landsat/ToMosaic/Raster_Mosaic_Time.txt\", \"w\")\n# text_file.write(\"Mosaic Time: %s\" % time_minutes)\n# text_file.close()\n\n\n\n\n\n\n","sub_path":"arcpy_mosaic_rasters_time.py","file_name":"arcpy_mosaic_rasters_time.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"50221680","text":"import sys\nimport math\n\ni = 0;\nx1 = int(sys.argv[1])\nx2 = int(sys.argv[2])\n#e = 0.01\ne = 0.001\neuler = 2.71828182845904523536\n\n#x1 = x1/10\n#x2 = x2/10\ndef Metodo(x1,xi,i):\n\t#Ejemplo libreta\n\t#Puntos (0,1)\n\t#fx1 = pow(euler,-x1) - x1\n\t#fxi = pow(euler,-xi) - xi\n\t#Ejemplo libreta\n\t#Puntos (1.1,1.4)\n\t#fx1 = math.sin(pow(euler,x1))\n\t#fxi = math.sin(pow(euler,xi))\n\n\t#Puntos (0,43) <- No cumple las condiciones (?\n\t#fx1 = pow(x1,2) - 43\n\t#fxi = pow(xi,2) - 43\n\t#Puntos (2,3)\n\tfx1 = pow(x1,3) - (2*x1) - 5\n\tfxi = pow(xi,3) - (2*xi) - 5\n\txi_next = (x1*fxi-xi*fx1)/(fxi-fx1)\n\tdelta = abs((xi_next-xi)/xi)\n\tif(delta > e):\n\t\txi = xi_next\n\t\ti+=1\n\t\tprint(str(i) + \".- x1 = \" + str(x1) + \"\\txi = \" + str(xi))\n\t\tMetodo(x1,xi,i)\n\telse:\n\t\tprint(\"La raiz es: \" + str(xi_next))\n\n\nMetodo(x1,x2,i)\n","sub_path":"Métodos numéricos/metodo_interpolacion_lineal.py","file_name":"metodo_interpolacion_lineal.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"21098666","text":"from GetActualSignal import GetActualSignal\nfrom ThresholdSelect import ThSelect\nfrom ThresholdFunction import ThFuction\nimport numpy as np\nimport pywt\nimport matplotlib.pyplot as plt\nfrom DenoiseResult import DenoiseRsult\nfrom WtProcess import DWTP\nfrom AcSNR import AcSNR\n\nif __name__ == '__main__':\n ad = GetActualSignal()\n ad.getwhvalue()\n od1 = ad.selectvalue(start=0, end=23552)\n dwt = DWTP(od1)\n coeffs = dwt.dwtdec(wtname='db4', delevel=4)\n nscoeffs = dwt.thprocess(coeffs, thf='soft')\n nhcoeffs = dwt.thprocess(coeffs, thf='hard')\n spd = dwt.dwtrec(nscoeffs, wtname='db4')\n hpd = dwt.dwtrec(nhcoeffs, wtname='db4')\n ad.outputdata(startidex=0, ogdata=od1, pddata=spd)\n\n ssm = DenoiseRsult([], spd).smooth(od1)\n hsm = DenoiseRsult([], hpd).smooth(od1)\n slrepv = DenoiseRsult([], spd).lrepv(od1, 128)\n hlrepv = DenoiseRsult([], hpd).lrepv(od1, 128)\n\n print('ssm = {0}'.format(ssm))\n print('hsm = {0}'.format(hsm))\n print('slrepv = {0}'.format(slrepv))\n print('hlrepv = {0}'.format(hlrepv))\n\n x = [x for x in range(4096)]\n plt.figure()\n plt.subplot(3, 1, 1)\n plt.title('Original signal')\n plt.plot(x, od1)\n plt.subplot(3, 1, 2)\n plt.title('Signal processed by st')\n plt.plot(x, spd)\n plt.subplot(3, 1, 3)\n plt.title('Signal processed by ht')\n plt.plot(x, hpd)\n\n plt.tight_layout()\n plt.show()","sub_path":"Experiment 2 actual signal/t2.py","file_name":"t2.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"456493917","text":"import pickle\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Input\nfrom keras.utils import plot_model\n\n\nwith open('pwm_data.pkl', 'rb') as f:\n pwm_data = pickle.load(f)\n x = pwm_data\n\nwith open('vel_data.pkl', 'rb') as g:\n velo_data = pickle.load(g)\n vel_data = velo_data[0:-3]\n y = vel_data\n\nwith open('ster_data.pkl', 'rb') as h:\n ster_data = pickle.load(h)\n z = ster_data\n\nmodel = Sequential()\nmodel.add(Dense(2, input_dim=1, activation='relu'))\nmodel.add(Dense(2, activation='relu'))\nmodel.add(Dense(1, activation='linear'))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nmodel.fit(x, y, epochs=850, verbose=0)\n\n# save model\n\nmodel.save(\"vel_model.h5\")\nprint(\"Model saved\")\n","sub_path":"model/Model_with_data/vel_model.py","file_name":"vel_model.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"529801732","text":"##############################################################################\n#\n# Copyright (C) Zenoss, Inc. 2015, all rights reserved.\n#\n# This content is made available according to terms specified in\n# License.zenoss under the directory where your Zenoss product is installed.\n#\n##############################################################################\n\nfrom __future__ import unicode_literals\n\nimport json\nfrom mock import Mock, patch\n\nfrom Products.Five import zcml\nfrom Products.ZenTestCase.BaseTestCase import BaseTestCase\n\nfrom .create_fake_devices import get_device, add_interface, random_mac\n\nimport ZenPacks.zenoss.Layer2\nfrom ZenPacks.zenoss.Layer2.network_tree import\\\n (get_connections, serialize, NodeAdapter, get_connections_json)\nfrom ZenPacks.zenoss.Layer2 import connections\n\n\nclass TestSerialize(BaseTestCase):\n\n def test_exception(self):\n self.assertEqual(json.loads(serialize(Exception('test'))), dict(\n error='test'\n ))\n\n def test_text(self):\n self.assertEqual(json.loads(serialize('test')), dict(\n error='test'\n ))\n\n\nclass TestGetConnections(BaseTestCase):\n def afterSetUp(self):\n super(TestGetConnections, self).afterSetUp()\n zcml.load_config('configure.zcml', ZenPacks.zenoss.Layer2)\n connections.clear()\n\n @patch('ZenPacks.zenoss.Layer2.network_tree.get_connections')\n @patch('ZenPacks.zenoss.Layer2.network_tree.serialize')\n def test_get_connections_json(self, mock_serialize, mock_get_connections):\n self.data_root = Mock()\n self.data_root.Devices.findDevice.return_value = 'TEST'\n self.data_root.dmd.getObjByPath.return_value = None\n get_connections_json(self.data_root, 'TEST')\n self.assertTrue(mock_get_connections.called)\n mock_get_connections.assert_called_with('TEST', 1, None, False, False)\n self.assertTrue(mock_serialize.called)\n\n def test_get_vlan_connections_with_unaware_node(self):\n a = get_device('a', self.dmd)\n mac_a = random_mac()\n mac_b = random_mac()\n b = get_device('b', self.dmd)\n\n # make a look like a switch\n add_interface(\n a, macaddress=mac_a, clientmacs=[mac_b],\n vlans=['vlan1']\n )\n\n # make b look like a server\n add_interface(b, macaddress=mac_b, clientmacs=[], vlans=[])\n\n connections.update_node(a)\n connections.update_node(b)\n\n res = get_connections(a, depth=3, layers=['vlan1'])\n self.assertIn(\"links\", res)\n self.assertEqual(len(res[\"links\"]), 1)\n self.assertTrue(\"color\" in res[\"links\"][0])\n self.assertTrue(\"vlan1\" in res[\"links\"][0][\"color\"])\n self.assertTrue(\"layer2\" in res[\"links\"][0][\"color\"])\n\n\nclass TestNodeAdapter(BaseTestCase):\n def afterSetUp(self):\n obj = Mock()\n obj.macaddress = 'TE:ST:12:34:56:78'\n obj.getPrimaryUrlPath.return_value = '/zport/dmd/Devices/Test'\n obj.getEventSummary.return_value = [\n ['zenevents_5_noack noack', 0, 1],\n ['zenevents_4_noack noack', 0, 0],\n ['zenevents_3_noack noack', 0, 0],\n ['zenevents_2_noack noack', 0, 0],\n ['zenevents_1_noack noack', 0, 0]]\n self.instance = NodeAdapter(obj, '', {})\n self.properties = dir(self.instance)\n\n def test_instance_attributes(self):\n self.assertIn('id', self.properties)\n self.assertIn('path', self.properties)\n self.assertIn('name', self.properties)\n self.assertIn('image', self.properties)\n\n def test_path(self):\n self.assertEqual(self.instance.path, '/zport/dmd/Devices/Test')\n\n def test_name(self):\n self.assertEqual(self.instance.name, 'TE:ST:12:34:56:78')\n obj = Mock(spec=['getNetworkName'])\n obj.getNetworkName.return_value = 'network'\n 
self.assertEqual(NodeAdapter(obj, '', {}).name, 'network')\n\n def test_image(self):\n self.assertEqual(\n self.instance.image,\n '/++resource++ZenPacks_zenoss_Layer2/img/link.png')\n\n\ndef test_suite():\n from unittest import TestSuite, makeSuite\n suite = TestSuite()\n suite.addTest(makeSuite(TestSerialize))\n suite.addTest(makeSuite(TestGetConnections))\n suite.addTest(makeSuite(TestNodeAdapter))\n return suite\n","sub_path":"ZenPacks/zenoss/Layer2/tests/test_network_tree.py","file_name":"test_network_tree.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"575401143","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 22 10:09:21 2015\n\n@author: jordan\n\"\"\"\nfrom Serre2 import *\nfrom scipy import *\nimport csv\nimport os\nfrom numpy.linalg import norm \nfrom matplotlib.pyplot import plot,ylim\nfrom scipy.special import ellipj,ellipk,ellipe\n\nfrom scipy.optimize import bisect\n \ndef copyarraytoC(a):\n n = len(a)\n b = mallocPy(n)\n for i in range(n):\n writetomem(b,i,a[i])\n return b\n \ndef copywritearraytoC(a,b):\n n = len(a)\n for i in range(n):\n writetomem(b,i,a[i])\n \ndef copyarrayfromC(a,n):\n b = [0]*n\n for i in range(n):\n b[i] = readfrommem(a,i)\n \n return b\n \ndef makevar(sx,ex,dx,st,et,dt): \n x = arange(sx, ex, dx)\n t = arange(st, et, dt)\n \n return x,t \n\n \n#gives exact up to linears, so is second order accurate huzzah \ndef getGfromupy(h,u,bed,u0,u1,h0,h1,b0,b1,dx):\n idx = 1.0 / dx\n ithree = 1.0 / 3.0\n \n n = len(h)\n\n G = zeros(n)\n \n for i in range(1,n-1):\n th = h[i]\n thx = 0.5*idx*(h[i+1] - h[i-1])\n tbx = 0.5*idx*(bed[i+1] - bed[i-1])\n tbxx = idx*idx*(bed[i+1] -2*bed[i] + bed[i-1])\n \n D = th + th*thx*tbx + 0.5*th*th*tbxx + th*tbx*tbx\n \n ai = -ithree*idx*idx*th*th*th + 0.5*idx*th*th*thx\n bi = D + 2.0*ithree*idx*idx*th*th*th\n ci = -ithree*idx*idx*th*th*th - 0.5*idx*th*th*thx\n \n G[i] = ai*u[i-1] + bi*u[i] + ci*u[i+1]\n \n #boundary \n #i=0\n i=0\n th = h[i]\n thx = 0.5*idx*(h[i+1] - h0)\n tbx = 0.5*idx*(bed[i+1] - b0)\n tbxx = idx*idx*(bed[i+1] -2*bed[i] + b0)\n \n D = th + th*thx*tbx + 0.5*th*th*tbxx + th*tbx*tbx\n \n ai = -ithree*idx*idx*th*th*th + 0.5*idx*th*th*thx\n bi = D + 2.0*ithree*idx*idx*th*th*th\n ci = -ithree*idx*idx*th*th*th - 0.5*idx*th*th*thx\n \n G[i] = ai*u0 + bi*u[i] + ci*u[i+1] \n #i = n-1\n i = n-1\n\n th = h[i]\n thx = 0.5*idx*(h1 - h[i-1])\n tbx = 0.5*idx*(b1 - bed[i-1])\n tbxx = idx*idx*(b1 -2*bed[i] + bed[i-1])\n \n D = th + th*thx*tbx + 0.5*th*th*tbxx + th*tbx*tbx\n \n ai = -ithree*idx*idx*th*th*th + 0.5*idx*th*th*thx\n bi = D + 2.0*ithree*idx*idx*th*th*th\n ci = -ithree*idx*idx*th*th*th - 0.5*idx*th*th*thx\n\n G[i] = ai*u[i-1] + bi*u[i] + ci*u1\n \n return G \n\n\ndef ForcedbedM(x,t,a0,a1,a2,a3,a4,a5,a6,a7,g,dx):\n n = len(x)\n h = zeros(n)\n w = zeros(n)\n b= zeros(n)\n u = zeros(n)\n G = zeros(n)\n \n for i in range(n):\n phi = x[i] - a2*t \n \n \n \n h[i] = a0 + a1*exp(-(phi - a3)**2/(2*a4))\n u[i] = a5*exp(-(phi - a3)**2/(2*a4))\n b[i] = a6*sin(a7*x[i])\n w[i] = h[i] + b[i]\n \n hxi = -a1/a4*(phi - a3)*exp(-(phi - a3)**2/(2*a4))\n uxi = -a5/a4*(phi - a3)*exp(-(phi - a3)**2/(2*a4))\n\n uxxi = -a5/(a4**2)*exp(-(phi - a3)**2/(2*a4))*(a4 - ((phi) - a3)**2)\n \n bxi = a6*a7*cos(a7*x[i]) \n bxxi = -a6*a7**2*sin(a7*x[i])\n \n \n G[i] = u[i]*h[i]*(1 + hxi*bxi + 0.5*h[i]*bxxi + bxi*bxi) - h[i]*h[i]*hxi*uxi - h[i]*h[i]*h[i]/3.0*uxxi\n \n return h,u,G,b,w\n \n #Forcing Problem \nwdir = \"../../../../../../../data/raw/Forced/FDVM2Bed/GaussBedAll/RTNbed/\" \n\nif not os.path.exists(wdir):\n os.makedirs(wdir)\n\nfor j in range(15):\n g =9.81\n\n a0 = 1\n a1 = 0.2\n a2 = 1.3\n a3 = 0.4\n a4 = 1.5\n a5 = 0.6\n a6 = 0.1\n a7 = 0.5\n \n width = 50\n \n g = 9.81\n \n dx = width / (2.0)**(j)\n l = 0.5 / (a5 + sqrt(g*(a0 + a1)))\n dt = l*dx\n startx = -width/2\n endx = width/2\n startt = 0.0\n endt = 0.1\n \n \n t = startt\n \n nBCn = 3\n nBC = 6\n \n #x,t = makevar(startx,endx +0.1*dx,dx,startt,endt,dt)\n \n x = arange(startx,endx +0.1*dx, dx)\n \n xbeg = arange(startx - nBC*dx ,x[0], dx)\n xend = arange(x[-1] + dx ,x[-1] + (nBC+ 0.1)*dx, dx)\n \n\n ts = []\n \n n = 
len(x) \n theta = 2\n \n gap = int(1.0/dt)\n nBCn = 3\n nBC = 6\n \n h,u,G,b,w = ForcedbedM(x,t,a0,a1,a2,a3,a4,a5,a6,a7,g,dx)\n #bM = cos(a5*x)\n \n \n print(t)\n hbeg,ubeg,Gbeg,bbeg,wbeg = ForcedbedM(xbeg,t,a0,a1,a2,a3,a4,a5,a6,a7,g,dx)\n hend,uend,Gend,bend,wend = ForcedbedM(xend,t,a0,a1,a2,a3,a4,a5,a6,a7,g,dx)\n \n ubc_c = mallocPy(n+2*nBCn)\n Gbc_c = mallocPy(n+2*nBCn)\n hbc_c = mallocPy(n+2*nBCn)\n \n\n h_c = copyarraytoC(h)\n G_c = copyarraytoC(G)\n x_c = copyarraytoC(x)\n b_c = copyarraytoC(b)\n u_c = mallocPy(n)\n \n hbeg_c = copyarraytoC(hbeg)\n hend_c = copyarraytoC(hend)\n wbeg_c = copyarraytoC(wbeg)\n wend_c = copyarraytoC(wend)\n bbeg_c = copyarraytoC(bbeg)\n bend_c = copyarraytoC(bend)\n Gbeg_c = copyarraytoC(Gbeg)\n Gend_c = copyarraytoC(Gend) \n ubeg_c = copyarraytoC(ubeg)\n uend_c = copyarraytoC(uend)\n\n t = 0.0\n ts.append(t)\n #Just an FEM solve here\n while t < endt: \n evolvewrapBC(G_c,h_c,b_c,hbeg_c,hend_c,ubeg_c,uend_c,Gbeg_c,Gend_c,hbeg_c,hend_c,ubeg_c,uend_c,Gbeg_c,Gend_c,bbeg_c,bend_c,g,dx,dt, n, nBC, nBCn,theta, hbc_c,Gbc_c,ubc_c,x_c,t,a0,a1,a2,a3,a4,a5,a6,a7)\n t = t + dt\n ts.append(t)\n print(t)\n \n \n hC = copyarrayfromC(h_c,n)\n GC = copyarrayfromC(G_c,n) \n \n getufromG(h_c,G_c,b_c,ubeg[-1],uend[0],hbeg[-1],hend[0], bbeg[-1],bend[0], dx ,n,u_c)\n\n uC = copyarrayfromC(u_c,n)\n \n hA,uA,GA,bA,wA = ForcedbedM(x,t,a0,a1,a2,a3,a4,a5,a6,a7,g,dx)\n \n hnorm = norm(hC - hA, ord=2)/ norm(hC, ord=2)\n unorm = norm(uC - uA, ord=2)/ norm(uC, ord=2)\n Gnorm = norm(GC - GA, ord=2)/ norm(GC, ord=2)\n \n \n \n s = wdir + \"h.dat\"\n with open(s,'a') as file1:\n s =\"%3.8f%5s%1.15f\\n\" %(dx,\" \",hnorm)\n file1.write(s)\n \n s = wdir + \"G.dat\"\n with open(s,'a') as file1:\n s =\"%3.8f%5s%1.15f\\n\" %(dx,\" \",Gnorm)\n file1.write(s) \n \n s = wdir + \"u.dat\"\n with open(s,'a') as file1:\n s =\"%3.8f%5s%1.15f\\n\" %(dx,\" \",unorm)\n file1.write(s) \n \n deallocPy(h_c)\n deallocPy(G_c)\n deallocPy(u_c)\n deallocPy(b_c)\n \n deallocPy(ubc_c)\n deallocPy(hbc_c)\n deallocPy(Gbc_c) \n \n deallocPy(hbeg_c)\n deallocPy(Gbeg_c)\n deallocPy(ubeg_c)\n deallocPy(hend_c)\n deallocPy(Gend_c)\n deallocPy(uend_c)\n deallocPy(wbeg_c)\n deallocPy(wend_c)\n\n\n\n\n \n\"\"\"\n### Cnoidal wave with BC\nwdatadir = \"../../../data/raw/cnoidaltestfixlong/o2/\"\n\nif not os.path.exists(wdatadir):\n os.makedirs(wdatadir)\n \ns = wdatadir + \"savenorms.txt\"\nwith open(s,'a') as file1:\n writefile = csv.writer(file1, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\n writefile.writerow([\"dx\",'theta','l1h', 'l1u']) \nfor ij in range(13,14):\n a0 = 1.0\n a1 = 0.1\n k = 0.99\n \n ### WAVE LENGTH\n \n m = k*k\n Kc = sqrt(float(3*a1) / (4*a0*(a0 + a1)*(a0 + (1-m)*a1)))\n \n lamb = 2*ellipk(m) / Kc\n \n g = 9.81\n dx = 100.0 / 2**ij\n Cr = 0.5\n l = Cr / (sqrt(g*(a0 +a1) ))\n dt = l*dx\n theta = 2\n startx = 0.0\n endx = 9*lamb\n startt = 0.0\n endt = 100 + dt \n \n wdir = wdatadir + str(ij) + \"/\"\n \n if not os.path.exists(wdir):\n os.makedirs(wdir)\n \n \n nBCn = 3\n nBC = 6\n \n xbc,t = makevar(startx - nBC*dx,endx + nBC*dx,dx,startt,endt,dt)\n \n x = xbc[nBC: -nBC]\n xbeg = xbc[:nBC]\n xend = xbc[-nBC:] \n \n n = len(x)\n m = len(t)\n \n gap = int(10.0/dt)\n \n t0 = 0.0\n \n \n #initial conditions for time steps\n tij = 0.0\n hBC,uBC,GBC,bedBC = cnoidalwaves(xbc,tij,dx,a0,a1,g,k)\n h = hBC[nBC:-nBC]\n h0 = hBC[:nBC]\n h1 = hBC[-nBC:]\n u = uBC[nBC:-nBC]\n u0 = uBC[:nBC]\n u1 = uBC[-nBC:]\n G = GBC[nBC:-nBC]\n G0 = GBC[:nBC]\n G1 = GBC[-nBC:]\n bed = bedBC[nBC:-nBC]\n b0 = 
bedBC[:nBC]\n b1 = bedBC[-nBC:]\n \n h_c = copyarraytoC(h)\n G_c = copyarraytoC(G)\n bed_c = copyarraytoC(bed)\n x_c = copyarraytoC(x)\n u_c = mallocPy(n)\n\n \n hBCh,uBCh,GBCh,bedBCh = cnoidalwaves(xbc,tij + dt,dx,a0,a1,g,k)\n h0h = hBCh[:nBC]\n h1h = hBCh[-nBC:]\n u0h = uBCh[:nBC]\n u1h = uBCh[-nBC:]\n G0h = GBCh[:nBC]\n G1h = GBCh[-nBC:]\n b0h = bedBCh[:nBC]\n b1h = bedBCh[-nBC:]\n \n un_c = mallocPy(n+2*nBCn)\n Gn_c = mallocPy(n+2*nBCn)\n hn_c = mallocPy(n+2*nBCn)\n \n h0_c = mallocPy(nBC)\n h1_c = mallocPy(nBC)\n u0_c = mallocPy(nBC)\n u1_c = mallocPy(nBC)\n G0_c = mallocPy(nBC)\n G1_c = mallocPy(nBC)\n b0_c = mallocPy(nBC)\n b1_c = mallocPy(nBC)\n \n h0h_c = mallocPy(nBC)\n h1h_c = mallocPy(nBC)\n u0h_c = mallocPy(nBC)\n u1h_c = mallocPy(nBC)\n G0h_c = mallocPy(nBC)\n G1h_c = mallocPy(nBC)\n b0h_c = mallocPy(nBC)\n b1h_c = mallocPy(nBC)\n \n hi,ui,Gi,bedi = cnoidalwaves(x,tij,dx,a0,a1,g,k)\n \n copywritearraytoC(h0,h0_c)\n copywritearraytoC(h1,h1_c)\n copywritearraytoC(u0,u0_c)\n copywritearraytoC(u1,u1_c)\n copywritearraytoC(G0,G0_c)\n copywritearraytoC(G1,G1_c)\n copywritearraytoC(b0,b0_c)\n copywritearraytoC(b1,b1_c)\n \n copywritearraytoC(h0h,h0h_c)\n copywritearraytoC(h1h,h1h_c)\n copywritearraytoC(u0h,u0h_c)\n copywritearraytoC(u1h,u1h_c)\n copywritearraytoC(G0h,G0h_c)\n copywritearraytoC(G1h,G1h_c)\n copywritearraytoC(b0h,b0h_c)\n copywritearraytoC(b1h,b1h_c) \n \n \n for i in range(1,len(t)): \n \n \n evolvewrapBC(G_c,h_c,bed_c,h0_c,h1_c,u0_c,u1_c,G0_c,G1_c,h0h_c,h1h_c,u0h_c,u1h_c,G0h_c,G1h_c,b0_c,b1_c,g,dx,dt, n, nBC, nBCn,theta, hn_c,Gn_c,un_c)\n \n #evolvewrapperiodic(G_c,h_c,bed_c,g,dx,dt,n,nBCn,theta,hn_c, Gn_c,un_c); \n print (t[i])\n \n copywritearraytoC(h0h,h0_c)\n copywritearraytoC(h1h,h1_c)\n copywritearraytoC(u0h,u0_c)\n copywritearraytoC(u1h,u1_c)\n copywritearraytoC(G0h,G0_c)\n copywritearraytoC(G1h,G1_c)\n copywritearraytoC(b0h,b0_c)\n copywritearraytoC(b1h,b1_c)\n \n hBCh,uBCh,GBCh,bedBCh = cnoidalwaves(xbc,t[i] + dt,dx,a0,a1,g,k)\n h0h = hBCh[:nBC]\n h1h = hBCh[-nBC:]\n u0h = uBCh[:nBC]\n u1h = uBCh[-nBC:]\n G0h = GBCh[:nBC]\n G1h = GBCh[-nBC:]\n b0h = bedBCh[:nBC]\n b1h = bedBCh[-nBC:]\n \n copywritearraytoC(h0h,h0h_c)\n copywritearraytoC(h1h,h1h_c)\n copywritearraytoC(u0h,u0h_c)\n copywritearraytoC(u1h,u1h_c)\n copywritearraytoC(G0h,G0h_c)\n copywritearraytoC(G1h,G1h_c)\n copywritearraytoC(b0h,b0h_c)\n copywritearraytoC(b1h,b1h_c) \n \n tij = t[i]\n \n #getufromGperiodic(h_c,G_c,bed_c, dx ,n,u_c)\n \n #something weird with u at boundaires\n haBC,uaBC,GaBC,bedaBC = cnoidalwaves(xbc,tij,dx,a0,a1,g,k)\n ha = haBC[nBC:-nBC]\n h0 = haBC[:nBC]\n h1 = haBC[-nBC:]\n ua = uaBC[nBC:-nBC]\n u0 = uaBC[:nBC]\n u1 = uaBC[-nBC:]\n Ga = GaBC[nBC:-nBC]\n G0 = GaBC[:nBC]\n G1 = GaBC[-nBC:]\n beda = bedaBC[nBC:-nBC]\n b0 = bedaBC[:nBC]\n b1 = bedaBC[-nBC:]\n \n getufromG(h_c,G_c,bed_c,u0[-1],u1[0],h0[-1],h1[0], 0.0, 0.0, dx ,n,u_c)\n u = copyarrayfromC(u_c,n)\n G = copyarrayfromC(G_c,n)\n h = copyarrayfromC(h_c,n)\n \n un = copyarrayfromC(un_c,n+2*nBCn)\n Gn = copyarrayfromC(Gn_c,n+2*nBCn)\n hn = copyarrayfromC(hn_c,n+2*nBCn)\n \n ha,ua,Ga,beda = cnoidalwaves(x,t[-1],dx,a0,a1,g,k) \n \n s = wdir + \"outlast.txt\"\n with open(s,'a') as file2:\n writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n \n writefile2.writerow(['dx' ,'dt','time' ,\"cell midpoint\", 'height(m)', 'G' , 'u(m/s)' ,'ha', 'u']) \n \n for j in range(n):\n writefile2.writerow([str(dx),str(dt),str(t[-1]),str(x[j]) ,str(h[j]) , str(G[j]) , str(u[j]), str(ha[j]), 
str(ua[j])]) \n \n normhdiffi = norm(h - ha,ord=1) / norm(ha,ord=1)\n normudiffi = norm(u -ua,ord=1) / norm(ua,ord=1) \n \n s = wdatadir + \"savenorms.txt\"\n with open(s,'a') as file1:\n writefile = csv.writer(file1, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n \n writefile.writerow([str(dx),str(theta),str(normhdiffi), str(normudiffi)]) \n \n deallocPy(u_c)\n deallocPy(G_c)\n deallocPy(h_c)\n deallocPy(h0_c)\n deallocPy(h1_c)\n deallocPy(u0_c)\n deallocPy(u1_c)\n deallocPy(G0_c)\n deallocPy(G1_c)\n deallocPy(b0_c)\n deallocPy(b1_c)\n deallocPy(h0h_c)\n deallocPy(h1h_c)\n deallocPy(u0h_c)\n deallocPy(u1h_c)\n deallocPy(G0h_c)\n deallocPy(G1h_c)\n deallocPy(b0h_c)\n deallocPy(b1h_c) \n#### Cnoidal Waves\n\"\"\"\n\n","sub_path":"CODE/experimentcode/Thesis/Forced/GaussianBumpoverPeriodicBed/o2bedBC/Run.py","file_name":"Run.py","file_ext":"py","file_size_in_byte":12678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"380622700","text":"import sys\nfrom robot import Robot\n\ndef parse_problem(world_file, problem_file):\n world_data = open(world_file)\n problem_data = open(problem_file)\n\n world_list = []\n problem_list = []\n\n robot_dimensions = world_data.readline().split(' ')\n robot = Robot(float(robot_dimensions[0]), float(robot_dimensions[1]))\n\n for curr_obj in world_data: \n coords = curr_obj.split(' ')\n \n curr_obj = []\n for i in range(0, len(coords), 2):\n curr_obj.insert(0,(float(coords[i]), float(coords[i+1])))\n\n world_list.append(curr_obj)\n\n for path in problem_data:\n coords = path.split(' ')\n\n problem_list.append([\n (float(coords[0]), float(coords[1]), float(coords[2])),\n (float(coords[3]), float(coords[4]), float(coords[5]))])\n\n world_data.close()\n problem_data.close()\n\n return (robot, world_list, problem_list)\n\nif __name__ == \"__main__\":\n result = parse_problem(sys.argv[1], sys.argv[2])\n if result is not None:\n print('Robot:', (result[0].width, result[0].height))\n for x in range(len(result[1])):\n print('Obstacle ' + str(x) + ': ' + str(result[1][x]))\n for x in range(len(result[2])):\n print('Problem ' + str(x) + ': ' + str(result[2][x]))","sub_path":"CS460/HW2/3/file_parse.py","file_name":"file_parse.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"193362690","text":"#!/usr/bin/env python\n#\n# tournament.py -- implementation of a Swiss-system tournament\n#\n\nimport psycopg2\nimport tournament_dbsql_ec\n\n\ndef connect():\n \"\"\"Connect to the PostgreSQL database. Returns a database connection.\"\"\"\n return psycopg2.connect(\"dbname=tournament\")\n\n\ndef deleteMatches():\n \"\"\"Remove all the match records from the database.\"\"\"\n\n connection = connect()\n cursor = connection.cursor()\n cursor.execute(tournament_dbsql_ec.deleteMatchesSQL())\n connection.commit()\n connection.close\n\n\ndef deletePlayers():\n \"\"\"Remove all the player records from the database.\n \"\"\"\n\n connection = connect()\n cursor = connection.cursor()\n cursor.execute(tournament_dbsql_ec.deletePlayersSQL())\n connection.commit()\n connection.close\n\n\ndef countPlayers(tournament):\n \"\"\"Returns the number of players currently registered.\n\n Args:\n tournament: the tournament number\n \"\"\"\n\n connection = connect()\n cursor = connection.cursor()\n data = (tournament,)\n cursor.execute(tournament_dbsql_ec.selectAllPlayersSQL(), data)\n row_count = cursor.rowcount\n connection.close\n return row_count\n\n\ndef registerPlayer(tournament, name):\n \"\"\"Adds a player to the tournament database.\n \n The database assigns a unique serial id number for the player. (This\n should be handled by your SQL database schema, not in your Python code.)\n \n Args:\n name: the player's full name (need not be unique).\n \"\"\"\n connection = connect()\n cursor = connection.cursor()\n data = (tournament, name)\n cursor.execute(tournament_dbsql_ec.registerPlayerSQL(), data)\n connection.commit()\n connection.close\n\n\ndef playerStandings(tournament):\n\n \"\"\"Reurns a list of the players in order of their standings\n\n Args:\n tournament: the tournament number\n\n Returns a list of the players and their win records, sorted by wins.\n\n The first entry in the list should be the player in first place,\n or a player tied for first place if there is currently a tie.\n\n Returns:\n A list of tuples, each of which contains (id, name, wins, matches):\n id: the player's unique id (assigned by the database)\n name: the player's full name (as registered)\n wins: the number of matches the player has won\n matches: the number of matches the player has played\n \"\"\"\n connection = connect()\n cursor = connection.cursor()\n\n # Query player standings\n cursor.execute(tournament_dbsql_ec.playerStandingsSQL(), (tournament,))\n \n rows = cursor.fetchall()\n\n connection.close\n\n return rows\n\n\ndef reportMatch(tournament, winner, loser, round_num, tie_ind):\n \"\"\"Records the outcome of a single match between two players.\n\n Args:\n tournament: the tournament number\n winner: the id number of the player who won\n loser: the id number of the player who lost\n round_num: the current round of matches\n tie_ind: indicates whether the match was a tie\n \"\"\"\n\n \"\"\"\n I always put the lesser player id in the player 1 id to limit the\n number of rows and keep the data cleaner\n \"\"\"\n if winner > loser:\n player1 = loser\n player2 = winner\n else:\n player2 = loser\n player1 = winner\n\n # Insert record for matches table\n connection = connect()\n cursor = connection.cursor()\n data = (tournament, player1, player2, round_num, tie_ind)\n cursor.execute(tournament_dbsql_ec.reportMatchInsertSQL(), data)\n\n \"\"\"\n Insert a record in the matches table when there is a tie\n to give the second player credit\n \"\"\"\n \n if tie_ind == \"Y\":\n data = (tournament, 
player2, player1, round_num, tie_ind)\n cursor.execute(tournament_dbsql_ec.reportMatchInsertSQL(), data)\n \n connection.commit()\n connection.close\n\n \ndef swissPairings(tournament, round_num):\n \"\"\"Returns a list of pairs of players for the next round of a match.\n \n Assuming that there are an even number of players registered, each player\n appears exactly once in the pairings. Each player is paired with another\n player with an equal or nearly-equal win record, that is, a player adjacent\n to him or her in the standings.\n \n Args:\n tournament: the tournament number\n round_num: the current round of matches\n\n Returns:\n A list of tuples, each of which contains (id1, name1, id2, name2)\n id1: the first player's unique id\n name1: the first player's name\n id2: the second player's unique id\n name2: the second player's name\n \"\"\"\n connection = connect()\n cursor = connection.cursor()\n cursor.execute(tournament_dbsql_ec.playerOrderSQL(), (tournament,))\n\n count = 0\n row_count = 0\n pairing = ()\n pairing_list = []\n\n matchq_connection = connect()\n match_connection = connect()\n\n for row in cursor:\n count += 1\n row_count += 1\n\n if count < 2:\n # Hold the first of each pair for later matching\n player1 = row\n else:\n # Make sure the smaller value is in first position\n if player1[0] < row[0]:\n player1_id_h = player1\n player2_id_h = row\n else:\n player2_id_h = player1\n player1_id_h = row\n \n # check for duplicates - Extra Credit 1 - I also put\n # constraints on the database.\n\n matchq_cursor = connection.cursor()\n data = (tournament, player1_id_h[0], player2_id_h[0])\n matchq_cursor.execute(tournament_dbsql_ec.checkMatchExistsSQL(), data)\n matchq_connection.commit()\n count = 0;\n # if no match found, pair the players\n if matchq_cursor.rowcount == 0:\n pairing = player1_id_h + player2_id_h\n pairing_list.append(pairing)\n pairing = ()\n \n # Insert the matched players into the match table\n\n matchq_connection.close\n connection.close\n\n \"\"\"\n Append last player with a dummy negative record so they get credit for a win,\n but do not appear in a match. I removed the table contraint to allow for -1\n \"\"\"\n\n if (round_num == 0 and cursor.rowcount % 2 > 0 and\n row_count == cursor.rowcount):\n pairing = player1 + (-1, ' ')\n pairing_list.append(pairing)\n # print pairing_list\n\n return pairing_list\n","sub_path":"vagrant/tournament/tournament_ec.py","file_name":"tournament_ec.py","file_ext":"py","file_size_in_byte":6400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
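A short usage sketch of the API in the record above, with the module imported under its file name and assuming an empty 'tournament' PostgreSQL database that already has the expected schema; player names are illustrative:

    import tournament_ec as t

    t.deleteMatches()
    t.deletePlayers()
    for name in ("Ada", "Grace", "Alan", "Edsger"):
        t.registerPlayer(1, name)               # tournament number 1

    pairs = t.swissPairings(1, 0)               # pairings for round 0
    for (id1, name1, id2, name2) in pairs:
        t.reportMatch(1, id1, id2, 0, "N")      # first player of each pair wins

    print(t.playerStandings(1))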
+{"seq_id":"429233691","text":"## This is course material for Introduction to Modern Artificial Intelligence\n## Example code: test_ResNet50.py\n## Author: Allen Y. Yang\n##\n## (c) Copyright 2020. Intelligent Racing Inc. Not permitted for commercial use\nfrom keras.datasets import cifar100\nimport matplotlib.pyplot as plt\nfrom keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions\nimport numpy as np\nimport cv2\n\nmodel = ResNet50(weights='imagenet')\nprint(model.summary())\n\n# load the data built in Keras, split between train and test sets\n(x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode = 'fine')\n\n# Test the x_test images without training\nplt.figure(1)\ntest_count = 6\nfor i in range(test_count):\n test_image = x_test[i]\n test_image = cv2.resize(test_image, (224, 224))\n display_image = test_image.copy()\n\n # Convert a single image into batch Keras training format\n test_image = test_image.reshape((1, 224, 224, 3))\n test_image = test_image.astype('float32')\n test_image = preprocess_input(test_image)\n\n # Predict and extract the top 3 text labels\n y_predict = model.predict(test_image)\n label = decode_predictions(y_predict)\n display_labels = str([label[0][0][1], label[0][1][1], label[0][2][1]])\n\n # Display the test result\n plt.imshow(display_image, cmap = plt.cm.binary)\n plt.title(display_labels)\n plt.show()","sub_path":"samples/test_ResNet50.py","file_name":"test_ResNet50.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"534741994","text":"x=float(input(\"Введіть змінну x=\"))\r\ne=float(input(\"Введіть точність е=\"))\r\nimport math\r\nd=x\r\nn=2\r\nwhile math.fabs(1-(x**2/((n-1)**2)*(math.pi**2)))>e:\r\n d*=(1-(x**2/((n-1)**2)*(math.pi**2)))\r\n n+=1\r\nprint(\"Добуток:{0}\".format(d))\r\nif math.sin(x)-d Optional[str]:\n if isinstance(result, JobResult):\n return result.next_job_id if result.next_job_id else None\n\n\ndef insert_job_id(result: JobResult, job_id: str) -> JobResult:\n if isinstance(result, JobResult):\n if not result.job_id:\n result.job_id = job_id\n return result\n\n\ndef job_wrapper(func):\n \"\"\"Decorator to save job status in job tracker database.\"\"\"\n def wrapper(job_id: Optional[str]=None, *args, **kwargs):\n if job_id:\n job = Jobtracker()\n try:\n job.load(job_id)\n except:\n job = None\n if job:\n job.start(fname = func.__name__)\n try:\n # kwargs is contained in an item called kwargs because of the apscheduler.add_job call\n res = func(*args, **kwargs['kwargs'])\n if job_id:\n res = insert_job_id(res, job_id)\n except Exception as e:\n tb = traceback.format_exc()\n logger.debug(\"Exception traceback in job_wrapper: {}\".format(tb))\n if job:\n job.finish_exception(e, tb)\n raise e\n else:\n if job:\n job.finish_success(res, find_nextjob(res))\n return res\n return wrapper\n\n","sub_path":"src/cnaas_nms/scheduler/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"419634212","text":"\"\"\"\nThis is an XBlock designed to allow people to provide feedback on our\ncourse resources.\n\"\"\"\n\nimport random\n\nimport pkg_resources\n\nfrom xblock.core import XBlock\nfrom xblock.fields import Scope, Integer, String, List, Float\nfrom xblock.fragment import Fragment\nimport six\n\n\n@XBlock.needs('i18n')\nclass RateXBlock(XBlock):\n \"\"\"\n This is an XBlock -- eventually, hopefully an aside -- which\n allows you to rate content in the course. We've wanted this for a\n long time, but Dartmouth finally encourage me to start to build\n this.\n \"\"\"\n # This is a list of prompts. If we have multiple elements in the\n # list, one will be chosen at random. This is currently not\n # exposed in the UX. If the prompt is missing any portions, we\n # will default to the ones in default_prompt.\n prompts = List(\n default=[\n {'freeform': \"Please provide us feedback on this section\",\n 'likert': \"Please rate your overall experience with this section\"}\n ],\n scope=Scope.settings,\n help=\"Freeform user prompt\",\n xml_node=True\n )\n\n prompt_choice = Integer(\n default=-1, scope=Scope.user_state,\n help=\"Random number generated for p. -1 if uninitialized\"\n )\n\n user_vote = Integer(\n default=-1, scope=Scope.user_state,\n help=\"How user voted. -1 if didn't vote\"\n )\n\n p = Float(\n default=100, scope=Scope.settings,\n help=\"What percent of the time should this show?\"\n )\n\n p_user = Float(\n default=-1, scope=Scope.user_state,\n help=\"Random number generated for p. -1 if uninitialized\"\n )\n\n vote_aggregate = List(\n default=None, scope=Scope.user_state_summary,\n help=\"A list of user votes\"\n )\n\n user_freeform = String(default=\"\", scope=Scope.user_state,\n help=\"Feedback\")\n\n display_name = String(\n display_name=\"Display Name\",\n default=\"Provide Feedback\",\n scopde=Scope.settings\n )\n\n def resource_string(self, path):\n \"\"\"Handy helper for getting resources from our kit.\"\"\"\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")\n\n def get_prompt(self, index=-1):\n \"\"\"\n Return the current prompt dictionary, doing appropriate\n randomization if necessary, and falling back to defaults when\n necessary.\n \"\"\"\n if index == -1:\n index = self.prompt_choice\n\n _ = self.runtime.service(self, 'i18n').ugettext\n prompt = {\n 'freeform': _(\"Please provide us feedback on this section.\"),\n 'likert': _(\"Please rate your overall experience \"\n \"with this section.\"),\n 'mouseovers': [_(\"Excellent\"),\n _(\"Good\"),\n _(\"Average\"),\n _(\"Fair\"),\n _(\"Poor\")],\n 'icons': [\"😁\", \"😊\", \"😐\", \"😞\", \"😭\"]\n }\n\n prompt.update(self.prompts[index])\n return prompt\n\n def student_view(self, context=None):\n \"\"\"\n The primary view of the RateXBlock, shown to students\n when viewing courses.\n \"\"\"\n # Figure out which prompt we show. We set self.prompt_choice to\n # the index of the prompt. We set it if it is out of range (either\n # uninitiailized, or incorrect due to changing list length). Then,\n # we grab the prompt, prepopulated with defaults.\n if self.prompt_choice < 0 or self.prompt_choice >= len(self.prompts):\n self.prompt_choice = random.randint(0, len(self.prompts) - 1)\n prompt = self.get_prompt()\n\n # Now, we render the RateXBlock. 
This may be redundant, since we\n # don't always show it.\n html = self.resource_string(\"static/html/rate.html\")\n # The replace allows us to format the HTML nicely without getting\n # extra whitespace\n if self.vote_aggregate and self.is_staff():\n scale_item = self.resource_string(\"static/html/staff_item.html\")\n else:\n scale_item = self.resource_string(\"static/html/scale_item.html\")\n scale_item = scale_item.replace('\\n', '')\n indexes = list(range(len(prompt['icons'])))\n active_vote = [\"checked\" if i == self.user_vote else \"\"\n for i in indexes]\n self.init_vote_aggregate()\n votes = self.vote_aggregate\n scale = \"\".join(\n scale_item.format(level=l, icon=icon, i=i, active=a, votes=v) for\n (l, icon, i, a, v) in\n zip(prompt['mouseovers'], prompt['icons'], indexes, active_vote, votes)\n )\n if self.user_vote != -1:\n _ = self.runtime.service(self, 'i18n').ugettext\n response = _(\"Thank you for voting!\")\n else:\n response = \"\"\n rendered = html.format(self=self,\n scale=scale,\n freeform_prompt=prompt['freeform'],\n likert_prompt=prompt['likert'],\n response=response)\n\n # We initialize self.p_user if not initialized -- this sets whether\n # or not we show it. From there, if it is less than odds of showing,\n # we set the fragment to the rendered XBlock. Otherwise, we return\n # empty HTML. There ought to be a way to return None, but XBlocks\n # doesn't support that.\n if self.p_user == -1:\n self.p_user = random.uniform(0, 100)\n if self.p_user < self.p:\n frag = Fragment(rendered)\n else:\n frag = Fragment(\"\")\n\n # Finally, we do the standard JS+CSS boilerplate. Honestly, XBlocks\n # ought to have a sane default here.\n frag.add_css(self.resource_string(\"static/css/rate.css\"))\n frag.add_javascript(self.resource_string(\"static/js/src/rate.js\"))\n frag.initialize_js('RateXBlock')\n return frag\n\n def studio_view(self, context):\n \"\"\"\n Create a fragment used to display the edit view in the Studio.\n \"\"\"\n html_str = self.resource_string(\"static/html/studio_view.html\")\n prompt = self.get_prompt(0)\n frag = Fragment(str(html_str).format(**prompt))\n js_str = self.resource_string(\"static/js/src/studio.js\")\n frag.add_javascript(str(js_str))\n frag.initialize_js('RateBlock')\n return frag\n\n @XBlock.json_handler\n def studio_submit(self, data, suffix=''):\n \"\"\"\n Called when submitting the form in Studio.\n \"\"\"\n self.prompts[0]['freeform'] = data.get('freeform')\n self.prompts[0]['likert'] = data.get('likert')\n return {'result': 'success'}\n\n def init_vote_aggregate(self):\n # Make sure we're initialized\n if not self.vote_aggregate:\n self.vote_aggregate = [0] * (len(self.get_prompt()['mouseovers']))\n\n def vote(self, data):\n \"\"\"\n Handle voting\n \"\"\"\n # prompt_choice is initialized by student view.\n # Ideally, we'd break this out into a function.\n prompt = self.get_prompt(self.prompt_choice)\n\n # Make sure we're initialized\n self.init_vote_aggregate()\n\n # Remove old vote if we voted before\n if self.user_vote != -1:\n self.vote_aggregate[self.user_vote] -= 1\n\n self.user_vote = data['vote']\n self.vote_aggregate[self.user_vote] += 1\n\n @XBlock.json_handler\n def feedback(self, data, suffix=''):\n '''\n Allow students to submit feedback, both numerical and\n qualitative. We only update the specific type of feedback\n submitted.\n\n We return the current state. While this is not used by the\n client code, it is helpful for testing. 
For staff users, we\n also return the aggregate results.\n '''\n _ = self.runtime.service(self, 'i18n').ugettext\n\n if 'freeform' not in data and 'vote' not in data:\n response = {\"success\": False,\n \"response\": _(\"Please vote!\")}\n self.runtime.publish(self,\n 'edx.ratexblock.nothing_provided',\n {})\n if 'freeform' in data:\n response = {\"success\": True,\n \"response\": _(\"Thank you for your feedback!\")}\n self.runtime.publish(self,\n 'edx.ratexblock.freeform_provided',\n {'old_freeform': self.user_freeform,\n 'new_freeform': data['freeform']})\n self.user_freeform = data['freeform']\n if 'vote' in data:\n response = {\"success\": True,\n \"response\": _(\"Thank you for voting!\")}\n self.runtime.publish(self,\n 'edx.ratexblock.likert_provided',\n {'old_vote': self.user_vote,\n 'new_vote': data['vote']})\n self.vote(data)\n\n response.update({\n \"freeform\": self.user_freeform,\n \"vote\": self.user_vote\n })\n\n if self.is_staff():\n response['aggregate'] = self.vote_aggregate\n\n return response\n\n # TO-DO: change this to create the scenarios you'd like to see in the\n # workbench while developing your XBlock.\n @staticmethod\n def workbench_scenarios():\n \"\"\"A canned scenario for display in the workbench.\"\"\"\n return [\n (\"RateXBlock\",\n \"\"\"\n \n \n \n \n \"\"\"),\n ]\n\n def is_staff(self):\n \"\"\"\n Return self.xmodule_runtime.user_is_staff if available\n\n This is not a supported part of the XBlocks API in all\n runtimes, and this is a workaround so something reasonable\n happens in both workbench and edx-platform\n \"\"\"\n if hasattr(self, \"xmodule_runtime\") and \\\n hasattr(self.xmodule_runtime, \"user_is_staff\"):\n return self.xmodule_runtime.user_is_staff\n else:\n # In workbench and similar settings, always return true\n return True\n","sub_path":"rate/rate.py","file_name":"rate.py","file_ext":"py","file_size_in_byte":10308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"551342713","text":"# -*- coding: utf-8 -*-\n\n# Copyright © 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\n\nfrom nikola.plugin_categories import RestExtension\n\nclass Plugin(RestExtension):\n\n name = \"rest_advert\"\n\n def set_site(self, site):\n self.site = site\n directives.register_directive('advert', Advert)\n return super(Plugin, self).set_site(site)\n\n\nclass Advert(Directive):\n \"\"\" Restructured text extension for inserting Google Adsense advert.\n\n Usage: .. advert:: advert-type\n\n Currently advert-type is blog-responsive-sidebar or blog-responsive-banner\n and the result is one fixed chunk of HTML or another one.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n has_content = False\n\n def run(self):\n if self.arguments[0] == \"blog-responsive-sidebar\":\n return [nodes.raw('', \"\"\"\n\n\n \n\n
\"\"\", format='html')]\n else:\n return [nodes.raw('', \"\"\"\n\n\n \n\n
\"\"\", format='html')]\n","sub_path":"nikola/plugins/compile/rest/advert.py","file_name":"advert.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"342309392","text":"import numpy as np\n\nX = np.genfromtxt('X_train.csv', delimiter=';', dtype=float)\nY = np.genfromtxt('data_train_Y.csv', delimiter=';', dtype=float)\nW1 = np.zeros([35,15])\nW2 = np.zeros([15,10])\nteta = 0.3\n\nfor i in range(35):\n for j in range(15):\n W1[i][j] = float(np.random.uniform(-1,1))\n\nfor i in range(15):\n for j in range(10):\n W2[i][j] = float(np.random.uniform(-1,1))\n\ndef n(X,W):\n return np.dot(X,W)\n\ndef f(net):\n return 1/(1 + np.exp(-0.1*net))\n\ndef dy(net):\n return (0.1 * np.exp(-net * 0.1)) / ((np.exp(-net * 0.1) + 1) ** 2)\n\nfor t in range(1000):\n for g in range(1010):\n input = np.array([X[g]])\n net1 = n(input,W1)\n output = np.array([Y[g]])\n y1 = f(net1)\n net2 = n(y1,W2)\n y2 = f(net2)\n\n d_out = (output - y2) * dy(net2)\n\n delta_h = np.zeros(15)\n for i in range(15):\n for j in range(10):\n delta_h[i] += d_out[0][j] * W2[i][j] \n delta_h[i] *= dy(net1[0][i])\n\n d_W1 = np.zeros([35, 15])\n for i in range(35):\n for j in range(15):\n d_W1[i][j] = delta_h[j] * input[0][i]\n d_W1 *= teta \n W1 += d_W1\n\n d_W2 = np.zeros([15, 10])\n for i in range(15): \n for j in range(10): \n d_W2[i][j] = d_out[0][j] * y1[0][i]\n d_W2 *= teta \n W2 += d_W2 \n\n'''\nX_test = np.genfromtxt('data_1_test_3_bits.csv', delimiter=';')\nfor i in range(10):\n input_1 = np.array([X_test[i]])\n net_1 = n(input_1,W1)\n y_1 = f(net_1)\n net_2 = n(y_1,W2)\n y_2 = f(net_2)\n for i in range(10):\n print(y_2[0][i])\n print('Number:')\n print(np.argmax(y_2[0]))\n print('--------------------------')'''\n \n","sub_path":"Экспериментальные проекты/GoPro проекты/Распознование цифр с GoPro/Для обучения/classificator_2.py","file_name":"classificator_2.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"369383220","text":"# ===============================================================================\n# qt resources\n# ===============================================================================\n\n\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtWidgets\n\n# from PyQt5 import QtGui as Q_Gui\n\nQt = QtCore.Qt\n# caused some problems when saving automaton to file.....\n\nleft = int(Qt.LeftButton)\nright = int(Qt.RightButton)\nmiddle = int(Qt.MidButton)\nnobut = int(Qt.NoButton)\nLEFT = left # Qt.LeftButton\nMIDDLE = middle # Qt.MidButton\nRIGHT = right # Qt.RightButton\nNO_BUT = nobut # Qt.NoButton\nYES = QtWidgets.QMessageBox.Yes\nNO = QtWidgets.QMessageBox.No\nOK = QtWidgets.QMessageBox.Ok\n\nKEYS = {\n Qt.Key_Escape : 'Key_Escape',\n Qt.Key_Tab : 'Key_Tab',\n Qt.Key_Backtab : 'Key_Backtab',\n Qt.Key_Backspace : 'Key_Backspace',\n Qt.Key_Return : 'Key_Return',\n Qt.Key_Enter : 'Key_Enter',\n Qt.Key_Insert : 'Key_Insert',\n Qt.Key_Delete : 'Key_Delete',\n Qt.Key_Pause : 'Key_Pause',\n Qt.Key_Print : 'Key_Print',\n Qt.Key_SysReq : 'Key_SysReq',\n Qt.Key_Home : 'Key_Home',\n Qt.Key_End : 'Key_End',\n Qt.Key_Left : 'Key_Left',\n Qt.Key_Up : 'Key_Up',\n Qt.Key_Right : 'Key_Right',\n Qt.Key_Down : 'Key_Down',\n # Qt.Key_Prior : 'Key_Prior' ,\n # Qt.Key_Next : 'Key_Next' ,\n Qt.Key_Shift : 'Key_Shift',\n Qt.Key_Control : 'Key_Control',\n Qt.Key_Meta : 'Key_Meta',\n Qt.Key_Alt : 'Key_Alt',\n Qt.Key_CapsLock : 'Key_CapsLock',\n Qt.Key_NumLock : 'Key_NumLock',\n Qt.Key_ScrollLock : 'Key_ScrollLock',\n Qt.Key_Clear : 'Key_Clear',\n Qt.Key_F1 : 'Key_F1',\n Qt.Key_F2 : 'Key_F2',\n Qt.Key_F3 : 'Key_F3',\n Qt.Key_F4 : 'Key_F4',\n Qt.Key_F5 : 'Key_F5',\n Qt.Key_F6 : 'Key_F6',\n Qt.Key_F7 : 'Key_F7',\n Qt.Key_F8 : 'Key_F8',\n Qt.Key_F9 : 'Key_F9',\n Qt.Key_F10 : 'Key_F10',\n Qt.Key_F11 : 'Key_F11',\n Qt.Key_F12 : 'Key_F12',\n Qt.Key_F13 : 'Key_F13',\n Qt.Key_F14 : 'Key_F14',\n Qt.Key_F15 : 'Key_F15',\n Qt.Key_F16 : 'Key_F16',\n Qt.Key_F17 : 'Key_F17',\n Qt.Key_F18 : 'Key_F18',\n Qt.Key_F19 : 'Key_F19',\n Qt.Key_F20 : 'Key_F20',\n Qt.Key_F21 : 'Key_F21',\n Qt.Key_F22 : 'Key_F22',\n Qt.Key_F23 : 'Key_F23',\n Qt.Key_F24 : 'Key_F24',\n Qt.Key_F25 : 'Key_F25',\n Qt.Key_F26 : 'Key_F26',\n Qt.Key_F27 : 'Key_F27',\n Qt.Key_F28 : 'Key_F28',\n Qt.Key_F29 : 'Key_F29',\n Qt.Key_F30 : 'Key_F30',\n Qt.Key_F31 : 'Key_F31',\n Qt.Key_F32 : 'Key_F32',\n Qt.Key_F33 : 'Key_F33',\n Qt.Key_F34 : 'Key_F34',\n Qt.Key_F35 : 'Key_F35',\n Qt.Key_Super_L : 'Key_Super_L',\n Qt.Key_Super_R : 'Key_Super_R',\n Qt.Key_Menu : 'Key_Menu',\n Qt.Key_Hyper_L : 'Key_Hyper_L',\n Qt.Key_Hyper_R : 'Key_Hyper_R',\n Qt.Key_Help : 'Key_Help',\n Qt.Key_Space : 'Key_Space',\n Qt.Key_Any : 'Key_Any',\n Qt.Key_Exclam : 'Key_Exclam',\n Qt.Key_QuoteDbl : 'Key_QuoteDbl',\n Qt.Key_NumberSign : 'Key_NumberSign',\n Qt.Key_Dollar : 'Key_Dollar',\n Qt.Key_Percent : 'Key_Percent',\n Qt.Key_Ampersand : 'Key_Ampersand',\n Qt.Key_Apostrophe : 'Key_Apostrophe',\n Qt.Key_ParenLeft : 'Key_ParenLeft',\n Qt.Key_ParenRight : 'Key_ParenRight',\n Qt.Key_Asterisk : 'Key_Asterisk',\n Qt.Key_Plus : 'Key_Plus',\n Qt.Key_Comma : 'Key_Comma',\n Qt.Key_Minus : 'Key_Minus',\n Qt.Key_Period : 'Key_Period',\n Qt.Key_Slash : 'Key_Slash',\n Qt.Key_0 : 'Key_0',\n Qt.Key_1 : 'Key_1',\n Qt.Key_2 : 'Key_2',\n Qt.Key_3 : 'Key_3',\n Qt.Key_4 : 'Key_4',\n Qt.Key_5 : 'Key_5',\n Qt.Key_6 : 'Key_6',\n Qt.Key_7 : 'Key_7',\n Qt.Key_8 : 'Key_8',\n Qt.Key_9 : 'Key_9',\n Qt.Key_Colon : 'Key_Colon',\n Qt.Key_Semicolon : 'Key_Semicolon',\n Qt.Key_Less : 'Key_Less',\n Qt.Key_Equal : 
'Key_Equal',\n Qt.Key_Greater : 'Key_Greater',\n Qt.Key_Question : 'Key_Question',\n Qt.Key_At : 'Key_At',\n Qt.Key_A : 'Key_A',\n Qt.Key_B : 'Key_B',\n Qt.Key_C : 'Key_C',\n Qt.Key_D : 'Key_D',\n Qt.Key_E : 'Key_E',\n Qt.Key_F : 'Key_F',\n Qt.Key_G : 'Key_G',\n Qt.Key_H : 'Key_H',\n Qt.Key_I : 'Key_I',\n Qt.Key_J : 'Key_J',\n Qt.Key_K : 'Key_K',\n Qt.Key_L : 'Key_L',\n Qt.Key_M : 'Key_M',\n Qt.Key_N : 'Key_N',\n Qt.Key_O : 'Key_O',\n Qt.Key_P : 'Key_P',\n Qt.Key_Q : 'Key_Q',\n Qt.Key_R : 'Key_R',\n Qt.Key_S : 'Key_S',\n Qt.Key_T : 'Key_T',\n Qt.Key_U : 'Key_U',\n Qt.Key_V : 'Key_V',\n Qt.Key_W : 'Key_W',\n Qt.Key_X : 'Key_X',\n Qt.Key_Y : 'Key_Y',\n Qt.Key_Z : 'Key_Z',\n Qt.Key_BracketLeft : 'Key_BracketLeft',\n Qt.Key_Backslash : 'Key_Backslash',\n Qt.Key_BracketRight : 'Key_BracketRight',\n Qt.Key_AsciiCircum : 'Key_AsciiCircum',\n Qt.Key_Underscore : 'Key_Underscore',\n Qt.Key_QuoteLeft : 'Key_QuoteLeft',\n Qt.Key_BraceLeft : 'Key_BraceLeft',\n Qt.Key_Bar : 'Key_Bar',\n Qt.Key_BraceRight : 'Key_BraceRight',\n Qt.Key_AsciiTilde : 'Key_AsciiTilde',\n Qt.Key_nobreakspace : 'Key_nobreakspace',\n Qt.Key_exclamdown : 'Key_exclamdown',\n Qt.Key_cent : 'Key_cent',\n Qt.Key_sterling : 'Key_sterling',\n Qt.Key_currency : 'Key_currency',\n Qt.Key_yen : 'Key_yen',\n Qt.Key_brokenbar : 'Key_brokenbar',\n Qt.Key_section : 'Key_section',\n Qt.Key_diaeresis : 'Key_diaeresis',\n Qt.Key_copyright : 'Key_copyright',\n Qt.Key_ordfeminine : 'Key_ordfeminine',\n Qt.Key_guillemotleft : 'Key_guillemotleft',\n Qt.Key_notsign : 'Key_notsign',\n Qt.Key_hyphen : 'Key_hyphen',\n Qt.Key_registered : 'Key_registered',\n Qt.Key_macron : 'Key_macron',\n Qt.Key_degree : 'Key_degree',\n Qt.Key_plusminus : 'Key_plusminus',\n Qt.Key_twosuperior : 'Key_twosuperior',\n Qt.Key_threesuperior : 'Key_threesuperior',\n Qt.Key_acute : 'Key_acute',\n Qt.Key_mu : 'Key_mu',\n Qt.Key_paragraph : 'Key_paragraph',\n Qt.Key_periodcentered: 'Key_periodcentered',\n Qt.Key_cedilla : 'Key_cedilla',\n Qt.Key_onesuperior : 'Key_onesuperior',\n Qt.Key_masculine : 'Key_masculine',\n Qt.Key_guillemotright: 'Key_guillemotright',\n Qt.Key_onequarter : 'Key_onequarter',\n Qt.Key_onehalf : 'Key_onehalf',\n Qt.Key_threequarters : 'Key_threequarters',\n Qt.Key_questiondown : 'Key_questiondown',\n Qt.Key_Agrave : 'Key_Agrave',\n Qt.Key_Aacute : 'Key_Aacute',\n Qt.Key_Acircumflex : 'Key_Acircumflex',\n Qt.Key_Atilde : 'Key_Atilde',\n Qt.Key_Adiaeresis : 'Key_Adiaeresis',\n Qt.Key_Aring : 'Key_Aring',\n Qt.Key_AE : 'Key_AE',\n Qt.Key_Ccedilla : 'Key_Ccedilla',\n Qt.Key_Egrave : 'Key_Egrave',\n Qt.Key_Eacute : 'Key_Eacute',\n Qt.Key_Ecircumflex : 'Key_Ecircumflex',\n Qt.Key_Ediaeresis : 'Key_Ediaeresis',\n Qt.Key_Igrave : 'Key_Igrave',\n Qt.Key_Iacute : 'Key_Iacute',\n Qt.Key_Icircumflex : 'Key_Icircumflex',\n Qt.Key_Idiaeresis : 'Key_Idiaeresis',\n Qt.Key_ETH : 'Key_ETH',\n Qt.Key_Ntilde : 'Key_Ntilde',\n Qt.Key_Ograve : 'Key_Ograve',\n Qt.Key_Oacute : 'Key_Oacute',\n Qt.Key_Ocircumflex : 'Key_Ocircumflex',\n Qt.Key_Otilde : 'Key_Otilde',\n Qt.Key_Odiaeresis : 'Key_Odiaeresis',\n Qt.Key_multiply : 'Key_multiply',\n Qt.Key_Ooblique : 'Key_Ooblique',\n Qt.Key_Ugrave : 'Key_Ugrave',\n Qt.Key_Uacute : 'Key_Uacute',\n Qt.Key_Ucircumflex : 'Key_Ucircumflex',\n Qt.Key_Udiaeresis : 'Key_Udiaeresis',\n Qt.Key_Yacute : 'Key_Yacute',\n Qt.Key_THORN : 'Key_THORN',\n Qt.Key_ssharp : 'Key_ssharp',\n # Qt.Key_agrave : 'Key_agrave' ,\n # Qt.Key_aacute : 'Key_aacute' ,\n # Qt.Key_acircumflex : 'Key_acircumflex' ,\n # Qt.Key_atilde : 'Key_atilde' ,\n # Qt.Key_adiaeresis : 
'Key_adiaeresis' ,\n # Qt.Key_aring : 'Key_aring' ,\n # Qt.Key_ae : 'Key_ae' ,\n # Qt.Key_ccedilla : 'Key_ccedilla' ,\n # Qt.Key_egrave : 'Key_egrave' ,\n # Qt.Key_eacute : 'Key_eacute' ,\n # Qt.Key_ecircumflex : 'Key_ecircumflex' ,\n # Qt.Key_ediaeresis : 'Key_ediaeresis' ,\n # Qt.Key_igrave : 'Key_igrave' ,\n # Qt.Key_iacute : 'Key_iacute' ,\n # Qt.Key_icircumflex : 'Key_icircumflex' ,\n # Qt.Key_idiaeresis : 'Key_idiaeresis' ,\n # Qt.Key_eth : 'Key_eth' ,\n # Qt.Key_ntilde : 'Key_ntilde' ,\n # Qt.Key_ograve : 'Key_ograve' ,\n # Qt.Key_oacute : 'Key_oacute' ,\n # Qt.Key_ocircumflex : 'Key_ocircumflex' ,\n # Qt.Key_otilde : 'Key_otilde' ,\n # Qt.Key_odiaeresis : 'Key_odiaeresis' ,\n # Qt.Key_division : 'Key_division' ,\n # Qt.Key_oslash : 'Key_oslash' ,\n # Qt.Key_ugrave : 'Key_ugrave' ,\n # Qt.Key_uacute : 'Key_uacute' ,\n # Qt.Key_ucircumflex : 'Key_ucircumflex' ,\n # Qt.Key_udiaeresis : 'Key_udiaeresis' ,\n # Qt.Key_yacute : 'Key_yacute' ,\n # Qt.Key_thorn : 'Key_thorn' ,\n # Qt.Key_ydiaeresis : 'Key_ydiaeresis' ,\n # Qt.Key_Back : 'Key_Back' ,\n # Qt.Key_Forward : 'Key_Forward' ,\n # Qt.Key_Stop : 'Key_Stop' ,\n # Qt.Key_Refresh : 'Key_Refresh' ,\n # Qt.Key_VolumeDown : 'Key_VolumeDown' ,\n # Qt.Key_VolumeMute : 'Key_VolumeMute' ,\n # Qt.Key_VolumeUp : 'Key_VolumeUp' ,\n # Qt.Key_BassBoost : 'Key_BassBoost' ,\n # Qt.Key_BassUp : 'Key_BassUp' ,\n # Qt.Key_BassDown : 'Key_BassDown' ,\n # Qt.Key_TrebleUp : 'Key_TrebleUp' ,\n # Qt.Key_TrebleDown : 'Key_TrebleDown' ,\n # Qt.Key_MediaPlay : 'Key_MediaPlay' ,\n # Qt.Key_MediaStop : 'Key_MediaStop' ,\n # Qt.Key_MediaPrev : 'Key_MediaPrev' ,\n # Qt.Key_MediaNext : 'Key_MediaNext' ,\n # Qt.Key_MediaRecord : 'Key_MediaRecord' ,\n # Qt.Key_HomePage : 'Key_HomePage' ,\n # Qt.Key_Favorites : 'Key_Favorites' ,\n # Qt.Key_Search : 'Key_Search' ,\n # Qt.Key_Standby : 'Key_Standby' ,\n # Qt.Key_OpenUrl : 'Key_OpenUrl' ,\n # Qt.Key_LaunchMail : 'Key_LaunchMail' ,\n # Qt.Key_LaunchMedia : 'Key_LaunchMedia' ,\n # Qt.Key_Launch0 : 'Key_Launch0' ,\n # Qt.Key_Launch1 : 'Key_Launch1' ,\n # Qt.Key_Launch2 : 'Key_Launch2' ,\n # Qt.Key_Launch3 : 'Key_Launch3' ,\n # Qt.Key_Launch4 : 'Key_Launch4' ,\n # Qt.Key_Launch5 : 'Key_Launch5' ,\n # Qt.Key_Launch6 : 'Key_Launch6' ,\n # Qt.Key_Launch7 : 'Key_Launch7' ,\n # Qt.Key_Launch8 : 'Key_Launch8' ,\n # Qt.Key_Launch9 : 'Key_Launch9' ,\n # Qt.Key_LaunchA : 'Key_LaunchA' ,\n # Qt.Key_LaunchB : 'Key_LaunchB' ,\n # Qt.Key_LaunchC : 'Key_LaunchC' ,\n # Qt.Key_LaunchD : 'Key_LaunchD' ,\n # Qt.Key_LaunchE : 'Key_LaunchE' ,\n # Qt.Key_LaunchF : 'Key_LaunchF' ,\n # Qt.Key_MediaLast : 'Key_MediaLast' ,\n Qt.Key_unknown : 'Key_unknown',\n Qt.Key_Direction_L : 'Key_Direction_L',\n Qt.Key_Direction_R : 'Key_Direction_R',\n }\n\nPEN_STYLES = {\n 'no pen' : Qt.NoPen,\n 'solid' : Qt.SolidLine,\n 'dashed' : Qt.DashLine,\n 'dotted' : Qt.DotLine,\n 'dash dot' : Qt.DashDotLine,\n 'dash dot dot': Qt.DashDotDotLine,\n 'custom dash' : Qt.CustomDashLine\n }\n\nPEN_STYLES_R = {}\nfor i in list(PEN_STYLES.keys()):\n PEN_STYLES_R[PEN_STYLES[i]] = i\n\nBUTTON_NAMES = {\n left : 'left',\n middle: 'middle',\n right : 'right',\n nobut : 'none'\n }\n\nPRESET_COLOURS = {\n \"yellow\": Qt.yellow\n }\n\n\nclass ModellerRadioButton(QtWidgets.QRadioButton):\n radio_signal = QtCore.pyqtSignal(str, str, str, bool)\n\n def __init__(self, token_class, token, strID, label, autoexclusive=True, height=20):\n QtWidgets.QRadioButton.__init__(self, label)\n self.token_class = token_class\n self.token = token\n self.strID = strID\n self.setFixedHeight(height)\n 
self.setAutoExclusive(autoexclusive)\n        self.toggled.connect(self.beenToggled)\n        self.initialisation = True\n\n    def beenToggled(self, value):\n        self.radio_signal.emit(self.token_class, self.token, self.strID, value)\n\n\nclass StackControl():\n    def __init__(self, stack, button_up, button_down, icon_left=None, icon_right=None, index=0, identifier='none'):\n        self.stack = stack\n        self.up = button_up\n        self.down = button_down\n        self.index = index\n        self.identifier = identifier\n        self.count = stack.count()\n        self.direction = 0\n        if icon_left:\n            button_down.setIcon(icon_left)\n            button_down.setText(\"\")\n        if icon_right:\n            button_up.setIcon(icon_right)\n            button_up.setText(\"\")\n        self.showHide()\n\n    def increment(self, step=1):\n        self.stepping(step)\n        self.showHide()\n        self.direction = step\n        return self.index\n\n    def decrement(self, step=-1):\n        self.stepping(step)\n        self.showHide()\n        self.direction = step\n        return self.index\n\n    def stepping(self, step):\n        self.index += step\n        # clamp to the last valid page index (count - 1), not one past it\n        if self.index > self.count - 1:\n            self.index = self.count - 1\n        if self.index < 0:\n            self.index = 0\n\n    def reset(self):\n        self.index = 0\n        self.showHide()\n\n    def showHide(self):\n        if self.index == 0:\n            self.down.hide()\n            self.up.show()\n        elif self.index == self.count - 1:\n            self.down.show()\n            self.up.hide()\n        else:\n            self.down.show()\n            self.up.show()\n\n        self.stack.setCurrentIndex(self.index)\n        # print(\">>>stack control %s setting index : %s\"%(self.identifier, self.index))\n\n    def currentIndex(self):\n        return self.stack.currentIndex()\n\n\ndef clearLayout(layout):\n    \"\"\" removes the widgets from the layout\n    had some problems with memory - seems to be resolved with PyQt5\n    \"\"\"\n    # print(\"clean layout\", layout, layout.count())\n    if layout.count() > 0:\n        for i in reversed(range(layout.count())):\n            widgetToRemove = layout.itemAt(i).widget()\n            # remove it from the layout list\n            layout.removeWidget(widgetToRemove)\n            # print(\"remove\", widgetToRemove)\n            # remove it from the gui\n            try:\n                widgetToRemove.setParent(None)\n            except:\n                pass\n\n\n# def clearLayout(layout):\n#     while layout.count():\n#         child = layout.takeAt(0)\n#         if child.widget() is not None:\n#             child.widget().deleteLater()\n#         elif child.layout() is not None:\n#             clearLayout(child.layout())","sub_path":"qt_resources.py","file_name":"qt_resources.py","file_ext":"py","file_size_in_byte":18476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
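# Hedged usage sketch for the KEYS table above: translating the integer key
# code delivered to keyPressEvent into its symbolic name. Only a two-entry
# excerpt of the mapping is repeated here so the snippet stands alone;
# Canvas is a hypothetical widget, not part of qt_resources.
from PyQt5.QtCore import Qt
from PyQt5 import QtWidgets

KEYS_EXCERPT = {Qt.Key_A: 'Key_A', Qt.Key_Escape: 'Key_Escape'}

class Canvas(QtWidgets.QWidget):
    def keyPressEvent(self, event):
        # event.key() returns the Qt key code used as the dictionary key.
        print("pressed:", KEYS_EXCERPT.get(event.key(), 'unmapped'))

# Typical wiring (needs a running QApplication):
#   app = QtWidgets.QApplication([])
#   w = Canvas(); w.show(); app.exec_()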
+{"seq_id":"378032405","text":"# usr/bin/python3\n# coding:utf-8\n\nimport sys\nimport os \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time \n\nfrom PyQt5 import QtWidgets \nfrom PyQt5.QtCore import Qt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas \n\nimport glob\nfrom scipy.stats import norm\nfrom scipy.stats import expon\nfrom scipy.stats import gamma\nfrom scipy.stats import beta\nfrom scipy.stats import poisson\n\n### plot setting\nplt.rcParams[\"axes.grid\"] = True\nplt.rcParams[\"grid.linestyle\"] = \"--\"\nplt.rcParams[\"grid.linewidth\"] = 0.3\n\ndef GetNDigit(v,n = 5):\n v = str(v) \n if len(v.replace(\".\",\"\")) < n + 1:\n return(v)\n else:\n ind = 0 \n s = \"\"\n for i in range(1000):\n if v[i] == \".\" :\n s += v[i]\n continue\n else:\n s += v[i]\n ind += 1\n if ind >= n:\n return(s)\n \n\nclass Application(QtWidgets.QMainWindow):\n \n def __init__(self):\n super().__init__()\n\n self.initUI()\n \n # initialize File list\n self.SetFileList()\n self.initFigure()\n self.initControlWidget()\n self.initResultWidget()\n self.initInfoWidget()\n self.UpdateDist()\n\n\n \n # define event when a file is changed \n self.FileList.itemSelectionChanged.connect(self.FileListChanged)\n\n # initialize UI \n def initUI(self):\n # For FigureWidget\n self.FigureWidget = QtWidgets.QWidget(self)\n # make FigureLayout. This FigureLayout will be added by vbox.\n self.FigureLayout = QtWidgets.QVBoxLayout(self.FigureWidget)\n # delett margin \n self.FigureLayout.setContentsMargins(0,0,0,0)\n \n\n # make FileList,Info,Control,and Result widgests \n self.FileList = QtWidgets.QListWidget(self)\n self.ControlWidget = QtWidgets.QWidget(self)\n self.ResultWidget = QtWidgets.QWidget(self)\n\n self.InfoWidget = QtWidgets.QWidget(self)\n self.InfoLayout = QtWidgets.QVBoxLayout(self.InfoWidget)\n self.InfoLayout.setContentsMargins(0,0,0,0)\n\n # alinment \n nx = 1000\n ny = 650\n self.setGeometry(0,0,nx,ny)\n self.FileList.setGeometry(0,0,nx*0.2,ny)\n self.FigureWidget.setGeometry(nx*0.2,0,nx*0.5,ny*0.7) \n self.InfoWidget.setGeometry(nx*0.7,0,nx*0.3,ny*0.7)\n self.ControlWidget.setGeometry(nx*0.2,ny*0.7,nx*0.6,ny*0.3)\n self.ResultWidget.setGeometry(nx*0.8,ny*0.7,nx*0.2,ny*0.3)\n\n def initFigure(self):\n # make Figure \n self.Figure = plt.figure()\n # add Figure to FigureCanvas \n self.FigureCanvas = FigureCanvas(self.Figure)\n\n # add FigureCanvas to Layout\n self.FigureLayout.addWidget(self.FigureCanvas)\n \n self.axis = self.Figure.add_subplot(1,1,1)\n# self.axis.plot(1,1)\n \n def initControlWidget(self):\n \n self.LineLayout = QtWidgets.QGridLayout()\n # creat Option widgets\n self.SelectType = QtWidgets.QComboBox(self)\n\n self.SetSelectType(\"continuous\")\n self.SelectType.highlighted.connect(self.UpdateDist)\n\n self.initExpoControl()\n \n self.PauseButton = QtWidgets.QPushButton(\"Pause\")\n self.PauseButton.setCheckable(True)\n self.OneUpdateButton= QtWidgets.QPushButton(\"Update\")\n self.ClearButton = QtWidgets.QPushButton(\"Clear\")\n\n self.OptionLayout = QtWidgets.QHBoxLayout()\n self.OptionLayout.addWidget(self.SelectType)\n self.OptionLayout.addStretch(1)\n self.OptionLayout.addWidget(self.PauseButton)\n self.OptionLayout.addWidget(self.OneUpdateButton)\n self.OptionLayout.addWidget(self.ClearButton)\n \n # signals for Option widgets\n self.OneUpdateFlag = 0\n def flag1() : self.OneUpdateFlag = 1\n def flag0() : self.OneUpdateFlag = 0\n self.OneUpdateButton.clicked.connect(flag1)\n self.OneUpdateButton.clicked.connect(self.AddDist)\n 
self.OneUpdateButton.clicked.connect(flag0)\n self.ClearButton.clicked.connect(self.UpdateDist)\n \n \n # set this Control in ControlWidget\n self.vbox = QtWidgets.QVBoxLayout(self.ControlWidget)\n self.vbox.addLayout(self.LineLayout)\n self.vbox.addLayout(self.OptionLayout)\n self.vbox.addStretch(1)\n\n def SetSelectType(self,mode):\n if self.SelectType != None:\n self.SelectType.clear()\n if mode == \"continuous\":\n self.SelectType.addItem(\"Probability Density Function\")\n self.SelectType.addItem(\"Cumulative Density Function\")\n if mode == \"discrete\":\n self.SelectType.addItem(\"Probability Mass Function\")\n self.SelectType.addItem(\"Cumulative Density Function\")\n\n def initControl(self):\n\n self.slds = []\n self.minimumSpinBoxs = []\n self.maximumSpinBoxs = []\n self.binSpinBoxs = []\n self.CurrentValues = []\n for i in range(self.npar):\n self.sld = QtWidgets.QSlider(Qt.Horizontal, self)\n self.sld.setFocusPolicy(Qt.NoFocus)\n self.sld.setGeometry(30, 40, 100, 30)\n \n self.sld.setFocusPolicy(Qt.StrongFocus)\n self.sld.setTickPosition(QtWidgets.QSlider.TicksBothSides)\n self.sld.setTickInterval(30)\n self.sld.setSingleStep(1)\n self.sld.setMinimum(1)\n self.sld.setMaximum(10)\n\n # minimum and maximum box\n self.minimumSpinBox = QtWidgets.QDoubleSpinBox()\n self.minimumSpinBox.setRange(self.MinSpinRange[i][0],self.MinSpinRange[i][1])\n self.minimumSpinBox.setSingleStep(1)\n self.minimumSpinBox.setValue(self.MinSpinInit[i])\n self.maximumSpinBox = QtWidgets.QDoubleSpinBox()\n self.maximumSpinBox.setRange(self.MaxSpinRange[i][0],self.MaxSpinRange[i][1])\n self.maximumSpinBox.setSingleStep(1)\n self.maximumSpinBox.setValue(self.MaxSpinInit[i])\n\n # set Bin Box \n self.binSpinBox = QtWidgets.QSpinBox()\n self.binSpinBox.setRange(1,10000)\n self.binSpinBox.setSingleStep(1)\n self.binSpinBox.setValue(10)\n\n # Showing current value \n self.CurrentValue= QtWidgets.QDoubleSpinBox()\n self.CurrentValue.setRange(self.CurrentRange[i][0],self.CurrentRange[i][1])\n self.CurrentValue.setSingleStep(0.01)\n self.CurrentValue.setValue(self.CurrentInit[i])\n \n # add to 's \n self.slds.append(self.sld)\n self.minimumSpinBoxs.append(self.minimumSpinBox)\n self.maximumSpinBoxs.append(self.maximumSpinBox)\n self.binSpinBoxs.append(self.binSpinBox)\n self.CurrentValues.append(self.CurrentValue)\n\n ##### setting signals #####\n def f0(): self.ParameterIndex = 0\n def f1(): self.ParameterIndex = 1\n def f2(): self.ParameterIndex = 2\n def f3(): self.ParameterIndex = 3\n for i in range(self.npar):\n if i ==0:\n self.minimumSpinBoxs[i].valueChanged.connect(f0)\n self.maximumSpinBoxs[i].valueChanged.connect(f0)\n self.binSpinBoxs[i].valueChanged.connect(f0)\n self.slds[i].valueChanged.connect(f0)\n if i == 1:\n self.minimumSpinBoxs[i].valueChanged.connect(f1)\n self.maximumSpinBoxs[i].valueChanged.connect(f1)\n self.binSpinBoxs[i].valueChanged.connect(f1)\n self.slds[i].valueChanged.connect(f1)\n if i == 2:\n self.minimumSpinBoxs[i].valueChanged.connect(f2)\n self.maximumSpinBoxs[i].valueChanged.connect(f2)\n self.binSpinBoxs[i].valueChanged.connect(f2)\n self.slds[i].valueChanged.connect(f2)\n if i == 3:\n self.minimumSpinBoxs[i].valueChanged.connect(f3)\n self.maximumSpinBoxs[i].valueChanged.connect(f3)\n self.binSpinBoxs[i].valueChanged.connect(f3)\n self.slds[i].valueChanged.connect(f3)\n\n self.minimumSpinBoxs[i].valueChanged.connect(self.CalcCurrentValue)\n self.maximumSpinBoxs[i].valueChanged.connect(self.CalcCurrentValue)\n 
self.binSpinBoxs[i].valueChanged.connect(self.slds[i].setMaximum)\n\n self.slds[i].valueChanged.connect(self.CalcCurrentValue)\n # for updating figure\n self.CurrentValues[i].valueChanged.connect(self.AddDist)\n\n ###### set layout of control box #####\n self.LineLayout.addWidget(QtWidgets.QLabel(\"pars\"),0,0)\n self.LineLayout.addWidget(QtWidgets.QLabel(\"Min\"),0,1)\n self.LineLayout.addWidget(QtWidgets.QLabel(\"Max\"),0,2)\n self.LineLayout.addWidget(QtWidgets.QLabel(\"Bin\"),0,3)\n self.LineLayout.addWidget(QtWidgets.QLabel(\"Slider\"),0,4)\n self.LineLayout.addWidget(QtWidgets.QLabel(\"Value\"),0,5)\n\n ParName = [\"tau\"]\n for i in range(self.npar):\n self.LineLayout.addWidget(QtWidgets.QLabel(self.ParName[i]),i+1,0)\n self.LineLayout.addWidget(self.minimumSpinBoxs[i],i+1,1)\n self.LineLayout.addWidget(self.maximumSpinBoxs[i],i+1,2)\n self.LineLayout.addWidget(self.binSpinBoxs[i],i+1,3)\n self.LineLayout.addWidget(self.slds[i],i+1,4)\n self.LineLayout.addWidget(self.CurrentValues[i],i+1,5)\n\n def initExpoControl(self):\n # for parameter index\n self.ParameterIndex = 0\n # for initialize Comobox\n self.SetSelectType(\"continuous\")\n\n self.npar = 1\n\n self.MinSpinRange = [[-100000,10000]]\n self.MaxSpinRange = self.MinSpinRange\n self.MinSpinInit = [1]\n self.MaxSpinInit = [3]\n self.CurrentRange = self.MinSpinRange\n self.CurrentInit = [1]\n self.ParName = [\"tau\"]\n\n self.initControl()\n\n\n def initNormControl(self):\n # for parameter index\n self.ParameterIndex = 0\n self.SetSelectType(\"continuous\")\n\n self.npar = 2\n\n self.MinSpinRange = [[-100000,10000],[0,10000]]\n self.MaxSpinRange = self.MinSpinRange\n self.MinSpinInit = [-5,1]\n self.MaxSpinInit = [5,5]\n self.CurrentRange = self.MinSpinRange\n self.CurrentInit = [0,1]\n self.ParName = [\"mu\",\"sigma\"]\n\n self.initControl()\n\n def initGammaControl(self):\n # for parameter index\n self.ParameterIndex = 0\n self.SetSelectType(\"continuous\")\n\n self.npar = 2\n\n self.MinSpinRange = [[0,10000],[0,10000]]\n self.MaxSpinRange = self.MinSpinRange\n self.MinSpinInit = [1,1]\n self.MaxSpinInit = [5,5]\n self.CurrentRange = self.MinSpinRange\n self.CurrentInit = [1,1]\n self.ParName = [\"shape(a)\",\"rate(b)\"]\n\n self.initControl()\n\n\n def initBetaControl(self):\n # for parameter index\n self.ParameterIndex = 0\n self.SetSelectType(\"continuous\")\n\n self.npar = 2\n\n self.MinSpinRange = [[0,10000],[0,10000]]\n self.MaxSpinRange = self.MinSpinRange\n self.MinSpinInit = [1,1]\n self.MaxSpinInit = [10,10]\n self.CurrentRange = self.MinSpinRange\n self.CurrentInit = [1,1]\n self.ParName = [\"a\",\"b\"]\n\n self.initControl()\n\n def initPoissonControl(self):\n\n # for parameter index\n self.ParameterIndex = 0\n # for initialize Comobox\n self.SetSelectType(\"discrete\")\n\n self.npar = 1\n\n self.MinSpinRange = [[0,10000]]\n self.MaxSpinRange = self.MinSpinRange\n self.MinSpinInit = [1]\n self.MaxSpinInit = [10]\n self.CurrentRange = self.MinSpinRange\n self.CurrentInit = [1]\n self.ParName = [\"lambda\"]\n\n self.initControl()\n\n\n #### for ControlWidget ##### \n def CalcCurrentValue(self):\n ind = self.ParameterIndex\n min_ = self.minimumSpinBoxs[ind].value()\n max_ = self.maximumSpinBoxs[ind].value()\n bin_ = self.binSpinBoxs[ind].value()\n sld_v = self.slds[ind].value()\n\n CurrentValue_ = np.linspace(min_,max_,bin_)[sld_v-1]\n self.CurrentValues[ind].setValue(CurrentValue_)\n\n #### for ResultWidget##### \n def initResultWidget(self):\n\n self.MeanDisplay = QtWidgets.QLineEdit()\n self.VarianceDisplay = 
QtWidgets.QLineEdit()\n self.MedianDisplay = QtWidgets.QLineEdit()\n self.MeanDisplay.setReadOnly(True)\n self.VarianceDisplay.setReadOnly(True)\n self.MedianDisplay.setReadOnly(True)\n\n\n SubLineLayout = QtWidgets.QGridLayout()\n SubLineLayout.addWidget(QtWidgets.QLabel(\"Mean\"),0,0)\n SubLineLayout.addWidget(self.MeanDisplay,0,1)\n SubLineLayout.addWidget(QtWidgets.QLabel(\"Variance\"),1,0)\n SubLineLayout.addWidget(self.VarianceDisplay,1,1)\n SubLineLayout.addWidget(QtWidgets.QLabel(\"Median\"),2,0)\n SubLineLayout.addWidget(self.MedianDisplay,2,1)\n\n self.SubWidgetLayout = QtWidgets.QVBoxLayout(self.ResultWidget)\n self.SubWidgetLayout.addLayout(SubLineLayout)\n self.SubWidgetLayout.addStretch(1)\n\n def initInfoWidget(self):\n # make Figure \n self.InfoFigure= plt.figure()\n # add Figure to FigureCanvas \n self.InfoCanvas = FigureCanvas(self.InfoFigure)\n # add InfoCanvas to Layout\n self.InfoLayout.addWidget(self.InfoCanvas)\n\n self.AxisInfo = self.InfoFigure.add_subplot(1,1,1)\n \n self.initExpoInfo()\n\n self.AxisInfo.get_xaxis().set_visible(False)\n self.AxisInfo.get_yaxis().set_visible(False)\n self.AxisInfo.spines[\"right\"].set_visible(False)\n self.AxisInfo.spines[\"left\"].set_visible(False)\n self.AxisInfo.spines[\"top\"].set_visible(False)\n self.AxisInfo.spines[\"bottom\"].set_visible(False)\n \n def initExpoInfo(self):\n self.AxisInfo.clear()\n characters = [\"PDF\",\"CDF\",\"Mean\",\"Variance\",\"Median\"]\n\n expressions ={} \n expressions[\"PDF\"] = r\"$\\frac{1}{\\tau}e^{-\\frac{x}{\\tau}}$\" \n expressions[\"CDF\"] = r\"$1 - e^{-\\frac{x}{\\tau}} $\"\n expressions[\"Mean\"] = r\"$\\tau$\"\n expressions[\"Variance\"] = r\"$\\tau^2 $\"\n expressions[\"Median\"] = r\"$\\tau ln(2) $\"\n for i, ch in enumerate(characters):\n self.AxisInfo.text(-0.1,1- i*0.1,ch)\n self.AxisInfo.text(0.2,1- i*0.1,\":\")\n self.AxisInfo.text(0.25,1- i*0.1, expressions[ch],fontsize = 12)\n\n self.InfoCanvas.draw()\n\n def initNormInfo(self):\n self.AxisInfo.clear()\n characters = [\"PDF\",\"CDF\",\"Mean\",\"Variance\",\"Median\"]\n\n expressions ={} \n expressions[\"PDF\"] = r\"$\\frac{1}{\\sqrt{2\\pi \\sigma^2}}\\exp(-\\frac{1}{2}(\\frac{x - \\mu}{\\sigma})^2)$\" \n expressions[\"CDF\"] = r\"$ \\frac{1}{\\sqrt{2\\pi \\sigma^2}}\\int_{-\\infty}^x \\exp(-\\frac{1}{2}(\\frac{x-\\mu}{\\sigma})^2)dx $\"\n expressions[\"Mean\"] = r\"$\\mu$\"\n expressions[\"Variance\"] = r\"$\\sigma^2 $\"\n expressions[\"Median\"] = r\"$\\mu $\"\n for i, ch in enumerate(characters):\n self.AxisInfo.text(-0.1,1- i*0.1,ch)\n self.AxisInfo.text(0.2,1- i*0.1,\":\")\n self.AxisInfo.text(0.25,1- i*0.1, expressions[ch],fontsize = 10)\n\n self.InfoCanvas.draw()\n\n def initGammaInfo(self):\n self.AxisInfo.clear()\n characters = [\"PDF\",\"CDF\",\"Mean\",\"Variance\",\"Median\"]\n\n expressions ={} \n expressions[\"PDF\"] = r\"$\\frac{b^a}{\\Gamma(a)}x^{a-1}e^{-bx}$\" \n expressions[\"CDF\"] = r\"$ \\frac{b^a}{\\Gamma(a)}\\int_0^x x^{a-1}e^{-bx}dx$\"\n expressions[\"Mean\"] = r\"$\\frac{a}{b}$\"\n expressions[\"Variance\"] = r\"$\\frac{a}{b^2} $\"\n expressions[\"Median\"] = \"not simple\"\n for i, ch in enumerate(characters):\n self.AxisInfo.text(-0.1,1- i*0.1,ch)\n self.AxisInfo.text(0.2,1- i*0.1,\":\")\n self.AxisInfo.text(0.25,1- i*0.1, expressions[ch],fontsize = 10)\n\n self.InfoCanvas.draw()\n\n def initBetaInfo(self):\n self.AxisInfo.clear()\n characters = [\"PDF\",\"CDF\",\"Mean\",\"Variance\",\"Median\"]\n\n expressions ={} \n expressions[\"PDF\"] = 
r\"$\\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)}x^{a-1}(1-x)^{b-1}$\"\n expressions[\"CDF\"] = r\"$\\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)}\\int_0^{\\infty} x^{a-1}(1-x)^{b-1}dx $\"\n expressions[\"Mean\"] = r\"$\\frac{a}{a+b}$\"\n expressions[\"Variance\"] = r\"$\\frac{ab}{(a+b)^2(a+b+1) }$\"\n expressions[\"Median\"] = r\"$\\approx \\frac{a - \\frac{1}{3}}{a+b - \\frac{2}{3}}$\"\n for i, ch in enumerate(characters):\n self.AxisInfo.text(-0.1,1- i*0.1,ch)\n self.AxisInfo.text(0.2,1- i*0.1,\":\")\n self.AxisInfo.text(0.25,1- i*0.1, expressions[ch],fontsize = 10)\n\n self.InfoCanvas.draw()\n\n def initPoissonInfo(self):\n self.AxisInfo.clear()\n characters = [\"PDF\",\"CDF\",\"Mean\",\"Variance\",\"Median\"]\n\n expressions ={} \n expressions[\"PDF\"] = r\"$\\frac{\\lambda^k e^{-\\lambda}}{k!}$\"\n expressions[\"CDF\"] = r\"$e^{-\\lambda}\\sum_{i=0}^{\\lfloor k \\rfloor}\\frac{\\lambda^i}{i!}$\"\n expressions[\"Mean\"] = r\"$\\lambda$\"\n expressions[\"Variance\"] = r\"$\\lambda$\"\n expressions[\"Median\"] = r\"$\\approx \\lfloor \\lambda + 1/3 - 0.02/\\lambda \\rfloor$\"\n for i, ch in enumerate(characters):\n self.AxisInfo.text(-0.1,1- i*0.1,ch)\n self.AxisInfo.text(0.2,1- i*0.1,\":\")\n self.AxisInfo.text(0.25,1- i*0.1, expressions[ch],fontsize = 10)\n\n self.InfoCanvas.draw()\n\n def UpdateDist(self):\n self.axis.cla()\n self.FigureCanvas.draw()\n self.AddDist()\n\n \n def AddDist(self):\n if self.PauseButton.isChecked() and self.OneUpdateFlag == 0 :\n return(0)\n \n if self.FileName == \"Exponential\":\n self.AddExponential()\n if self.FileName == \"Normal\":\n self.AddNormal()\n if self.FileName == \"Gamma\":\n self.AddGamma()\n if self.FileName == \"Beta\":\n self.AddBeta()\n if self.FileName == \"Poisson\":\n self.AddPoisson()\n \n def AddExponential(self):\n tau = self.CurrentValues[0].value()\n \n #self.axis = self.Figure.add_subplot(1,1,1)\n ex = expon(scale=tau)\n x = np.linspace(ex.ppf(0.05),ex.ppf(0.95),1000)\n if self.SelectType.currentText() == \"Probability Density Function\":\n y = ex.pdf(x)\n elif self.SelectType.currentText() == \"Cumulative Density Function\":\n y = ex.cdf(x)\n else:\n x = 1\n y = 1\n\n # add to graph\n cm = plt.get_cmap(\"rainbow\")\n n_bin = self.binSpinBox.value()\n i_th = self.sld.value()\n \n self.axis.plot(x,y ,color = cm(i_th/n_bin))\n self.axis.set_title(\"Exponential Distribution\")\n self.FigureCanvas.draw()\n\n ### for obtaining mean,variance, and median\n self.MeanDisplay.setText(GetNDigit( tau))\n self.VarianceDisplay.setText(GetNDigit(tau**2))\n self.MedianDisplay.setText(GetNDigit(tau*np.log(2)))\n\n def AddNormal(self):\n mu = self.CurrentValues[0].value()\n sigma = self.CurrentValues[1].value()\n \n nm= norm(loc = mu,scale = sigma)\n x = np.linspace(nm.ppf(0.05),nm.ppf(0.95),1000)\n if self.SelectType.currentText() == \"Probability Density Function\":\n y = nm.pdf(x)\n elif self.SelectType.currentText() == \"Cumulative Density Function\":\n y = nm.cdf(x)\n\n # add to graph\n cm = plt.get_cmap(\"rainbow\")\n n_bin = self.binSpinBox.value()\n i_th = self.sld.value()\n \n self.axis.plot(x,y,color = cm(i_th/n_bin))\n self.axis.set_title(\"Normal Distribution\")\n self.FigureCanvas.draw()\n\n ### for obtaining mean,variance, and median\n self.MeanDisplay.setText(GetNDigit( mu))\n self.VarianceDisplay.setText(GetNDigit(sigma**2))\n self.MedianDisplay.setText(GetNDigit(mu))\n\n def AddGamma(self):\n a = self.CurrentValues[0].value()\n b = self.CurrentValues[1].value()\n scale = 1/b\n \n gm= gamma(a = a,scale = scale)\n x = 
np.linspace(gm.ppf(0.05),gm.ppf(0.95),1000)\n if self.SelectType.currentText() == \"Probability Density Function\":\n y = gm.pdf(x)\n elif self.SelectType.currentText() == \"Cumulative Density Function\":\n y = gm.cdf(x)\n\n # add to graph\n cm = plt.get_cmap(\"rainbow\")\n n_bin = self.binSpinBox.value()\n i_th = self.sld.value()\n \n self.axis.plot(x,y,color = cm(i_th/n_bin))\n self.axis.set_title(\"Gamma Distribution\")\n self.FigureCanvas.draw()\n\n ### for obtaining mean,variance, and median\n self.MeanDisplay.setText(GetNDigit( a/b))\n self.VarianceDisplay.setText(GetNDigit(a/b**2))\n self.MedianDisplay.setText(\"not simple\")\n\n def AddBeta(self):\n a = self.CurrentValues[0].value()\n b = self.CurrentValues[1].value()\n \n self.axis = self.Figure.add_subplot(1,1,1)\n be= beta(a = a,b= b)\n x = np.linspace(be.ppf(0.05),be.ppf(0.95),1000)\n if self.SelectType.currentText() == \"Probability Density Function\":\n y = be.pdf(x)\n elif self.SelectType.currentText() == \"Cumulative Density Function\":\n y = be.cdf(x)\n\n # add to graph\n cm = plt.get_cmap(\"rainbow\")\n n_bin = self.binSpinBox.value()\n i_th = self.sld.value()\n \n self.axis.plot(x,y,color = cm(i_th/n_bin))\n self.axis.set_title(\"Beta Distribution\")\n self.FigureCanvas.draw()\n\n ### for obtaining mean,variance, and median\n self.MeanDisplay.setText(GetNDigit( a/(a+b)))\n self.VarianceDisplay.setText(GetNDigit(a*b/(a+b)**2/(a+b+1)))\n self.MedianDisplay.setText(GetNDigit((a-1/3)/(a+b-2/3)))\n\n def AddPoisson(self):\n lam = self.CurrentValues[0].value()\n \n self.axis = self.Figure.add_subplot(1,1,1)\n pois = poisson(lam)\n x = [i for i in range(int(pois.ppf(0.05)),int(pois.ppf(0.95)+1))]\n if self.SelectType.currentText() == \"Probability Mass Function\":\n y = pois.pmf(x)\n elif self.SelectType.currentText() == \"Cumulative Density Function\":\n y = pois.cdf(x)\n\n # add to graph\n cm = plt.get_cmap(\"rainbow\")\n n_bin = self.binSpinBox.value()\n i_th = self.sld.value()\n \n self.axis.plot(x,y,color = cm(i_th/n_bin))\n self.axis.set_title(\"Poisson Distribution\")\n self.FigureCanvas.draw()\n\n ### for obtaining mean,variance, and median\n self.MeanDisplay.setText(GetNDigit( lam))\n self.VarianceDisplay.setText(GetNDigit(lam))\n if lam != 0:\n self.MedianDisplay.setText(GetNDigit(lam + 1/3 - 0.02/lam))\n else:\n self.MedianDisplay.setText(\"error\")\n\n\n def SetFileList(self):\n # add names to File list\n self.names = [\"Exponential\",\"Normal\",\"Poisson\",\"Gamma\",\"Beta\"]\n \n for name in self.names:\n self.FileList.addItem(name)\n\n self.FileName = self.names[0] \n\n\n def FileListChanged(self):\n # get file name opening \n self.FileName = self.FileList.selectedItems()[0].text()\n\n if self.FileName == \"Exponential\":\n self.clearLayout(self.LineLayout)\n self.initExpoControl() \n self.initExpoInfo()\n self.UpdateDist()\n\n if self.FileName == \"Normal\":\n self.clearLayout(self.LineLayout)\n self.initNormControl()\n self.initNormInfo()\n self.UpdateDist()\n\n if self.FileName == \"Gamma\":\n self.clearLayout(self.LineLayout)\n self.initGammaControl()\n self.initGammaInfo()\n self.UpdateDist()\n \n if self.FileName == \"Beta\":\n self.clearLayout(self.LineLayout)\n self.initBetaControl()\n self.initBetaInfo()\n self.UpdateDist()\n\n if self.FileName == \"Poisson\":\n self.clearLayout(self.LineLayout)\n self.initPoissonControl()\n self.initPoissonInfo()\n self.UpdateDist()\n\n #this function is for reference \n def clearLayout(self, layout):\n if layout is not None:\n while layout.count():\n item = 
layout.takeAt(0)\n \n widget = item.widget()\n if widget is not None:\n widget.deleteLater()\n else:\n self.clearLayout(item.layout())\n\nif __name__==\"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n qapp = Application()\n qapp.show()\n sys.exit(app.exec_())\n\n\n\n\n","sub_path":"prj_input/pyqt5_basic/dist_gui/compile/2_dist_GUI.py","file_name":"2_dist_GUI.py","file_ext":"py","file_size_in_byte":24890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
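# Hedged standalone sketch of what AddExponential() above computes, with the
# Qt plumbing stripped away: sample the pdf between the 5th and 95th
# percentiles and plot it. tau = 2.0 is an example value.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import expon

tau = 2.0
ex = expon(scale=tau)
x = np.linspace(ex.ppf(0.05), ex.ppf(0.95), 1000)
plt.plot(x, ex.pdf(x))
plt.title("Exponential Distribution (tau = %.1f)" % tau)
plt.show()

# Mean, variance, and median as shown in the GUI's result panel:
print(tau, tau ** 2, tau * np.log(2))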
+{"seq_id":"633262986","text":"import datetime\nimport time\n\n\nimport coref_metrics\nimport debug_utils\nimport inference_utils\nfrom input_utils import pad_batch_tensors\nimport operator\nimport srl_eval_utils\nimport util\n\n\nclass LSGNEvaluator(object):\n def __init__(self, config):\n self.config = config\n self.eval_data = None\n\n # TODO: Split to multiple functions.\n def evaluate(self, session, data, predictions, loss, official_stdout=False):\n if self.eval_data is None:\n self.eval_data, self.eval_tensors, self.coref_eval_data = data.load_eval_data()\n\n def _k_to_tag(k):\n if k == -3:\n return \"oracle\"\n elif k == -2:\n return \"actual\"\n elif k == -1:\n return \"exact\"\n elif k == 0:\n return \"threshold\"\n else:\n return \"{}%\".format(k)\n\n # Retrieval evaluators.\n arg_evaluators = { k:util.RetrievalEvaluator() for k in [-3, -2, -1, 30, 40, 50, 80, 100, 120, 150] }\n predicate_evaluators = { k:util.RetrievalEvaluator() for k in [-3, -2, -1, 10, 20, 30, 40, 50, 70] }\n mention_evaluators = { k:util.RetrievalEvaluator() for k in [-3, -2, -1, 10, 20, 30, 40, 50] }\n entity_evaluators = { k:util.RetrievalEvaluator() for k in [-3, -2, -1, 10, 20, 30, 40, 50, 70] }\n\n total_loss = 0\n total_num_predicates = 0\n total_gold_predicates = 0\n\n srl_comp_sents = 0\n srl_predictions = []\n ner_predictions = []\n rel_predictions = []\n coref_predictions = {}\n coref_evaluator = coref_metrics.CorefEvaluator()\n all_gold_predicates = []\n all_guessed_predicates = []\n\n start_time = time.time()\n debug_printer = debug_utils.DebugPrinter()\n\n # Simple analysis.\n unique_core_role_violations = 0\n continuation_role_violations = 0\n reference_role_violations = 0\n gold_u_violations = 0\n gold_c_violations = 0\n gold_r_violations = 0\n\n # Global sentence ID.\n rel_sent_id = 0\n srl_sent_id = 0\n\n for i, doc_tensors in enumerate(self.eval_tensors):\n feed_dict = dict(list(zip(\n data.input_tensors,\n [pad_batch_tensors(doc_tensors, tn) for tn in data.input_names + data.label_names])))\n predict_names = []\n for tn in data.predict_names:\n if tn in predictions:\n predict_names.append(tn)\n predict_tensors = [predictions[tn] for tn in predict_names] + [loss]\n predict_tensors = session.run(predict_tensors, feed_dict=feed_dict)\n predict_dict = dict(list(zip(predict_names + [\"loss\"], predict_tensors)))\n\n doc_size = len(doc_tensors)\n doc_example = self.coref_eval_data[i]\n sentences = doc_example[\"sentences\"]\n decoded_predictions = inference_utils.mtl_decode(\n sentences, predict_dict, data.ner_labels_inv, data.rel_labels_inv,\n self.config)\n\n # Relation extraction.\n if \"rel\" in decoded_predictions:\n rel_predictions.extend(decoded_predictions[\"rel\"])\n for j in range(len(sentences)):\n sent_example = self.eval_data[rel_sent_id][3] # sentence, srl, ner, relations\n text_length = len(sentences[j])\n ne = predict_dict[\"num_entities\"][j]\n gold_entities = set([])\n for rel in sent_example:\n gold_entities.update([rel[:2], rel[2:4]])\n srl_eval_utils.evaluate_retrieval(\n predict_dict[\"candidate_starts\"][j], predict_dict[\"candidate_ends\"][j],\n predict_dict[\"candidate_entity_scores\"][j], predict_dict[\"entity_starts\"][j][:ne],\n predict_dict[\"entity_ends\"][j][:ne], gold_entities, text_length, entity_evaluators)\n rel_sent_id += 1\n\n\n if \"ner\" in decoded_predictions:\n ner_predictions.extend(decoded_predictions[\"ner\"])\n\n if \"predicted_clusters\" in decoded_predictions:\n gold_clusters = [tuple(tuple(m) for m in gc) for gc in doc_example[\"clusters\"]]\n 
gold_mentions = set([])\n mention_to_gold = {}\n for gc in gold_clusters:\n for mention in gc:\n mention_to_gold[mention] = gc\n gold_mentions.add(mention)\n coref_evaluator.update(decoded_predictions[\"predicted_clusters\"], gold_clusters, decoded_predictions[\"mention_to_predicted\"],\n mention_to_gold)\n coref_predictions[doc_example[\"doc_key\"]] = decoded_predictions[\"predicted_clusters\"]\n \n # Evaluate retrieval.\n doc_text_length = sum([len(s) for s in sentences])\n srl_eval_utils.evaluate_retrieval(\n predict_dict[\"candidate_mention_starts\"], predict_dict[\"candidate_mention_ends\"],\n predict_dict[\"candidate_mention_scores\"], predict_dict[\"mention_starts\"], predict_dict[\"mention_ends\"],\n gold_mentions, doc_text_length, mention_evaluators)\n\n total_loss += predict_dict[\"loss\"]\n if (i + 1) % 50 == 0:\n print((\"Evaluated {}/{} documents.\".format(i + 1, len(self.coref_eval_data))))\n\n debug_printer.close()\n summary_dict = {}\n task_to_f1 = {} # From task name to F1.\n elapsed_time = time.time() - start_time\n\n sentences, gold_srl, gold_ner, gold_relations = list(zip(*self.eval_data))\n\n # Summarize results.\n if self.config[\"relation_weight\"] > 0:\n precision, recall, f1 = (\n srl_eval_utils.compute_relation_f1(sentences, gold_relations, rel_predictions))\n task_to_f1[\"relations\"] = f1\n summary_dict[\"Relation F1\"] = f1\n summary_dict[\"Relation precision\"] = precision\n summary_dict[\"Relation recall\"] = recall\n for k, evaluator in sorted(list(entity_evaluators.items()), key=operator.itemgetter(0)):\n tags = [\"{} {} @ {}\".format(\"Entities\", t, _k_to_tag(k)) for t in (\"R\", \"P\", \"F\")]\n results_to_print = []\n for t, v in zip(tags, evaluator.metrics()):\n results_to_print.append(\"{:<10}: {:.4f}\".format(t, v))\n summary_dict[t] = v\n print(\", \".join(results_to_print))\n \n\n if self.config[\"ner_weight\"] > 0:\n ner_precision, ner_recall, ner_f1, ul_ner_prec, ul_ner_recall, ul_ner_f1, ner_label_mat = (\n srl_eval_utils.compute_span_f1(gold_ner, ner_predictions, \"NER\"))\n summary_dict[\"NER F1\"] = ner_f1\n summary_dict[\"NER precision\"] = ner_precision\n summary_dict[\"NER recall\"] = ner_recall\n summary_dict[\"Unlabeled NER F1\"] = ul_ner_f1\n summary_dict[\"Unlabeled NER precision\"] = ul_ner_prec\n summary_dict[\"Unlabeled NER recall\"] = ul_ner_recall\n\n # Write NER prediction to IOB format and run official eval script.\n srl_eval_utils.print_to_iob2(sentences, gold_ner, ner_predictions, self.config[\"ner_conll_eval_path\"])\n task_to_f1[\"ner\"] = ner_f1\n #for label_pair, freq in ner_label_mat.most_common():\n # if label_pair[0] != label_pair[1] and freq > 10:\n # print (\"{}\\t{}\\t{}\".format(label_pair[0], label_pair[1], freq))\n\n\n if self.config[\"coref_weight\"] > 0:\n #conll_results = conll.evaluate_conll(self.config[\"conll_eval_path\"], coref_predictions, official_stdout)\n #coref_conll_f1 = sum(results[\"f\"] for results in conll_results.values()) / len(conll_results)\n #summary_dict[\"Average F1 (conll)\"] = coref_conll_f1\n #print \"Average F1 (conll): {:.2f}%\".format(coref_conll_f1)\n\n p,r,f = coref_evaluator.get_prf()\n summary_dict[\"Average Coref F1 (py)\"] = f\n print(\"Average F1 (py): {:.2f}%\".format(f * 100))\n summary_dict[\"Average Coref precision (py)\"] = p\n print(\"Average precision (py): {:.2f}%\".format(p * 100))\n summary_dict[\"Average Coref recall (py)\"] = r\n print(\"Average recall (py): {:.2f}%\".format(r * 100))\n\n task_to_f1[\"coref\"] = f * 100 # coref_conll_f1\n for k, evaluator in 
sorted(list(mention_evaluators.items()), key=operator.itemgetter(0)):\n tags = [\"{} {} @ {}\".format(\"Mentions\", t, _k_to_tag(k)) for t in (\"R\", \"P\", \"F\")]\n results_to_print = []\n for t, v in zip(tags, evaluator.metrics()):\n results_to_print.append(\"{:<10}: {:.4f}\".format(t, v))\n summary_dict[t] = v\n print(\", \".join(results_to_print))\n\n summary_dict[\"Dev Loss\"] = total_loss / len(self.coref_eval_data)\n\n print(\"Decoding took {}.\".format(str(datetime.timedelta(seconds=int(elapsed_time)))))\n print(\"Decoding speed: {}/document, or {}/sentence.\".format(\n str(datetime.timedelta(seconds=int(elapsed_time / len(self.coref_eval_data)))),\n str(datetime.timedelta(seconds=int(elapsed_time / len(self.eval_data))))\n ))\n\n metric_names = self.config[\"main_metrics\"].split(\"_\")\n main_metric = sum([task_to_f1[t] for t in metric_names]) / len(metric_names)\n print(\"Combined metric ({}): {}\".format(self.config[\"main_metrics\"], main_metric))\n\n return util.make_summary(summary_dict), main_metric, task_to_f1\n\n","sub_path":"lsgn_evaluator.py","file_name":"lsgn_evaluator.py","file_ext":"py","file_size_in_byte":8690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
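# Hedged sketch of the final "main_metrics" combination in evaluate(): the
# config value names tasks joined by '_', and the combined score is their
# unweighted mean F1. The F1 numbers below are made up for illustration.
task_to_f1 = {"relations": 61.2, "ner": 85.4, "coref": 72.9}
main_metrics = "ner_coref"

metric_names = main_metrics.split("_")
main_metric = sum(task_to_f1[t] for t in metric_names) / len(metric_names)
print("Combined metric ({}): {}".format(main_metrics, main_metric))  # 79.15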
+{"seq_id":"231173897","text":"#!/usr/bin/python3\n\n#\tsearchJobs.py - goes to three job websites, gathers information, and then puts them on Excel.\n#\tProject will use a program to automatically at certain times.\n#\tAll items sorted by date. All items returned are on the first page, only.\n\n# \tversion 0.0.1\n\n# Future Improvements: In later versions, go over verbose code and simplify them.\n# Future Improvements: Make the code Object Oriented.\n# Future Improvements: Add more features.\n# Future Improvements: Improve on the Monster Search to include only organic companies.\n# Future Improvements: Make it ask the user what job they want to search.\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport openpyxl\nimport sys\nimport logging\nimport pprint\nimport time\nimport os\n\n# For the future, wrewrite this code and make it better.\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s %(levelname)s %(message)s')\n\n# webscrape Monster.com\ndef scrapeMonster(jobs, location):\n\t# scrape Monster.com\n\t# test http://jobs.monster.com/search/?q=Research-Assistant&where=New%20York%2C%20NY&sort=dt.rv.di'\n\tlogging.debug('Searching Monster.com...')\n\tmonsterSearch \t= requests.get('http://jobs.monster.com/search/Full-Time_8?q=Research-Assistant&where=New-York__2c-NY')\n\t#monsterSearch \t= requests.get('http://jobs.monster.com/search/?q=' + jobs + '&where=' + location + '&sort=dt.rv.di')\n\tdata \t\t\t= monsterSearch.text\n\n\tlogging.debug('Parsing information...')\n\tmonster_soup \t= BeautifulSoup(data, 'lxml')\n\t#pprint.pprint(monster_soup)\n\n\t# This has all the content, including the links.\n\tfilter_script\t= monster_soup.find_all('div', {'class': 'js_result_container clearfix'})\n\t\n\t#for number, items in enumerate(filter_script):\n\t#\tprint(str(number) + '\\n\\n\\n')\n\t#\tprint(items)\n\n\t\"\"\"\n\t# This to get: Job Title, Link, Date\n\tmonsterSearchResults\t= {}\n\n\t# cleans up everything in h2 and creates a new dictionary...\n\tjobTitles \t\t= [] # Testing purpouses, name of job.\n\twebAddress \t\t= [] # Testing purposes, jobs Monster link.\n\tpostingTime\t\t= [] # Testing purposes, date of postage.\n\tbiz_address\t\t= [] # Testing purposes, location of job.\n\tfor items in filter_script[13]:\n\t\titems \t\t\t= str(items)\n\t\tsouped\t\t\t= BeautifulSoup(items, 'lxml')\n\t\n\n\t\t# Get the job title\n\t\tjobTitle_in_HTML\t= souped.find('span', {'itemprop': 'title'})\n\t\tjobTitle_in_HTML \t= str(jobTitle_in_HTML)\n\t\tHTMLsoupTitle\t\t= BeautifulSoup(jobTitle_in_HTML, 'lxml' )\n\t\tcleaned_JobTitles \t= HTMLsoupTitle.get_text()\n\t\tjobTitles.append(cleaned_JobTitles) # For testing purposes.\t\n\t\t\n\t\t#get the job's web address\n\t\tweb_address \t= souped.a['href']\n\t\twebAddress.append(web_address) # For testing purposes.\n\t\t\n\t\t# Get the date of posting\n\t\ttime \t\t\t= souped.time['datetime']\n\t\t\n\t\t# Get the location's address \n\t\tjob_location\t= souped.find('span', {'itemprop': 'address' })\n\t\tjob_location\t= str(job_location)\n\t\tlocationSoup\t= BeautifulSoup(job_location, 'lxml')\n\t\tcleanLocation\t= locationSoup.get_text(strip=True)\n\n\t\t# Finish the dictionary...\n\t\tmonsterSearchResults.setdefault(cleaned_JobTitles, {})\n\t\tmonsterSearchResults[cleaned_JobTitles].setdefault('Web Address', web_address)\n\t\tmonsterSearchResults[cleaned_JobTitles].setdefault('Time', time)\n\t\t\n\t\tif cleanLocation == 'None':\n\t\t\tmonsterSearchResults[cleaned_JobTitles].setdefault('Location' ,'None 
Found.')\n\t\telse:\n\t\t\tmonsterSearchResults[cleaned_JobTitles].setdefault('Location', cleanLocation)\n\n\tlogging.debug('Finished scraping monster...')\n\treturn monsterSearchResults \n\t\"\"\"\n\t\n\n\ndef scrapeIndeed(jobs, location):\n\t# Call website.\n\t# http://www.indeed.com/jobs?q=Python+Developer&l=New+York%2C+NY&radius=5&sort=date&fromage=last \n\tlogging.debug('Getting info from indeed.com ...')\n\t#logging.debug('link: ' + 'http://www.indeed.com/jobs?q=' + str(jobs) + '&l='+ str(location) + '%2C&radius=5&sort=date&fromage=last')\n\tsearchIndeed \t= requests.get('http://www.indeed.com/jobs?q=' + str(jobs) + '&l='+ str(location) + '%2C&radius=5&sort=date&fromage=last')\n\t# jobs?q=Python+Developer&l=New+York,+NY&radius=5&sort=date&fromage=last\n\n\t# Turn the HTML code into a string.\t\n\tdata \t\t\t= searchIndeed.text\n\t\n\t# parse the website.\n\tlogging.debug('Parsing information...')\n\tindeed_soup\t\t= BeautifulSoup(data, 'lxml')\n\t#print(indeed_soup.prettify()) # for testing purposes\n\n\t# Create a list of BeautifulSoup elements that have the information needed.\n\tseparate_divs\t= indeed_soup.find_all(attrs={'data-tn-component': 'organicJob'}) # This finds a div tag\n\t# pprint.pprint(separate_divs[0]) # tests the main div \n\t\n\t# Dictionary for the final result\n\tindeed_search_results = {}\n\n\t# For testing purposes\n\tjob_title_result= []\n\tweb_address\t\t= []\n\tjob_location\t= []\n\tjob_date\t\t= []\n\t# Create a loop to do these things:\n\tfor items in separate_divs:\n\t\t# Create another soup for further parsing.\n\t\titems \t\t= str(items)\n\t\tsoup \t\t= BeautifulSoup(items, 'lxml')\n\n\t\t# Find Job Title\n\t\tlogging.debug('Searching for job title...')\n\t\tjob_in_HTML \t= soup.find_all('a', {'data-tn-element': 'jobTitle'})\n\t\tjob_in_HTML \t= str(job_in_HTML)\n\t\tjobSoup\t\t\t= BeautifulSoup(job_in_HTML, 'lxml')\n\t\tjobsFound\t\t= jobSoup.get_text(strip=True)\n\t\tjobsFound \t\t= jobsFound.strip('[]')\n\t\tjob_title_result.append(jobsFound)\n\t\tlogging.debug('Found the job title...')\n\t\t\n\t\t# Get the website address\n\t\tlogging.debug('Finding web address...')\n\t\tget_website_address = soup.a['href']\n\t\tfinishedLink \t\t= 'http://www.indeed.com' + get_website_address\n\t\tweb_address.append(finishedLink)\n\t\tlogging.debug('Website address found...')\n\n\t\t# Find job location\n\t\tlogging.debug('Finding location...')\n\t\tfind_location\t= soup.find_all('span', {'itemprop': 'addressLocality'})\n\t\tfind_location\t= str(find_location)\n\t\tlocationSoup\t= BeautifulSoup(find_location, 'lxml')\n\t\tlocation_found\t= locationSoup.get_text().strip('[]')\n\t\tjob_location.append(location_found)\n\t\tlogging.debug('Job locations found...')\n\n\t\t# Find Job Date\n\t\tlogging.debug('Finding posting date...')\n\t\tfind_date\t\t= soup.find_all('span', {'class':'date'})\n\t\tfind_date\t\t= str(find_date)\n\t\tdateSoup\t\t= BeautifulSoup(find_date, 'lxml')\n\t\tdateText\t\t= dateSoup.get_text(strip=True).strip('[]')\n\t\tjob_date.append(dateText)\n\t\tlogging.debug('Found the posting dates...')\n\n\t\t# Build the dictionary.\n\t\tlogging.debug('Adding to the dictionary...')\n\t\tindeed_search_results.setdefault(jobsFound, {})\n\t\tindeed_search_results[jobsFound].setdefault('Web Address', finishedLink)\n\t\tindeed_search_results[jobsFound].setdefault('Location', location_found)\n\t\tindeed_search_results[jobsFound].setdefault('Time', dateText)\t\t\n\n\tlogging.debug('Finished scraping Indeed.com ...')\n\treturn indeed_search_results\n\t\ndef 
scrapeSimplyHired(jobs, location):\n\t\n\t# use requests to call the website.\n\tlogging.debug('Getting info from SimplyHired.com ...')\n\t# http://www.simplyhired.com/search?q=research+assistant&l=New+York,+NY&mi=10\n\tlink \t\t\t\t\t\t= 'http://www.simplyhired.com/search?q=' + jobs + '&l=' + location + '&sb=dd&mi=10'\n\tsearchSimplyHired\t\t\t= requests.get(link)\n\tlogging.debug('Info gathered from SimplyHired.com ...')\n\t\n\t# convert object to text.\n\tdata \t\t\t\t\t\t= searchSimplyHired.text\n\tlogging.debug('research results converted to string for parsing...')\n\n\t# pass the requests object to BeautifulSoup for parsing.\n\tsimplyHiredSoup\t\t\t\t= BeautifulSoup(data, 'lxml')\n\t\n\t\n\t# Parse for the main tag that encompasses most/all of the information required.\n\tsimplyhired_filtered\t\t= simplyHiredSoup.find_all('div', {'itemtype': 'http://schema.org/JobPosting'})\n\t#print(simplyhired_filtered)\n\t# create a dictionary for a final return.\n\tsimplyhired_search_results \t= {}\n\t\t\n\n\t# Create test lists for testing.\n\tsimplyhired_jobs \t\t\t= []\n\tsimplyhired_webadd\t\t\t= []\n\tsimplyhired_time\t\t\t= []\n\tsimplyhired_location\t\t= []\n\t# Create a for loop to filter the data.\n\tfor items in simplyhired_filtered:\n\t\t# for each item in the for loop, turn it into a string using str()\n\t\titems \t\t\t\t\t= str(items)\n\t\t# pass another BeautifulSoup module\n\t\tsoup \t\t\t\t\t= BeautifulSoup(items, 'lxml')\n\n\t\t# Find Job Title\n\t\tlogging.debug('Finding the job titles...')\n\t\tfindJobTitle\t\t\t= soup.find_all('span', {'class': 'serp-title-text'})\n\t\tfindJobTitle \t\t\t= str(findJobTitle)\n\t\tjobSoup\t\t\t\t\t= BeautifulSoup(findJobTitle, 'lxml')\n\t\tjobTitle \t\t\t\t= jobSoup.get_text(strip=True).strip('[]') # Data to use\n\t\tsimplyhired_jobs.append(jobTitle) # for testing\n\t\tlogging.debug('Found job titles...')\n\t\n\t\t# Find Web Address\n\t\tlogging.debug('finding web address...')\n\t\tfind_web_address\t\t= soup.find_all('a', href=True)\n\t\tweb_address \t\t\t= 'http://www.simplyhired.com' + str(find_web_address[0].get('href'))\n\t\tsimplyhired_webadd.append(web_address) # for testing\n\t\t\n\t\t# Find Date of Posting\n\t\tlogging.debug('Finding time of posting...')\n\t\tposting_time \t\t\t= soup.find_all('span', {'class': 'serp-timestamp'})\n\t\tposting_time\t\t\t= str(posting_time)\n\t\ttimeSoup\t\t\t\t= BeautifulSoup(posting_time, 'lxml')\n\t\ttime \t \t\t\t\t= timeSoup.get_text(strip=True).strip('[]') # Data to use\n\t\tsimplyhired_time.append(time) # for testing\n\t\tlogging.debug('Time of posting for jobs found...')\n\n\t\t# Find Location\n\t\tlogging.debug('Searching job locations...')\n\t\tjob_location\t\t\t= soup.find_all('span', {'itemprop': 'addressLocality'})\n\t\tjob_location\t\t\t= str(job_location)\n\t\tlocationSoup\t\t\t= BeautifulSoup(job_location, 'lxml')\n\t\tlocation \t\t\t\t= locationSoup.get_text(strip=True).strip('[]') # Data to use\n\t\tsimplyhired_location.append(location) # for testing\n\t\tlogging.debug('Found job locations...')\n\t\t\n\t\t# Build the dictionary.\n\t\tlogging.debug('Building dictionary...')\n\t\tsimplyhired_search_results.setdefault(jobTitle, {})\n\t\tsimplyhired_search_results[jobTitle].setdefault('Web Address', web_address)\n\t\tsimplyhired_search_results[jobTitle].setdefault('Time', time)\n\t\tsimplyhired_search_results[jobTitle].setdefault('Location', location)\n\t\tlogging.debug('Dictionary built...')\n\t\n\treturn simplyhired_search_results\n\t\n\ndef makeLog(monster_results, indeed_results, 
simplyhired_results):\n\t\n\tresultFile = open('searchJobsResults.py', 'a')\n\tresultFile.write('#Search made on: ' + time.asctime() + '\\n')\n\tresultFile.write('monster_results = ' + str(monster_results))\n\tresultFile.write('\\nindeed_results = ' + str(indeed_results))\n\tresultFile.write('\\nsimplyhired_results = ' + str(simplyhired_results) + '\\n\\n')\n\tresultFile.close()\n\t\n\ndef passToExcel(monster_results, indeed_results, simplyhired_results):\n\tlogging.debug('Excel function initiated...')\n\t\n\tfrom openpyxl.styles import Font\n\tfrom openpyxl.styles import Border\n\n\t# Test to see if the file exists or not. \n\t# Make new file if it doesn't exist. Open an existing one if it does exist.\n\tif os.path.exists('./jobSearchResults.xlsx'):\n\t\t# This is file is made in the directory where your terminal starts off typically in your /home/NAME directory when running cron.\n\t\tfrom openpyxl import load_workbook\n\t\twb \t\t= load_workbook('jobSearchResults.xlsx')\n\t\t\n\t\tmonster_sheet\t\t= wb.get_sheet_by_name('Monster.com Data')\n\t\tindeed_sheet \t\t= wb.get_sheet_by_name('Indeed.com Data')\n\t\tsimplyhired_sheet \t= wb.get_sheet_by_name('SimplyHired.com Data')\n\n\telse:\n\t\t# Create a new workbook.\n\t\tfrom openpyxl import Workbook\n\t\twb \t\t\t\t\t= Workbook()\n\t\t\n\t\t# Set the font styles and sizes.\n\t\ttitle_font \t\t\t= Font(size=13, bold=True)\n\t\tcategory_font \t\t= Font(bold=True)\n\t\t## LATER IN FUTURE: Turn these massive blocks into a loop.\n\t\t# Create sheet for Monster.\n\t\tmonster_sheet\t\t\t\t= wb.get_sheet_by_name('Sheet')\n\t\tmonster_sheet.title \t\t= 'Monster.com Data'\n\t\tmonster_sheet['A1'] \t\t= 'Sheet created on %s' % (time.asctime()) \n\t\tmonster_sheet['A2'] \t\t= 'Monster.com Search Results'\n\t\tmonster_sheet['A2'].font \t= title_font\n\t\tmonster_sheet['A3']\t\t\t= 'Job Title'\n\t\tmonster_sheet['A3'].font = category_font\n\t\tmonster_sheet['B3']\t\t \t= 'Posting Time'\n\t\tmonster_sheet['B3'].font \t= category_font\n\t\tmonster_sheet['C3'] \t\t= 'Location'\n\t\tmonster_sheet['C3'].font \t= category_font\n\t\tmonster_sheet['D3'] \t\t= 'Web Address'\n\t\tmonster_sheet['D3'].font \t= category_font\n\t\tmonster_sheet.merge_cells('A1:D1')\n\t\tmonster_sheet.merge_cells('A2:D2')\n\t\tmonster_sheet.column_dimensions['A'].width = 30\n\t\tmonster_sheet.column_dimensions['B'].width = 15\n\t\tmonster_sheet.column_dimensions['C'].width = 15\n\t\tmonster_sheet.column_dimensions['D'].width = 15\n\n\t\t# Create sheet for Indeed.\n\t\tindeed_sheet \t\t\t\t= wb.create_sheet()\n\t\tindeed_sheet.title \t\t\t= 'Indeed.com Data'\n\t\tindeed_sheet['A1']\t\t\t= 'Sheet created on %s' % (time.asctime())\n\t\tindeed_sheet['A2']\t\t\t= 'Indeed.com Search Results'\n\t\tindeed_sheet['A2'].font \t= title_font\n\t\tindeed_sheet['A3'] \t\t\t= 'Job Title'\n\t\tindeed_sheet['A3'].font \t= category_font\n\t\tindeed_sheet['B3'] \t\t\t= 'Posting Time'\n\t\tindeed_sheet['B3'].font \t= category_font\n\t\tindeed_sheet['C3'] \t\t\t= 'Location'\n\t\tindeed_sheet['C3'].font \t= category_font\n\t\tindeed_sheet['D3'] \t\t\t= 'Web Address'\n\t\tindeed_sheet['D3'].font \t= category_font\n\t\tindeed_sheet.merge_cells('A1:D1')\n\t\tindeed_sheet.merge_cells('A2:D2')\n\t\tindeed_sheet.column_dimensions['A'].width = 30\n\t\tindeed_sheet.column_dimensions['B'].width = 15\n\t\tindeed_sheet.column_dimensions['C'].width = 15\n\t\tindeed_sheet.column_dimensions['D'].width = 15\n\t\n\t\t# Create sheet for SimplyHired.\n\t\tsimplyhired_sheet \t\t\t= 
wb.create_sheet()\n\t\tsimplyhired_sheet.title \t= 'SimplyHired.com Data'\n\t\tsimplyhired_sheet['A1']\t\t= 'Sheet created on %s' % (time.asctime())\n\t\tsimplyhired_sheet['A2']\t\t= 'SimplyHired.com Search Results'\n\t\tsimplyhired_sheet['A2'].font= title_font\n\t\tsimplyhired_sheet['A3']\t\t= 'Job Title'\n\t\tsimplyhired_sheet['A3'].font= category_font\n\t\tsimplyhired_sheet['B3']\t\t= 'Posting Time'\n\t\tsimplyhired_sheet['B3'].font= category_font\n\t\tsimplyhired_sheet['C3']\t\t= 'Location'\n\t\tsimplyhired_sheet['C3'].font= category_font\n\t\tsimplyhired_sheet['D3']\t\t= 'Web Address'\n\t\tsimplyhired_sheet['D3'].font= category_font\n\t\tsimplyhired_sheet.merge_cells('A1:D1')\n\t\tsimplyhired_sheet.merge_cells('A2:D2')\n\t\tsimplyhired_sheet.column_dimensions['A'].width = 30\n\t\tsimplyhired_sheet.column_dimensions['B'].width = 15\n\t\tsimplyhired_sheet.column_dimensions['C'].width = 15\n\t\tsimplyhired_sheet.column_dimensions['D'].width = 15\n\t\n\t# Find Empty Cells on Monster.com Sheet.\n\tmonster_empty_cell \t\t\t= len(monster_sheet.rows)\n\t\n\t# Formatting purposes for Monster.com Data\n\tmonster_sheet['A' + str(monster_empty_cell + 1)] \t\t= '---'\n\tmonster_sheet['A' + str(monster_empty_cell + 1)].font \t= Font(color='FFFFFF')\n\tmonster_sheet['A' + str(monster_empty_cell + 2)] \t\t= 'Date gathered on: %s' % (time.asctime())\n\tmonster_sheet['A' + str(monster_empty_cell + 2)].font \t= Font(italic=True, bold=True)\n\n\t#Write the results from Monster.com\n\tmonster_working_cell \t\t= monster_empty_cell + 3\n\tfor number, items in enumerate(monster_results):\n\t\tmonster_sheet['A' + str(monster_working_cell + number)] \t= items\n\t\tmonster_sheet['B' + str(monster_working_cell + number)] \t= monster_results[items].get('Time')\n\t\tmonster_sheet['C' + str(monster_working_cell + number)] \t= monster_results[items].get('Location')\n\t\tmonster_sheet['D' + str(monster_working_cell + number)] \t= monster_results[items].get('Web Address')\n\n\t\n\t# Find Empty Cells on Indeed.com Sheet.\n\tindeed_empty_cell \t\t\t= len(indeed_sheet.rows)\n\n\t# Formatting Purposes for Indeed.com Data.\n\tindeed_sheet['A' + str(indeed_empty_cell + 1)]\t\t\t= '---'\n\tindeed_sheet['A' + str(indeed_empty_cell + 1)].font \t= Font(color='FFFFFF')\n\tindeed_sheet['A' + str(indeed_empty_cell + 2)]\t\t\t= 'Date gathered on: %s' % (time.asctime())\n\tindeed_sheet['A' + str(indeed_empty_cell + 2)].font \t= Font(italic=True, bold=True)\n\n\t# Write the results from Indeed.com.\n\tindeed_working_cell \t\t= indeed_empty_cell + 3\n\tfor number, items in enumerate(indeed_results):\n\t\tindeed_sheet['A' + str(indeed_working_cell + number)] \t= items\n\t\tindeed_sheet['B' + str(indeed_working_cell + number)] \t= indeed_results[items].get('Time') \n\t\tindeed_sheet['C' + str(indeed_working_cell + number)] \t= indeed_results[items].get('Location') \n\t\tindeed_sheet['D' + str(indeed_working_cell + number)] \t= indeed_results[items].get('Web Address') \n\t\n\t# Find Empty Cells on SimplyHired.com Data.\n\tsimplyhired_empty_cell\t\t= len(simplyhired_sheet.rows)\n\n\t# Formatting Purpose for SimplyHired.com.\n\tsimplyhired_sheet['A' + str(simplyhired_empty_cell + 1)] \t\t= '---'\n\tsimplyhired_sheet['A' + str(simplyhired_empty_cell + 1)].font\t= Font(color='FFFFFF')\n\tsimplyhired_sheet['A' + str(simplyhired_empty_cell + 2)] \t\t= 'Date gathered on: %s' % (time.asctime())\n\tsimplyhired_sheet['A' + str(simplyhired_empty_cell + 2)].font\t= Font(italic=True, bold=True)\n\n\t# Write the results from SimplyHired.com 
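(same layout as the two blocks above).\n\t# The TODOs above and below ask for a loop over these near-identical blocks;\n\t# the following commented-out sketch is one hedged way to do it. writeResults\n\t# is a hypothetical helper and is not called anywhere:\n\t# def writeResults(sheet, results):\n\t# \trow = len(sheet.rows)\n\t# \tsheet['A' + str(row + 2)] = 'Date gathered on: %s' % (time.asctime())\n\t# \tfor number, items in enumerate(results):\n\t# \t\tsheet['A' + str(row + 3 + number)] = items\n\t# \t\tfor col, key in zip('BCD', ('Time', 'Location', 'Web Address')):\n\t# \t\t\tsheet[col + str(row + 3 + number)] = results[items].get(key)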
\n\tsimplyhired_working_cell\t= simplyhired_empty_cell + 3\n\tfor number, items in enumerate(simplyhired_results):\n\t\tsimplyhired_sheet['A' + str(simplyhired_working_cell + number)] = items \n\t\tsimplyhired_sheet['B' + str(simplyhired_working_cell + number)] = simplyhired_results[items].get('Time') \n\t\tsimplyhired_sheet['C' + str(simplyhired_working_cell + number)] = simplyhired_results[items].get('Location') \n\t\tsimplyhired_sheet['D' + str(simplyhired_working_cell + number)] = simplyhired_results[items].get('Web Address') \n\t\n\twb.save('jobSearchResults.xlsx')\n\t# Figure out how to turn all the blocks of code into a loop.\n\n\nif __name__ == '__main__':\n\t\n\tif len(sys.argv) == 2 and sys.argv[1] == 'debug':\n\t\tlogging.debug('Starting program. Debug mode on...')\n\telse:\n\t\tlogging.disable(logging.CRITICAL)\n\t\tprint('Starting program. Debug mode off...')\n\t\n\tif len(sys.argv) == 2 and sys.argv[1] == 'test_excel':\n\t\tfrom searchJobsResults import monster_results_active\n\t\tfrom searchJobsResults import indeed_results_active\n\t\tfrom searchJobsResults import simplyhired_results_active\n\n\t\tpassToExcel(monster_results_active, indeed_results_active, simplyhired_results_active)\n\t\n\telif len(sys.argv) == 2 and sys.argv[1] == 'test_sandbox':\n\t\t\"\"\"\n\t\tThis is empty space to make any sort of random test.\n\t\t\"\"\"\n\t\tmonster_results \t= scrapeMonster('research%20assistant', 'New york%2C NY')\n\n\telse:\n\t\t# comment or uncomment as necessary when testing mode.\n\t\tfrom searchJobsResults import monster_results_active\n\t\t#from searchJobsResults import indeed_results_active\n\t\t#from searchJobsResults import simplyhired_results_active\n\n\t\t#monster_results \t= scrapeMonster('research%20assistant', 'New york%2C NY')\n\t\tindeed_results \t\t= scrapeIndeed('research+assistant', 'New york%2C NY')\n\t\tsimplyhired_results = scrapeSimplyHired('research+assistant', 'New+York,+NY')\n\n\t\tmakeLog(monster_results_active, indeed_results, simplyhired_results)\n\t\t\n\t\tpassToExcel(monster_results_active, indeed_results, simplyhired_results)\n\t","sub_path":"searchJobs/searchJobs.py","file_name":"searchJobs.py","file_ext":"py","file_size_in_byte":18381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"200000785","text":"#!/usr/bin/env python# Major library imports\nfrom numpy import zeros\n\n# Enthought library imports\nfrom enthought.enable.api import Component, ComponentEditor\nfrom enthought.traits.api import HasTraits, Instance, Tuple, Float, Function, File\nfrom enthought.traits.ui.api import Item, Group, View\n\n# Chaco imports\nfrom enthought.chaco.api import ArrayPlotData, Plot,jet, ImageData, gray\nfrom enthought.chaco.tools.api import PanTool, ZoomTool\nfrom enthought.chaco.tools.image_inspector_tool import ImageInspectorTool, \\\n ImageInspectorOverlay\n\nfrom enthought.chaco.api import AbstractController\n\n\nclass DataPrinter(AbstractController):\n point0 = Tuple(Float,Float)\n point1 = Tuple(Float,Float)\n process = Function\n \n def dispatch(self, event, suffix):\n if suffix in ('left_up', 'left_down') and event.handled == False:\n x = self.component.x_mapper.map_data(event.x)\n y = self.component.y_mapper.map_data(event.y)\n if suffix == 'left_down':\n self.point0 = x,y\n else:\n self.point1 = x,y\n self.process(self.point0, self.point1)\n \n def _process_default(self):\n def process(point0, point1):\n print('selection', point0, point1)\n return process\n \n \ndef toRGB(image):\n \"\"\"\n return a rgb from grayscale image, because it is dipslayed much faster\n \"\"\"\n# if len(image.shape) == 2:\n# a = numpy.empty(shape = image.shape + (3,), dtype = 'uint8') \n# im = numpy.array(255. * image / image.max(),dtype = 'uint8')\n# a[:,:,0] = im[:,:]\n# a[:,:,1] = im[:,:]\n# a[:,:,2] = im[:,:]\n# return a\n# else:\n return image\n\n#===============================================================================\n# Attributes to use for the plot view.\nsize = (600, 600)\ntitle=\"Simple image plot\"\nbg_color=\"lightgray\"\n \n#===============================================================================\n# # Figure class for inclusion in other traits\n#===============================================================================\n\nfigure_view = View(\n Group('file',\n Item('plot', editor=ComponentEditor(size=size,\n bgcolor=bg_color), \n show_label=False),\n orientation = \"vertical\"),\n resizable=True, title=title\n )\n\n\nclass Figure(HasTraits):\n pd = Instance(ArrayPlotData, transient = True)\n plot = Instance(Component,transient = True)\n process_selection = Function(transient = True)\n file = File('/home/andrej/Pictures/img_4406.jpg')\n \n traits_view = View(\n Group(\n Item('plot', editor=ComponentEditor(size=size,\n bgcolor=bg_color), \n show_label=False),\n orientation = \"vertical\"),\n resizable = True\n )\n \n def __init__(self,**kwds):\n super(Figure,self).__init__(**kwds)\n self.pd = self._pd_default()\n self.plot = self._plot_default()\n\n def _process_selection_default(self):\n def process(point0, point1):\n print('selection', point0, point1)\n return process\n \n def _pd_default(self):\n image = zeros(shape = (300,400)) \n pd = ArrayPlotData()\n pd.set_data(\"imagedata\", toRGB(image)) \n return pd\n \n def _plot_default(self):\n return self._create_plot_component()\n\n def _create_plot_component(self):\n pd = self.pd\n\n # Create the plot\n plot = Plot(pd, default_origin=\"top left\",orientation=\"h\")\n shape = pd.get_data('imagedata').shape\n plot.aspect_ratio = float(shape[1]) / shape[0]\n plot.x_axis.orientation = \"top\"\n #plot.y_axis.orientation = \"top\"\n #img_plot = plot.img_plot(\"imagedata\",colormap = jet)[0]\n img_plot = plot.img_plot(\"imagedata\",name = 'image', colormap = jet)[0]\n \n # Tweak some of the plot properties\n 
#plot.bgcolor = \"white\"\n plot.bgcolor = bg_color\n \n # Attach some tools to the plot\n plot.tools.append(PanTool(plot,constrain_key=\"shift\", drag_button = 'right'))\n printer = DataPrinter(component=plot, process = self.process_selection)\n plot.tools.append(printer)\n plot.overlays.append(ZoomTool(component=plot, \n tool_mode=\"box\", always_on=False))\n #plot.title = 'Default image'\n \n imgtool = ImageInspectorTool(img_plot)\n img_plot.tools.append(imgtool)\n plot.overlays.append(ImageInspectorOverlay(component=img_plot, \n image_inspector=imgtool))\n return plot\n \n def _file_changed(self, new):\n image = ImageData.fromfile(new)\n self.update_image(image.data)\n \n def update_image(self,data):\n image = toRGB(data)\n shape = image.shape\n self.pd.set_data(\"imagedata\", image) \n self.plot.aspect_ratio = float(shape[1]) / shape[0] \n self.plot.delplot('image')\n img_plot = self.plot.img_plot(\"imagedata\",name = 'image', colormap = jet)[0]\n imgtool = ImageInspectorTool(img_plot)\n img_plot.tools.append(imgtool)\n self.plot.overlays.pop()\n self.plot.overlays.append(ImageInspectorOverlay(component=img_plot, \n image_inspector=imgtool))\n\n \n #self.plot.plot('rectangle1',)\n self.plot.request_redraw()\n \n \n def plot_data(self, x, y, name = 'data 0', color = 'black'):\n xname = 'x_' + name\n yname = 'y_' + name\n self.pd.set_data(xname,x)\n self.pd.set_data(yname,y)\n self.del_plot(name)\n self.plot.plot((xname,yname), name = name, color = color)\n self.plot.request_redraw()\n \n def del_plot(self, name):\n try:\n self.plot.delplot(name)\n except:\n pass \n \n \n \nif __name__ == \"__main__\":\n demo = Figure()\n demo.configure_traits()\n#--EOF---\n","sub_path":"labtools/analysisold/figure.py","file_name":"figure.py","file_ext":"py","file_size_in_byte":6328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"161466131","text":"#coding:utf-8\n\nimport os\n\n# 查报告是否存在\n# 判断报告目录是否存在,不存在择新建\n# 如果存在,则删除历史报告\ndef findreport(val):\n\tif not os.path.exists('result/'):\n\t\tos.mkdir('result/')\n\telse:\n\t\ttarget = 'result/'+val\n\t\tif os.path.exists(target):\n\t\t\tos.remove(target)\n","sub_path":"core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"272857382","text":"import imutils\nimport os\nimport cv2\nimport sys\nfrom pathlib import Path\nimport Document as document\nimport TemplateData as templates\nimport Transform as transform\nimport numpy as np\n\n# TODO -- GOALS -- TODO\n# 1. Better Template - check?\n# 2. Improve background removal step - check?\n# 3. Improve OCR - IN PROGRESS\n# 4. Improve Alignment - check?\n# 5. Improve Pre-Screen - next\n\n#global variable declaration\nGOOD_MATCH_PERCENT = .15\nSRC_PATH = \"/Users/ngover/Documents/TestPrograms/Images/\"\nIMG = \"Samples/test47.jpg\"\nBLUR_THRESHOLD = 34\nDARKNESS_THRESHOLD = 50\n\n#start of main\n#\tThis function serves as a driver, first by calling the function to match the image to the best template,\n#\tthen aligning the image to the template, then pulling the data from the ID by referencing the location\n#\tof the bounding boxes which are expected to contain the text we are interested in.\ndef main():\n\t\n\timg = cv2.imread(SRC_PATH + IMG)\n\n\tif img is None:\n\t\tprint(\"Image could not be opened.\")\n\t\tsys.exit(0)\n\n\t#call to removeBorder, removes border from image and warps perspective if edges can be detected\n\t#removeBorder() will return false if the image contains background, or true if removeBorder()\n\t#was not able to locate the document, and the document still has the original background..\n\timgNoBackground, background = transform.removeBackground(img)\t\n\t\t\n\t#prescreen\n\tif preScreen(imgNoBackground) is False:\n\t\tprint(\"Image quality too low, try retaking.\")\n\t\tsys.exit(0)\n\n\t#search for best template match, image stored in template, the name of the template stored in docType\n\ttemplate, docType = selectTemplate(imgNoBackground, background)\n\t#line up the input image with the selected template so that the data will be exactly where we expect\n\timgAligned = alignToTemplate(img, template)\n\t#call to the document constructor, which will set up the object and read ROIs based on the info. passed\n\tmyDoc = document.documentFromImage(imgAligned, docType)\n\t#call to object's toString() method.\n\tprint(\"\\n\" + myDoc.__str__())\n\t\t\n\t#TODO remove -- display for debugging/testing\n\tcv2.imshow(\"Original\", imutils.resize(img, height=500))\n\tcv2.imshow(\"Template Selection\", imutils.resize(template, height=500))\n\tcv2.imshow(\"Original Aligned to Template\", imutils.resize(drawBoxes(imgAligned, docType), height=500))\n\tcv2.waitKey(0)\n\tcv2.destroyAllWindows()\n\t\n\treturn myDoc\n#end of main\n\n\n#start of preScreen()\n#\tThis function performs a very simple prelim check on the image before template match/align is attempted. It\n#\ttakes the result from the call to removeBorder() and measures the darkness and blur of the image. If one of\n#\tthese values is too low, the method returns false to the calling function. 
Else, returns true\ndef preScreen(img):\n\t#convert image to greyscale, get the variance of laplacian distribution\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tfocusMeasure = cv2.Laplacian(gray, cv2.CV_64F).var()\n\t\n\tprint(\"Focus measure: {}\".format(focusMeasure))\n\t\n\t#check whether the value is above or beneath the threshold for blur\n\tif focusMeasure < BLUR_THRESHOLD:\n\t\treturn False\n\n\t#measure the mean darkness level for the image\n\t#(more true to document readability when background is successfully removed)\n\tlight = np.mean(img)\n\n\tprint(\"Darkness level: {}\".format(light))\t\n\n\tif light < DARKNESS_THRESHOLD:\n\t\treturn False\n\n\treturn True\n#end of preScreen\n\n\t\n#start of selectTemplate\n#\tThis function is passed an image, the flag returned from removeBorder() and the location of the folder containing the tepmlates\n#\tfile system. The program first checks if the background is still in the image. If there is not a background, the function\n#\tcan tell without using any searching/matching process whether the license is vertical or horizontal based on aspect ratio.\n#\tThen, the function loops through all the identifiers that distinguish each category of document. After this search, the \n#\tfunction checks whether it has enough information to identify the document it has. If it does, it returns the template and\n#\tthe corresponding name of the template. If it does not, it performs a secondary search which looks for unique identifiers\n#\tfor each derived form of the document, then returns the best match.\ndef selectTemplate(img, background, location=SRC_PATH+\"Templates/\"):\n\th,w = img.shape[:2]\n\torientation = \"\"\n\n\t#check if image still contains background. If it does not, we can figure out whether the document submitted is horizontal or\n\t#vertical based on ratio of h:w\n\tif not background:\n\t\th,w = img.shape[:2]\n\t\tif h > w: #document is vertical\n\t\t\torientation = \"_V\"\n\t\telif w > h: #document is horizontal\n\t\t\torientation = \"_H\"\n\n\t#TODO remove this -- for debug\n\tcv2.imshow(\"Corrected\", imutils.resize(img, height=500))\n\tcv2.waitKey(0)\n\n\t#Get the filename of the format that had the best match in the input image. the split() function gives the name of the file without\n\t#the .jpg or .png extension.\n\tform = multiScaleTemplateSelect(img, location, background).split('.')[0]\n\n\tprint(\"Searching \" + form + \" directory...\")\t\n\t\n\t#update location to be the subdirectory containing all of the images for the specified form\n\tlocation = location + form + \"/\"\n\t#update form so that it will contain the filename and orientation if orientation was already determined\n\tform += orientation\t\n\t\n\t#check if the file we're looking for with the specified form and orientation exists.\n\tif os.path.isfile(location + form + \".jpg\"):\n\t\tbestTemplate = cv2.imread(location + form + \".jpg\")\n\telif os.path.isfile(location + form + \".png\"):\n\t\tbestTemplate = cv2.imread(location + form + \".png\")\n\t#if the first two branches of elif fail, then the file does not exist, that means one of 3 cases:\n\t#1. outline of document was not found to assign a vertical or horizontal orientation\n\t#2. outline found, but there are multiple forms of the document with the determined orientation\n\t#3. 
(most unlikely) false positive match to the state/template occurred in the first loop\n\telif background is True:\n\t\t#Folder will have a /Features subdirectory if there is more than one form of the doctype\n\t\tif os.path.exists(location + \"Features/\"):\n\t\t\t#go into features subdirectory, this contains all the unique features for each license type.\n\t\t\t#The best match will contain\n\t\t\tbestFeatureMatch = multiScaleTemplateSelect(img, location + \"Features/\", background)\n\t\t\tform = form + \"_\" + bestFeatureMatch.split(\"_\")[1].split('.')[0]\n\t\t#Else, the only image in the directory should be the correct form.\n\t\telse:\n\t\t\tfor filename in os.listdir(location):\n\t\t\t\tif filename.endswith(\".png\") or filename.endswith (\".jpg\"):\n\t\t\t\t\tform = filename.split(\".\")[0]\n\n\t\t#now that we have form name, attempt to read the CORRECT template from the /Templates/State/ folder.\n\t\tbestTemplate = None\n\t\tbestTemplate = cv2.imread(location + form + \".jpg\")\n\t\tif bestTemplate is None:\n\t\t\tbestTemplate = cv2.imread(location + form + \".png\")\n\n\tprint(\"FORM: {}\".format(form))\n\treturn bestTemplate, form\n#end of selectTemplate\n\t\t\n\n#start of multiScaleTemplateSelect()\n#\tThis function loops through all templates in the subdirectory, whose path is stored as a string in the variable named\n#\tlocation. The function is also passed the image itself as an openCV object. The function loops over multiple scales\n#\tof the input image, trying to match each template file in the subdirectory to the image. When the loop finishes, the filename\n#\tof the image that best matched the input image is returned in the form of a string.\ndef multiScaleTemplateSelect(img, location, background):\n\tgrayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tgrayImg = cv2.GaussianBlur(grayImg, (3,3), 0)\n\t\n\t#if background has NOT been removed yet, increase scale of the image, we will have more space to search. 
If the background\n\t#is already removed, we don't have to resize, the algorithm will run quicker.\n\tif not background:\n\t\tif grayImg.shape[0] > grayImg.shape[1]: #if height > width\n\t\t\tgrayImg = imutils.resize(grayImg, height=500)\n\t\telif grayImg.shape[1] > grayImg.shape[0]:\n\t\t\tgrayImg = imutils.resize(grayImg, width=500)\n\telif grayImg.shape[0] > 2000:\n\t\tgrayImg = imutils.resize(grayImg, height=1500)\n\telif grayImg.shape[1] > 2000:\n\t\tgrayImg = imutils.resize(grayImg, width=1500)\n\telse:\n\t\tprint(\"DIMENSIONS: {}x{}\".format(grayImg.shape[0], grayImg.shape[1]))\n\n\tbestScore = 0\n\tbestMatch = None\n\n\t#Loop through all files in the subdirectory stored in the variable location\n\tfor filename in os.listdir(location):\n\t\tif filename.endswith(\".png\") or filename.endswith(\".jpg\"): #All the templates expected to be jpg/png files\n\t\t\ttemplate = cv2.imread(location + filename)\n\t\t\ttemplate = imutils.resize(template, height=45)\n\t\t\tgrayTemplate = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)\n\t\t\tgrayTemplate = cv2.GaussianBlur(grayTemplate, (1,1), 0)\n\t\t\t(tH, tW) = template.shape[:2]\n\n\t\t\tbestStateScore = 0 # TODO TODO TODO remove\t\t\t\n\n\t\t\t#Loop over different scales of the image\n\t\t\tfor scale in np.linspace(0.1, 1.0, 30)[::-1]:\n\t\t\t\tresized = imutils.resize(grayImg, width=int(grayImg.shape[1] * scale))\n\n\t\t\t\t#Break if the resized image is smaller than the template.\t\n\t\t\t\tif resized.shape[0] < tH or resized.shape[1] < tW:\n\t\t\t\t\tbreak\n\n\t\t\t\t#Edge detection\n\t\t\t\tedgedImg = cv2.Canny(resized, 50, 250)\n\t\t\t\tedgedTemplate = cv2.Canny(grayTemplate, 50, 250)\n\t\t\t\t\n\t\t\t\t#get list of matches, compare best match score to bestScore\n\t\t\t\tresult = cv2.matchTemplate(edgedImg, edgedTemplate, cv2.TM_CCORR_NORMED)\n\t\t\t\tminScore,maxScore,_,_ = cv2.minMaxLoc(result)\t\n\t\t\t\t\t\n\t\t\t\t#TODO TODO TODO remove this if block\n\t\t\t\tif maxScore > bestStateScore:\n\t\t\t\t\tbestStateScore = maxScore\n\n\t\t\t\tif maxScore > bestScore:\n\t\t\t\t\tbestScore = maxScore\n\t\t\t\t\tbestMatch = filename\n\t\t\t\t\t#more than a 50% match is a pretty good match. This is to save time on the search.\n\t\t\t\t\tif maxScore > 0.5:\n\t\t\t\t\t\tprint(\"BEST MATCH: {}, SCORE: {},\".format(bestMatch,bestScore))\n\t\t\t\t\t\treturn bestMatch\n\t\t\tprint(\"FILE: {}, SCORE: {}\".format(filename, bestStateScore)) #TODO TODO TODO remove this\n\n\tprint(\"BEST MATCH: {}, SCORE: {}\".format(bestMatch, bestScore)) #TODO remove-- this was for debug\n\treturn bestMatch\n#end of multiScaleTemplateSelect\n\n\n#start of alignToTemplate\n#\tThis function uses takes an image and the template it has been matched to, identifies the areas\n#\tof the image that correspond, and calculates the homography (essentially the relationship between\n#\ttwo perspectives of the same image, takes into account rotation and translation) using the cv2.findHomography()\n#\tmethod. The template and the original image are assumed to be the same image related by this homography, which\n#\tis used to warp the perspective of the input image so that it aligns with the template. 
Returns the input image,\n#\taligned to the template.\ndef alignToTemplate(img, template):\n\t#image prep to improve OCR and alignment\n\timgClean = cleanImage(img)\n\ttemplateClean = cleanImage(template)\n\t\n\t# Detect image keypoints & descriptors using the BRISK algorithm\n\tbrisk = cv2.BRISK_create()\n\timgKeypoints, imgDescriptors = brisk.detectAndCompute(imgClean, None)\n\ttemplateKeypoints, templateDescriptors = brisk.detectAndCompute(templateClean, None)\n\t\n\t# Match corresponding features between the images\n\tdescriptorMatcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)\n\tmatches = descriptorMatcher.match(imgDescriptors, templateDescriptors, None)\n\t\n\t# Sort matches by score, and we only want to care about the best x% of matches.\n\tmatches.sort(key=lambda m: m.distance, reverse=False)\n\tnumGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)\n\tmatches = matches[:numGoodMatches]\n\t\n\t# Pull the coordinates of the best matches\n\timgPoints = np.zeros((len(matches), 2), dtype=np.float32)\n\ttemplatePoints = np.zeros((len(matches), 2), dtype=np.float32)\n\t\n\tfor i, match in enumerate(matches):\n\t\timgPoints[i,:] = imgKeypoints[match.queryIdx].pt\n\t\ttemplatePoints[i,:] = templateKeypoints[match.trainIdx].pt\n\t\n\t# find homography\n\th, mask = cv2.findHomography(imgPoints, templatePoints, cv2.RANSAC)\n\t\n\t# apply homography, warping the image to appear as if it were directly below the camera.\n\theight, width, channels = template.shape\n\timgAligned = cv2.warpPerspective(img, h, (width, height))\n\treturn imgAligned\n#end of alignToTemplate\n\n\n#start of drawBoxes\n#\tThis function is mostly for demonstration/debugging purposes. It references the ID's template data to\n#\tdraw boxes around the regions it is reading text from.\ndef drawBoxes(img, docType):\n\ttemplateData = getattr(templates, docType)\n\t# coordinates of each field are stored as a tuple in the corresponding class for the DL\n\t# dob\n\tcoords = getattr(templateData, \"dob\")\n\tcv2.rectangle(img, coords[0], coords[1], (0,255,0), 3)\n\t# name\n\tcoords = getattr(templateData, \"name\")\n\tcv2.rectangle(img, coords[0], coords[1], (0,255,0), 3)\n\t# address\n\tcoords = getattr(templateData, \"address\")\n\tcv2.rectangle(img, coords[0], coords[1], (0,255,0), 3)\t\n\t# expiration\n\tcoords = getattr(templateData, \"expiration\")\n\tcv2.rectangle(img, coords[0], coords[1], (0,255,0), 3)\n\treturn img\n#end of drawBoxes\n\n\n#start of cleanImage\n#\tThis function optomizes an image before it is passed to one of the openCV matching/alignment\n#\tmethods. The function first converts the image to greyscale, then performs a simple noise\n#\treduction.\ndef cleanImage(img):\n # img prep (color, blur correction, noise removal)\n\timgClean = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\timgClean = cv2.GaussianBlur(imgClean, (5,5), 0) \n\treturn imgClean\n#end of cleanImage\n\n\n#call to main\nif __name__ == \"__main__\":\n main()\t\n","sub_path":"ScanID.py","file_name":"ScanID.py","file_ext":"py","file_size_in_byte":13456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"72341846","text":"\"\"\"\nDerived from sqlalchemy_utils/types/{scalar_coercible,uuid}.py\n\nCopyright (c) 2012, Konsta Vesterinen\nMIT License\n\nSee https://github.com/kvesteri/sqlalchemy-utils\n\"\"\"\n\nimport uuid\n\nfrom sqlalchemy import types, util\nfrom sqlalchemy.dialects import mssql, postgresql\n\n\nclass ScalarCoercible(types.TypeDecorator):\n cache_ok = True\n\n def _coerce(self, value):\n raise NotImplementedError\n\n def coercion_listener(self, target, value, oldvalue, initiator):\n return self._coerce(value)\n\n\nclass UUID(ScalarCoercible, types.TypeDecorator):\n \"\"\"\n Stores a UUID in the database natively when it can and falls back to\n a BINARY(16) or a CHAR(32) when it can't.\n\n ::\n\n from sqlalchemy_utils import UUIDType\n import uuid\n\n class User(Base):\n __tablename__ = 'user'\n\n # Pass `binary=False` to fallback to CHAR instead of BINARY\n id = sa.Column(UUIDType(binary=False), primary_key=True)\n \"\"\"\n\n impl = types.BINARY(16)\n\n python_type = uuid.UUID\n\n def __init__(self, binary=True, native=True):\n \"\"\"\n :param binary: Whether to use a BINARY(16) or CHAR(32) fallback.\n \"\"\"\n self.binary = binary\n self.native = native\n\n def __repr__(self):\n return util.generic_repr(self)\n\n def load_dialect_impl(self, dialect):\n if self.native and dialect.name in (\"postgresql\", \"cockroachdb\"):\n # Use the native UUID type.\n return dialect.type_descriptor(postgresql.UUID())\n\n if dialect.name == \"mssql\" and self.native:\n # Use the native UNIQUEIDENTIFIER type.\n return dialect.type_descriptor(mssql.UNIQUEIDENTIFIER())\n\n else:\n # Fallback to either a BINARY or a CHAR.\n kind = self.impl if self.binary else types.CHAR(32)\n return dialect.type_descriptor(kind)\n\n @staticmethod\n def _coerce(value):\n if value and not isinstance(value, uuid.UUID):\n try:\n value = uuid.UUID(value)\n\n except (TypeError, ValueError):\n value = uuid.UUID(bytes=value)\n\n return value\n\n def process_literal_param(self, value, dialect):\n return \"'{}'\".format(value) if value else value\n\n def process_bind_param(self, value, dialect):\n if value is None:\n return value\n\n if not isinstance(value, uuid.UUID):\n value = self._coerce(value)\n\n if self.native and dialect.name in (\"postgresql\", \"mssql\", \"cockroachdb\"):\n return str(value)\n\n return value.bytes if self.binary else value.hex\n\n def process_result_value(self, value, dialect):\n if value is None:\n return value\n\n if self.native and dialect.name in (\"postgresql\", \"mssql\", \"cockroachdb\"):\n if isinstance(value, uuid.UUID):\n # Some drivers convert PostgreSQL's uuid values to\n # Python's uuid.UUID objects by themselves\n return value\n return uuid.UUID(value)\n\n return uuid.UUID(bytes=value) if self.binary else uuid.UUID(value)\n\n\n# vim: set et ts=4 sw=4:\n","sub_path":"fact/controller/database_types.py","file_name":"database_types.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"252097173","text":"from typing import Dict, List, Optional, Union\nfrom urllib.parse import urlparse\n\nfrom eth_tester.exceptions import TransactionFailed\nfrom hexbytes import HexBytes\nfrom web3 import Web3\nfrom web3.contract import Contract\nfrom web3.exceptions import (\n BadFunctionCallOutput,\n BlockNotFound,\n ContractLogicError,\n InvalidAddress,\n TransactionNotFound,\n)\nfrom web3.types import BlockData, ChecksumAddress, TxData\n\nfrom app.config import EnvConfig\n\nZERO_HASH = \"0x0000000000000000000000000000000000000000000000000000000000000000\"\n\nABI_FUNC_ALLOWANCE = {\n \"constant\": True,\n \"inputs\": [\n {\"name\": \"_owner\", \"type\": \"address\"},\n {\"name\": \"_spender\", \"type\": \"address\"},\n ],\n \"name\": \"allowance\",\n \"outputs\": [{\"name\": \"remaining\", \"type\": \"uint256\"}],\n \"payable\": False,\n \"stateMutability\": \"view\",\n \"type\": \"function\",\n}\n\nABI_FUNC_BALANCE_OF = {\n \"constant\": True,\n \"inputs\": [{\"name\": \"who\", \"type\": \"address\"}],\n \"name\": \"balanceOf\",\n \"outputs\": [{\"name\": \"\", \"type\": \"uint256\"}],\n \"payable\": False,\n \"stateMutability\": \"view\",\n \"type\": \"function\",\n}\n\nABI_FUNC_TOTAL_SUPPLY = {\n \"constant\": True,\n \"inputs\": [],\n \"name\": \"totalSupply\",\n \"outputs\": [{\"name\": \"\", \"type\": \"uint256\"}],\n \"payable\": False,\n \"stateMutability\": \"view\",\n \"type\": \"function\",\n}\n\nERC20_ABI_VIEWS = [ABI_FUNC_ALLOWANCE, ABI_FUNC_BALANCE_OF, ABI_FUNC_TOTAL_SUPPLY]\n\n\nclass NotFoundException(Exception):\n pass\n\n\nclass ContractNotERC20(Exception):\n pass\n\n\nclass Web3Client:\n def __init__(self, w3: Optional[Web3] = None):\n if w3:\n self.w3 = w3\n else:\n self.w3 = _get_w3(EnvConfig().WEB3_PROVIDER_URL)\n\n def get_block_by_hash(\n self, hash: Union[str, HexBytes], full_transactions: bool = False\n ) -> BlockData:\n try:\n return self.w3.eth.get_block(hash, full_transactions=full_transactions)\n except (ValueError, BlockNotFound):\n raise NotFoundException\n\n def get_latest_block(self, full_transactions: bool = False) -> BlockData:\n return self.w3.eth.get_block(\"latest\", full_transactions=full_transactions)\n\n def get_parent_block(\n self, block: BlockData, full_transactions: bool = False\n ) -> Optional[BlockData]:\n parent_hash = block[\"parentHash\"]\n if parent_hash.hex() == ZERO_HASH:\n return None\n return self.w3.eth.get_block(parent_hash, full_transactions=full_transactions)\n\n def get_transaction_by_hash(self, hash: Union[str, HexBytes]) -> TxData:\n try:\n return self.w3.eth.get_transaction(hash)\n except (ValueError, TransactionNotFound):\n raise NotFoundException\n\n def is_transaction_contract_creation(self, transaction: TxData) -> bool:\n return transaction[\"to\"] is None\n\n def get_contract_address_by_transaction_hash(\n self, hash: Union[str, HexBytes]\n ) -> ChecksumAddress:\n try:\n receipt = self.w3.eth.getTransactionReceipt(hash)\n except (ValueError, TransactionNotFound):\n raise NotFoundException\n contract_address = receipt[\"contractAddress\"]\n if not contract_address:\n raise NotFoundException\n return contract_address\n\n def get_contract(self, address: ChecksumAddress, abi: List[Dict]) -> Contract:\n try:\n return self.w3.eth.contract(address=address, abi=abi)\n except (InvalidAddress, BadFunctionCallOutput):\n raise NotFoundException\n\n def is_contract_erc20(self, contract_address: ChecksumAddress) -> bool:\n try:\n contract = self.get_contract(contract_address, ERC20_ABI_VIEWS)\n except 
NotFoundException:\n return False\n try:\n test_addr = contract_address\n contract.functions.totalSupply().call()\n contract.functions.balanceOf(test_addr).call()\n contract.functions.allowance(test_addr, test_addr).call()\n except (TransactionFailed, BadFunctionCallOutput, ContractLogicError):\n return False\n return True\n\n def get_eoa_token_balance(self, eoa_address: str, token_address: str) -> int:\n eoa_checksum_addr = self.w3.toChecksumAddress(eoa_address)\n token_checksum_addr = self.w3.toChecksumAddress(token_address)\n\n contract = self.get_contract(token_checksum_addr, [ABI_FUNC_BALANCE_OF])\n try:\n res = contract.functions.balanceOf(eoa_checksum_addr).call()\n except TransactionFailed:\n raise ContractNotERC20\n return res\n\n\ndef _get_w3(web3_provider_url: Optional[str] = None):\n if not web3_provider_url:\n url = EnvConfig().WEB3_PROVIDER_URL\n else:\n url = web3_provider_url\n\n parsed_url = urlparse(url)\n if parsed_url.scheme in (\"ws\", \"wss\"):\n return Web3(Web3.WebsocketProvider(url))\n elif parsed_url.scheme in (\"http\", \"https\"):\n return Web3(Web3.HTTPProvider(url))\n else:\n raise ValueError(\"invalid scheme for web3 provider url\")\n","sub_path":"app/web3_client.py","file_name":"web3_client.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"316710277","text":"import glob\nimport os\nimport json\nimport re\n\n\nread_files = glob.glob(os.path.join(os.getcwd(), \"Document\", \"*\"))\nfor f in read_files:\n with open (f) as f_input:\n next(f_input)\n next(f_input)\n next(f_input)\n contents = f_input.read()\n contents = re.sub('-1', '', contents)\n record = [f, contents]\n print(json.dumps(record))\n","sub_path":"corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"258943753","text":"import unittest\nimport numpy as np\nfrom keras_clinical import applications\n\n\nclass GraceMortality(unittest.TestCase):\n\n def test_single_patient(self):\n gm = applications.GRACE_MORTALITY()\n pretend_patient_1 = np.array([[57, 70, 110, 1.2, 3, 1, 0, 1]], dtype=np.float32)\n pt1 = gm.predict(pretend_patient_1)\n print(pt1)\n self.assertEqual(pt1, np.array([[0.21693133]], dtype=np.float32))\n\n def test_multiple_patients(self):\n gm = applications.GRACE_MORTALITY()\n pretend_patients = np.array(\n [[57, 70, 110, 1.2, 3, 1, 0, 1],\n [57, 70, 110, 1.2, 3, 1, 0, 1],\n [57, 70, 110, 1.2, 3, 1, 0, 1],\n [57, 70, 110, 1.2, 3, 1, 0, 1],\n [57, 70, 110, 1.2, 3, 1, 0, 1]],\n dtype=np.float32\n )\n pts = gm.predict(pretend_patients)\n self.assertTrue((pts.tolist() == np.array([[ 0.21693127],\n [ 0.21693127],\n [ 0.21693127],\n [ 0.21693127],\n [ 0.21693127]], dtype=np.float32)).all())","sub_path":"tests/grace_mortality.py","file_name":"grace_mortality.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"439344009","text":"import sqlite3\r\nfrom sqlite3 import Error\r\nimport datetime\r\n\r\ndef create_connection(db_file):\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n return conn\r\n except Error as e:\r\n print(e)\r\n\r\n return None\r\n\r\ndef create_roster(conn,create_table_sql):\r\n try:\r\n c = conn.cursor()\r\n c.execute(create_table_sql)\r\n except Error as e:\r\n print(e)\r\n\r\ndef main():\r\n database = 'C://Users//devashishpoudel//Desktop//Project//roster.sqlite'\r\n sql_create_roster_table = \"\"\"CREATE TABLE IF NOT EXISTS roster(\r\n id integer PRIMARY KEY,\r\n Shift text,\r\n date text\r\n );\"\"\"\r\n \r\n roster_start_date = input(\"Enter the roster start date(yyyy-mm-dd): \")\r\n no_of_days = input(\"Enter the number of days: \")\r\n \r\n dates = []\r\n for i in range(int(no_of_days)):\r\n dates.append(datetime.datetime.strptime(roster_start_date, '%Y/%m/%d') + datetime.timedelta(i))\r\n \r\n print(dates)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n \r\n \r\n \r\n\r\n","sub_path":"roster.py","file_name":"roster.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"257931653","text":"import numpy as np\nfrom Actions import *\nfrom math import floor\n\nSTANDARD_REWARD = -0.01\nCRASH_REWARD = -2\nEND_REWARD = -2\nEVASION_REWARD = 2\nACTION_SUCCESSFUL_RATE = 0.6\nN_ROW = 4\nN_COL = 4\nCRASH_BLOCKS = [(0,1),(2,0),(3,2)]\nEVASION_BLOCKS = [(0, 0), (3, 3)]\nINIT_STATE = (3, 0, 0, 3)\n\nDEBUG = False\n\nclass Game:\n def __init__(self):\n self.n_absorbing_type = 4\n self.n_rows = N_ROW\n self.n_cols = N_COL\n self.crash_blocks = CRASH_BLOCKS\n self.n_blocks = self.n_rows * self.n_cols\n\n self.states = self.__initialize_states()\n self.crash_states_p1, self.crash_states_p2 = self.__initialize_crash_states()\n self.evasion_states = self.__initalize_evasion_states()\n self.terminal_states, self.terminal_type = self.__initialize_terminal_states()\n self.rewards = self.__initialize_rewards()\n self.transitions, self.transition_from, self.transition_to = self.__initialize_transitions()\n\n #self.transition_from, self.transition_to = self.__convert_transition()\n\n print('Game initilized')\n\n def get_state_absorbing_type(self, state):\n if not self.is_terminal_state(state):\n raise Exception(\"State is not an absorbing state\")\n return self.terminal_type[state]\n\n def get_absorbing_type_reward(self, type):\n if type == 0:\n return CRASH_REWARD\n elif type == 1:\n return -CRASH_REWARD\n elif type == 2:\n return END_REWARD\n elif type == 3:\n return EVASION_REWARD\n else:\n raise Exception(\"Absorbing type error\")\n\n def is_crash_state(self, state):\n return state in self.crash_states_p1 or state in self.crash_states_p2\n\n def is_terminal_state(self, state):\n return state in self.terminal_states\n\n def get_n_states(self):\n '''\n Get the number of states of the game\n :return: number of states\n '''\n return len(self.states)\n\n def get_state_reward(self, state):\n '''\n Get the reward of each state\n :param state: index of state\n :return: the reward of that state\n '''\n return self.rewards[state]\n\n def get_state_transition(self, action_pair):\n '''\n Get the transition matrix for pair of actions\n :param action_pair: tuple of actions (action of player1, action of player 2)\n :return: transition matrix given that pair of action\n '''\n return self.transitions[(action_pair[0], action_pair[1])]\n\n def __convert_transition(self):\n from_mapping = [] # state -> {next_state : {action pair : prob}}, from state to next state\n to_mapping = [] # from previous state to state\n for state in range(self.get_n_states()):\n from_mapping.append({})\n to_mapping.append({})\n for i in range(N_ACTIONS):\n for j in range(N_ACTIONS):\n from_vector = self.get_state_transition((i, j))[state]\n to_vector = self.get_state_transition((i, j))[:, state]\n for next_state in range(len(from_vector)):\n probability = from_vector[next_state]\n if probability != 0:\n if next_state not in from_mapping[state]:\n from_mapping[state][next_state] = {}\n from_mapping[state][next_state][(i, j)] = probability\n\n for previous_state in range(len(to_vector)):\n probability = from_vector[previous_state]\n if probability != 0:\n if previous_state not in to_mapping[state]:\n to_mapping[state][previous_state] = {}\n to_mapping[state][previous_state][(i, j)] = probability\n return from_mapping, to_mapping\n\n def rc2state(self, row1, col1, row2, col2):\n '''\n Convert row, col of player 1 and 2 to index of state\n :param row1: row of player1\n :param col1: col of player1\n :param row2: row of player2\n :param col2: col of player2\n :return: converted index of state\n '''\n state1 = row1 
* self.n_cols + col1\n state2 = row2 * self.n_cols + col2\n state = state1 * self.n_blocks + state2\n return state\n\n def state2rc(self, state):\n '''\n Convert index of state to row, col of player 1 and 2\n :param state: index of state\n :return: row, col of player 1 and row, col of player 2\n '''\n state1 = floor(state / self.n_blocks)\n state2 = state % self.n_blocks\n row1 = floor(state1 / self.n_cols)\n col1 = state1 % self.n_cols\n row2 = floor(state2 / self.n_cols)\n col2 = state2 % self.n_cols\n return row1, col1, row2, col2\n\n def __initialize_crash_states(self):\n '''\n Get list of crash states [[p1_crash], [p2_crash]]\n :return: an array of crash states' index\n '''\n crash_states_p1 = []\n crash_states_p2 = []\n for crash_block in self.crash_blocks:\n for i in range(self.n_rows):\n for j in range(self.n_cols):\n if (i, j) != crash_block:\n crash_states_p2.append(self.rc2state(i, j, crash_block[0], crash_block[1]))\n crash_states_p1.append(self.rc2state(crash_block[0], crash_block[1], i, j))\n return crash_states_p1, crash_states_p2\n\n def __initalize_evasion_states(self):\n evaison_states = []\n for state in EVASION_BLOCKS:\n for i in range(self.n_rows):\n for j in range(self.n_cols):\n evaison_states.append(self.rc2state( state[0], state[1],i,j))\n return evaison_states\n\n def __initialize_states(self):\n return np.arange(0, self.n_blocks * self.n_blocks)\n\n def __initialize_rewards(self):\n rewards = np.ones(self.get_n_states()) * STANDARD_REWARD\n for crash_state in self.crash_states_p1:\n rewards[crash_state] = CRASH_REWARD\n for crash_state in self.crash_states_p2:\n rewards[crash_state] = -CRASH_REWARD\n for i in range(self.n_rows):\n for j in range(self.n_cols):\n rewards[self.rc2state(i, j, i, j)] = END_REWARD\n for evasion_state in self.evasion_states:\n rewards[evasion_state] = EVASION_REWARD\n return rewards\n\n def __initialize_terminal_states(self):\n terminal_states = []\n terminal_type = {}\n for crash_state in self.crash_states_p1:\n terminal_states.append(crash_state)\n terminal_type[crash_state] = 0\n for crash_state in self.crash_states_p2:\n terminal_states.append(crash_state)\n terminal_type[crash_state] = 1\n for i in range(self.n_rows):\n for j in range(self.n_cols):\n terminal_states.append(self.rc2state(i, j, i, j))\n terminal_type[self.rc2state(i, j, i, j)] = 2\n\n for evasion_state in self.evasion_states:\n terminal_states.append(evasion_state)\n terminal_type[evasion_state] = 3\n\n return terminal_states, terminal_type\n\n def __initialize_transitions(self):\n whole_transition_matrix = {}\n possible_from = [] # state -> {next_state}\n possible_to = [] # from previous state to state\n for i in range(self.get_n_states()):\n possible_from.append(set())\n possible_to.append(set())\n\n n_states = self.get_n_states()\n for action1 in range(N_ACTIONS):\n for action2 in range(N_ACTIONS):\n transition_matrix = np.zeros((n_states, n_states))\n for i in range(n_states):\n state_transition = self.__get_possible_transition(i, action1, action2)\n for state_prob_tuple in state_transition:\n next_state = int(state_prob_tuple[0]) # avoid type bug\n prob = state_prob_tuple[1]\n transition_matrix[i, next_state] = prob\n if prob != 0:\n possible_from[i].add(next_state)\n possible_to[next_state].add(i)\n whole_transition_matrix[(action1, action2)] = transition_matrix\n return whole_transition_matrix, possible_from, possible_to\n\n def __get_possible_transition(self, state, action1, action2):\n '''\n For one state (pos1,pos2) and action pair, get all possible 
transitions and corresponding probability\n        :param state: pair of state\n        :param action1: action1\n        :param action2: action2\n        :return: list of tuples of new state and possibility.\n        '''\n        res = []\n        row1, col1, row2, col2 = self.state2rc(state)\n        single_transition_1 = self.__get_single_state_transition(row1, col1, action1)\n        single_transition_2 = self.__get_single_state_transition(row2, col2, action2)\n        # Start cross product to create full transition\n        for transition_1 in single_transition_1:\n            for transition_2 in single_transition_2:\n                prob = transition_1[1] * transition_2[1]\n                state = self.rc2state(transition_1[0][0], transition_1[0][1], transition_2[0][0], transition_2[0][1])\n                res.append((state, prob))\n        return res\n\n    def __get_single_state_transition(self, row, col, action):\n        '''\n        Get the transition prob for one player using the action.\n        Ex: with ACTION_SUCCESSFUL_RATE = 0.6, Player@(row = 2, col = 1) using action LEFT should return\n        [ ((2,0),0.6), ((2,1),0.1), ((1,1),0.1), ((3,1),0.1), ((2,2),0.1) ]\n        :param row: row of the player\n        :param col: col of the player\n        :param action: action id\n        :return: list of (state, prob)\n        '''\n        actions = [0, 1, 2, 3]\n        probs = []\n        actions.remove(action)\n        if self.__is_action_valid(row, col, action):\n            probs.append((self.__create_new_rc_from_action(row, col, action), ACTION_SUCCESSFUL_RATE))\n            possible_count = 1 # at original state is always possible\n            for a in actions:\n                if self.__is_action_valid(row, col, a):\n                    possible_count += 1\n            rest_prob = (1 - ACTION_SUCCESSFUL_RATE) / possible_count\n            probs.append(((row, col), rest_prob))\n            for a in actions:\n                if self.__is_action_valid(row, col, a):\n                    probs.append((self.__create_new_rc_from_action(row, col, a), rest_prob))\n        else:\n            probs.append(((row, col),\n                          ACTION_SUCCESSFUL_RATE)) # if action is invalid, most of the time should stay in original state\n            possible_count = 0\n            for a in actions:\n                if self.__is_action_valid(row, col, a):\n                    possible_count += 1\n            rest_prob = (1 - ACTION_SUCCESSFUL_RATE) / possible_count\n            for a in actions:\n                if self.__is_action_valid(row, col, a):\n                    probs.append((self.__create_new_rc_from_action(row, col, a), rest_prob))\n\n        return probs\n\n    def __create_new_rc_from_action(self, row, col, action):\n        return row + get_movement(action)[0], col + get_movement(action)[1]\n\n    def __is_action_valid(self, row, col, action):\n        new_row = row + get_movement(action)[0]\n        new_col = col + get_movement(action)[1]\n        if 0 <= new_row < self.n_rows and 0 <= new_col < self.n_cols:\n            return True\n        return False\n\n    def get_success_next_state(self, state, action1, action2):\n        '''\n        return next state if action 1 and action 2 BOTH SUCCEED\n        :param state: current state\n        :param action1: action executed by p1\n        :param action2: action executed by p2\n        :return: next state if action1 and action2 both succeed\n        '''\n        row1, col1, row2, col2 = self.state2rc(state)\n        if self.__is_action_valid(row1, col1, action1):\n            new_pos1 = self.__create_new_rc_from_action(row1, col1, action1)\n        else:\n            new_pos1 = row1, col1\n        if self.__is_action_valid(row2, col2, action2):\n            new_pos2 = self.__create_new_rc_from_action(row2, col2, action2)\n        else:\n            new_pos2 = row2, col2\n        return self.rc2state(new_pos1[0], new_pos1[1], new_pos2[0], new_pos2[1])\n\n\n\n    def print_state(self, state):\n        # ASCII legend: X = player 1, Y = player 2, O = both players on the same block, C = crash block\n        row1, col1, row2, col2 = self.state2rc(state)\n        print('----------------')\n        for i in range(self.n_rows):\n            line = '|'\n            for j in range(self.n_cols):\n                if i == row1 == row2 and j == col1 == col2:\n                    line += (' O |')\n                else:\n                    if (i, j) in self.crash_blocks:\n                        line += ' C |'\n                    elif i 
== row1 and j == col1:\n line += (' X |')\n elif i == row2 and j == col2:\n line += (' Y |')\n else:\n line += (' |')\n print(line)\n #print('----------------')\n\n mat = np.zeros((self.n_rows, self.n_cols))\n for i in range(self.n_rows):\n for j in range(self.n_cols):\n if i == row1 == row2 and j == col1 == col2:\n mat[i, j] = 3\n else:\n if (i, j) in self.crash_blocks:\n mat[i, j] = -1\n elif i == row1 and j == col1:\n mat[i, j] = 1\n elif i == row2 and j == col2:\n mat[i, j] = 2\n\n return mat\n","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":13778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"63893970","text":"import numpy as np\n\ndef get_obs(z,restwave):\n return restwave*(1.+z)\n\ndef get_wv(z,restwave):\n return restwave*(1.+z)\n\ndef z(obswv,restwv):\n return (obswv/restwv) -1.\n\ndef get_vel(ref,wv,restwv=1215.6701):\n \"\"\"\n if a wv is redder than ref, then vel should be positive\n\n gets velocity shift of wv to wv1\n\n input parameters:\n -----------------\n wv1, wv : the actual wavelength of the data\n restwv : rest wavelength of the desired center of the desired absorption line\n\n output:\n -------\n velocity shift to restwv \n \"\"\"\n\n assert(ref>0. and restwv>0. and wv>0.)\n z1=z(ref,restwv)\n z2=z(wv,restwv)\n return 299792.458*(z2-z1)/(1.+z1)\n\nclass Datum(object):\n def __init__(self,data,ion_dict,**kwargs):\n for key, val in ion_dict.items():\n try:\n setattr(self,key,float(val))\n except:\n setattr(self,key,val)\n try:\n self.trans = int(self.trans)\n except:\n print(type(self.trans))\n raise Exception('self.trans='+str(self.trans)+':\\n trans must be specified and must be an int. (counting from 0)')\n self.vel_offset = self._get_offset(ion_dict,**kwargs)\n print(self.name, self.vel_offset)\n self.waverange=kwargs.get('waverange',False)\n self.velrange =kwargs.get('velrange',False)\n self.label = kwargs.get('label','')\n ind = self._get_indices(data)\n\n setattr(self,'waves',list(data['waves'][ind]))\n setattr(self,'data',list(data['data'][ind]))\n #need to do this bc cont and fit are lists of np arrays\n cont = []\n fit = []\n for i in range(len(data['cont'])): \n cont.append(data['cont'][i][ind])\n fit.append(data['fit'][i][ind])\n setattr(self,'cont',cont)\n setattr(self,'fit',fit)\n\n center = get_wv(self.zabs,self.restwave)\n vel = [get_vel(center, wv, self.restwave) for wv in self.waves]\n self.x = vel if self.velrange else self.waves\n\n def _get_offset(self,ion_dict,**kwargs):\n linedict = kwargs.get('line_dict')\n self.restwave = float(linedict[self.name][self.trans]['rest'])\n try:\n self.obs = float(ion_dict['obs'])\n ref_line = kwargs.get('ref_line',False)\n if not bool(self.obs) or not bool(ref_line):\n raise KeyError\n #get z_abs of the reference line (typically Lyman-alpha or H-alpha)\n self.zabs=float(ref_line['z'])\n except (IndexError, KeyError):\n self.zabs=float(kwargs.get('zabs',False))\n if not bool(zabs):\n raise Exception(\"need to specify either `ref_line` or `zabs`\")\n return 299792.458*(self.z-self.zabs)/(1.+self.zabs)\n\n\n def _get_indices(self,data):\n \"\"\"gets the indices of interest\"\"\"\n ind = []\n if self.velrange and not self.waverange:\n for i in range(0,data['waves'].shape[0]):\n vel=get_vel(self.obs, data['waves'][i], self.restwave)\n if np.fabs(vel)=data['waves'])[0])\n ind = list(set(lst1+lst2))\n else:\n raise Exception(\"need to specify either velrange or waverange, but not both\")\n return ind\n\n\n","sub_path":"parse_veldata.py","file_name":"parse_veldata.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"472695491","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\nfrom core.models.booking import Booking\n\n\ndef update_sources(apps, schema_editor):\n \"\"\"\n For all booking sources, create a source.\n \"\"\"\n Source = apps.get_model('core','Source')\n for idx,source in enumerate(Booking.BOOKING_SOURCES):\n Source.objects.create(source=source[0], order_num=idx, source_desc=source[1])\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0178_workshop_is_doorstep'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Source',\n fields=[\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('source', models.CharField(max_length=16, serialize=False, primary_key=True)),\n ('source_desc', models.CharField(max_length=128)),\n ('active', models.BooleanField(default=True)),\n ('order_num', models.IntegerField()),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.RunPython(update_sources, reverse_code=migrations.RunPython.noop),\n migrations.AlterField(\n model_name='booking',\n name='source',\n field=models.ForeignKey(related_name='booking_sources', db_column=b'source', blank=True, to='core.Source', null=True),\n ),\n migrations.AlterField(\n model_name='bumperuser',\n name='source',\n field=models.CharField(blank=True, max_length=12, null=True, choices=[(b'web', b'Web'), (b'mobile-web', b'Mobile Web'), (b'desktop-web', b'Desktop Web'), (b'email', b'Email'), (b'sms', b'SMS'), (b'chat', b'Chat'), (b'call', b'Call'), (b'app', b'App'), (b'event', b'Event'), (b'uber', b'Uber'), (b'opsPanel', b'opsPanel'), (b'android', b'android'), (b'iphone', b'iphone'), (b'facebook', b'Facebook'), (b'referral', b'Referral'), (b'helpshift', b'Helpshift'), (b'justdial', b'JustDial'), (b'drwheelz', b'drwheelz'), (b'incomingCall', b'Incoming Call'), (b'urbanClap', b'UrbanClap'), (b'cars24', b'Cars 24'), (b'hp', b'HP Petrol Pump')]),\n ),\n migrations.AlterField(\n model_name='carmodel',\n name='end_year',\n field=models.IntegerField(blank=True, null=True, choices=[(1980, 1980), (1981, 1981), (1982, 1982), (1983, 1983), (1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017)]),\n ),\n migrations.AlterField(\n model_name='carmodel',\n name='start_year',\n field=models.IntegerField(blank=True, null=True, choices=[(1980, 1980), (1981, 1981), (1982, 1982), (1983, 1983), (1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017)]),\n ),\n migrations.AlterField(\n model_name='historicalbooking',\n name='source',\n field=models.ForeignKey(related_name='+', 
on_delete=django.db.models.deletion.DO_NOTHING, db_column=b'source', db_constraint=False, blank=True, to='core.Source', null=True),\n ),\n migrations.AlterField(\n model_name='historicaluserinquiry',\n name='source',\n field=models.CharField(blank=True, max_length=12, null=True, choices=[(b'web', b'Web'), (b'mobile-web', b'Mobile Web'), (b'desktop-web', b'Desktop Web'), (b'email', b'Email'), (b'sms', b'SMS'), (b'chat', b'Chat'), (b'call', b'Call'), (b'app', b'App'), (b'event', b'Event'), (b'uber', b'Uber'), (b'opsPanel', b'opsPanel'), (b'android', b'android'), (b'iphone', b'iphone'), (b'facebook', b'Facebook'), (b'referral', b'Referral'), (b'helpshift', b'Helpshift'), (b'justdial', b'JustDial'), (b'drwheelz', b'drwheelz'), (b'incomingCall', b'Incoming Call'), (b'urbanClap', b'UrbanClap'), (b'cars24', b'Cars 24'), (b'hp', b'HP Petrol Pump')]),\n ),\n migrations.AlterField(\n model_name='messages',\n name='action',\n field=models.SmallIntegerField(blank=True, null=True, choices=[(128, 128), (1, 1), (2, 2), (3, 3), (4, 4), (133, 133), (6, 6), (129, 129), (8, 8), (9, 9), (10, 10), (132, 132), (12, 12), (130, 130), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (131, 131), (20, 20), (21, 21), (22, 22), (151, 151), (152, 152), (153, 153), (26, 26), (5, 5), (7, 7), (51, 51), (52, 52), (53, 53), (54, 54), (55, 55), (23, 23), (19, 19), (25, 25), (13, 13), (56, 56), (24, 24), (216, 216), (101, 101), (102, 102), (103, 103), (104, 104), (105, 105), (106, 106), (107, 107), (108, 108), (109, 109), (111, 111), (112, 112), (113, 113), (114, 114), (115, 115), (116, 116), (117, 117), (118, 118), (119, 119), (120, 120), (121, 121), (122, 122), (123, 123), (124, 124), (125, 125), (126, 126), (127, 127)]),\n ),\n migrations.AlterField(\n model_name='messages',\n name='label',\n field=models.PositiveSmallIntegerField(help_text=b'This is to identify which screen will open in UI', null=True, choices=[(1, b'Status'), (2, b'Feedback'), (3, b'Update'), (4, b'Offer'), (5, b'Referral'), (6, b'RateUs'), (7, b'FillProfile'), (8, b'FillCarInfo'), (9, b'EOD')]),\n ),\n migrations.AlterField(\n model_name='notifications',\n name='push_level',\n field=models.SmallIntegerField(default=1, null=True, blank=True, choices=[(1, b'Status'), (2, b'Feedback'), (3, b'Update'), (4, b'Offer'), (5, b'Referral'), (6, b'RateUs'), (7, b'FillProfile'), (8, b'FillCarInfo'), (9, b'EOD')]),\n ),\n migrations.AlterField(\n model_name='userinquiry',\n name='source',\n field=models.CharField(blank=True, max_length=12, null=True, choices=[(b'web', b'Web'), (b'mobile-web', b'Mobile Web'), (b'desktop-web', b'Desktop Web'), (b'email', b'Email'), (b'sms', b'SMS'), (b'chat', b'Chat'), (b'call', b'Call'), (b'app', b'App'), (b'event', b'Event'), (b'uber', b'Uber'), (b'opsPanel', b'opsPanel'), (b'android', b'android'), (b'iphone', b'iphone'), (b'facebook', b'Facebook'), (b'referral', b'Referral'), (b'helpshift', b'Helpshift'), (b'justdial', b'JustDial'), (b'drwheelz', b'drwheelz'), (b'incomingCall', b'Incoming Call'), (b'urbanClap', b'UrbanClap'), (b'cars24', b'Cars 24'), (b'hp', b'HP Petrol Pump')]),\n ),\n ]\n","sub_path":"bumper2/core/old_migrations/0179_auto_20170102_1334.py","file_name":"0179_auto_20170102_1334.py","file_ext":"py","file_size_in_byte":7183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"420899850","text":"#!/usr/bin/python\n# Global sync with all the trimmings\n#\n# Initial script by Benjamin for RTS\n# Updated for AR1 by Ruby\n# Benjamin added PPS offsets\n# Added timestamp of sync output for easier use\n# Tiyani replaced dig_sync_epoch with dig_l_band_time_sync\n# Reset capture destination\n# Cleanup by Martin to remove redundant instructions now handled by CAM\n# Tiyani: wait for dmc to update epoch before resetting capture destinations and\n# querying digitiser epoch\n# Ruby stabilising script by exiting cleanly and allowing enough time for sensor updates\n# Anton: limit cam object to components required, error on timeout, minor cleanup\n\nfrom __future__ import with_statement\n\nimport time\n\nfrom ast import literal_eval\nfrom concurrent.futures import TimeoutError\n\nimport katconf\nfrom katcorelib import standard_script_options, verify_and_connect\nfrom katcorelib import cambuild\n\n\n# Parse command-line options that allow the defaults to be overridden\nparser = standard_script_options(usage=\"usage: %prog [options]\",\n description=\"MeerKAT Global Sync Script ver 2\\n\" +\n \"Performs a global sync,\\n\" +\n \"Starts data stream from digitisers,\\n\" +\n \"Resets capture destination to clear IP assignments\")\nparser.add_option('--configdelayfile', type=\"string\",\n default='katconfig/user/delay-models/mkat/pps_delays.csv',\n help='Specify the katconfig path to the csv file containing receptor '\n 'delays in the format m0xx, (default=\"%default\")')\nparser.add_option('--all', action=\"store_true\", default=False,\n help='Include all antennas in the global sync')\n# assume basic options passed from instruction_set\nparser.set_defaults(description='MeerKAT Global sync')\n(opts, args) = parser.parse_args()\nprint(\"global_sync_MeerKAT script: start\")\n\nbands = ['l', 'u']\ndmc_epoch = None\nwith verify_and_connect(opts) as kat:\n print(\"_______________________\")\n print(kat.controlled_objects)\n print(kat.ants.clients)\n print(opts)\n print(\"_______________________\")\n\n subarrays = kat.katpool.sensor.subarrays.get_value()\n subarrays_free = kat.katpool.sensor.subarrays_free.get_value()\n assert subarrays == subarrays_free, (\"Please free all subarrays before \"\n \"running this script.\")\n try:\n cam = None\n if not kat.dry_run:\n print('Building CAM object')\n ant_names = [ant.name for ant in kat.ants]\n components = ['mcp'] + ant_names\n cam = cambuild(password=\"camcam\", full_control=\"all\",\n require=components, conn_clients=components)\n try:\n cam.until_synced(timeout=90)\n except TimeoutError:\n print(\"ERROR: Could not sync CAM container. \")\n for comp in cam.children.values():\n if not comp.synced:\n print(' CAM component not synced: ', comp.name)\n raise RuntimeError(\"CAM clients did not sync.\")\n\n delay_list = {}\n print(\"PPS delay values from file: {}\".format(opts.configdelayfile))\n try:\n delay_values = katconf.resource_string(opts.configdelayfile).split('\\n')\n for line in delay_values:\n x = ((line.strip('\\n')).split(','))\n if (len(x[0]) == 4 and x[0][0] == 'm'):\n delay_list[x[0]] = int(x[1])\n for ant in sorted(delay_list):\n print(' Receptor: {} delay: {}'.format(ant, delay_list[ant]))\n except Exception as exc:\n raise RuntimeError('Failed to read pps delay file from config! '\n 'File: {}. 
Exception: {}.'\n .format(opts.configdelayfile, exc))\n\n if opts.all:\n ants_active = list(cam.ants)\n else:\n ants_active = [ant for ant in cam.ants if ant.name not in\n kat.katpool.sensor.resources_in_maintenance.get_value()]\n ants_active.sort(key=lambda ant: ant.name)\n\n print('\\nSet PPS delay compensation for digitisers.')\n for ant in ants_active:\n # look at current delay and program in delay specified in CSV\n if ant.name in delay_list:\n # set the delay compensations for a digitiser (both L and U band)\n for band in bands:\n try:\n # Check if antenna has either l/u-band digitizer\n sensor_avail = getattr(\n ant.sensor, \"dig_{}_band_marking\".format(band)\n ).get_value()\n assert sensor_avail == 'ready'\n response = ant.req.dig_digitiser_offset(band)\n except AssertionError:\n print(\n \"[WARNING] Skipping antenna {}, \"\n \"it's missing the {}-band digitiser\".format(\n ant.name, band.upper())\n )\n except Exception as msg:\n print('Caught exception antenna %s' % ant.name)\n print(msg)\n raise\n else:\n curr_delay = literal_eval(response.reply.arguments[1])\n if curr_delay == delay_list[ant.name]:\n print(\n '{} on {}-band: no change to PPS delay offset.'\n .format(ant.name, band.upper())\n )\n else:\n print(\"{} {}-band current delay : {}.\".format(\n ant.name, band.upper(), curr_delay)\n )\n digitiser_offset = ant.req.dig_digitiser_offset(band,\n delay_list[ant.name]\n )\n print(\"{} {}-band PPS delay offset : {}.\".format(\n ant.name, band.upper(), digitiser_offset)\n )\n\n init_epoch = cam.mcp.sensor.dmc_synchronisation_epoch.get_value()\n # Takes approximately 5 seconds per digitiser, assuming the are 128 digitisers\n # gives 640 seconds.\n serial_sync_timeout = 640 # seconds\n print('Performing global sync on MeerKAT (timeout: {})...'.format(\n serial_sync_timeout)\n )\n start_time = time.time()\n cam.mcp.req.dmc_global_synchronise(timeout=serial_sync_timeout)\n print(\"Duration of global sync: {}\".format(time.time() - start_time))\n\n print('Previous sync time {:d}, waiting for new sync time'.format(init_epoch))\n cam_sleep = 2 # seconds to wait for CAM sensor retry\n wait_time = 0 # seconds\n while cam.mcp.sensor.dmc_synchronisation_epoch.get_value() == init_epoch:\n time.sleep(cam_sleep)\n wait_time += cam_sleep\n if wait_time >= serial_sync_timeout: # seconds\n raise RuntimeError(\"dmc could not sync, investigation is required...\")\n\n dmc_epoch = cam.mcp.sensor.dmc_synchronisation_epoch.get_value()\n for ant in ants_active:\n for band in bands:\n try:\n # Check if sensor is available in this antenna, and confirm if\n # digitiser has the freq band if not raise AssertionError\n sensor_avail = getattr(\n ant.sensor, \"dig_{}_band_marking\".format(band)\n ).get_value()\n assert sensor_avail == 'ready'\n except AssertionError:\n print(\n \"[WARNING] Skipping antenna {}, \"\n \"it's missing the {}-band digitiser\".format(\n ant.name, band.upper())\n )\n except Exception as errmsg:\n print(\"Caught an exception: {}\".format(str(errmsg)))\n else:\n print(\n \"Verify digitiser epoch for antenna {} in {}-band\"\n .format(ant.name, band.upper())\n )\n sensor_name = (\n \"dig_{}_band_time_synchronisation_epoch\".format(band)\n )\n epoch_sensor = getattr(ant.sensor, sensor_name)\n if not epoch_sensor:\n raise AttributeError(\n \"Missing `cam.ant[x].sensor.{}`\".format(sensor_name)\n )\n\n dig_sleep = 2 # seconds\n wait_time = 0 # seconds\n while epoch_sensor.get_value() != dmc_epoch:\n time.sleep(dig_sleep)\n wait_time += dig_sleep\n if wait_time >= 60: # seconds\n print(\n \" 
ant {} on {}-band could not sync with DMC, \"\n \"investigation is required...!!!!!\".format(\n ant.name, band.upper())\n )\n break\n print(\" {} sync epoch: {}\".format(\n ant.name, epoch_sensor.get_value())\n )\n\n print(\" Resetting capture destination {}\".format(ant.name))\n ant.req.deactivate()\n capture_list = str(ant.req.dig_capture_list())\n for line in capture_list.splitlines():\n print('\\t{}'.format(line))\n\n print(\"\\nScript complete\")\n finally:\n if cam:\n print(\"Cleaning up cam object\")\n cam.disconnect()\n if dmc_epoch:\n print(\"\\nGlobal Sync Date {}\".format(time.ctime(dmc_epoch)))\n# -fin-\n","sub_path":"utility/global_sync.py","file_name":"global_sync.py","file_ext":"py","file_size_in_byte":10606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
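The sync script above repeats the same poll-sleep-timeout loop for the DMC epoch and again for each digitiser sensor. A small helper capturing that pattern (the helper name and callable interface are assumptions, shown only to make the loop's contract explicit):

import time

def wait_for_value(read_value, expected, poll_s=2, timeout_s=60):
    # Poll read_value() until it equals expected, failing loudly on timeout,
    # mirroring the cam_sleep/dig_sleep loops in the script above.
    waited = 0
    while read_value() != expected:
        time.sleep(poll_s)
        waited += poll_s
        if waited >= timeout_s:
            raise RuntimeError('value did not update within %d s' % timeout_s)

# e.g. wait_for_value(epoch_sensor.get_value, dmc_epoch, timeout_s=60)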
+{"seq_id":"374568747","text":"\n\n#class header\nclass _TRANSLUCENT():\n\tdef __init__(self,): \n\t\tself.name = \"TRANSLUCENT\"\n\t\tself.definitions = [u'If an object or a substance is translucent, it is almost transparent, allowing some light through it in an attractive way: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_translucent.py","file_name":"_translucent.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
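A hypothetical call to the generated word class, showing what run() returns (the object names are made up):

w = _TRANSLUCENT()
print(w.run('light', 'frosted_glass'))
# -> {'frosted_glass': {'properties': 'translucent'}}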
+{"seq_id":"235601079","text":"# Commented out IPython magic to ensure Python compatibility.\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom torchvision.utils import make_grid\nimport pandas as pd\nimport imageio\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom PIL import Image\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.logging import TensorBoardLogger\n\nimport os\nimport shutil\n#from os import join\nfrom time import time\nimport numpy as np \nimport cv2 \n\nimport pandas as pd\nfrom time import time\n\nimport random\n\n# %run Utils.ipynb\n# %run Visualize.ipynb\n# %run Model.ipynb\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom matplotlib.patches import Polygon\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.cm import jet\n\nimport matplotlib\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon\nfrom matplotlib.collections import PatchCollection\n# %matplotlib inline\n\nfrom scipy.optimize import minimize\nfrom numpy import linalg as lin\nfrom shapely.geometry import Polygon as P\nfrom shapely.geometry import box\n\nfrom sklearn.metrics import confusion_matrix, average_precision_score, f1_score, precision_score, recall_score, precision_recall_curve\nfrom scipy.spatial.transform import Rotation\n\npath = \"/content/1000/captures_buffalo_1000_random\"\n\nimage_folder = os.path.join(path, 'OutputKP')\noutput_folder = os.path.join(path, 'output')\n\nos.makedirs(output_folder, exist_ok=True)\nos.makedirs(image_folder, exist_ok=True)\n\n#saveFileToDrive('/content/Can/1000/captures_can_1000_random/captures/GroundTruth', 'CameraMatrix.txt')\n\n\"\"\"--------------**Uitility**-------------------\"\"\"\n\nclass Dataset(Dataset):\n\n def __init__(self, root_dir = 'captures', folder=\"Train/\" , kp_file = 'image_%05d_img', transform=None, length=5):\n self.root_dir = os.path.join(path, root_dir, folder)\n \n self.key_pts_file = os.path.join(self.root_dir, kp_file)\n self.transform = transform\n \n files = os.listdir(self.root_dir)\n self.dataLen = int(len(files)/length)\n\n def __len__(self):\n return self.dataLen - 1\n\n def __getitem__(self, idx):\n # ignoring the first image as, they are not properly annotated\n idx = idx + 1\n \n image = imageio.imread(os.path.join(self.root_dir, \"{}.png\".format(self.key_pts_file %idx)))\n if(image.shape[2] == 4):\n image = image[:,:,0:3]\n \n initialKPs = np.array(pd.read_csv(os.path.join(self.root_dir, \"{}-ORB.txt\".format(self.key_pts_file %idx)), header=None))\n initialKPs = np.c_[ initialKPs, np.ones(initialKPs.shape[0]) ]\n \n KPs = np.array(pd.read_csv(os.path.join(self.root_dir, \"{}-GT.txt\".format(self.key_pts_file %idx)), header=None))\n KPs = np.c_[ KPs, np.ones(KPs.shape[0]) ]\n \n ## Area of image\n scaleArea = box(0,0,image.shape[0], image.shape[1]).area\n \n image = Image.fromarray(image)\n \n bb = np.array(pd.read_csv(os.path.join(self.root_dir, \"{}-BOUND.txt\".format(self.key_pts_file %idx)), header=None)).ravel()\n rot = np.array(pd.read_csv(os.path.join(self.root_dir, \"{}-Rot.txt\".format(self.key_pts_file %idx)), header=None, sep=' '))\n trans = np.array(pd.read_csv(os.path.join(self.root_dir, \"{}-Trans.txt\".format(self.key_pts_file %idx)), header=None, sep=' 
')).reshape((1,3))\n \n item = {'image': image, 'original_image': np.asarray(image) ,'bb': bb, 'initial_keypoints' : initialKPs, 'keypoints': KPs, 'rot':rot, 'trans':trans,'scaleArea':scaleArea}\n if self.transform is not None:\n item = self.transform(item)\n return item\n\nclass DatasetReal(Dataset):\n\n def __init__(self, root_dir = 'captures', folder=\"RealTest/\" , kp_file = 'image_%05d_img', transform=None, length=4):\n self.root_dir = os.path.join(path, root_dir, folder)\n \n self.key_pts_file = os.path.join(self.root_dir, kp_file)\n self.transform = transform\n \n files = os.listdir(self.root_dir)\n self.dataLen = int(len(files)/length)\n\n def __len__(self):\n return self.dataLen\n\n def __getitem__(self, idx):\n \n image = imageio.imread(os.path.join(self.root_dir, \"{}.png\".format(self.key_pts_file %idx)))\n if(image.shape[2] == 4):\n image = image[:,:,0:3]\n \n scaleArea = box(0,0,image.shape[0], image.shape[1]).area\n \n image = Image.fromarray(image)\n\n KPs = np.array(pd.read_csv(os.path.join(self.root_dir, \"{}-GT.txt\".format(self.key_pts_file %idx)), header=None, sep=' '))\n KPs = np.c_[ KPs, np.ones(KPs.shape[0]) ]\n #print('real KPS' , KPs)\n \n bb = np.array(pd.read_csv(os.path.join(self.root_dir, \"{}-BOUND.txt\".format(self.key_pts_file %idx)), header=None, sep=' ')).reshape((4))\n #bb = np.array([ [b[0], b[1]], [b[0], b[3]], [b[2], b[1]], [b[2], b[3]] ])\n \n camera = np.array(pd.read_csv(os.path.join(self.root_dir, \"{}-Camera.txt\".format(self.key_pts_file %idx)), header=None, sep=' ')).reshape((3,3))\n rot = np.array(pd.read_csv(os.path.join(self.root_dir, \"{}-Rot.txt\".format(self.key_pts_file %idx)), header=None, sep=' ')).reshape((3,3))\n trans = np.array(pd.read_csv(os.path.join(self.root_dir, \"{}-Trans.txt\".format(self.key_pts_file %idx)), header=None, sep=' ')).reshape((1,3))\n \n item = {'image': image, 'original_image': np.asarray(image), 'initial_keypoints' : KPs.copy(), 'keypoints': KPs, 'bb': bb, 'camera':camera, 'rot':rot, 'trans':trans, 'scaleArea':scaleArea}\n if self.transform is not None:\n item = self.transform(item)\n return item\n\n#Transformations\n\ndef generate_heatmap(heatmap, pt, sigma):\n heatmap[int(pt[1])][int(pt[0])] = 1\n heatmap = cv2.GaussianBlur(heatmap, sigma, 0)\n am = np.amax(heatmap)\n heatmap /= am\n return heatmap\n\ndef render_onehot_heatmap(coord, input_shape,output_shape):\n #print(coord.shape)\n num_kps = 18\n batch_size = 1\n\n x = np.reshape(coord[:,0] / input_shape[1] * output_shape[1],[-1])\n y = np.reshape(coord[:,1] / input_shape[0] * output_shape[0],[-1])\n x_floor = np.floor(x)\n y_floor = np.floor(y)\n\n x_floor = np.clip(x_floor, 0, output_shape[1] - 1) # fix out-of-bounds x\n y_floor = np.clip(y_floor, 0, output_shape[0] - 1) # fix out-of-bounds y\n #print(\"floor \", x_floor, y_floor)\n indices_batch = np.expand_dims(\\\n np.reshape(\\\n np.transpose(\\\n np.tile(\\\n np.expand_dims(np.arange(batch_size),0)\\\n ,[num_kps,1])\\\n ,[1,0])\\\n ,[-1]).astype(float),1)\n #print(\"indices_batch\" , indices_batch.shape)\n indices_batch = np.concatenate([indices_batch, indices_batch, indices_batch, indices_batch], axis=0)\n indices_joint = np.expand_dims(np.tile(np.arange(num_kps),[batch_size]),1).astype(float)\n indices_joint = np.concatenate([indices_joint, indices_joint, indices_joint, indices_joint], axis=0)\n #print(\"indices_joint\" , indices_joint.shape)\n indices_lt = np.concatenate([np.expand_dims(y_floor-1,1), np.expand_dims(x_floor-1,1)], axis=1)\n indices_lb = np.concatenate([np.expand_dims(y_floor,1), 
np.expand_dims(x_floor-1,1)], axis=1)\n indices_rt = np.concatenate([np.expand_dims(y_floor-1,1), np.expand_dims(x_floor,1)], axis=1)\n indices_rb = np.concatenate([np.expand_dims(y_floor,1), np.expand_dims(x_floor,1)], axis=1)\n\n indices = np.concatenate([indices_lt, indices_lb, indices_rt, indices_rb], axis=0)\n #print(\"indices\" , indices.shape, np.where(indices==64))\n indices = np.concatenate([indices_batch, indices, indices_joint], axis=1).astype(int)\n\n prob_lt = (1 - (x - x_floor)) * (1 - (y - y_floor))\n prob_lb = (1 - (x - x_floor)) * (y - y_floor)\n prob_rt = (x - x_floor) * (1 - (y - y_floor))\n prob_rb = (x - x_floor) * (y - y_floor)\n probs = np.concatenate([prob_lt, prob_lb, prob_rt, prob_rb], axis=0)\n\n heatmap = scatter_nd_numpy(indices, probs, (batch_size, *output_shape, num_kps))\n normalizer = np.reshape(np.sum(heatmap,axis=(1,2)),[batch_size,1,1,num_kps])\n normalizer = np.where(np.equal(normalizer,0),np.ones_like(normalizer),normalizer)\n heatmap = heatmap / normalizer\n \n return np.squeeze(heatmap) \n \ndef scatter_nd_numpy(indices, updates, shape):\n target = np.zeros(shape, dtype=updates.dtype)\n indices = tuple(indices.reshape(-1, indices.shape[-1]).T)\n updates = updates.ravel()\n np.add.at(target, indices, updates)\n return target\n\ndef render_gaussian_heatmap(coord, output_shape, input_shape, sigma):\n \n x = [i for i in range(output_shape[1])]\n y = [i for i in range(output_shape[0])]\n xx,yy = np.meshgrid(y,x, indexing='ij')\n xx = np.reshape(xx, (*output_shape,1))\n yy = np.reshape(yy, (*output_shape,1))\n \n \n x = np.reshape(coord[:,0],[1,1,coord.shape[0]]) / input_shape[1] * output_shape[1]\n y = np.reshape(coord[:,1],[1,1,coord.shape[0]]) / input_shape[0] * output_shape[0]\n \n heatmap = np.exp(-(((xx-x)/np.float(sigma))**2)/np.float(2) -(((yy-y)/np.float(sigma))**2)/np.float(2))\n #print(\"heatmap.shape \", heatmap.shape)\n return heatmap * 255.\n\n\ndef heatmaps_to_locs(heatmaps, outSize = (64, 64)):\n heatmaps = heatmaps.cpu().numpy()\n conf = np.max(heatmaps, axis=(-2,-1))\n locs = np.argmax(heatmaps.reshape((*heatmaps.shape[:2], -1)), axis=-1)\n locs = np.stack(np.unravel_index(locs, outSize)[::-1], axis=-1) # reverse x,y\n return torch.from_numpy(np.concatenate([locs, conf[..., None]], axis=-1).astype('float64'))\n\n\nclass CropAndPad:\n\n def __init__(self, out_size=(256,256), train=True, real=True):\n self.out_size = out_size[::-1]\n self.train = train\n self.real = real\n\n def __call__(self, sample):\n image, bb = sample['image'], sample['bb']\n # img_size = image.size\n if self.train:\n min_x,max_y,max_x,min_y = bb[0], bb[1], bb[2], bb[3]\n else:\n if self.real:\n ## This is for Homebrew dataset\n ##min_x,max_y,max_x,min_y = bb[0]-25, bb[1]-25, bb[0] + bb[2] + 25 , bb[1] + bb[3] + 25\n ## For others, probabaly\n min_x,max_y,max_x,min_y = bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3]\n else:\n min_x,max_y,max_x,min_y = bb[0], bb[1], bb[2], bb[3]\n\n center_x = (min_x + max_x) / 2\n center_y = (min_y + max_y) / 2\n width, height = max_x-min_x, max_y-min_y\n \n scaleFactor = max([width, height])\n min_x = int(center_x) - int(scaleFactor)//2 \n min_y = int(center_y) - int(scaleFactor)//2\n max_x = int(center_x) + int(scaleFactor)//2 \n max_y = int(center_y) + int(scaleFactor)//2 \n ## This is for Homebrew dataset\n ## Image crop works in a way (0, 0, 10, 10) but here the \n ## image coordinates are revresed on Y-axis nd so the crop.\n sample['image'] = image.crop(box=(min_x,min_y,max_x,max_y))\n sample['orig_image'] = image\n sample['center'] = 
np.array([center_x, center_y], dtype=np.float32)\n sample['width'] = width\n sample['height'] = height\n ## This sclae is used for OKS calculation\n sample['scaleArea'] = np.sqrt(np.divide(box(min_x,min_y,max_x,max_y).area, sample['scaleArea']))\n #print(sample['scaleArea'])\n \n w, h= self.out_size\n ## Crop and scale\n sample['crop'] = np.array([min_x, min_y], dtype=np.float32)\n sample['scale'] = np.array([w/scaleFactor, h/scaleFactor] , dtype=np.float32)\n \n if width != self.out_size[0]:\n sample['image'] = sample['image'].resize((w, h))\n if 'mask' in sample:\n sample['mask'] = sample['mask'].crop(box=(min_x,min_y,max_x,max_y)).resize((w, h))\n if 'keypoints' in sample:\n keypoints = sample['keypoints']\n for i in range(keypoints.shape[0]):\n if keypoints[i,0] < min_x or keypoints[i,0] > max_x or keypoints[i,1] < min_y or keypoints[i,1] > max_y:\n keypoints[i,:] = [0,0,0]\n else:\n keypoints[i,:2] = (keypoints[i,:2]-sample['crop'] )*sample['scale']\n sample['keypoints'] = keypoints\n \n if 'initial_keypoints' in sample:\n initial_keypoints = sample['initial_keypoints']\n for i in range(initial_keypoints.shape[0]):\n if initial_keypoints[i,0] < min_x or initial_keypoints[i,0] > max_x \\\n or initial_keypoints[i,1] < min_y or initial_keypoints[i,1] > max_y:\n initial_keypoints[i,:] = [0,0,0]\n else:\n initial_keypoints[i,:2] = (initial_keypoints[i,:2]-sample['crop'] )*sample['scale']\n \n sample['initial_keypoints'] = initial_keypoints\n sample.pop('bb')\n return sample\n\n# Convert keypoint locations to heatmaps\nclass LocsToHeatmaps:\n\n def __init__(self, img_size=(256,256), out_size=(64,64), sigma=1, algo : str=None):\n self.img_size = img_size\n self.out_size = out_size\n self.x_scale = 1.0 * out_size[0]/img_size[0]\n self.y_scale = 1.0 * img_size[0]/img_size[0]\n self.sigma=sigma\n x = np.arange(0, out_size[1], dtype=np.float)\n y = np.arange(0, out_size[0], dtype=np.float)\n self.yg, self.xg = np.meshgrid(y,x, indexing='ij')\n self.algo = algo\n \n return\n\n def __call__(self, sample):\n sigma = 7\n gaussian_hm = np.zeros((self.out_size[0], self.out_size[1], sample['keypoints'].shape[0]))\n if self.algo == 'PoseFix':\n gaussian_hm = render_onehot_heatmap(sample['keypoints'], self.img_size, self.out_size)\n #print(gaussian_hm.shape)\n #gaussian_hm = render_gaussian_heatmap(sample['keypoints'], self.img_size, self.img_size, sigma)\n #print(gaussian_hm.shape)\n else:\n for i,keypoint in enumerate(sample['keypoints']):\n if keypoint[2] != 0:\n gaussian_hm[:,:,i] = generate_heatmap(gaussian_hm[:,:,i], tuple(keypoint.astype(np.int) * self.x_scale), (sigma, sigma))\n sample['keypoint_locs'] = sample['keypoints'][:,:2]\n sample['visible_keypoints'] = sample['keypoints'][:,2]\n sample['keypoint_heatmaps'] = gaussian_hm\n \n gaussian_hm_init = np.zeros((self.img_size[0], self.img_size[1], sample['initial_keypoints'].shape[0]))\n #print(\" gaussian_hm_init : \", gaussian_hm_init.shape)\n if self.algo == 'PoseFix':\n gaussian_hm_init = render_gaussian_heatmap(sample['keypoints'], self.img_size, self.img_size, sigma)\n else:\n for i,initial_keypoints in enumerate(sample['initial_keypoints']):\n if initial_keypoints[2] != 0:\n gaussian_hm_init[:,:,i] = generate_heatmap(gaussian_hm_init[:,:,i], tuple(initial_keypoints.astype(np.int) * self.x_scale ), \\\n (sigma, sigma))\n sample['initial_keypoints_locs'] = sample['initial_keypoints'][:,:2]\n sample['visible_initial_keypoints'] = sample['initial_keypoints'][:,2]\n sample['initial_keypoints_heatmaps'] = gaussian_hm_init\n \n return sample\n\n# 
Convert numpy arrays to Tensor objects\n# Permute the image dimensions\nclass ToTensor:\n\n def __init__(self, downsample_mask=False):\n self.tt = transforms.ToTensor()\n self.downsample_mask=downsample_mask\n\n def __call__(self, sample):\n sample['image'] = self.tt(sample['image'])\n if 'orig_image' in sample:\n sample['orig_image'] = self.tt(sample['orig_image'])\n if 'mask' in sample:\n if self.downsample_mask:\n sample['mask'] = self.tt(sample['mask'].resize((64,64), Image.ANTIALIAS))\n else:\n sample['mask'] = self.tt(sample['mask'])\n if 'in_mask' in sample:\n sample['in_mask'] = self.tt(sample['in_mask'])\n # sample['in_mask'] = sample['in_mask'].unsqueeze(0)\n if 'keypoint_heatmaps' in sample:\n sample['keypoint_heatmaps'] =\\\n torch.from_numpy(sample['keypoint_heatmaps'].astype(np.float32).transpose(2,0,1))\n sample['keypoint_locs'] =\\\n torch.from_numpy(sample['keypoint_locs'].astype(np.float32))\n sample['visible_keypoints'] =\\\n torch.from_numpy(sample['visible_keypoints'].astype(np.float32))\n \n if 'initial_keypoints_heatmaps' in sample:\n sample['initial_keypoints_heatmaps'] =\\\n torch.from_numpy(sample['initial_keypoints_heatmaps'].astype(np.float32).transpose(2,0,1))\n sample['initial_keypoints_locs'] =\\\n torch.from_numpy(sample['initial_keypoints_locs'].astype(np.float32))\n sample['visible_initial_keypoints'] =\\\n torch.from_numpy(sample['visible_initial_keypoints'].astype(np.float32))\n \n return sample\n\nclass Normalize:\n\n def __call__(self, sample):\n sample['image'] = 2*(sample['image']-0.5)\n if 'in_mask' in sample:\n sample['in_mask'] = 2*(sample['in_mask']-0.5)\n return sample\n\n\n\"\"\"---------------**MODEL**-----------------------\"\"\"\n\n'''\nCode is from https://github.com/bearpaw/pytorch-pose\nHourglass network inserted in the pre-activated Resnet\nUse lr=0.01 for current version\n(c) YANG, Wei\n'''\n__all__ = ['HourglassNet', 'hg']\n\nclass Bottleneck(nn.Module):\n expansion = 2\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=True)\n self.bn3 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=True)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.bn1(x)\n out = self.relu(out)\n out = self.conv1(out)\n\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n out = self.bn3(out)\n out = self.relu(out)\n out = self.conv3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n #print(\"Bottleneck \", out.shape)\n return out\n\n\nclass Hourglass(nn.Module):\n def __init__(self, block, num_blocks, planes, depth):\n super(Hourglass, self).__init__()\n self.depth = depth\n self.block = block\n self.hg = self._make_hour_glass(block, num_blocks, planes, depth)\n\n def _make_residual(self, block, num_blocks, planes):\n layers = []\n for i in range(0, num_blocks):\n layers.append(block(planes*block.expansion, planes))\n return nn.Sequential(*layers)\n\n def _make_hour_glass(self, block, num_blocks, planes, depth):\n hg = []\n for i in range(depth):\n res = []\n for j in range(3):\n res.append(self._make_residual(block, num_blocks, planes))\n if i == 0:\n 
res.append(self._make_residual(block, num_blocks, planes))\n hg.append(nn.ModuleList(res))\n return nn.ModuleList(hg)\n\n def _hour_glass_forward(self, n, x):\n up1 = self.hg[n-1][0](x)\n low1 = F.max_pool2d(x, 2, stride=2)\n low1 = self.hg[n-1][1](low1)\n\n if n > 1:\n low2 = self._hour_glass_forward(n-1, low1)\n else:\n low2 = self.hg[n-1][3](low1)\n low3 = self.hg[n-1][2](low2)\n up2 = F.interpolate(low3, scale_factor=2)\n out = up1 + up2\n #print(\"Hourglass \", out.shape)\n return out\n\n def forward(self, x):\n return self._hour_glass_forward(self.depth, x)\n\n\nclass HourglassNet(nn.Module):\n '''Hourglass model from Newell et al ECCV 2016'''\n def __init__(self, block, num_stacks=2, num_blocks=4, num_classes=16, ch_input = 21):\n super(HourglassNet, self).__init__()\n\n self.inplanes = 256\n self.num_feats = 256\n self.num_stacks = num_stacks\n self.conv1 = nn.Conv2d(ch_input, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=True)\n self.bn1 = nn.BatchNorm2d(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_residual(block, self.inplanes, 1)\n self.layer2 = self._make_residual(block, self.inplanes, 1)\n self.layer3 = self._make_residual(block, self.num_feats, 1)\n #self.layer4 = self._make_residual(block, self.num_feats, 1)\n self.maxpool = nn.MaxPool2d(2, stride=2)\n\n # build hourglass modules\n ch = self.num_feats*block.expansion\n hg, res, fc, score, fc_, score_ = [], [], [], [], [], []\n for i in range(num_stacks):\n hg.append(Hourglass(block, num_blocks, self.num_feats, 4))\n res.append(self._make_residual(block, self.num_feats, num_blocks))\n fc.append(self._make_fc(ch, ch))\n score.append(nn.Conv2d(ch, num_classes, kernel_size=1, bias=True))\n if i < num_stacks-1:\n fc_.append(nn.Conv2d(ch, ch, kernel_size=1, bias=True))\n score_.append(nn.Conv2d(num_classes, ch, kernel_size=1, bias=True))\n self.hg = nn.ModuleList(hg)\n self.res = nn.ModuleList(res)\n self.fc = nn.ModuleList(fc)\n self.score = nn.ModuleList(score)\n self.fc_ = nn.ModuleList(fc_)\n self.score_ = nn.ModuleList(score_)\n\n def _make_residual(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=True),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _make_fc(self, inplanes, outplanes):\n bn = nn.BatchNorm2d(inplanes)\n conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=True)\n return nn.Sequential(\n conv,\n bn,\n self.relu,\n )\n\n def forward(self, x):\n out = []\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.layer1(x)\n x = self.maxpool(x)\n x = self.layer2(x)\n x = self.layer3(x)\n #x = self.layer4(x)\n\n for i in range(self.num_stacks):\n y = self.hg[i](x)\n y = self.res[i](y)\n y = self.fc[i](y)\n score = self.score[i](y)\n out.append(score)\n if i < self.num_stacks-1:\n fc_ = self.fc_[i](y)\n score_ = self.score_[i](score)\n x = x + fc_ + score_\n return out\n\n\ndef hg(num_stacks=1, num_blocks=1, num_classes=10, ch_input=21):\n model = HourglassNet(Bottleneck, num_stacks=num_stacks, num_blocks=num_blocks, num_classes=num_classes, ch_input = ch_input)\n return 
model\n\n\n\"\"\"---------------**Trainer**-----------------------\"\"\"\n\n\"\"\"----------------**Train/Test Parallel**----------------\"\"\"\n\nclass EstimatePose(pl.LightningModule):\n\n    def __init__(self, root_dir = 'captures', num_classes = 51, batch_size = 1, length=10, algo = None, loadModel = None):\n        super(EstimatePose, self).__init__()\n\n        self.root_dir = root_dir\n        self.length = length\n        self.batch_size = batch_size\n        self.loadModel = loadModel\n\n        self.num_classes = num_classes\n        self.model = hg(num_stacks=1, num_blocks=1, num_classes=self.num_classes, ch_input=3) \n        \n        self.train_transform_list = [CropAndPad(out_size=(256, 256)),LocsToHeatmaps(out_size=(64, 64), algo = algo),ToTensor(), Normalize()]\n        \n        self.heatmap_loss = torch.nn.MSELoss()\n        \n        ## Values\n        self.running_loss = 0\n        self.running_val_loss = 0\n        self.last_epoch = 1\n        self.last_val_epoch = 1\n\n    def forward(self, x):\n        # Fixed: the original called self.model.forward() with no input and no return.\n        return self.model(x)\n\n    @pl.data_loader\n    def train_dataloader(self):\n        # REQUIRED\n        self.train_ds = Dataset(root_dir=self.root_dir, transform=transforms.Compose(self.train_transform_list), length=self.length)\n        self.train_data_loader = DataLoader(self.train_ds, batch_size=self.batch_size,\n                                            num_workers=8,\n                                            pin_memory=True,\n                                            shuffle=True)\n        return self.train_data_loader\n\n    @pl.data_loader\n    def val_dataloader(self):\n        # Optional\n        self.val_ds = DatasetReal(root_dir=self.root_dir, transform=transforms.Compose(self.train_transform_list), length=6)\n        self.val_data_loader = DataLoader(self.val_ds, batch_size=self.batch_size,\n                                          num_workers=8,\n                                          pin_memory=True,\n                                          shuffle=True)\n        return self.val_data_loader\n\n    def validation_step(self, batch, batch_idx):\n\n        if self.last_val_epoch != self.current_epoch:\n\n            self.last_val_epoch = self.current_epoch\n            self.running_val_loss = 0\n\n        pred_heatmap_list = self.model(batch['image'])\n        self.val_loss = self.heatmap_loss(batch['keypoint_heatmaps'], pred_heatmap_list[-1])\n\n        self.running_val_loss += self.val_loss.item() \n\n        return {'val_loss': self.val_loss}\n\n    def validation_end(self, outputs):\n        \n        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n        tensorboard_logs = {'val_loss_avg': avg_loss, 'val_loss':self.running_val_loss}\n        return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}\n\n    def training_step(self, batch, batch_idx):\n        \n        if self.last_epoch != self.current_epoch:\n            self.last_epoch = self.current_epoch\n            self.running_loss = 0\n\n        pred_heatmap_list = self.model(batch['image'])\n        self.loss = self.heatmap_loss(batch['keypoint_heatmaps'], pred_heatmap_list[-1])\n\n        self.running_loss += self.loss.item() \n        tensorboard_logs = {'train_loss': self.running_loss, 'loss':self.loss, 'val_loss': self.running_val_loss}\n\n        output = {\n            'loss': self.loss,\n            'progress_bar': tensorboard_logs,\n            'log': tensorboard_logs\n        }\n        return output\n\n    def configure_optimizers(self):\n        \"\"\"\n        return whatever optimizers we want here\n        :return: list of optimizers\n        \"\"\"\n        optimizer = torch.optim.Adam(self.model.parameters(), lr=2.5e-4)\n        return [optimizer]\n    \n    \n\"\"\"----------------**Main**----------------\"\"\"\n\ndef main():\n    checkpoint = ModelCheckpoint(\n        filepath=output_folder,\n        verbose=1,\n        save_top_k=-1,\n        monitor='val_loss',\n        save_weights_only=False,\n        period=50,\n        prefix='semanticKDD_'\n    )\n\n    # Fixed: logger and model were referenced below but never defined; these\n    # constructions are assumed defaults, not taken from the original notebook.\n    logger = TensorBoardLogger(output_folder, name='pose_estimation')\n    model = EstimatePose(root_dir='captures')\n\n    trainer = pl.Trainer(train_percent_check=1,\n                        val_percent_check=1,\n                        logger=logger,\n                        min_epochs=300, \n                        gpus=1,\n                        show_progress_bar=True, \n                        checkpoint_callback=checkpoint,\n                        early_stop_callback=False,\n                        resume_from_checkpoint=None)\n\n    trainer.fit(model)","sub_path":"Deep-NN/3DPoseTracking/Colab/.ipynb_checkpoints/poseestimation_colab_v2-checkpoint.py","file_name":"poseestimation_colab_v2-checkpoint.py","file_ext":"py","file_size_in_byte":28432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
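The training pipeline above reduces to two moves: render each keypoint as a Gaussian heatmap (generate_heatmap) and decode predictions back to coordinates with an argmax (heatmaps_to_locs). A self-contained round trip of that idea:

import numpy as np
import cv2

hm = np.zeros((64, 64), dtype=np.float32)
hm[20, 37] = 1.0                      # ground-truth keypoint at (x=37, y=20)
hm = cv2.GaussianBlur(hm, (7, 7), 0)  # spread it into a Gaussian blob
hm /= hm.max()                        # normalise the peak to 1, as generate_heatmap does
y, x = np.unravel_index(hm.argmax(), hm.shape)
print(x, y)                           # -> 37 20: the argmax recovers the keypoint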
+{"seq_id":"507026513","text":"# Fit the clusters to a line.\n\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\noFileName = \"thermals\\\\triggers.txt\"\nwith open(oFileName, \"w\") as oFile:\n\twith open(\"thermals\\\\lines.txt\", \"r\") as thermalLines:\n\t\tcentroidLatitudes, centroidLongitudes = [], []\n\t\tfor line in thermalLines.readlines():\n\t\t\ttokens = line.split()\n\t\t\tcentroidLatitudes.append(float(tokens[1]))\n\t\t\tcentroidLongitudes.append(float(tokens[2]))\n\t\t\n\t\tfig = plt.subplot()\n\t\tfig.scatter(centroidLatitudes, centroidLongitudes)\n\t\tplt.show()","sub_path":"group_thermals.py","file_name":"group_thermals.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
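The file's header says it fits the clusters to a line, but the script only scatters the centroids and leaves triggers.txt empty. A minimal least-squares sketch of the missing fit (the sample coordinates are made up):

import numpy as np

lats = np.array([47.10, 47.18, 47.27, 47.33])  # assumed centroid latitudes
lons = np.array([8.50, 8.53, 8.55, 8.58])      # assumed centroid longitudes
slope, intercept = np.polyfit(lats, lons, 1)   # lon ~ slope*lat + intercept
print(slope, intercept)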
+{"seq_id":"31293783","text":"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target='GPU')\n\n\nclass StridedSlice(nn.Cell):\n def __init__(self):\n super(StridedSlice, self).__init__()\n self.stridedslice = P.StridedSlice()\n\n def construct(self, x):\n return self.stridedslice(x, (2, 0, 0), (3, 2, 3), (1, 1, 1))\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_gpu_training\n@pytest.mark.env_onecard\ndef test_slice():\n x = Tensor(np.array([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], [[5, 5, 5], [6, 7, 8]]]).astype(np.int32))\n stridedslice = StridedSlice()\n output = stridedslice(x)\n expect = [[[5., 5., 5.],\n [6., 7., 8.]]]\n assert (output.asnumpy() == expect).all()\n","sub_path":"tests/st/ops/gpu/test_stridedslice_op.py","file_name":"test_stridedslice_op.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
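The fixture is easy to verify by hand, because StridedSlice with begin=(2,0,0), end=(3,2,3), strides=(1,1,1) is exactly NumPy basic slicing:

import numpy as np

x = np.array([[[1, 1, 1], [2, 2, 2]],
              [[3, 3, 3], [4, 4, 4]],
              [[5, 5, 5], [6, 7, 8]]])
print(x[2:3, 0:2, 0:3])  # [[[5 5 5]
                         #   [6 7 8]]] -- the test's expected output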
+{"seq_id":"453692887","text":"from flask import Flask, redirect, request, jsonify , g , url_for , session , current_app\nfrom app.bll import UserBLL\nfrom app.utils import BaseModel\nfrom .. import api\nimport uuid\nimport time\nimport json\nfrom app.utils import codeenum as CE\n\nclass AuthController:\n    \n    @api.route('/auth/login' , methods=['POST'])\n    def login():\n        model = BaseModel()\n        mobile = request.get_json().get('mobile')\n        password = request.get_json().get('password')\n        data = UserBLL.login(mobile,password)\n        if data['Code'] != '000000':\n            model.ErrCode = data['Code']\n            model.Message = data['Message']\n            model.Code = '1'\n            return jsonify(model.to_json())\n        user = data['Data']\n        userContent = {'userid' : user.id , 'online' : '1'}\n        token = ''.join(str(uuid.uuid3(uuid.NAMESPACE_URL , str(user.id))).split('-'))\n        tokenContent = {'userid' : user.id , 'expired' : int(time.time()) + 7200}\n        refreshToken = ''.join(str(uuid.uuid3(uuid.NAMESPACE_URL , token)).split('-'))\n        refreshContent = {'token' : token , 'expired' : int(time.time()) + 7200 * 2}\n        current_app.redis.set(token , json.dumps(tokenContent))\n        current_app.redis.set(refreshToken , json.dumps(refreshContent))\n        current_app.redis.set(str(user.id) , json.dumps(userContent))\n        model.Code = '0'\n        model.ErrCode = '000000'\n        model.Data = {'userid' : user.id , 'token' : token , 'refreshtoken' : refreshToken}\n        return jsonify(model.to_json())\n\n    @api.route('/auth/refreshtoken' , methods=['POST'])\n    def refreshToken():\n        model = BaseModel()\n        token = request.get_json().get('token')\n        refreshtoken = request.get_json().get('refreshtoken')\n        if not token or not refreshtoken:\n            model.Code = '1'\n            model.ErrCode = CE.CommonErrorCode.ParametersError.value\n            model.Message = CE.CommonErrorCode.ParametersError.name\n            return jsonify(model.to_json())\n        refreshContent = json.loads(current_app.redis.get(refreshtoken))\n        if not refreshContent or refreshContent['expired'] < int(time.time()):\n            model.Code = '1'\n            model.ErrCode = CE.UserErrorCode.UserOffline.value\n            model.Message = CE.UserErrorCode.UserOffline.name\n            return jsonify(model.to_json())\n        if token != refreshContent['token']:\n            model.Code = '1'\n            model.ErrCode = CE.CommonErrorCode.TokenInvalid.value\n            model.Message = CE.CommonErrorCode.TokenInvalid.name\n            return jsonify(model.to_json())\n        tokenContent = json.loads(current_app.redis.get(token))\n        current_app.redis.delete(token)\n        current_app.redis.delete(refreshtoken)  # fixed typo: was current_app.reids\n        newtoken = ''.join(str(uuid.uuid3(uuid.NAMESPACE_URL , str(tokenContent['userid']))).split('-'))\n        tokenContent['expired'] = int(time.time()) + 7200\n        newrefreshtoken = ''.join(str(uuid.uuid3(uuid.NAMESPACE_URL , newtoken)).split('-'))\n        refreshContent['token'] = newtoken\n        refreshContent['expired'] = int(time.time()) + 14400\n        current_app.redis.set(newtoken , json.dumps(tokenContent))\n        current_app.redis.set(newrefreshtoken , json.dumps(refreshContent))\n        model.Code = '0'\n        model.ErrCode = '000000'\n        model.Data = {'userid' : tokenContent['userid'] , 'token' : newtoken , 'refreshtoken' : newrefreshtoken}\n        return jsonify(model.to_json())\n\n    @api.before_request\n    def before_request():\n        token = request.headers.get('token')\n        tokenContentStr = current_app.redis.get(token)\n        if not tokenContentStr:\n            return\n        user = json.loads(tokenContentStr)\n        if user:\n            g.current_user = UserBLL.getUserById(user['userid'])\n            return","sub_path":"Api/app/app_1_0/controllers/authcontroller.py","file_name":"authcontroller.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
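The refresh flow above stores an 'expired' timestamp in the token payload and compares it by hand on every request. A common alternative is to let Redis expire the key itself via setex; a minimal sketch (redis_client and issue_token are hypothetical names, not part of the controller):

import json
import uuid

def issue_token(redis_client, user_id, ttl_s=7200):
    # Redis drops the key after ttl_s seconds, so no 'expired' field is
    # needed: a missing key simply means the token is invalid or expired.
    token = uuid.uuid4().hex
    redis_client.setex(token, ttl_s, json.dumps({'userid': user_id}))
    return token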
+{"seq_id":"173718356","text":"#!/usr/bin/python3\n# coding: utf-8\n\nfrom functools import wraps\n\nPERMISSIONS = {\n \"view\": [\n \"teachers\",\n \"date\",\n \"classrooms\",\n \"groups\",\n \"title\",\n \"id\",\n ],\n \"user\": [\n \"create\",\n \"update\",\n \"remove\",\n \"permissions\",\n ]\n}\n\n\n# Decorator\ndef checkPermissionExist(fn):\n @wraps(fn)\n def decorator(namespace, name):\n if namespace not in PERMISSIONS:\n raise Exception(\n 'Namespace %s not in available permissions' % namespace)\n\n if name not in PERMISSIONS[namespace]:\n raise Exception(\n 'Permission name %s not in namespace %s' % (name, namespace))\n\n return fn(namespace, name)\n return decorator\n","sub_path":"PlanningServer/authorisation/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
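A short usage example of the permission decorator (the decorated function is hypothetical):

@checkPermissionExist
def grant(namespace, name):
    return '%s:%s granted' % (namespace, name)

print(grant('view', 'teachers'))  # -> 'view:teachers granted'
# grant('view', 'nope') raises: Permission name nope not in namespace view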
+{"seq_id":"380796232","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n substrings = []\n \n for i in range(len(s)):\n ptr = i\n dict = {}\n str = \"\"\n \n while ptr < len(s) and not dict.get(s[ptr]):\n dict[s[ptr]] = True\n str += s[ptr]\n ptr += 1\n \n substrings.append(len(str)) \n \n return max(substrings) if substrings else 0","sub_path":"LeetCode/Python/Medium/3-LongestSubstringWithoutRepeatingChars.py","file_name":"3-LongestSubstringWithoutRepeatingChars.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
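The solution above rescans from every index, which is O(n^2) time. The standard sliding-window formulation returns the same answers in a single pass; a standalone sketch:

def length_of_longest_substring(s: str) -> int:
    last_seen, start, best = {}, 0, 0
    for i, ch in enumerate(s):
        if ch in last_seen and last_seen[ch] >= start:
            start = last_seen[ch] + 1  # jump past the previous occurrence
        last_seen[ch] = i
        best = max(best, i - start + 1)
    return best

assert length_of_longest_substring('abcabcbb') == 3  # 'abc'
assert length_of_longest_substring('') == 0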
+{"seq_id":"632287756","text":"import pygame\nfrom pygame.locals import *\n\nfrom foreverdrive.base import ForeverMain\nfrom foreverdrive.sprite import Sprite\n\ndef main():\n game = ForeverMain()\n sprite = Sprite()\n game.listen_arrows(sprite.handle_event)\n group = pygame.sprite.RenderUpdates()\n group.add(sprite)\n game.groups.append(group)\n game.run()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"demos/test_sprite.py","file_name":"test_sprite.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"170719291","text":"import re\nfrom os.path import join, dirname\n\nfrom nose.tools import assert_equal\n\nfrom semanticizest import Semanticizer\nfrom semanticizest._semanticizer import create_model\n\n\n# create in-memory db\ndb = create_model(join(dirname(__file__),\n 'nlwiki-20140927-pages-articles-sample.xml'), 'nlwiki_test.sqlite3')\nsem = Semanticizer('nlwiki_test.sqlite3', N=2)\n\n\ndef test_semanticizer():\n text = \"\"\"Aangezien de aarde een planeet is, kunnen de aardwetenschappen\nook als een tak van de planetologie beschouwd worden. Aardwetenschappelijke\nkennis, met name geomorfologie, wordt bijvoorbeeld ook toegepast voor de\nzoektocht naar sporen van water, sneeuw en ijs op de planeet Mars.\"\"\"\n tokens = re.split(r'\\W+', text)\n\n expected = set(['Planeet', 'Planetologie', 'Kennis (wetenschap)',\n 'Geomorfologie', 'Mars (planeet)'])\n concepts = set(string for _, _, string, _ in sem.all_candidates(tokens))\n\n assert_equal(expected, concepts)\n\n\ndef test_semanticizer_redirect():\n text = \"\"\"In 1902 werkte hij even bij een Architekt.\"\"\"\n tokens = re.split(r'\\W+', text)\n\n expected = set(['Architect'])\n actual = set(string for _, _, string, _ in sem.all_candidates(tokens))\n\n assert_equal(expected, actual)\n","sub_path":"semanticizest/tests/test_semanticizer.py","file_name":"test_semanticizer.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"374996327","text":"import torch.nn as nn\nimport torch.nn.functional as F\nfrom old_code.layers import GraphConvolution, CreateKernal\nimport torch\nimport pdb\nclass GCN(nn.Module):\n def __init__(self, nfeat, nhid, nclass, emb_size, ker_size):\n super(GCN, self).__init__()\n\n self.gc1_0 = CreateKernal(emb_size, ker_size)\n self.gc1_1 = GraphConvolution(4, 256, ker_size)\n self.gc1_2 = nn.LeakyReLU(0.01)\n\n self.gc2_0 = CreateKernal(emb_size, ker_size)\n self.gc2_1 = GraphConvolution(4+256, 128, ker_size)\n self.gc2_2 = nn.LeakyReLU(0.01)\n\n self.gc3_0 = CreateKernal(emb_size, ker_size)\n self.gc3_1 = GraphConvolution(4+256+128, 64, ker_size)\n self.gc3_2 = nn.LeakyReLU(0.01)\n\n self.gc4_0 = CreateKernal(emb_size, ker_size)\n self.gc4_1 = GraphConvolution(4+256+128+64, 32, ker_size)\n self.gc4_2 = nn.LeakyReLU(0.01)\n\n def forward(self, data):\n\n domain = data._x[:, :3]\n diff = domain[data._edge_idx[0, :], :] - domain[data._edge_idx[1, :], :]\n\n x1 = self.gc1_0(diff)\n x1 = self.gc1_1(data, x1)\n x1 = self.gc1_2(x1)\n data._x = torch.cat((x1,data._x),1)\n \n x2 = self.gc2_0(diff)\n x2 = self.gc2_1(data, x2)\n x2 = self.gc2_2(x2)\n data._x = torch.cat((x2,data._x),1)\n\n x3 = self.gc3_0(diff)\n x3 = self.gc3_1(data, x3)\n x3 = self.gc3_2(x3)\n data._x = torch.cat((x3,data._x),1)\n\n x4 = self.gc4_0(diff)\n x4 = self.gc4_1(data, x4)\n x4 = self.gc4_2(x4)\n\n\n return x4\n","sub_path":"old_code/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
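Each block's output is concatenated back onto data._x, so the GraphConvolution input widths grow DenseNet-style: 4, 4+256, 4+256+128, 4+256+128+64. A tiny sketch of just that concatenation, with random tensors standing in for the real graph convolutions:

import torch

x = torch.randn(5, 4)  # 5 nodes, 4 input features
for width in (256, 128, 64):
    h = torch.relu(torch.randn(5, width))  # stand-in for a GraphConvolution output
    x = torch.cat((h, x), dim=1)           # the next block sees all earlier features
print(x.shape)  # torch.Size([5, 452]) == 4 + 256 + 128 + 64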
+{"seq_id":"3688952","text":"import math\n\n\nclass complex_number:\n    def __init__(self, real=0, imag=0):\n        self.real = real\n        self.imag = imag\n\n    def __add__(self, other):\n        return complex_number(self.real + other.real,\n                              self.imag + other.imag)\n\n    def __sub__(self, other):\n        return complex_number(self.real - other.real,\n                              self.imag - other.imag)\n\n    def __mul__(self, other):\n        return complex_number(self.real*other.real - self.imag*other.imag,\n                              self.imag*other.real + self.real*other.imag)\n    \n    def __truediv__(self, other):\n        sr, si, o_r, oi = self.real, self.imag,other.real, other.imag # short forms\n        r = float(o_r**2 + oi**2)\n        # fixed: return a complex_number, not a bare tuple\n        return complex_number((sr*o_r+si*oi)/r, (si*o_r-sr*oi)/r)\n\n    def abs(self):\n        return math.sqrt(self.real**2 + self.imag**2)\n    \n    def conjugate(self): \n        return complex_number(self.real, -self.imag)\n    \n    \n    def argument(self): \n        # atan2 handles real == 0 and picks the correct quadrant\n        return math.degrees(math.atan2(self.imag, self.real))\n    \n    def __str__(self):\n        if self.imag > 0:\n            return str(self.real)+'+'+'i'+str(self.imag) \n        else:\n            return str(self.real)+'-'+'i'+str(abs(self.imag))\n    \n\nprint('Select operation.')\nprint('1.Add')\nprint('2.Subtract')\nprint('3.Multiply')\nprint('4.Divide')\nprint('5.Absolute')\nprint('6.Conjugate')\nprint('7.Argument')\n\n\n# Take input from the user \nchoice = input('Enter choice(1/2/3/4/5/6/7):')\n\nnum1 = int(input('Enter First_Real number: '))\nnum2 = int(input('Enter First_imag number: '))\nnum3 = int(input('Enter Second_Real number: '))\nnum4 = int(input('Enter Second_imag number: '))\nc1 = complex_number(num1,num2)\nc2 = complex_number(num3,num4)\nif choice == '1':\n    print(c1,'+',c2,'=', (c1+c2))\n\nelif choice == '2':\n    print(c1,'-',c2,'=',(c1-c2))\n\nelif choice == '3':\n    print(c1,'*',c2,'=',(c1*c2))\n\nelif choice == '4':\n    print(c1,'/',c2,'=',(c1/c2))\n\nelif choice == '5':\n    print(c1,'=',c1.abs())\n\nelif choice == '6':\n    print(c1 ,'=',c1.conjugate())\n\nelif choice == '7':\n    print(c1 ,'=',c1.argument())\n\n\nelse:\n    print('Invalid input')\n\n\n\n","sub_path":"q01_create_class/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
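With the method names corrected, the arithmetic is easy to sanity-check against Python's built-in complex type:

a, b = complex_number(3, 4), complex_number(1, -2)
print(a + b)           # 4+i2
print(a * b)           # 11-i2, matching (3+4j)*(1-2j)
q = a / b
print(q.real, q.imag)  # -1.0 2.0, matching (3+4j)/(1-2j)
print(a.abs())         # 5.0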